]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
Add support for secidx relocations to aarch64-w64-mingw32
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
41 /* Types of processor to assemble for. */
42 #ifndef CPU_DEFAULT
43 #define CPU_DEFAULT AARCH64_ARCH_V8
44 #endif
45
46 #define streq(a, b) (strcmp (a, b) == 0)
47
48 #define END_OF_INSN '\0'
49
50 static aarch64_feature_set cpu_variant;
51
52 /* Variables that we set while parsing command-line options. Once all
53 options have been read we re-process these values to set the real
54 assembly flags. */
55 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
56 static const aarch64_feature_set *march_cpu_opt = NULL;
57
58 /* Constants for known architecture features. */
59 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
60
61 /* Currently active instruction sequence. */
62 static aarch64_instr_sequence *insn_sequence = NULL;
63
64 #ifdef OBJ_ELF
65 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
66 static symbolS *GOT_symbol;
67 #endif
68
69 /* Which ABI to use. */
70 enum aarch64_abi_type
71 {
72 AARCH64_ABI_NONE = 0,
73 AARCH64_ABI_LP64 = 1,
74 AARCH64_ABI_ILP32 = 2,
75 AARCH64_ABI_LLP64 = 3
76 };
77
78 unsigned int aarch64_sframe_cfa_sp_reg;
79 /* The other CFA base register for SFrame unwind info. */
80 unsigned int aarch64_sframe_cfa_fp_reg;
81 unsigned int aarch64_sframe_cfa_ra_reg;
82
83 #ifndef DEFAULT_ARCH
84 #define DEFAULT_ARCH "aarch64"
85 #endif
86
87 #ifdef OBJ_ELF
88 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
89 static const char *default_arch = DEFAULT_ARCH;
90 #endif
91
92 /* AArch64 ABI for the output file. */
93 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
94
95 /* When non-zero, program to a 32-bit model, in which the C data types
96 int, long and all pointer types are 32-bit objects (ILP32); or to a
97 64-bit model, in which the C int type is 32-bits but the C long type
98 and all pointer types are 64-bit objects (LP64). */
99 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
100
101 /* When non zero, C types int and long are 32 bit,
102 pointers, however are 64 bit */
103 #define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
104
105 enum vector_el_type
106 {
107 NT_invtype = -1,
108 NT_b,
109 NT_h,
110 NT_s,
111 NT_d,
112 NT_q,
113 NT_zero,
114 NT_merge
115 };
116
117 /* SME horizontal or vertical slice indicator, encoded in "V".
118 Values:
119 0 - Horizontal
120 1 - vertical
121 */
122 enum sme_hv_slice
123 {
124 HV_horizontal = 0,
125 HV_vertical = 1
126 };
127
128 /* Bits for DEFINED field in vector_type_el. */
129 #define NTA_HASTYPE 1
130 #define NTA_HASINDEX 2
131 #define NTA_HASVARWIDTH 4
132
133 struct vector_type_el
134 {
135 enum vector_el_type type;
136 unsigned char defined;
137 unsigned width;
138 int64_t index;
139 };
140
141 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
142
143 struct reloc
144 {
145 bfd_reloc_code_real_type type;
146 expressionS exp;
147 int pc_rel;
148 enum aarch64_opnd opnd;
149 uint32_t flags;
150 unsigned need_libopcodes_p : 1;
151 };
152
153 struct aarch64_instruction
154 {
155 /* libopcodes structure for instruction intermediate representation. */
156 aarch64_inst base;
157 /* Record assembly errors found during the parsing. */
158 struct
159 {
160 enum aarch64_operand_error_kind kind;
161 const char *error;
162 } parsing_error;
163 /* The condition that appears in the assembly line. */
164 int cond;
165 /* Relocation information (including the GAS internal fixup). */
166 struct reloc reloc;
167 /* Need to generate an immediate in the literal pool. */
168 unsigned gen_lit_pool : 1;
169 };
170
171 typedef struct aarch64_instruction aarch64_instruction;
172
173 static aarch64_instruction inst;
174
175 static bool parse_operands (char *, const aarch64_opcode *);
176 static bool programmer_friendly_fixup (aarch64_instruction *);
177
178 /* Diagnostics inline function utilities.
179
180 These are lightweight utilities which should only be called by parse_operands
181 and other parsers. GAS processes each assembly line by parsing it against
182 instruction template(s), in the case of multiple templates (for the same
183 mnemonic name), those templates are tried one by one until one succeeds or
184 all fail. An assembly line may fail a few templates before being
185 successfully parsed; an error saved here in most cases is not a user error
186 but an error indicating the current template is not the right template.
187 Therefore it is very important that errors can be saved at a low cost during
188 the parsing; we don't want to slow down the whole parsing by recording
189 non-user errors in detail.
190
191 Remember that the objective is to help GAS pick up the most appropriate
192 error message in the case of multiple templates, e.g. FMOV which has 8
193 templates. */
194
195 static inline void
196 clear_error (void)
197 {
198 inst.parsing_error.kind = AARCH64_OPDE_NIL;
199 inst.parsing_error.error = NULL;
200 }
201
/* Return TRUE if a diagnostic has been recorded for the instruction
   currently being parsed.  */

static inline bool
error_p (void)
{
  return inst.parsing_error.kind != AARCH64_OPDE_NIL;
}
207
/* Return the message text of the recorded diagnostic.  May be NULL:
   set_default_error records a diagnostic with no message so that the
   operand's DESC field is used to compose the message instead.  */

static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}
213
/* Return the kind of the recorded diagnostic; AARCH64_OPDE_NIL when no
   error has been recorded.  */

static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}
219
220 static inline void
221 set_error (enum aarch64_operand_error_kind kind, const char *error)
222 {
223 inst.parsing_error.kind = kind;
224 inst.parsing_error.error = error;
225 }
226
/* Record a recoverable (AARCH64_OPDE_RECOVERABLE) diagnostic with
   message ERROR: the current template failed but another template for
   the same mnemonic may still match.  */

static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
232
/* Record a syntax error with no message text; the DESC field of the
   corresponding aarch64_operand entry is later used to compose the
   error message.  */

static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
240
/* Record a syntax error with message ERROR, overwriting any previously
   recorded diagnostic.  */

static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
246
247 static inline void
248 set_first_syntax_error (const char *error)
249 {
250 if (! error_p ())
251 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
252 }
253
/* Record a fatal (AARCH64_OPDE_FATAL_SYNTAX_ERROR) syntax error with
   message ERROR; used e.g. by aarch64_get_expression for a bad
   expression following an explicit '#' immediate prefix.  */

static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
259 \f
260 /* Return value for certain parsers when the parsing fails; those parsers
261 return the information of the parsed result, e.g. register number, on
262 success. */
263 #define PARSE_FAIL -1
264
265 /* This is an invalid condition code that means no conditional field is
266 present. */
267 #define COND_ALWAYS 0x10
268
269 typedef struct
270 {
271 const char *template;
272 uint32_t value;
273 } asm_nzcv;
274
275 struct reloc_entry
276 {
277 char *name;
278 bfd_reloc_code_real_type reloc;
279 };
280
281 /* Macros to define the register types and masks for the purpose
282 of parsing. */
283
284 #undef AARCH64_REG_TYPES
285 #define AARCH64_REG_TYPES \
286 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
287 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
288 BASIC_REG_TYPE(SP_32) /* wsp */ \
289 BASIC_REG_TYPE(SP_64) /* sp */ \
290 BASIC_REG_TYPE(Z_32) /* wzr */ \
291 BASIC_REG_TYPE(Z_64) /* xzr */ \
292 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
293 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
294 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
295 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
296 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
297 BASIC_REG_TYPE(VN) /* v[0-31] */ \
298 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
299 BASIC_REG_TYPE(PN) /* p[0-15] */ \
300 BASIC_REG_TYPE(ZA) /* za[0-15] */ \
301 BASIC_REG_TYPE(ZAH) /* za[0-15]h */ \
302 BASIC_REG_TYPE(ZAV) /* za[0-15]v */ \
303 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
304 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
305 /* Typecheck: same, plus SVE registers. */ \
306 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
307 | REG_TYPE(ZN)) \
308 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
309 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
310 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
311 /* Typecheck: same, plus SVE registers. */ \
312 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
313 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
314 | REG_TYPE(ZN)) \
315 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
316 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
317 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
318 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
319 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
320 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
321 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
322 /* Typecheck: any [BHSDQ]P FP. */ \
323 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
324 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
325 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
326 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
327 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
328 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
329 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
330 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
331 be used for SVE instructions, since Zn and Pn are valid symbols \
332 in other contexts. */ \
333 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
334 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
335 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
336 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
337 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
338 | REG_TYPE(ZN) | REG_TYPE(PN)) \
339 /* Any integer register; used for error messages only. */ \
340 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
341 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
342 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
343 /* Pseudo type to mark the end of the enumerator sequence. */ \
344 BASIC_REG_TYPE(MAX)
345
346 #undef BASIC_REG_TYPE
347 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
348 #undef MULTI_REG_TYPE
349 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
350
351 /* Register type enumerators. */
352 typedef enum aarch64_reg_type_
353 {
354 /* A list of REG_TYPE_*. */
355 AARCH64_REG_TYPES
356 } aarch64_reg_type;
357
358 #undef BASIC_REG_TYPE
359 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
360 #undef REG_TYPE
361 #define REG_TYPE(T) (1 << REG_TYPE_##T)
362 #undef MULTI_REG_TYPE
363 #define MULTI_REG_TYPE(T,V) V,
364
365 /* Structure for a hash table entry for a register. */
366 typedef struct
367 {
368 const char *name;
369 unsigned char number;
370 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
371 unsigned char builtin;
372 } reg_entry;
373
374 /* Values indexed by aarch64_reg_type to assist the type checking. */
375 static const unsigned reg_type_masks[] =
376 {
377 AARCH64_REG_TYPES
378 };
379
380 #undef BASIC_REG_TYPE
381 #undef REG_TYPE
382 #undef MULTI_REG_TYPE
383 #undef AARCH64_REG_TYPES
384
/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be synchronized with the aarch64_reg_type
   definitions above.  */
388 static const char *
389 get_reg_expected_msg (aarch64_reg_type reg_type)
390 {
391 const char *msg;
392
393 switch (reg_type)
394 {
395 case REG_TYPE_R_32:
396 msg = N_("integer 32-bit register expected");
397 break;
398 case REG_TYPE_R_64:
399 msg = N_("integer 64-bit register expected");
400 break;
401 case REG_TYPE_R_N:
402 msg = N_("integer register expected");
403 break;
404 case REG_TYPE_R64_SP:
405 msg = N_("64-bit integer or SP register expected");
406 break;
407 case REG_TYPE_SVE_BASE:
408 msg = N_("base register expected");
409 break;
410 case REG_TYPE_R_Z:
411 msg = N_("integer or zero register expected");
412 break;
413 case REG_TYPE_SVE_OFFSET:
414 msg = N_("offset register expected");
415 break;
416 case REG_TYPE_R_SP:
417 msg = N_("integer or SP register expected");
418 break;
419 case REG_TYPE_R_Z_SP:
420 msg = N_("integer, zero or SP register expected");
421 break;
422 case REG_TYPE_FP_B:
423 msg = N_("8-bit SIMD scalar register expected");
424 break;
425 case REG_TYPE_FP_H:
426 msg = N_("16-bit SIMD scalar or floating-point half precision "
427 "register expected");
428 break;
429 case REG_TYPE_FP_S:
430 msg = N_("32-bit SIMD scalar or floating-point single precision "
431 "register expected");
432 break;
433 case REG_TYPE_FP_D:
434 msg = N_("64-bit SIMD scalar or floating-point double precision "
435 "register expected");
436 break;
437 case REG_TYPE_FP_Q:
438 msg = N_("128-bit SIMD scalar or floating-point quad precision "
439 "register expected");
440 break;
441 case REG_TYPE_R_Z_BHSDQ_V:
442 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
443 msg = N_("register expected");
444 break;
445 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
446 msg = N_("SIMD scalar or floating-point register expected");
447 break;
448 case REG_TYPE_VN: /* any V reg */
449 msg = N_("vector register expected");
450 break;
451 case REG_TYPE_ZN:
452 msg = N_("SVE vector register expected");
453 break;
454 case REG_TYPE_PN:
455 msg = N_("SVE predicate register expected");
456 break;
457 default:
458 as_fatal (_("invalid register type %d"), reg_type);
459 }
460 return msg;
461 }
462
463 /* Some well known registers that we refer to directly elsewhere. */
464 #define REG_SP 31
465 #define REG_ZR 31
466
467 /* Instructions take 4 bytes in the object file. */
468 #define INSN_SIZE 4
469
470 static htab_t aarch64_ops_hsh;
471 static htab_t aarch64_cond_hsh;
472 static htab_t aarch64_shift_hsh;
473 static htab_t aarch64_sys_regs_hsh;
474 static htab_t aarch64_pstatefield_hsh;
475 static htab_t aarch64_sys_regs_ic_hsh;
476 static htab_t aarch64_sys_regs_dc_hsh;
477 static htab_t aarch64_sys_regs_at_hsh;
478 static htab_t aarch64_sys_regs_tlbi_hsh;
479 static htab_t aarch64_sys_regs_sr_hsh;
480 static htab_t aarch64_reg_hsh;
481 static htab_t aarch64_barrier_opt_hsh;
482 static htab_t aarch64_nzcv_hsh;
483 static htab_t aarch64_pldop_hsh;
484 static htab_t aarch64_hint_opt_hsh;
485
486 /* Stuff needed to resolve the label ambiguity
487 As:
488 ...
489 label: <insn>
490 may differ from:
491 ...
492 label:
493 <insn> */
494
495 static symbolS *last_label_seen;
496
497 /* Literal pool structure. Held on a per-section
498 and per-sub-section basis. */
499
500 #define MAX_LITERAL_POOL_SIZE 1024
501 typedef struct literal_expression
502 {
503 expressionS exp;
504 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
505 LITTLENUM_TYPE * bignum;
506 } literal_expression;
507
508 typedef struct literal_pool
509 {
510 literal_expression literals[MAX_LITERAL_POOL_SIZE];
511 unsigned int next_free_entry;
512 unsigned int id;
513 symbolS *symbol;
514 segT section;
515 subsegT sub_section;
516 int size;
517 struct literal_pool *next;
518 } literal_pool;
519
520 /* Pointer to a linked list of literal pools. */
521 static literal_pool *list_of_pools = NULL;
522 \f
523 /* Pure syntax. */
524
525 /* This array holds the chars that always start a comment. If the
526 pre-processor is disabled, these aren't very useful. */
527 const char comment_chars[] = "";
528
529 /* This array holds the chars that only start a comment at the beginning of
530 a line. If the line seems to have the form '# 123 filename'
531 .line and .file directives will appear in the pre-processed output. */
532 /* Note that input_file.c hand checks for '#' at the beginning of the
533 first line of the input file. This is because the compiler outputs
534 #NO_APP at the beginning of its output. */
535 /* Also note that comments like this one will always work. */
536 const char line_comment_chars[] = "#";
537
538 const char line_separator_chars[] = ";";
539
540 /* Chars that can be used to separate mant
541 from exp in floating point numbers. */
542 const char EXP_CHARS[] = "eE";
543
544 /* Chars that mean this number is a floating point constant. */
545 /* As in 0f12.456 */
546 /* or 0d1.2345e12 */
547
548 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";
549
550 /* Prefix character that indicates the start of an immediate value. */
551 #define is_immediate_prefix(C) ((C) == '#')
552
553 /* Separator character handling. */
554
555 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
556
/* If *STR currently points at character C, step past it and return
   TRUE; otherwise leave *STR unchanged and return FALSE.  */

static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
568
569 #define skip_past_comma(str) skip_past_char (str, ',')
570
571 /* Arithmetic expressions (possibly involving symbols). */
572
573 static bool in_aarch64_get_expression = false;
574
575 /* Third argument to aarch64_get_expression. */
576 #define GE_NO_PREFIX false
577 #define GE_OPT_PREFIX true
578
579 /* Fourth argument to aarch64_get_expression. */
580 #define ALLOW_ABSENT false
581 #define REJECT_ABSENT true
582
/* Return TRUE if the string pointed to by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.  */
589
static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  /* Optionally consume a leading '#' immediate marker.  */
  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* Temporarily point the global input_line_pointer at *STR so the
     generic expression () parser can be reused; it is restored on
     every exit path below.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* While set, md_operand marks unparsable operands as O_illegal so we
     can detect them here.  */
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* An explicit '#' prefix makes the failure fatal (no point trying
	 further templates); otherwise keep only the first error.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
649
650 /* Turn a string in input_line_pointer into a floating point constant
651 of type TYPE, and store the appropriate bytes in *LITP. The number
652 of LITTLENUMS emitted is stored in *SIZEP. An error message is
653 returned, or NULL on OK. */
654
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* All supported FP encodings are handled by the shared IEEE routine,
     honouring the target's endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
660
661 /* We handle all bad expressions here, so that we can report the faulty
662 instruction in the error message. */
663 void
664 md_operand (expressionS * exp)
665 {
666 if (in_aarch64_get_expression)
667 exp->X_op = O_illegal;
668 }
669
670 /* Immediate values. */
671
672 /* Errors may be set multiple times during parsing or bit encoding
673 (particularly in the Neon bits), but usually the earliest error which is set
674 will be the most meaningful. Avoid overwriting it with later (cascading)
675 errors by calling this function. */
676
/* Record ERROR as a syntax error unless an earlier (and usually more
   meaningful) diagnostic has already been recorded.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
683
684 /* Similar to first_error, but this function accepts formatted error
685 message. */
686 static void
687 first_error_fmt (const char *format, ...)
688 {
689 va_list args;
690 enum
691 { size = 100 };
692 /* N.B. this single buffer will not cause error messages for different
693 instructions to pollute each other; this is because at the end of
694 processing of each assembly line, error message if any will be
695 collected by as_bad. */
696 static char buffer[size];
697
698 if (! error_p ())
699 {
700 int ret ATTRIBUTE_UNUSED;
701 va_start (args, format);
702 ret = vsnprintf (buffer, size, format, args);
703 know (ret <= size - 1 && ret >= 0);
704 va_end (args);
705 set_syntax_error (buffer);
706 }
707 }
708
709 /* Register parsing. */
710
711 /* Generic register parser which is called by other specialized
712 register parsers.
713 CCP points to what should be the beginning of a register name.
714 If it is indeed a valid register name, advance CCP over it and
715 return the reg_entry structure; otherwise return NULL.
716 It does not issue diagnostics. */
717
718 static reg_entry *
719 parse_reg (char **ccp)
720 {
721 char *start = *ccp;
722 char *p;
723 reg_entry *reg;
724
725 #ifdef REGISTER_PREFIX
726 if (*start != REGISTER_PREFIX)
727 return NULL;
728 start++;
729 #endif
730
731 p = start;
732 if (!ISALPHA (*p) || !is_name_beginner (*p))
733 return NULL;
734
735 do
736 p++;
737 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
738
739 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
740
741 if (!reg)
742 return NULL;
743
744 *ccp = p;
745 return reg;
746 }
747
748 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
749 return FALSE. */
750 static bool
751 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
752 {
753 return (reg_type_masks[type] & (1 << reg->type)) != 0;
754 }
755
756 /* Try to parse a base or offset register. Allow SVE base and offset
757 registers if REG_TYPE includes SVE registers. Return the register
758 entry on success, setting *QUALIFIER to the register qualifier.
759 Return null otherwise.
760
761 Note that this function does not issue any diagnostics. */
762
763 static const reg_entry *
764 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
765 aarch64_opnd_qualifier_t *qualifier)
766 {
767 char *str = *ccp;
768 const reg_entry *reg = parse_reg (&str);
769
770 if (reg == NULL)
771 return NULL;
772
773 switch (reg->type)
774 {
775 case REG_TYPE_R_32:
776 case REG_TYPE_SP_32:
777 case REG_TYPE_Z_32:
778 *qualifier = AARCH64_OPND_QLF_W;
779 break;
780
781 case REG_TYPE_R_64:
782 case REG_TYPE_SP_64:
783 case REG_TYPE_Z_64:
784 *qualifier = AARCH64_OPND_QLF_X;
785 break;
786
787 case REG_TYPE_ZN:
788 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
789 || str[0] != '.')
790 return NULL;
791 switch (TOLOWER (str[1]))
792 {
793 case 's':
794 *qualifier = AARCH64_OPND_QLF_S_S;
795 break;
796 case 'd':
797 *qualifier = AARCH64_OPND_QLF_S_D;
798 break;
799 default:
800 return NULL;
801 }
802 str += 2;
803 break;
804
805 default:
806 return NULL;
807 }
808
809 *ccp = str;
810
811 return reg;
812 }
813
814 /* Try to parse a base or offset register. Return the register entry
815 on success, setting *QUALIFIER to the register qualifier. Return null
816 otherwise.
817
818 Note that this function does not issue any diagnostics. */
819
static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept any 32/64-bit integer register, including {W}SP and [WX]ZR;
     REG_TYPE_R_Z_SP excludes the SVE base/offset forms.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
825
826 /* Parse the qualifier of a vector register or vector element of type
827 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
828 succeeds; otherwise return FALSE.
829
830 Accept only one occurrence of:
831 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
832 b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector (Zn) and predicate (Pn) registers take a bare element
     size with no lane count; a missing leading digit likewise means no
     explicit width (width == 0).  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  /* Only the lane counts used by the AdvSIMD arrangements are valid.  */
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are only valid for SVE registers or as the
	 single-lane "1q" arrangement.  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* When an explicit lane count was given, the total must be a 64- or
     128-bit vector, or one of the short 2h / 4b arrangements.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return true;
}
911
912 /* *STR contains an SVE zero/merge predication suffix. Parse it into
913 *PARSED_TYPE and point *STR at the end of the suffix. */
914
915 static bool
916 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
917 {
918 char *ptr = *str;
919
920 /* Skip '/'. */
921 gas_assert (*ptr == '/');
922 ptr++;
923 switch (TOLOWER (*ptr))
924 {
925 case 'z':
926 parsed_type->type = NT_zero;
927 break;
928 case 'm':
929 parsed_type->type = NT_merge;
930 break;
931 default:
932 if (*ptr != '\0' && *ptr != ',')
933 first_error_fmt (_("unexpected character `%c' in predication type"),
934 *ptr);
935 else
936 first_error (_("missing predication type"));
937 return false;
938 }
939 parsed_type->width = 0;
940 *str = ptr + 1;
941 return true;
942 }
943
944 /* Parse a register of the type TYPE.
945
946 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
947 name or the parsed register is not of TYPE.
948
949 Otherwise return the register number, and optionally fill in the actual
950 type of the register in *RTYPE when multiple alternatives were given, and
951 return the register shape and element index information in *TYPEINFO.
952
953 IN_REG_LIST should be set with TRUE if the caller is parsing a register
954 list. */
955
static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bool in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start from "no shape information".  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE from the requested (possibly multi-valued) type to the
     register's actual type.  */
  type = reg->type;

  /* Parse an optional shape (".<T>") or, for predicates, a predication
     ("/z" or "/m") suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* Parse an optional element index ("[<imm>]").  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1079
1080 /* Parse register.
1081
1082 Return the register number on success; return PARSE_FAIL otherwise.
1083
1084 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1085 the register (e.g. NEON double or quad reg when either has been requested).
1086
1087 If this is a NEON vector register with additional type information, fill
1088 in the struct pointed to by VECTYPE (if non-NULL).
1089
1090 This parser does not handle register list. */
1091
1092 static int
1093 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1094 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1095 {
1096 struct vector_type_el atype;
1097 char *str = *ccp;
1098 int reg = parse_typed_reg (&str, type, rtype, &atype,
1099 /*in_reg_list= */ false);
1100
1101 if (reg == PARSE_FAIL)
1102 return PARSE_FAIL;
1103
1104 if (vectype)
1105 *vectype = atype;
1106
1107 *ccp = str;
1108
1109 return reg;
1110 }
1111
1112 static inline bool
1113 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1114 {
1115 return
1116 e1.type == e2.type
1117 && e1.defined == e2.defined
1118 && e1.width == e2.width && e1.index == e2.index;
1119 }
1120
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11   |   2-6    |  0-1
     4th regno   | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      /* IN_RANGE is set at the bottom of the loop when the previous
	 separator was '-': VAL then still holds the start register of
	 the dashed range.  */
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ true);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      /* An index on any element means the whole list must carry a
	 trailing [index]; checked after the closing '}'.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Record every register of the (possibly one-element) range in
	 the packed 5-bits-per-regno result.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue after ',' as usual; a '-' instead starts a dashed range
     (the comma operator sets IN_RANGE before testing for '-').  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    /* The single trailing index applies to the whole list.  */
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1281
/* Directives: register aliases.  */

/* Record STR as an alias for register NUMBER of type TYPE in the
   register hash table.  Return the new entry, or NULL if STR already
   names a register (a warning is issued unless it is an identical
   redefinition).  */

static reg_entry *
insert_reg_alias (char *str, int number, aarch64_reg_type type)
{
  reg_entry *new;
  const char *name;

  if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
    {
      if (new->builtin)
	as_warn (_("ignoring attempt to redefine built-in register '%s'"),
		 str);

      /* Only warn about a redefinition if it's not defined as the
	 same register.  */
      else if (new->number != number || new->type != type)
	as_warn (_("ignoring redefinition of register alias '%s'"), str);

      return NULL;
    }

  /* The hash table keeps a pointer to the name, so give the entry its
     own copy.  */
  name = xstrdup (str);
  new = XNEW (reg_entry);

  new->name = name;
  new->number = number;
  new->type = type;
  new->builtin = false;

  str_hash_insert (aarch64_reg_hsh, name, new, 0);

  return new;
}
1316
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.

   NEWNAME points to the alias being defined and P to the text that
   follows it on the line.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the upper-case variant if it differs from the name
	 as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  /* insert_reg_alias copies the name it keeps, so the scratch buffer
     can always be released here.  */
  free (nbuf);
  return true;
}
1396
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* If we get here, ".req" appeared in statement position, which is
     always a syntax error; the valid form is handled by
     create_register_alias.  */
  as_bad (_("invalid syntax for .req directive"));
}
1404
1405 /* The .unreq directive deletes an alias which was previously defined
1406 by .req. For example:
1407
1408 my_alias .req r11
1409 .unreq my_alias */
1410
1411 static void
1412 s_unreq (int a ATTRIBUTE_UNUSED)
1413 {
1414 char *name;
1415 char saved_char;
1416
1417 name = input_line_pointer;
1418 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1419 saved_char = *input_line_pointer;
1420 *input_line_pointer = 0;
1421
1422 if (!*name)
1423 as_bad (_("invalid syntax for .unreq directive"));
1424 else
1425 {
1426 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1427
1428 if (!reg)
1429 as_bad (_("unknown register alias '%s'"), name);
1430 else if (reg->builtin)
1431 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1432 name);
1433 else
1434 {
1435 char *p;
1436 char *nbuf;
1437
1438 str_hash_delete (aarch64_reg_hsh, name);
1439 free ((char *) reg->name);
1440 free (reg);
1441
1442 /* Also locate the all upper case and all lower case versions.
1443 Do not complain if we cannot find one or the other as it
1444 was probably deleted above. */
1445
1446 nbuf = strdup (name);
1447 for (p = nbuf; *p; p++)
1448 *p = TOUPPER (*p);
1449 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1450 if (reg)
1451 {
1452 str_hash_delete (aarch64_reg_hsh, nbuf);
1453 free ((char *) reg->name);
1454 free (reg);
1455 }
1456
1457 for (p = nbuf; *p; p++)
1458 *p = TOLOWER (*p);
1459 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1460 if (reg)
1461 {
1462 str_hash_delete (aarch64_reg_hsh, nbuf);
1463 free ((char *) reg->name);
1464 free (reg);
1465 }
1466
1467 free (nbuf);
1468 }
1469 }
1470
1471 *input_line_pointer = saved_char;
1472 demand_empty_rest_of_line ();
1473 }
1474
1475 /* Directives: Instruction set selection. */
1476
1477 #if defined OBJ_ELF || defined OBJ_COFF
1478 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1479 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1480 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1481 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1482
/* Create a new mapping symbol for the transition to STATE, placed at
   offset VALUE inside FRAG.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Pick the AArch64 mapping symbol name: "$d" marks data, "$x" marks
     A64 instructions.  Both are untyped local symbols.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1538
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Emit a "$d" symbol at offset VALUE of FRAG to cover BYTES bytes of
   padding, then re-emit the STATE symbol just past the padding.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
	 frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1566
1567 static void mapping_state_2 (enum mstate state, int max_chars);
1568
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
			     || (frag_now_fix () > 0);

      if (add_symbol)
	/* Mark everything before this point in the section as data.  */
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1610
/* Same as mapping_state, but MAX_CHARS bytes have already been
   allocated.  Put the mapping symbol that far back.  */

static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only real, loadable sections get mapping symbols.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
1630 #else
1631 #define mapping_state(x) /* nothing */
1632 #define mapping_state_2(x, y) /* nothing */
1633 #endif
1634
1635 /* Directives: sectioning and alignment. */
1636
/* Handle the .bss directive: switch the current (sub)segment to bss.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1646
/* Handle the .even directive: align the current location to a 2-byte
   boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1658
1659 /* Directives: Literal pools. */
1660
1661 static literal_pool *
1662 find_literal_pool (int size)
1663 {
1664 literal_pool *pool;
1665
1666 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1667 {
1668 if (pool->section == now_seg
1669 && pool->sub_section == now_subseg && pool->size == size)
1670 break;
1671 }
1672
1673 return pool;
1674 }
1675
/* Return the literal pool of entry size SIZE for the current
   (sub)section, creating it — and its label symbol — if necessary.  */

static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = XNEW (literal_pool);
      /* NOTE(review): XNEW is xmalloc-based and normally aborts on
	 allocation failure, so this NULL check looks unreachable —
	 confirm before relying on the NULL return in callers.  */
      if (!pool)
	return NULL;

      /* Currently we always put the literal pool in the current text
	 section.  If we were generating "small" model code where we
	 knew that all code and initialised data was within 1MB then
	 we could output literals to mergeable, read-only data
	 sections.  */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      /* The symbol is given its real location later, by s_ltorg.  */
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    &zero_address_frag, 0);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}
1720
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.

   On success *EXP is rewritten in place to refer to the pool symbol
   plus the entry's byte offset, ready for the caller to emit a
   pool-relative reference.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness...  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* ...symbolic expressions on symbol identity plus addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Redirect the expression at the pool entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1780
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Give the pre-created SYMBOLP its NAME, SEGMENT, value VALU and FRAG,
   then append it to the global symbol chain and run the usual
   new-symbol hooks.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns stable
     storage.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1831
1832
/* Handle the .ltorg/.pool directive: dump every non-empty literal pool
   of the current (sub)section (entry sizes 4, 8 and 16 bytes) at the
   current location, then mark the pools as emptied.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* align 2..4 covers the pool entry sizes 4, 8 and 16 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 byte keeps the generated label out of the user's
	 namespace.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* The pool symbol was created with a fake location; pin it to
	 the spot where the pool is actually emitted.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1891
1892 #if defined(OBJ_ELF) || defined(OBJ_COFF)
1893 /* Forward declarations for functions below, in the MD interface
1894 section. */
1895 static struct reloc_table_entry * find_reloc_table_entry (char **);
1896
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Handle .word/.long/.xword/.dword: emit NBYTES-sized data items,
   rejecting the (unimplemented) ":reloc:" suffix syntax on symbolic
   operands.  */

static void
s_aarch64_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:reloc_suffix:" after the symbol.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		/* Known suffix, but data-directive relocations are not
		   supported yet (see N.B. above).  */
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1952 #endif
1953
1954 #ifdef OBJ_ELF
1955 /* Forward declarations for functions below, in the MD interface
1956 section. */
1957 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1958
1959 /* Mark symbol that it follows a variant PCS convention. */
1960
1961 static void
1962 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
1963 {
1964 char *name;
1965 char c;
1966 symbolS *sym;
1967 asymbol *bfdsym;
1968 elf_symbol_type *elfsym;
1969
1970 c = get_symbol_name (&name);
1971 if (!*name)
1972 as_bad (_("Missing symbol name in directive"));
1973 sym = symbol_find_or_make (name);
1974 restore_line_pointer (c);
1975 demand_empty_rest_of_line ();
1976 bfdsym = symbol_get_bfdsym (sym);
1977 elfsym = elf_symbol_from (bfdsym);
1978 gas_assert (elfsym);
1979 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
1980 }
1981 #endif /* OBJ_ELF */
1982
/* Output a 32-bit word, but mark as an instruction.

   Handles the .inst directive: each comma-separated constant operand
   is emitted as one 4-byte instruction word (byte-swapped on
   big-endian targets, since instructions are always little-endian),
   and the bytes are recorded as code for mapping symbols and DWARF
   line info.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instruction encodings are little-endian regardless of data
	 endianness, so pre-swap on big-endian targets.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2039
/* Handle the .cfi_b_key_frame directive: record that the current CFI
   FDE uses the B pointer-authentication key for return addresses.  */

static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2047
2048 #ifdef OBJ_ELF
/* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.  */

static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the instruction that follows
     this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2063
/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2083
/* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.  */

static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the instruction that follows
     this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2098 #endif /* OBJ_ELF */
2099
2100 #ifdef TE_PE
/* Handle the PE .secrel32 directive: emit each comma-separated operand
   as a 4-byte section-relative value (symbolic operands are rewritten
   to O_secrel so the backend emits a SECREL relocation).  */

static void
s_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2119
/* Emit a SIZE-byte section-relative reference to SYMBOL, used by the
   DWARF2 machinery on PE targets where debug offsets are
   section-relative rather than absolute.  */

void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
2130
/* Handle the PE .secidx directive: emit each comma-separated operand
   as a 2-byte section-index value (symbolic operands are rewritten to
   O_secidx so the backend emits a SECIDX relocation).  */

static void
s_secidx (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      if (exp.X_op == O_symbol)
	exp.X_op = O_secidx;

      emit_expr (&exp, 2);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2149 #endif /* TE_PE */
2150
2151 static void s_aarch64_arch (int);
2152 static void s_aarch64_cpu (int);
2153 static void s_aarch64_arch_extension (int);
2154
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  /* TLS descriptor and symbol-attribute directives (ELF only).  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* Data directives with (stub) relocation-suffix handling; the int
     argument is the item size in bytes.  */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  /* PE/COFF section-relative data directives.  */
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  /* The int argument selects the float format letter for float_cons.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2194 \f
2195
2196 /* Check whether STR points to a register name followed by a comma or the
2197 end of line; REG_TYPE indicates which register types are checked
2198 against. Return TRUE if STR is such a register name; otherwise return
2199 FALSE. The function does not intend to produce any diagnostics, but since
2200 the register parser aarch64_reg_parse, which is called by this function,
2201 does produce diagnostics, we call clear_error to clear any diagnostics
2202 that may be generated by aarch64_reg_parse.
2203 Also, the function returns FALSE directly if there is any user error
2204 present at the function entry. This prevents the existing diagnostics
2205 state from being spoiled.
2206 The function currently serves parse_constant_immediate and
2207 parse_big_immediate only. */
2208 static bool
2209 reg_name_p (char *str, aarch64_reg_type reg_type)
2210 {
2211 int reg;
2212
2213 /* Prevent the diagnostics state from being spoiled. */
2214 if (error_p ())
2215 return false;
2216
2217 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2218
2219 /* Clear the parsing error that may be set by the reg parser. */
2220 clear_error ();
2221
2222 if (reg == PARSE_FAIL)
2223 return false;
2224
2225 skip_whitespace (str);
2226 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2227 return true;
2228
2229 return false;
2230 }
2231
2232 /* Parser functions used exclusively in instruction operands. */
2233
/* Parse an immediate expression which may not be constant.

   To prevent the expression parser from pushing a register name
   into the symbol table as an undefined symbol, firstly a check is
   done to find out whether STR is a register of type REG_TYPE followed
   by a comma or the end of line.  Return FALSE if STR is such a string.

   On success *EXP holds the parsed expression and *STR is advanced
   past it.  */

static bool
parse_immediate_expression (char **str, expressionS *exp,
			    aarch64_reg_type reg_type)
{
  if (reg_name_p (*str, reg_type))
    {
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  /* GE_OPT_PREFIX: the leading '#' on the immediate is optional.  */
  aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);

  if (exp->X_op == O_absent)
    {
      set_fatal_syntax_error (_("missing immediate expression"));
      return false;
    }

  return true;
}
2261
/* Constant immediate-value read function for use in insn parsing.
   STR points to the beginning of the immediate (with the optional
   leading #); *VAL receives the value.  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
{
  expressionS exp;

  if (! parse_immediate_expression (str, &exp, reg_type))
    return false;

  /* Unlike parse_immediate_expression, symbolic values are rejected
     here: the caller needs a compile-time constant.  */
  if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant expression required"));
      return false;
    }

  *val = exp.X_add_number;
  return true;
}
2286
/* Compress the single-precision IEEE754 word IMM into the AArch64 8-bit
   floating-point immediate encoding: the sign bit b[31] moves to b[7] and
   the top exponent/mantissa bits b[25:19] become b[6:0].  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31] -> b[7].  */
  return sign | low7;
}
2293
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* An encodable value expands to a single-precision word of the form

       3 32222222 2221111111111
       1 09876543 21098765432109876543210
       n Eeeeeexx xxxx0000000000000000000

     where n, e and each x are either 0 or 1 independently, with
     E == ~e.  First reject anything whose low 19 mantissa bits are
     non-zero, then require bits 25-29 to all equal the complement of
     bit 30.  */

  if ((imm & 0x7ffff) != 0)
    return false;

  uint32_t expected = ((imm >> 30) & 0x1) ? 0x40000000 : 0x3e000000;
  return (imm & 0x7e000000) == expected;
}
2326
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

       6 66655555555 5544 44444444 33333333 33222222 22221111 111111
       3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
       n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* The lowest 29 mantissa bits must vanish in the conversion.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* The three '~' bits must be the complement of bit 30.  */
  uint32_t expected = ((high32 >> 30) & 0x1) ? 0x40000000 : 0x38000000;
  if ((high32 & 0x78000000) != expected)
    return false;

  /* Exponents of the form Eeee_eeee == 1111_1111 do not fit in a float.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));			/* 3 S bits.  */
  return true;
}
2374
2375 /* Return true if we should treat OPERAND as a double-precision
2376 floating-point operand rather than a single-precision one. */
2377 static bool
2378 double_precision_operand_p (const aarch64_opnd_info *operand)
2379 {
2380 /* Check for unsuffixed SVE registers, which are allowed
2381 for LDR and STR but not in instructions that require an
2382 immediate. We get better error messages if we arbitrarily
2383 pick one size, parse the immediate normally, and then
2384 report the match failure in the normal way. */
2385 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2386 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2387 }
2388
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* The leading '#' on the immediate is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* A double-precision bit pattern must convert to a float
	     without loss; the converted word goes into FPWORD.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* Out of range for a single-precision bit pattern.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name is never a valid FP immediate.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal notation: let the generic IEEE reader encode it as a
	 single-precision value.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).
	 Assemble it from the LITTLENUM chunks, most significant first.  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2464
2465 /* Less-generic immediate-value read function with the possibility of loading
2466 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2467 instructions.
2468
2469 To prevent the expression parser from pushing a register name into the
2470 symbol table as an undefined symbol, a check is firstly done to find
2471 out whether STR is a register of type REG_TYPE followed by a comma or
2472 the end of line. Return FALSE if STR is such a register. */
2473
2474 static bool
2475 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2476 {
2477 char *ptr = *str;
2478
2479 if (reg_name_p (ptr, reg_type))
2480 {
2481 set_syntax_error (_("immediate operand required"));
2482 return false;
2483 }
2484
2485 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);
2486
2487 if (inst.reloc.exp.X_op == O_constant)
2488 *imm = inst.reloc.exp.X_add_number;
2489
2490 *str = ptr;
2491
2492 return true;
2493 }
2494
2495 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2496 if NEED_LIBOPCODES is non-zero, the fixup will need
2497 assistance from the libopcodes. */
2498
2499 static inline void
2500 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2501 const aarch64_opnd_info *operand,
2502 int need_libopcodes_p)
2503 {
2504 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2505 reloc->opnd = operand->type;
2506 if (need_libopcodes_p)
2507 reloc->need_libopcodes_p = 1;
2508 };
2509
2510 /* Return TRUE if the instruction needs to be fixed up later internally by
2511 the GAS; otherwise return FALSE. */
2512
2513 static inline bool
2514 aarch64_gas_internal_fixup_p (void)
2515 {
2516 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2517 }
2518
2519 /* Assign the immediate value to the relevant field in *OPERAND if
2520 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2521 needs an internal fixup in a later stage.
2522 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2523 IMM.VALUE that may get assigned with the constant. */
2524 static inline void
2525 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2526 aarch64_opnd_info *operand,
2527 int addr_off_p,
2528 int need_libopcodes_p,
2529 int skip_p)
2530 {
2531 if (reloc->exp.X_op == O_constant)
2532 {
2533 if (addr_off_p)
2534 operand->addr.offset.imm = reloc->exp.X_add_number;
2535 else
2536 operand->imm.value = reloc->exp.X_add_number;
2537 reloc->type = BFD_RELOC_UNUSED;
2538 }
2539 else
2540 {
2541 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2542 /* Tell libopcodes to ignore this operand or not. This is helpful
2543 when one of the operands needs to be fixed up later but we need
2544 libopcodes to check the other operands. */
2545 operand->skip = skip_p;
2546 }
2547 }
2548
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  /* Modifier name as it appears in source, without the colons.  */
  const char *name;
  /* Non-zero if the resulting relocation is PC-relative.  */
  int pc_rel;
  /* Relocations selected by the instruction the modifier decorates;
     0 means the modifier is not valid for that instruction class.  */
  bfd_reloc_code_real_type adr_type;	/* ADR.  */
  bfd_reloc_code_real_type adrp_type;	/* ADRP.  */
  bfd_reloc_code_real_type movw_type;	/* MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;	/* ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;	/* Load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* LDR (literal).  */
};
2568
2569 static struct reloc_table_entry reloc_table[] =
2570 {
2571 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2572 {"lo12", 0,
2573 0, /* adr_type */
2574 0,
2575 0,
2576 BFD_RELOC_AARCH64_ADD_LO12,
2577 BFD_RELOC_AARCH64_LDST_LO12,
2578 0},
2579
2580 /* Higher 21 bits of pc-relative page offset: ADRP */
2581 {"pg_hi21", 1,
2582 0, /* adr_type */
2583 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2584 0,
2585 0,
2586 0,
2587 0},
2588
2589 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2590 {"pg_hi21_nc", 1,
2591 0, /* adr_type */
2592 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2593 0,
2594 0,
2595 0,
2596 0},
2597
2598 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2599 {"abs_g0", 0,
2600 0, /* adr_type */
2601 0,
2602 BFD_RELOC_AARCH64_MOVW_G0,
2603 0,
2604 0,
2605 0},
2606
2607 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2608 {"abs_g0_s", 0,
2609 0, /* adr_type */
2610 0,
2611 BFD_RELOC_AARCH64_MOVW_G0_S,
2612 0,
2613 0,
2614 0},
2615
2616 /* Less significant bits 0-15 of address/value: MOVK, no check */
2617 {"abs_g0_nc", 0,
2618 0, /* adr_type */
2619 0,
2620 BFD_RELOC_AARCH64_MOVW_G0_NC,
2621 0,
2622 0,
2623 0},
2624
2625 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2626 {"abs_g1", 0,
2627 0, /* adr_type */
2628 0,
2629 BFD_RELOC_AARCH64_MOVW_G1,
2630 0,
2631 0,
2632 0},
2633
2634 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2635 {"abs_g1_s", 0,
2636 0, /* adr_type */
2637 0,
2638 BFD_RELOC_AARCH64_MOVW_G1_S,
2639 0,
2640 0,
2641 0},
2642
2643 /* Less significant bits 16-31 of address/value: MOVK, no check */
2644 {"abs_g1_nc", 0,
2645 0, /* adr_type */
2646 0,
2647 BFD_RELOC_AARCH64_MOVW_G1_NC,
2648 0,
2649 0,
2650 0},
2651
2652 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2653 {"abs_g2", 0,
2654 0, /* adr_type */
2655 0,
2656 BFD_RELOC_AARCH64_MOVW_G2,
2657 0,
2658 0,
2659 0},
2660
2661 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2662 {"abs_g2_s", 0,
2663 0, /* adr_type */
2664 0,
2665 BFD_RELOC_AARCH64_MOVW_G2_S,
2666 0,
2667 0,
2668 0},
2669
2670 /* Less significant bits 32-47 of address/value: MOVK, no check */
2671 {"abs_g2_nc", 0,
2672 0, /* adr_type */
2673 0,
2674 BFD_RELOC_AARCH64_MOVW_G2_NC,
2675 0,
2676 0,
2677 0},
2678
2679 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2680 {"abs_g3", 0,
2681 0, /* adr_type */
2682 0,
2683 BFD_RELOC_AARCH64_MOVW_G3,
2684 0,
2685 0,
2686 0},
2687
2688 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2689 {"prel_g0", 1,
2690 0, /* adr_type */
2691 0,
2692 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2693 0,
2694 0,
2695 0},
2696
2697 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2698 {"prel_g0_nc", 1,
2699 0, /* adr_type */
2700 0,
2701 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2702 0,
2703 0,
2704 0},
2705
2706 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2707 {"prel_g1", 1,
2708 0, /* adr_type */
2709 0,
2710 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2711 0,
2712 0,
2713 0},
2714
2715 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2716 {"prel_g1_nc", 1,
2717 0, /* adr_type */
2718 0,
2719 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2720 0,
2721 0,
2722 0},
2723
2724 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2725 {"prel_g2", 1,
2726 0, /* adr_type */
2727 0,
2728 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2729 0,
2730 0,
2731 0},
2732
2733 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2734 {"prel_g2_nc", 1,
2735 0, /* adr_type */
2736 0,
2737 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2738 0,
2739 0,
2740 0},
2741
2742 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2743 {"prel_g3", 1,
2744 0, /* adr_type */
2745 0,
2746 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2747 0,
2748 0,
2749 0},
2750
2751 /* Get to the page containing GOT entry for a symbol. */
2752 {"got", 1,
2753 0, /* adr_type */
2754 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2755 0,
2756 0,
2757 0,
2758 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2759
2760 /* 12 bit offset into the page containing GOT entry for that symbol. */
2761 {"got_lo12", 0,
2762 0, /* adr_type */
2763 0,
2764 0,
2765 0,
2766 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2767 0},
2768
2769 /* 0-15 bits of address/value: MOVk, no check. */
2770 {"gotoff_g0_nc", 0,
2771 0, /* adr_type */
2772 0,
2773 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2774 0,
2775 0,
2776 0},
2777
2778 /* Most significant bits 16-31 of address/value: MOVZ. */
2779 {"gotoff_g1", 0,
2780 0, /* adr_type */
2781 0,
2782 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2783 0,
2784 0,
2785 0},
2786
2787 /* 15 bit offset into the page containing GOT entry for that symbol. */
2788 {"gotoff_lo15", 0,
2789 0, /* adr_type */
2790 0,
2791 0,
2792 0,
2793 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2794 0},
2795
2796 /* Get to the page containing GOT TLS entry for a symbol */
2797 {"gottprel_g0_nc", 0,
2798 0, /* adr_type */
2799 0,
2800 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2801 0,
2802 0,
2803 0},
2804
2805 /* Get to the page containing GOT TLS entry for a symbol */
2806 {"gottprel_g1", 0,
2807 0, /* adr_type */
2808 0,
2809 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2810 0,
2811 0,
2812 0},
2813
2814 /* Get to the page containing GOT TLS entry for a symbol */
2815 {"tlsgd", 0,
2816 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2817 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2818 0,
2819 0,
2820 0,
2821 0},
2822
2823 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2824 {"tlsgd_lo12", 0,
2825 0, /* adr_type */
2826 0,
2827 0,
2828 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2829 0,
2830 0},
2831
2832 /* Lower 16 bits address/value: MOVk. */
2833 {"tlsgd_g0_nc", 0,
2834 0, /* adr_type */
2835 0,
2836 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2837 0,
2838 0,
2839 0},
2840
2841 /* Most significant bits 16-31 of address/value: MOVZ. */
2842 {"tlsgd_g1", 0,
2843 0, /* adr_type */
2844 0,
2845 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2846 0,
2847 0,
2848 0},
2849
2850 /* Get to the page containing GOT TLS entry for a symbol */
2851 {"tlsdesc", 0,
2852 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2853 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2854 0,
2855 0,
2856 0,
2857 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2858
2859 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2860 {"tlsdesc_lo12", 0,
2861 0, /* adr_type */
2862 0,
2863 0,
2864 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2865 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2866 0},
2867
2868 /* Get to the page containing GOT TLS entry for a symbol.
2869 The same as GD, we allocate two consecutive GOT slots
2870 for module index and module offset, the only difference
2871 with GD is the module offset should be initialized to
2872 zero without any outstanding runtime relocation. */
2873 {"tlsldm", 0,
2874 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2875 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2876 0,
2877 0,
2878 0,
2879 0},
2880
2881 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2882 {"tlsldm_lo12_nc", 0,
2883 0, /* adr_type */
2884 0,
2885 0,
2886 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2887 0,
2888 0},
2889
2890 /* 12 bit offset into the module TLS base address. */
2891 {"dtprel_lo12", 0,
2892 0, /* adr_type */
2893 0,
2894 0,
2895 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2896 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2897 0},
2898
2899 /* Same as dtprel_lo12, no overflow check. */
2900 {"dtprel_lo12_nc", 0,
2901 0, /* adr_type */
2902 0,
2903 0,
2904 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2905 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2906 0},
2907
2908 /* bits[23:12] of offset to the module TLS base address. */
2909 {"dtprel_hi12", 0,
2910 0, /* adr_type */
2911 0,
2912 0,
2913 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2914 0,
2915 0},
2916
2917 /* bits[15:0] of offset to the module TLS base address. */
2918 {"dtprel_g0", 0,
2919 0, /* adr_type */
2920 0,
2921 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2922 0,
2923 0,
2924 0},
2925
2926 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2927 {"dtprel_g0_nc", 0,
2928 0, /* adr_type */
2929 0,
2930 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2931 0,
2932 0,
2933 0},
2934
2935 /* bits[31:16] of offset to the module TLS base address. */
2936 {"dtprel_g1", 0,
2937 0, /* adr_type */
2938 0,
2939 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2940 0,
2941 0,
2942 0},
2943
2944 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2945 {"dtprel_g1_nc", 0,
2946 0, /* adr_type */
2947 0,
2948 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2949 0,
2950 0,
2951 0},
2952
2953 /* bits[47:32] of offset to the module TLS base address. */
2954 {"dtprel_g2", 0,
2955 0, /* adr_type */
2956 0,
2957 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2958 0,
2959 0,
2960 0},
2961
2962 /* Lower 16 bit offset into GOT entry for a symbol */
2963 {"tlsdesc_off_g0_nc", 0,
2964 0, /* adr_type */
2965 0,
2966 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2967 0,
2968 0,
2969 0},
2970
2971 /* Higher 16 bit offset into GOT entry for a symbol */
2972 {"tlsdesc_off_g1", 0,
2973 0, /* adr_type */
2974 0,
2975 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2976 0,
2977 0,
2978 0},
2979
2980 /* Get to the page containing GOT TLS entry for a symbol */
2981 {"gottprel", 0,
2982 0, /* adr_type */
2983 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2984 0,
2985 0,
2986 0,
2987 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2988
2989 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2990 {"gottprel_lo12", 0,
2991 0, /* adr_type */
2992 0,
2993 0,
2994 0,
2995 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2996 0},
2997
2998 /* Get tp offset for a symbol. */
2999 {"tprel", 0,
3000 0, /* adr_type */
3001 0,
3002 0,
3003 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3004 0,
3005 0},
3006
3007 /* Get tp offset for a symbol. */
3008 {"tprel_lo12", 0,
3009 0, /* adr_type */
3010 0,
3011 0,
3012 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3013 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
3014 0},
3015
3016 /* Get tp offset for a symbol. */
3017 {"tprel_hi12", 0,
3018 0, /* adr_type */
3019 0,
3020 0,
3021 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
3022 0,
3023 0},
3024
3025 /* Get tp offset for a symbol. */
3026 {"tprel_lo12_nc", 0,
3027 0, /* adr_type */
3028 0,
3029 0,
3030 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
3031 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
3032 0},
3033
3034 /* Most significant bits 32-47 of address/value: MOVZ. */
3035 {"tprel_g2", 0,
3036 0, /* adr_type */
3037 0,
3038 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
3039 0,
3040 0,
3041 0},
3042
3043 /* Most significant bits 16-31 of address/value: MOVZ. */
3044 {"tprel_g1", 0,
3045 0, /* adr_type */
3046 0,
3047 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3048 0,
3049 0,
3050 0},
3051
3052 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
3053 {"tprel_g1_nc", 0,
3054 0, /* adr_type */
3055 0,
3056 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3057 0,
3058 0,
3059 0},
3060
3061 /* Most significant bits 0-15 of address/value: MOVZ. */
3062 {"tprel_g0", 0,
3063 0, /* adr_type */
3064 0,
3065 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3066 0,
3067 0,
3068 0},
3069
3070 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3071 {"tprel_g0_nc", 0,
3072 0, /* adr_type */
3073 0,
3074 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3075 0,
3076 0,
3077 0},
3078
3079 /* 15bit offset from got entry to base address of GOT table. */
3080 {"gotpage_lo15", 0,
3081 0,
3082 0,
3083 0,
3084 0,
3085 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3086 0},
3087
3088 /* 14bit offset from got entry to base address of GOT table. */
3089 {"gotpage_lo14", 0,
3090 0,
3091 0,
3092 0,
3093 0,
3094 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3095 0},
3096 };
3097
3098 /* Given the address of a pointer pointing to the textual name of a
3099 relocation as may appear in assembler source, attempt to find its
3100 details in reloc_table. The pointer will be updated to the character
3101 after the trailing colon. On failure, NULL will be returned;
3102 otherwise return the reloc_table_entry. */
3103
3104 static struct reloc_table_entry *
3105 find_reloc_table_entry (char **str)
3106 {
3107 unsigned int i;
3108 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3109 {
3110 int length = strlen (reloc_table[i].name);
3111
3112 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3113 && (*str)[length] == ':')
3114 {
3115 *str += (length + 1);
3116 return &reloc_table[i];
3117 }
3118 }
3119
3120 return NULL;
3121 }
3122
/* Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK (i.e. defer to generic_force_reloc).  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      return -1;
    }
}
3225
3226 int
3227 aarch64_force_relocation (struct fix *fixp)
3228 {
3229 int res = aarch64_force_reloc (fixp->fx_r_type);
3230
3231 if (res == -1)
3232 return generic_force_reloc (fixp);
3233 return res;
3234 }
3235
/* Mode argument to parse_shift and parser_shifter_operand.  Selects
   which shift/extend syntax is acceptable in the current context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3250
/* Parse a <shift> operator on an AArch64 data processing instruction.
   On success, record the shift kind and amount in OPERAND->shifter,
   advance *STR past the shift, and return TRUE; otherwise set a syntax
   error and return FALSE.  MODE restricts which shift operators and
   amounts are acceptable in the current context.  */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan the alphabetic token that names the shift operator.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the operator name up in the shift-operator hash table.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Check the parsed operator against what the context MODE allows.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  The amount is optional for register-offset
     addressing (a closing ']' may follow directly) and is implied for
     MUL VL.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* Only extend operators may legitimately omit the amount, and
	 only when no '#' prefix was seen.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3423
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only arithmetic and logical immediates are handled here.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  The value is left in inst.reloc.exp
     for md_apply_fix to validate.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Do not accept any shifter for logical immediate values.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3465
3466 /* Parse a <shifter_operand> for a data processing instruction:
3467
3468 <Rm>
3469 <Rm>, <shift>
3470 #<immediate>
3471 #<immediate>, LSL #imm
3472
3473 where <shift> is handled by parse_shift above, and the last two
3474 cases are handled by the function above.
3475
3476 Validation of immediate operands is deferred to md_apply_fix.
3477
3478 Return TRUE on success; otherwise return FALSE. */
3479
3480 static bool
3481 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3482 enum parse_shift_mode mode)
3483 {
3484 const reg_entry *reg;
3485 aarch64_opnd_qualifier_t qualifier;
3486 enum aarch64_operand_class opd_class
3487 = aarch64_get_operand_class (operand->type);
3488
3489 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3490 if (reg)
3491 {
3492 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3493 {
3494 set_syntax_error (_("unexpected register in the immediate operand"));
3495 return false;
3496 }
3497
3498 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3499 {
3500 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3501 return false;
3502 }
3503
3504 operand->reg.regno = reg->number;
3505 operand->qualifier = qualifier;
3506
3507 /* Accept optional shift operation on register. */
3508 if (! skip_past_comma (str))
3509 return true;
3510
3511 if (! parse_shift (str, operand, mode))
3512 return false;
3513
3514 return true;
3515 }
3516 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3517 {
3518 set_syntax_error
3519 (_("integer register expected in the extended/shifted operand "
3520 "register"));
3521 return false;
3522 }
3523
3524 /* We have a shifted immediate variable. */
3525 return parse_shifter_operand_imm (str, operand, mode);
3526 }
3527
/* Parse a <shifter_operand> that may be introduced by a relocation
   modifier ("#:<reloc_op>:" or ":<reloc_op>:").  Anything without such
   a prefix is handed straight to parse_shifter_operand.  On a reloc
   prefix, the matching ADD-variant relocation type and the following
   expression are recorded in inst.reloc.

   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Advance past "#:" or ":" and commit the new position.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      /* A zero add_type marks modifiers that have no ADD-class reloc.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3589
3590 /* Parse all forms of an address expression. Information is written
3591 to *OPERAND and/or inst.reloc.
3592
3593 The A64 instruction set has the following addressing modes:
3594
3595 Offset
3596 [base] // in SIMD ld/st structure
3597 [base{,#0}] // in ld/st exclusive
3598 [base{,#imm}]
3599 [base,Xm{,LSL #imm}]
3600 [base,Xm,SXTX {#imm}]
3601 [base,Wm,(S|U)XTW {#imm}]
3602 Pre-indexed
3603 [base]! // in ldraa/ldrab exclusive
3604 [base,#imm]!
3605 Post-indexed
3606 [base],#imm
3607 [base],Xm // in SIMD ld/st structure
3608 PC-relative (literal)
3609 label
3610 SVE:
3611 [base,#imm,MUL VL]
3612 [base,Zm.D{,LSL #imm}]
3613 [base,Zm.S,(S|U)XTW {#imm}]
3614 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3615 [Zn.S,#imm]
3616 [Zn.D,#imm]
3617 [Zn.S{, Xm}]
3618 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3619 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3620 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3621
3622 (As a convenience, the notation "=immediate" is permitted in conjunction
3623 with the pc-relative literal load instructions to automatically place an
3624 immediate value or symbolic address in a nearby literal pool and generate
3625 a hidden label which references it.)
3626
3627 Upon a successful parsing, the address structure in *OPERAND will be
3628 filled in the following way:
3629
3630 .base_regno = <base>
3631 .offset.is_reg // 1 if the offset is a register
3632 .offset.imm = <imm>
3633 .offset.regno = <Rm>
3634
3635 For different addressing modes defined in the A64 ISA:
3636
3637 Offset
3638 .pcrel=0; .preind=1; .postind=0; .writeback=0
3639 Pre-indexed
3640 .pcrel=0; .preind=1; .postind=0; .writeback=1
3641 Post-indexed
3642 .pcrel=0; .preind=0; .postind=1; .writeback=1
3643 PC-relative (literal)
3644 .pcrel=1; .preind=1; .postind=0; .writeback=0
3645
3646 The shift/extension information, if any, will be stored in .shifter.
3647 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3648 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3649 corresponding register.
3650
3651 BASE_TYPE says which types of base register should be accepted and
3652 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3653 is the type of shifter that is allowed for immediate offsets,
3654 or SHIFTED_NONE if none.
3655
3656 In all other respects, it is the caller's responsibility to check
3657 for addressing modes not supported by the instruction, and to set
3658 inst.reloc.type. */
3659
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  /* Expressions parsed below are written directly into inst.reloc.exp.  */
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* ADR uses the adr_type variant; everything else here is a
	     PC-relative literal load.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* The base register is mandatory once '[' has been seen.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]   */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifters require a 64-bit offset register...  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* ...whose element size matches the base, except for the
		 SVE2 vector-plus-scalar [Zn.S, Xm] form.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      /* Post-index operand: either a 64-bit register or an immediate
	 expression.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* Synthesize the implicit #0 offset.  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
3958
3959 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3960 on success. */
3961 static bool
3962 parse_address (char **str, aarch64_opnd_info *operand)
3963 {
3964 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3965 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3966 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3967 }
3968
3969 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3970 The arguments have the same meaning as for parse_address_main.
3971 Return TRUE on success. */
3972 static bool
3973 parse_sve_address (char **str, aarch64_opnd_info *operand,
3974 aarch64_opnd_qualifier_t *base_qualifier,
3975 aarch64_opnd_qualifier_t *offset_qualifier)
3976 {
3977 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3978 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3979 SHIFTED_MUL_VL);
3980 }
3981
3982 /* Parse a register X0-X30. The register must be 64-bit and register 31
3983 is unallocated. */
3984 static bool
3985 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
3986 {
3987 const reg_entry *reg = parse_reg (str);
3988 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
3989 {
3990 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3991 return false;
3992 }
3993 operand->reg.regno = reg->number;
3994 operand->qualifier = AARCH64_OPND_QLF_X;
3995 return true;
3996 }
3997
3998 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3999 Return TRUE on success; otherwise return FALSE. */
4000 static bool
4001 parse_half (char **str, int *internal_fixup_p)
4002 {
4003 char *p = *str;
4004
4005 skip_past_char (&p, '#');
4006
4007 gas_assert (internal_fixup_p);
4008 *internal_fixup_p = 0;
4009
4010 if (*p == ':')
4011 {
4012 struct reloc_table_entry *entry;
4013
4014 /* Try to parse a relocation. Anything else is an error. */
4015 ++p;
4016
4017 if (!(entry = find_reloc_table_entry (&p)))
4018 {
4019 set_syntax_error (_("unknown relocation modifier"));
4020 return false;
4021 }
4022
4023 if (entry->movw_type == 0)
4024 {
4025 set_syntax_error
4026 (_("this relocation modifier is not allowed on this instruction"));
4027 return false;
4028 }
4029
4030 inst.reloc.type = entry->movw_type;
4031 }
4032 else
4033 *internal_fixup_p = 1;
4034
4035 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4036 return false;
4037
4038 *str = p;
4039 return true;
4040 }
4041
4042 /* Parse an operand for an ADRP instruction:
4043 ADRP <Xd>, <label>
4044 Return TRUE on success; otherwise return FALSE. */
4045
4046 static bool
4047 parse_adrp (char **str)
4048 {
4049 char *p;
4050
4051 p = *str;
4052 if (*p == ':')
4053 {
4054 struct reloc_table_entry *entry;
4055
4056 /* Try to parse a relocation. Anything else is an error. */
4057 ++p;
4058 if (!(entry = find_reloc_table_entry (&p)))
4059 {
4060 set_syntax_error (_("unknown relocation modifier"));
4061 return false;
4062 }
4063
4064 if (entry->adrp_type == 0)
4065 {
4066 set_syntax_error
4067 (_("this relocation modifier is not allowed on this instruction"));
4068 return false;
4069 }
4070
4071 inst.reloc.type = entry->adrp_type;
4072 }
4073 else
4074 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4075
4076 inst.reloc.pc_rel = 1;
4077 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4078 return false;
4079 *str = p;
4080 return true;
4081 }
4082
4083 /* Miscellaneous. */
4084
4085 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4086 of SIZE tokens in which index I gives the token for field value I,
4087 or is null if field value I is invalid. REG_TYPE says which register
4088 names should be treated as registers rather than as symbolic immediates.
4089
4090 Return true on success, moving *STR past the operand and storing the
4091 field value in *VAL. */
4092
4093 static int
4094 parse_enum_string (char **str, int64_t *val, const char *const *array,
4095 size_t size, aarch64_reg_type reg_type)
4096 {
4097 expressionS exp;
4098 char *p, *q;
4099 size_t i;
4100
4101 /* Match C-like tokens. */
4102 p = q = *str;
4103 while (ISALNUM (*q))
4104 q++;
4105
4106 for (i = 0; i < size; ++i)
4107 if (array[i]
4108 && strncasecmp (array[i], p, q - p) == 0
4109 && array[i][q - p] == 0)
4110 {
4111 *val = i;
4112 *str = q;
4113 return true;
4114 }
4115
4116 if (!parse_immediate_expression (&p, &exp, reg_type))
4117 return false;
4118
4119 if (exp.X_op == O_constant
4120 && (uint64_t) exp.X_add_number < size)
4121 {
4122 *val = exp.X_add_number;
4123 *str = p;
4124 return true;
4125 }
4126
4127 /* Use the default error for this operand. */
4128 return false;
4129 }
4130
4131 /* Parse an option for a preload instruction. Returns the encoding for the
4132 option, or PARSE_FAIL. */
4133
4134 static int
4135 parse_pldop (char **str)
4136 {
4137 char *p, *q;
4138 const struct aarch64_name_value_pair *o;
4139
4140 p = q = *str;
4141 while (ISALNUM (*q))
4142 q++;
4143
4144 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4145 if (!o)
4146 return PARSE_FAIL;
4147
4148 *str = q;
4149 return o->value;
4150 }
4151
4152 /* Parse an option for a barrier instruction. Returns the encoding for the
4153 option, or PARSE_FAIL. */
4154
4155 static int
4156 parse_barrier (char **str)
4157 {
4158 char *p, *q;
4159 const struct aarch64_name_value_pair *o;
4160
4161 p = q = *str;
4162 while (ISALPHA (*q))
4163 q++;
4164
4165 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4166 if (!o)
4167 return PARSE_FAIL;
4168
4169 *str = q;
4170 return o->value;
4171 }
4172
4173 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4174 return 0 if successful. Otherwise return PARSE_FAIL. */
4175
4176 static int
4177 parse_barrier_psb (char **str,
4178 const struct aarch64_name_value_pair ** hint_opt)
4179 {
4180 char *p, *q;
4181 const struct aarch64_name_value_pair *o;
4182
4183 p = q = *str;
4184 while (ISALPHA (*q))
4185 q++;
4186
4187 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4188 if (!o)
4189 {
4190 set_fatal_syntax_error
4191 ( _("unknown or missing option to PSB/TSB"));
4192 return PARSE_FAIL;
4193 }
4194
4195 if (o->value != 0x11)
4196 {
4197 /* PSB only accepts option name 'CSYNC'. */
4198 set_syntax_error
4199 (_("the specified option is not accepted for PSB/TSB"));
4200 return PARSE_FAIL;
4201 }
4202
4203 *str = q;
4204 *hint_opt = o;
4205 return 0;
4206 }
4207
4208 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4209 return 0 if successful. Otherwise return PARSE_FAIL. */
4210
4211 static int
4212 parse_bti_operand (char **str,
4213 const struct aarch64_name_value_pair ** hint_opt)
4214 {
4215 char *p, *q;
4216 const struct aarch64_name_value_pair *o;
4217
4218 p = q = *str;
4219 while (ISALPHA (*q))
4220 q++;
4221
4222 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4223 if (!o)
4224 {
4225 set_fatal_syntax_error
4226 ( _("unknown option to BTI"));
4227 return PARSE_FAIL;
4228 }
4229
4230 switch (o->value)
4231 {
4232 /* Valid BTI operands. */
4233 case HINT_OPD_C:
4234 case HINT_OPD_J:
4235 case HINT_OPD_JC:
4236 break;
4237
4238 default:
4239 set_syntax_error
4240 (_("unknown option to BTI"));
4241 return PARSE_FAIL;
4242 }
4243
4244 *str = q;
4245 *hint_opt = o;
4246 return 0;
4247 }
4248
4249 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4250 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4251 on failure. Format:
4252
4253 REG_TYPE.QUALIFIER
4254
4255 Side effect: Update STR with current parse position of success.
4256 */
4257
4258 static const reg_entry *
4259 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4260 aarch64_opnd_qualifier_t *qualifier)
4261 {
4262 char *q;
4263
4264 reg_entry *reg = parse_reg (str);
4265 if (reg != NULL && reg->type == reg_type)
4266 {
4267 if (!skip_past_char (str, '.'))
4268 {
4269 set_syntax_error (_("missing ZA tile element size separator"));
4270 return NULL;
4271 }
4272
4273 q = *str;
4274 switch (TOLOWER (*q))
4275 {
4276 case 'b':
4277 *qualifier = AARCH64_OPND_QLF_S_B;
4278 break;
4279 case 'h':
4280 *qualifier = AARCH64_OPND_QLF_S_H;
4281 break;
4282 case 's':
4283 *qualifier = AARCH64_OPND_QLF_S_S;
4284 break;
4285 case 'd':
4286 *qualifier = AARCH64_OPND_QLF_S_D;
4287 break;
4288 case 'q':
4289 *qualifier = AARCH64_OPND_QLF_S_Q;
4290 break;
4291 default:
4292 return NULL;
4293 }
4294 q++;
4295
4296 *str = q;
4297 return reg;
4298 }
4299
4300 return NULL;
4301 }
4302
4303 /* Parse SME ZA tile encoded in <ZAda> assembler symbol.
4304 Function return tile QUALIFIER on success.
4305
4306 Tiles are in example format: za[0-9]\.[bhsd]
4307
4308 Function returns <ZAda> register number or PARSE_FAIL.
4309 */
4310 static int
4311 parse_sme_zada_operand (char **str, aarch64_opnd_qualifier_t *qualifier)
4312 {
4313 int regno;
4314 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_ZA, qualifier);
4315
4316 if (reg == NULL)
4317 return PARSE_FAIL;
4318 regno = reg->number;
4319
4320 switch (*qualifier)
4321 {
4322 case AARCH64_OPND_QLF_S_B:
4323 if (regno != 0x00)
4324 {
4325 set_syntax_error (_("invalid ZA tile register number, expected za0"));
4326 return PARSE_FAIL;
4327 }
4328 break;
4329 case AARCH64_OPND_QLF_S_H:
4330 if (regno > 0x01)
4331 {
4332 set_syntax_error (_("invalid ZA tile register number, expected za0-za1"));
4333 return PARSE_FAIL;
4334 }
4335 break;
4336 case AARCH64_OPND_QLF_S_S:
4337 if (regno > 0x03)
4338 {
4339 /* For the 32-bit variant: is the name of the ZA tile ZA0-ZA3. */
4340 set_syntax_error (_("invalid ZA tile register number, expected za0-za3"));
4341 return PARSE_FAIL;
4342 }
4343 break;
4344 case AARCH64_OPND_QLF_S_D:
4345 if (regno > 0x07)
4346 {
4347 /* For the 64-bit variant: is the name of the ZA tile ZA0-ZA7 */
4348 set_syntax_error (_("invalid ZA tile register number, expected za0-za7"));
4349 return PARSE_FAIL;
4350 }
4351 break;
4352 default:
4353 set_syntax_error (_("invalid ZA tile element size, allowed b, h, s and d"));
4354 return PARSE_FAIL;
4355 }
4356
4357 return regno;
4358 }
4359
4360 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4361
4362 #<imm>
4363 <imm>
4364
4365 Function return TRUE if immediate was found, or FALSE.
4366 */
4367 static bool
4368 parse_sme_immediate (char **str, int64_t *imm)
4369 {
4370 int64_t val;
4371 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4372 return false;
4373
4374 *imm = val;
4375 return true;
4376 }
4377
4378 /* Parse index with vector select register and immediate:
4379
4380 [<Wv>, <imm>]
4381 [<Wv>, #<imm>]
4382 where <Wv> is in W12-W15 range and # is optional for immediate.
4383
4384 Function performs extra check for mandatory immediate value if REQUIRE_IMM
4385 is set to true.
4386
4387 On success function returns TRUE and populated VECTOR_SELECT_REGISTER and
4388 IMM output.
4389 */
4390 static bool
4391 parse_sme_za_hv_tiles_operand_index (char **str,
4392 int *vector_select_register,
4393 int64_t *imm)
4394 {
4395 const reg_entry *reg;
4396
4397 if (!skip_past_char (str, '['))
4398 {
4399 set_syntax_error (_("expected '['"));
4400 return false;
4401 }
4402
4403 /* Vector select register W12-W15 encoded in the 2-bit Rv field. */
4404 reg = parse_reg (str);
4405 if (reg == NULL || reg->type != REG_TYPE_R_32
4406 || reg->number < 12 || reg->number > 15)
4407 {
4408 set_syntax_error (_("expected vector select register W12-W15"));
4409 return false;
4410 }
4411 *vector_select_register = reg->number;
4412
4413 if (!skip_past_char (str, ',')) /* Optional index offset immediate. */
4414 {
4415 set_syntax_error (_("expected ','"));
4416 return false;
4417 }
4418
4419 if (!parse_sme_immediate (str, imm))
4420 {
4421 set_syntax_error (_("index offset immediate expected"));
4422 return false;
4423 }
4424
4425 if (!skip_past_char (str, ']'))
4426 {
4427 set_syntax_error (_("expected ']'"));
4428 return false;
4429 }
4430
4431 return true;
4432 }
4433
/* Parse SME ZA horizontal or vertical vector access to tiles.
   Function extracts from STR to SLICE_INDICATOR <HV> horizontal (0) or
   vertical (1) ZA tile vector orientation. VECTOR_SELECT_REGISTER
   contains <Wv> select register and corresponding optional IMMEDIATE.
   In addition QUALIFIER is extracted.

   Field format examples:

   ZA0<HV>.B[<Wv>, #<imm>]
   <ZAn><HV>.H[<Wv>, #<imm>]
   <ZAn><HV>.S[<Wv>, #<imm>]
   <ZAn><HV>.D[<Wv>, #<imm>]
   <ZAn><HV>.Q[<Wv>, #<imm>]

   Function returns <ZAda> register number or PARSE_FAIL.
*/
static int
parse_sme_za_hv_tiles_operand (char **str,
			       enum sme_hv_slice *slice_indicator,
			       int *vector_select_register,
			       int *imm,
			       aarch64_opnd_qualifier_t *qualifier)
{
  char *qh, *qv;
  int regno;
  int regno_limit;
  int64_t imm_limit;
  int64_t imm_value;
  const reg_entry *reg;

  /* Speculatively parse the same text as a horizontal (ZAnH) and then a
     vertical (ZAnV) tile name; whichever succeeds fixes the slice
     orientation and commits its parse position to *str.  */
  qh = qv = *str;
  if ((reg = parse_reg_with_qual (&qh, REG_TYPE_ZAH, qualifier)) != NULL)
    {
      *slice_indicator = HV_horizontal;
      *str = qh;
    }
  else if ((reg = parse_reg_with_qual (&qv, REG_TYPE_ZAV, qualifier)) != NULL)
    {
      *slice_indicator = HV_vertical;
      *str = qv;
    }
  else
    return PARSE_FAIL;
  regno = reg->number;

  /* The element size fixes both limits: wider elements mean more tiles
     (higher regno_limit) but fewer slices per tile (lower imm_limit).  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      regno_limit = 0;
      imm_limit = 15;
      break;
    case AARCH64_OPND_QLF_S_H:
      regno_limit = 1;
      imm_limit = 7;
      break;
    case AARCH64_OPND_QLF_S_S:
      regno_limit = 3;
      imm_limit = 3;
      break;
    case AARCH64_OPND_QLF_S_D:
      regno_limit = 7;
      imm_limit = 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      regno_limit = 15;
      imm_limit = 0;
      break;
    default:
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
      return PARSE_FAIL;
    }

  /* Check if destination register ZA tile vector is in range for given
     instruction variant.  */
  if (regno < 0 || regno > regno_limit)
    {
      set_syntax_error (_("ZA tile vector out of range"));
      return PARSE_FAIL;
    }

  /* Parse the "[<Wv>, <imm>]" suffix.  */
  if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
					    &imm_value))
    return PARSE_FAIL;

  /* Check if optional index offset is in the range for instruction
     variant.  */
  if (imm_value < 0 || imm_value > imm_limit)
    {
      set_syntax_error (_("index offset out of range"));
      return PARSE_FAIL;
    }

  *imm = imm_value;

  return regno;
}
4530
4531
4532 static int
4533 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4534 enum sme_hv_slice *slice_indicator,
4535 int *vector_select_register,
4536 int *imm,
4537 aarch64_opnd_qualifier_t *qualifier)
4538 {
4539 int regno;
4540
4541 if (!skip_past_char (str, '{'))
4542 {
4543 set_syntax_error (_("expected '{'"));
4544 return PARSE_FAIL;
4545 }
4546
4547 regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4548 vector_select_register, imm,
4549 qualifier);
4550
4551 if (regno == PARSE_FAIL)
4552 return PARSE_FAIL;
4553
4554 if (!skip_past_char (str, '}'))
4555 {
4556 set_syntax_error (_("expected '}'"));
4557 return PARSE_FAIL;
4558 }
4559
4560 return regno;
4561 }
4562
/* Parse list of up to eight 64-bit element tile names separated by commas in
   SME's ZERO instruction:

     ZERO { <mask> }

   Function returns <mask>:

     an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
*/
static int
parse_sme_zero_mask(char **str)
{
  char *q;
  int mask;
  aarch64_opnd_qualifier_t qualifier;

  /* Each bit of MASK selects one of the eight 64-bit tiles ZA0.D-ZA7.D;
     tiles with wider elements map onto several 64-bit tiles at once.  */
  mask = 0x00;
  q = *str;
  do
    {
      const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA, &qualifier);
      if (reg)
	{
	  int regno = reg->number;
	  if (qualifier == AARCH64_OPND_QLF_S_B && regno == 0)
	    {
	      /* { ZA0.B } is assembled as all-ones immediate.  */
	      mask = 0xff;
	    }
	  else if (qualifier == AARCH64_OPND_QLF_S_H && regno < 2)
	    /* ZAn.H covers every second 64-bit tile starting at n,
	       hence the 0b01010101 pattern.  */
	    mask |= 0x55 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_S && regno < 4)
	    /* ZAn.S covers every fourth 64-bit tile starting at n.  */
	    mask |= 0x11 << regno;
	  else if (qualifier == AARCH64_OPND_QLF_S_D && regno < 8)
	    /* ZAn.D is exactly one 64-bit tile.  */
	    mask |= 0x01 << regno;
	  else
	    {
	      set_syntax_error (_("wrong ZA tile element format"));
	      return PARSE_FAIL;
	    }
	  continue;
	}
      else if (strncasecmp (q, "za", 2) == 0
	       && !ISALNUM (q[2]))
	{
	  /* { ZA } is assembled as all-ones immediate.  */
	  mask = 0xff;
	  q += 2;
	  continue;
	}
      else
	{
	  set_syntax_error (_("wrong ZA tile element format"));
	  return PARSE_FAIL;
	}
    }
  while (skip_past_char (&q, ','));

  *str = q;
  return mask;
}
4624
4625 /* Wraps in curly braces <mask> operand ZERO instruction:
4626
4627 ZERO { <mask> }
4628
4629 Function returns value of <mask> bit-field.
4630 */
4631 static int
4632 parse_sme_list_of_64bit_tiles (char **str)
4633 {
4634 int regno;
4635
4636 if (!skip_past_char (str, '{'))
4637 {
4638 set_syntax_error (_("expected '{'"));
4639 return PARSE_FAIL;
4640 }
4641
4642 /* Empty <mask> list is an all-zeros immediate. */
4643 if (!skip_past_char (str, '}'))
4644 {
4645 regno = parse_sme_zero_mask (str);
4646 if (regno == PARSE_FAIL)
4647 return PARSE_FAIL;
4648
4649 if (!skip_past_char (str, '}'))
4650 {
4651 set_syntax_error (_("expected '}'"));
4652 return PARSE_FAIL;
4653 }
4654 }
4655 else
4656 regno = 0x00;
4657
4658 return regno;
4659 }
4660
4661 /* Parse ZA array operand used in e.g. STR and LDR instruction.
4662 Operand format:
4663
4664 ZA[<Wv>, <imm>]
4665 ZA[<Wv>, #<imm>]
4666
4667 Function returns <Wv> or PARSE_FAIL.
4668 */
4669 static int
4670 parse_sme_za_array (char **str, int *imm)
4671 {
4672 char *p, *q;
4673 int regno;
4674 int64_t imm_value;
4675
4676 p = q = *str;
4677 while (ISALPHA (*q))
4678 q++;
4679
4680 if ((q - p != 2) || strncasecmp ("za", p, q - p) != 0)
4681 {
4682 set_syntax_error (_("expected ZA array"));
4683 return PARSE_FAIL;
4684 }
4685
4686 if (! parse_sme_za_hv_tiles_operand_index (&q, &regno, &imm_value))
4687 return PARSE_FAIL;
4688
4689 if (imm_value < 0 || imm_value > 15)
4690 {
4691 set_syntax_error (_("offset out of range"));
4692 return PARSE_FAIL;
4693 }
4694
4695 *imm = imm_value;
4696 *str = q;
4697 return regno;
4698 }
4699
4700 /* Parse streaming mode operand for SMSTART and SMSTOP.
4701
4702 {SM | ZA}
4703
4704 Function returns 's' if SM or 'z' if ZM is parsed. Otherwise PARSE_FAIL.
4705 */
4706 static int
4707 parse_sme_sm_za (char **str)
4708 {
4709 char *p, *q;
4710
4711 p = q = *str;
4712 while (ISALPHA (*q))
4713 q++;
4714
4715 if ((q - p != 2)
4716 || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
4717 {
4718 set_syntax_error (_("expected SM or ZA operand"));
4719 return PARSE_FAIL;
4720 }
4721
4722 *str = q;
4723 return TOLOWER (p[0]);
4724 }
4725
4726 /* Parse the name of the source scalable predicate register, the index base
4727 register W12-W15 and the element index. Function performs element index
4728 limit checks as well as qualifier type checks.
4729
4730 <Pn>.<T>[<Wv>, <imm>]
4731 <Pn>.<T>[<Wv>, #<imm>]
4732
4733 On success function sets <Wv> to INDEX_BASE_REG, <T> to QUALIFIER and
4734 <imm> to IMM.
4735 Function returns <Pn>, or PARSE_FAIL.
4736 */
4737 static int
4738 parse_sme_pred_reg_with_index(char **str,
4739 int *index_base_reg,
4740 int *imm,
4741 aarch64_opnd_qualifier_t *qualifier)
4742 {
4743 int regno;
4744 int64_t imm_limit;
4745 int64_t imm_value;
4746 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_PN, qualifier);
4747
4748 if (reg == NULL)
4749 return PARSE_FAIL;
4750 regno = reg->number;
4751
4752 switch (*qualifier)
4753 {
4754 case AARCH64_OPND_QLF_S_B:
4755 imm_limit = 15;
4756 break;
4757 case AARCH64_OPND_QLF_S_H:
4758 imm_limit = 7;
4759 break;
4760 case AARCH64_OPND_QLF_S_S:
4761 imm_limit = 3;
4762 break;
4763 case AARCH64_OPND_QLF_S_D:
4764 imm_limit = 1;
4765 break;
4766 default:
4767 set_syntax_error (_("wrong predicate register element size, allowed b, h, s and d"));
4768 return PARSE_FAIL;
4769 }
4770
4771 if (! parse_sme_za_hv_tiles_operand_index (str, index_base_reg, &imm_value))
4772 return PARSE_FAIL;
4773
4774 if (imm_value < 0 || imm_value > imm_limit)
4775 {
4776 set_syntax_error (_("element index out of range for given variant"));
4777 return PARSE_FAIL;
4778 }
4779
4780 *imm = imm_value;
4781
4782 return regno;
4783 }
4784
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   On success *STR is advanced past the name and, when FLAGS is non-NULL,
   *FLAGS receives the register's flag bits (0 for an implementation
   defined register).  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  p = buf;
  /* Accumulate the lower-cased name in BUF, never writing past the slot
     reserved for the trailing NUL.  */
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Reject field values that do not fit their encoding slots.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the system-register encoding:
	     op0:2, op1:3, CRn:4, CRm:4, op2:3 (MSB to LSB).  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: diagnose uses the selected processor does not support,
	 but still return the encoding so assembly can continue.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4858
4859 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4860 for the option, or NULL. */
4861
4862 static const aarch64_sys_ins_reg *
4863 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4864 {
4865 char *p, *q;
4866 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4867 const aarch64_sys_ins_reg *o;
4868
4869 p = buf;
4870 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4871 if (p < buf + (sizeof (buf) - 1))
4872 *p++ = TOLOWER (*q);
4873 *p = '\0';
4874
4875 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4876 valid system register. This is enforced by construction of the hash
4877 table. */
4878 if (p - buf != q - *str)
4879 return NULL;
4880
4881 o = str_hash_find (sys_ins_regs, buf);
4882 if (!o)
4883 return NULL;
4884
4885 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4886 o->name, o->value, o->flags, 0))
4887 as_bad (_("selected processor does not support system register "
4888 "name '%s'"), buf);
4889 if (aarch64_sys_reg_deprecated_p (o->flags))
4890 as_warn (_("system register name '%s' is deprecated and may be "
4891 "removed in a future release"), buf);
4892
4893 *str = q;
4894 return o;
4895 }
4896 \f
/* Operand-parsing helper macros.  All of them assume the enclosing
   function declares `char *str' and a local label `failure', and branch
   to `failure' when parsing does not succeed.  */

/* Consume the single character CHR or fail.  */
#define po_char_or_fail(chr) do { \
    if (! skip_past_char (&str, chr)) \
      goto failure; \
  } while (0)

/* Parse a register of type REGTYPE into VAL (its number), or fail with
   the default error.  RTYPE receives the actual register type.  */
#define po_reg_or_fail(regtype) do { \
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
    if (val == PARSE_FAIL) \
      { \
        set_default_error (); \
        goto failure; \
      } \
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE, storing its number
   and qualifier into the current operand INFO, or fail.  */
#define po_int_reg_or_fail(reg_type) do { \
    reg = aarch64_reg_parse_32_64 (&str, &qualifier); \
    if (!reg || !aarch64_check_reg_type (reg, reg_type)) \
      { \
        set_default_error (); \
        goto failure; \
      } \
    info->reg.regno = reg->number; \
    info->qualifier = qualifier; \
  } while (0)

/* Parse a constant immediate into VAL with no range check ("nc"), or
   fail.  */
#define po_imm_nc_or_fail() do { \
    if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
      goto failure; \
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or fail.  Note the range message is built by token pasting, so MIN and
   MAX must be literals.  */
#define po_imm_or_fail(min, max) do { \
    if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
      goto failure; \
    if (val < min || val > max) \
      { \
        set_fatal_syntax_error (_("immediate value out of range "\
                                  #min " to "#max)); \
        goto failure; \
      } \
  } while (0)

/* Parse one of the named enum values in ARRAY into VAL, or fail.  */
#define po_enum_or_fail(array) do { \
    if (!parse_enum_string (&str, &val, array, \
                            ARRAY_SIZE (array), imm_reg_type)) \
      goto failure; \
  } while (0)

/* Evaluate EXPR (an arbitrary parse step) and fail when it is false.  */
#define po_misc_or_fail(expr) do { \
    if (!expr) \
      goto failure; \
  } while (0)
4948 \f
/* Encode the 12-bit immediate field of an add/sub-immediate instruction
   (the value occupies bits [21:10] of the instruction word).  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  const unsigned shift = 10;
  return imm << shift;
}
4955
/* Encode the shift-amount field of an add/sub-immediate instruction
   (the value occupies bits starting at bit 22).  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  const unsigned shift = 22;
  return cnt << shift;
}
4962
4963
/* Encode the immediate field of an ADR instruction: the low two bits go
   into immlo (bits [30:29]) and bits [20:2] go into immhi (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;		  /* imm[1:0]  */
  uint32_t immhi = imm & (0x7ffff << 2);  /* imm[20:2] */
  return (immlo << 29) | (immhi << 3);
}
4971
/* Encode the 16-bit immediate field of a move-wide instruction
   (the value occupies bits starting at bit 5).  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  const unsigned shift = 5;
  return imm << shift;
}
4978
/* Encode the 26-bit offset of an unconditional branch: keep the low
   26 bits, which occupy bits [25:0] of the instruction word.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x03ffffffu;
}
4985
/* Encode the 19-bit offset of a conditional branch or compare-and-branch:
   keep the low 19 bits and place them at bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffffu) << 5;
}
4992
/* Encode the 19-bit offset of a load-literal instruction: keep the low
   19 bits and place them at bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffffu) << 5;
}
4999
/* Encode the 14-bit offset of a test-and-branch instruction: keep the
   low 14 bits and place them at bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fffu) << 5;
}
5006
/* Encode the 16-bit immediate field of svc/hvc/smc
   (the value occupies bits starting at bit 5).  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  const unsigned shift = 5;
  return imm << shift;
}
5013
/* Reencode add(s) as sub(s), or sub(s) as add(s), by flipping bit 30,
   which distinguishes the two in the add/sub encoding.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ 0x40000000u;
}
5020
/* Force a MOVZ/MOVN-family opcode to its MOVZ form by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | 0x40000000u;
}
5026
/* Force a MOVZ/MOVN-family opcode to its MOVN form by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~0x40000000u;
}
5032
5033 /* Overall per-instruction processing. */
5034
5035 /* We need to be able to fix up arbitrary expressions in some statements.
5036 This is so that we can handle symbols that are an arbitrary distance from
5037 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
5038 which returns part of an address in a form which will be valid for
5039 a data instruction. We do this by pushing the expression into a symbol
5040 in the expr_section, and creating a fix for that. */
5041
5042 static fixS *
5043 fix_new_aarch64 (fragS * frag,
5044 int where,
5045 short int size,
5046 expressionS * exp,
5047 int pc_rel,
5048 int reloc)
5049 {
5050 fixS *new_fix;
5051
5052 switch (exp->X_op)
5053 {
5054 case O_constant:
5055 case O_symbol:
5056 case O_add:
5057 case O_subtract:
5058 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5059 break;
5060
5061 default:
5062 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5063 pc_rel, reloc);
5064 break;
5065 }
5066 return new_fix;
5067 }
5068 \f
5069 /* Diagnostics on operands errors. */
5070
/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.
   Controls whether AARCH64_OPDE_INVALID_VARIANT diagnostics include the
   "did you mean this?" suggestion block.  */
static int verbose_error_p = 1;
5074
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.

   Indexed by enum aarch64_operand_error_kind, so the entries must stay in
   the exact order of that enum (see include/opcode/aarch64.h).  The
   AARCH64_OPDE_UNTIED_* entries were previously missing, which skewed
   every name from that point on in DEBUG_TRACE output.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
5092
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* Severity is simply the numeric order of the enum values; these
     asserts document (and verify at runtime) the ordering that the
     comparison below relies on.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
5116
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.

   NOTE: returns a pointer to a static buffer, so the result is only valid
   until the next call and the routine is not reentrant.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5145
5146 static void
5147 reset_aarch64_instruction (aarch64_instruction *instruction)
5148 {
5149 memset (instruction, '\0', sizeof (aarch64_instruction));
5150 instruction->reloc.type = BFD_RELOC_UNUSED;
5151 }
5152
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode the error was found against.  */
  aarch64_operand_error detail;		/* The error itself.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of error records with O(1) access to both ends;
   records are inserted at the head.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled from previous lines to avoid repeated
   allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5184
5185 /* Initialize the data structure that stores the operand mismatch
5186 information on assembling one line of the assembly code. */
5187 static void
5188 init_operand_error_report (void)
5189 {
5190 if (operand_error_report.head != NULL)
5191 {
5192 gas_assert (operand_error_report.tail != NULL);
5193 operand_error_report.tail->next = free_opnd_error_record_nodes;
5194 free_opnd_error_record_nodes = operand_error_report.head;
5195 operand_error_report.head = NULL;
5196 operand_error_report.tail = NULL;
5197 return;
5198 }
5199 gas_assert (operand_error_report.tail == NULL);
5200 }
5201
5202 /* Return TRUE if some operand error has been recorded during the
5203 parsing of the current assembly line using the opcode *OPCODE;
5204 otherwise return FALSE. */
5205 static inline bool
5206 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5207 {
5208 operand_error_record *record = operand_error_report.head;
5209 return record && record->opcode == opcode;
5210 }
5211
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* When a record for OPCODE already exists it is necessarily the list
     head (records are always inserted at the head), so RECORD aliases
     that existing record in the else-if branch below.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Either a freshly-prepared record or an existing record that loses to
     the new error: overwrite the stored detail.  */
  record->detail = new_record->detail;
}
5263
5264 static inline void
5265 record_operand_error_info (const aarch64_opcode *opcode,
5266 aarch64_operand_error *error_info)
5267 {
5268 operand_error_record record;
5269 record.opcode = opcode;
5270 record.detail = *error_info;
5271 add_operand_error_record (&record);
5272 }
5273
5274 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5275 error message *ERROR, for operand IDX (count from 0). */
5276
5277 static void
5278 record_operand_error (const aarch64_opcode *opcode, int idx,
5279 enum aarch64_operand_error_kind kind,
5280 const char* error)
5281 {
5282 aarch64_operand_error info;
5283 memset(&info, 0, sizeof (info));
5284 info.index = idx;
5285 info.kind = kind;
5286 info.error = error;
5287 info.non_fatal = false;
5288 record_operand_error_info (opcode, &info);
5289 }
5290
5291 static void
5292 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5293 enum aarch64_operand_error_kind kind,
5294 const char* error, const int *extra_data)
5295 {
5296 aarch64_operand_error info;
5297 info.index = idx;
5298 info.kind = kind;
5299 info.error = error;
5300 info.data[0].i = extra_data[0];
5301 info.data[1].i = extra_data[1];
5302 info.data[2].i = extra_data[2];
5303 info.non_fatal = false;
5304 record_operand_error_info (opcode, &info);
5305 }
5306
5307 static void
5308 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5309 const char* error, int lower_bound,
5310 int upper_bound)
5311 {
5312 int data[3] = {lower_bound, upper_bound, 0};
5313 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5314 error, data);
5315 }
5316
5317 /* Remove the operand error record for *OPCODE. */
5318 static void ATTRIBUTE_UNUSED
5319 remove_operand_error_record (const aarch64_opcode *opcode)
5320 {
5321 if (opcode_has_operand_error_p (opcode))
5322 {
5323 operand_error_record* record = operand_error_report.head;
5324 gas_assert (record != NULL && operand_error_report.tail != NULL);
5325 operand_error_report.head = record->next;
5326 record->next = free_opnd_error_record_nodes;
5327 free_opnd_error_record_nodes = record;
5328 if (operand_error_report.head == NULL)
5329 {
5330 gas_assert (operand_error_report.tail == record);
5331 operand_error_report.tail = NULL;
5332 }
5333 }
5334 }
5335
5336 /* Given the instruction in *INSTR, return the index of the best matched
5337 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5338
5339 Return -1 if there is no qualifier sequence; return the first match
5340 if there is multiple matches found. */
5341
5342 static int
5343 find_best_match (const aarch64_inst *instr,
5344 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
5345 {
5346 int i, num_opnds, max_num_matched, idx;
5347
5348 num_opnds = aarch64_num_of_operands (instr->opcode);
5349 if (num_opnds == 0)
5350 {
5351 DEBUG_TRACE ("no operand");
5352 return -1;
5353 }
5354
5355 max_num_matched = 0;
5356 idx = 0;
5357
5358 /* For each pattern. */
5359 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5360 {
5361 int j, num_matched;
5362 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
5363
5364 /* Most opcodes has much fewer patterns in the list. */
5365 if (empty_qualifier_sequence_p (qualifiers))
5366 {
5367 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
5368 break;
5369 }
5370
5371 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
5372 if (*qualifiers == instr->operands[j].qualifier)
5373 ++num_matched;
5374
5375 if (num_matched > max_num_matched)
5376 {
5377 max_num_matched = num_matched;
5378 idx = i;
5379 }
5380 }
5381
5382 DEBUG_TRACE ("return with %d", idx);
5383 return idx;
5384 }
5385
5386 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5387 corresponding operands in *INSTR. */
5388
5389 static inline void
5390 assign_qualifier_sequence (aarch64_inst *instr,
5391 const aarch64_opnd_qualifier_t *qualifiers)
5392 {
5393 int i = 0;
5394 int num_opnds = aarch64_num_of_operands (instr->opcode);
5395 gas_assert (num_opnds);
5396 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5397 instr->operands[i].qualifier = *qualifiers;
5398 }
5399
5400 /* Callback used by aarch64_print_operand to apply STYLE to the
5401 disassembler output created from FMT and ARGS. The STYLER object holds
5402 any required state. Must return a pointer to a string (created from FMT
5403 and ARGS) that will continue to be valid until the complete disassembled
5404 instruction has been printed.
5405
5406 We don't currently add any styling to the output of the disassembler as
5407 used within assembler error messages, and so STYLE is ignored here. A
5408 new string is allocated on the obstack help within STYLER and returned
5409 to the caller. */
5410
5411 static const char *aarch64_apply_style
5412 (struct aarch64_styler *styler,
5413 enum disassembler_style style ATTRIBUTE_UNUSED,
5414 const char *fmt, va_list args)
5415 {
5416 int res;
5417 char *ptr;
5418 struct obstack *stack = (struct obstack *) styler->state;
5419 va_list ap;
5420
5421 /* Calculate the required space. */
5422 va_copy (ap, args);
5423 res = vsnprintf (NULL, 0, fmt, ap);
5424 va_end (ap);
5425 gas_assert (res >= 0);
5426
5427 /* Allocate space on the obstack and format the result. */
5428 ptr = (char *) obstack_alloc (stack, res + 1);
5429 res = vsnprintf (ptr, (res + 1), fmt, args);
5430 gas_assert (res >= 0);
5431
5432 return ptr;
5433 }
5434
/* Print operands for the diagnosis purpose.  Appends the textual form of
   each operand in OPNDS (as described by OPCODE) to BUF, separated by
   ", " and preceded by a single space.  */

static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;
  struct aarch64_styler styler;
  struct obstack content;
  obstack_init (&content);

  /* Route styled output fragments into the local obstack; they stay live
     until the obstack is freed at the end of this function.  */
  styler.apply_style = aarch64_apply_style;
  styler.state = (void *) &content;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];
      char cmt[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL, cmt, sizeof (cmt), cpu_variant, &styler);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  NOTE(review): there is no bounds check
	 here; BUF is assumed to be NUL-terminated and large enough for all
	 operands -- verify against callers (which pass 2048-byte buffers).  */
      strcat (buf, str);

      /* Append a comment.  This works because only the last operand ever
	 adds a comment.  If that ever changes then we'll need to be
	 smarter here.  */
      if (cmt[0] != '\0')
	{
	  strcat (buf, "\t// ");
	  strcat (buf, cmt);
	}
    }

  obstack_free (&content, NULL);
}
5486
/* Send an informational (non-error) printf-style message to stderr,
   prefixed with the current file/line location when one is known.  */

static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file = as_where (&line);
  va_list args;

  if (file != NULL)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
5510
/* Output one operand error record: format and emit the diagnostic stored
   in *RECORD for the assembly line STR.  Non-fatal errors are issued as
   warnings, everything else as errors.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX may be -1 when the error is not tied to a specific operand.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Route the message through as_warn or as_bad depending on severity.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  /* NOTE(review): re-parses into the file-scope `inst' global, so
	     this relies on being called outside any other use of `inst'.  */
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  /* Encoding is expected to fail again here; we only need the IR.  */
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
                 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the permitted bounds (see
	 record_operand_out_of_range_error).  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0].i == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5706
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.

   STR is the assembly line being diagnosed.  NON_FATAL_ONLY, when true,
   restricts reporting to records marked non-fatal (warnings).  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5804 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int byte;

  /* Store the four bytes least-significant first.  */
  for (byte = 0; byte < 4; byte++)
    p[byte] = (insn >> (8 * byte)) & 0xff;
}
5815
/* Read a 32-bit AARCH64 instruction from BUF - always little-endian.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  const unsigned char *p = (const unsigned char *) buf;
  uint32_t value = 0;
  int byte;

  /* Fold the bytes in from the most-significant end.  */
  for (byte = 3; byte >= 0; byte--)
    value = (value << 8) | p[byte];

  return value;
}
5825
/* Emit the instruction currently held in the global INST into the output
   frag, together with any fix-up (relocation) it requires.  NEW_INST, if
   non-NULL, is attached to the fix-up so the encoded instruction can be
   revisited when the fix is applied.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve INSN_SIZE (4) bytes in the current frag.  */
  to = frag_more (INSN_SIZE);

  /* Note that this frag now contains instruction data (consumers of
     tc_frag_data.recorded are elsewhere in the file -- TODO confirm).  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fix-ups carry the operand index and flags so the
	     backend can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Tell the DWARF line-number machinery an instruction was emitted.  */
  dwarf2_emit_insn (INSN_SIZE);
}
5859
/* Link together opcodes of the same name.  A chain of these nodes is the
   value stored for each mnemonic in aarch64_ops_hsh (see
   lookup_mnemonic).  */

struct templates
{
  /* One candidate opcode template for the mnemonic.  */
  const aarch64_opcode *opcode;
  /* Next template sharing the same mnemonic, or NULL at the end of
     the chain.  */
  struct templates *next;
};

typedef struct templates templates;
5869
5870 static templates *
5871 lookup_mnemonic (const char *start, int len)
5872 {
5873 templates *templ = NULL;
5874
5875 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5876 return templ;
5877 }
5878
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  BASE points to the beginning
   of the mnemonic, DOT points to the first '.' within the mnemonic
   (if any) and END points to the end of the mnemonic.  Returns the
   matching template chain or NULL/0 on failure.  As a side effect,
   stores any condition suffix found into inst.cond.  */

static templates *
opcode_lookup (char *base, char *dot, char *end)
{
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* A trailing dot with nothing after it can never be valid.  */
  if (dot == end)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      /* The text between the dot and the end names the condition.  */
      cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (!cond)
	return 0;
      inst.cond = cond->value;
      len = dot - base;
    }
  else
    len = end - base;

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* Append ".c" to the mnemonic and look up that conditional
	 variant; 13 is the longest base length that still fits in
	 CONDNAME (16 bytes) together with the 2-character suffix.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5925
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL
   on failure, after recording a diagnostic via first_error.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the final qualifier is this
     base plus a width-dependent offset (see the shift logic below).  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* NT_zero / NT_merge map directly to the predicate qualifiers
     (presumably the SVE /z and /m suffixes -- confirm against the
     parser).  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  The S_* qualifiers are laid out in the
	 same order as the NT_* element types, so a plain offset works.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 4-, 8- and 16-byte total vector sizes are representable.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      /* The computed qualifier must land inside the V_* range.  */
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
6000
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.

   The default value is taken from the opcode table; which field of
   *OPERAND receives it depends on the operand type.  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  /* Only operands flagged as optional in the opcode table may be
     omitted, and the parser must not have filled this slot already.  */
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector element (register + lane) operands.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* Scaled SVE pattern: default immediate with an implicit "mul #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      /* An omitted exception immediate needs no relocation.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Option operands default to an entry in the respective table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      /* Other operand types have no omitted-operand defaulting.  */
      break;
    }
}
6099
/* Process the relocation type for move wide instructions.
   Derive the implicit LSL shift amount (0, 16, 32 or 48) from the
   relocation type in inst.reloc.type and store it into operand 1's
   shifter, rejecting combinations that are invalid (certain types for
   MOVK, and 32/48-bit shifts on a 32-bit destination).
   Return TRUE on success; otherwise return FALSE.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* Operand 0 is the destination register; a W qualifier selects the
     32-bit form of the instruction.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      /* These (signed, PC-relative and TLS group) relocations make no
	 sense for MOVK, which only replaces 16 bits.  */
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  switch (inst.reloc.type)
    {
    /* Group 0 relocations: no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* Group 1 relocations: shift of 16.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* Group 2 relocations: shift of 32; 64-bit destination only.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* Group 3 relocations: shift of 48; 64-bit destination only.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6201
/* A primitive log calculator.  Return log2 (SIZE) for the power-of-two
   sizes 1, 2, 4, 8 and 16; any other value is a caller bug and trips
   the assertions below.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* LS[SIZE - 1] is log2 (SIZE); (unsigned char) -1 marks sizes that
     are not powers of two and must never be requested.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as SIZE > 16: the original code only
     guarded the upper bound, so SIZE == 0 would read LS[-1] out of
     bounds (undefined behavior) before the assertion could fire.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
6217
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The pseudo type (plus its TLS LD/LE variants) is resolved to a
   size-specific relocation by looking at the access size implied by
   operand 1's qualifier.  Returns BFD_RELOC_AARCH64_NONE (after
   recording a fatal syntax error) if the access size is too large for
   the requested relocation family.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows are the pseudo reloc families in declaration order; columns
     are indexed by log2 of the access size (8/16/32/64/128 bits).
     The TLS rows have no 128-bit entry, hence BFD_RELOC_AARCH64_NONE.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1 has no qualifier yet, infer it from operand 0's.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS families only go up to 64-bit accesses.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6305
/* Check whether a register list REGINFO is valid.  The registers must be
   numbered in increasing order (modulo 32), in increments of one or two.

   REGINFO packs the count minus one in its low 2 bits, followed by one
   5-bit register number per list element.

   If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
   increments of two.

   Return FALSE if such a register list is invalid, otherwise return TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t count = (reginfo & 0x3) + 1;
  uint32_t step = accept_alternate ? 2 : 1;
  uint32_t expected, idx;

  reginfo >>= 2;
  expected = reginfo & 0x1f;

  /* Each subsequent register must follow its predecessor by STEP,
     wrapping around at 32.  */
  for (idx = 1; idx < count; idx++)
    {
      expected = (expected + step) & 0x1f;
      reginfo >>= 5;
      if ((reginfo & 0x1f) != expected)
	return false;
    }

  return true;
}
6336
6337 /* Generic instruction operand parser. This does no encoding and no
6338 semantic validation; it merely squirrels values away in the inst
6339 structure. Returns TRUE or FALSE depending on whether the
6340 specified grammar matched. */
6341
6342 static bool
6343 parse_operands (char *str, const aarch64_opcode *opcode)
6344 {
6345 int i;
6346 char *backtrack_pos = 0;
6347 const enum aarch64_opnd *operands = opcode->operands;
6348 aarch64_reg_type imm_reg_type;
6349
6350 clear_error ();
6351 skip_whitespace (str);
6352
6353 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
6354 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6355 else
6356 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6357
6358 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6359 {
6360 int64_t val;
6361 const reg_entry *reg;
6362 int comma_skipped_p = 0;
6363 aarch64_reg_type rtype;
6364 struct vector_type_el vectype;
6365 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6366 aarch64_opnd_info *info = &inst.base.operands[i];
6367 aarch64_reg_type reg_type;
6368
6369 DEBUG_TRACE ("parse operand %d", i);
6370
6371 /* Assign the operand code. */
6372 info->type = operands[i];
6373
6374 if (optional_operand_p (opcode, i))
6375 {
6376 /* Remember where we are in case we need to backtrack. */
6377 gas_assert (!backtrack_pos);
6378 backtrack_pos = str;
6379 }
6380
6381 /* Expect comma between operands; the backtrack mechanism will take
6382 care of cases of omitted optional operand. */
6383 if (i > 0 && ! skip_past_char (&str, ','))
6384 {
6385 set_syntax_error (_("comma expected between operands"));
6386 goto failure;
6387 }
6388 else
6389 comma_skipped_p = 1;
6390
6391 switch (operands[i])
6392 {
6393 case AARCH64_OPND_Rd:
6394 case AARCH64_OPND_Rn:
6395 case AARCH64_OPND_Rm:
6396 case AARCH64_OPND_Rt:
6397 case AARCH64_OPND_Rt2:
6398 case AARCH64_OPND_Rs:
6399 case AARCH64_OPND_Ra:
6400 case AARCH64_OPND_Rt_LS64:
6401 case AARCH64_OPND_Rt_SYS:
6402 case AARCH64_OPND_PAIRREG:
6403 case AARCH64_OPND_SVE_Rm:
6404 po_int_reg_or_fail (REG_TYPE_R_Z);
6405
6406 /* In LS64 load/store instructions Rt register number must be even
6407 and <=22. */
6408 if (operands[i] == AARCH64_OPND_Rt_LS64)
6409 {
6410 /* We've already checked if this is valid register.
6411 This will check if register number (Rt) is not undefined for LS64
6412 instructions:
6413 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6414 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6415 {
6416 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6417 goto failure;
6418 }
6419 }
6420 break;
6421
6422 case AARCH64_OPND_Rd_SP:
6423 case AARCH64_OPND_Rn_SP:
6424 case AARCH64_OPND_Rt_SP:
6425 case AARCH64_OPND_SVE_Rn_SP:
6426 case AARCH64_OPND_Rm_SP:
6427 po_int_reg_or_fail (REG_TYPE_R_SP);
6428 break;
6429
6430 case AARCH64_OPND_Rm_EXT:
6431 case AARCH64_OPND_Rm_SFT:
6432 po_misc_or_fail (parse_shifter_operand
6433 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6434 ? SHIFTED_ARITH_IMM
6435 : SHIFTED_LOGIC_IMM)));
6436 if (!info->shifter.operator_present)
6437 {
6438 /* Default to LSL if not present. Libopcodes prefers shifter
6439 kind to be explicit. */
6440 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6441 info->shifter.kind = AARCH64_MOD_LSL;
6442 /* For Rm_EXT, libopcodes will carry out further check on whether
6443 or not stack pointer is used in the instruction (Recall that
6444 "the extend operator is not optional unless at least one of
6445 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6446 }
6447 break;
6448
6449 case AARCH64_OPND_Fd:
6450 case AARCH64_OPND_Fn:
6451 case AARCH64_OPND_Fm:
6452 case AARCH64_OPND_Fa:
6453 case AARCH64_OPND_Ft:
6454 case AARCH64_OPND_Ft2:
6455 case AARCH64_OPND_Sd:
6456 case AARCH64_OPND_Sn:
6457 case AARCH64_OPND_Sm:
6458 case AARCH64_OPND_SVE_VZn:
6459 case AARCH64_OPND_SVE_Vd:
6460 case AARCH64_OPND_SVE_Vm:
6461 case AARCH64_OPND_SVE_Vn:
6462 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
6463 if (val == PARSE_FAIL)
6464 {
6465 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6466 goto failure;
6467 }
6468 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
6469
6470 info->reg.regno = val;
6471 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
6472 break;
6473
6474 case AARCH64_OPND_SVE_Pd:
6475 case AARCH64_OPND_SVE_Pg3:
6476 case AARCH64_OPND_SVE_Pg4_5:
6477 case AARCH64_OPND_SVE_Pg4_10:
6478 case AARCH64_OPND_SVE_Pg4_16:
6479 case AARCH64_OPND_SVE_Pm:
6480 case AARCH64_OPND_SVE_Pn:
6481 case AARCH64_OPND_SVE_Pt:
6482 case AARCH64_OPND_SME_Pm:
6483 reg_type = REG_TYPE_PN;
6484 goto vector_reg;
6485
6486 case AARCH64_OPND_SVE_Za_5:
6487 case AARCH64_OPND_SVE_Za_16:
6488 case AARCH64_OPND_SVE_Zd:
6489 case AARCH64_OPND_SVE_Zm_5:
6490 case AARCH64_OPND_SVE_Zm_16:
6491 case AARCH64_OPND_SVE_Zn:
6492 case AARCH64_OPND_SVE_Zt:
6493 reg_type = REG_TYPE_ZN;
6494 goto vector_reg;
6495
6496 case AARCH64_OPND_Va:
6497 case AARCH64_OPND_Vd:
6498 case AARCH64_OPND_Vn:
6499 case AARCH64_OPND_Vm:
6500 reg_type = REG_TYPE_VN;
6501 vector_reg:
6502 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6503 if (val == PARSE_FAIL)
6504 {
6505 first_error (_(get_reg_expected_msg (reg_type)));
6506 goto failure;
6507 }
6508 if (vectype.defined & NTA_HASINDEX)
6509 goto failure;
6510
6511 info->reg.regno = val;
6512 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6513 && vectype.type == NT_invtype)
6514 /* Unqualified Pn and Zn registers are allowed in certain
6515 contexts. Rely on F_STRICT qualifier checking to catch
6516 invalid uses. */
6517 info->qualifier = AARCH64_OPND_QLF_NIL;
6518 else
6519 {
6520 info->qualifier = vectype_to_qualifier (&vectype);
6521 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6522 goto failure;
6523 }
6524 break;
6525
6526 case AARCH64_OPND_VdD1:
6527 case AARCH64_OPND_VnD1:
6528 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
6529 if (val == PARSE_FAIL)
6530 {
6531 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6532 goto failure;
6533 }
6534 if (vectype.type != NT_d || vectype.index != 1)
6535 {
6536 set_fatal_syntax_error
6537 (_("the top half of a 128-bit FP/SIMD register is expected"));
6538 goto failure;
6539 }
6540 info->reg.regno = val;
6541 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6542 here; it is correct for the purpose of encoding/decoding since
6543 only the register number is explicitly encoded in the related
6544 instructions, although this appears a bit hacky. */
6545 info->qualifier = AARCH64_OPND_QLF_S_D;
6546 break;
6547
6548 case AARCH64_OPND_SVE_Zm3_INDEX:
6549 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6550 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6551 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6552 case AARCH64_OPND_SVE_Zm4_INDEX:
6553 case AARCH64_OPND_SVE_Zn_INDEX:
6554 reg_type = REG_TYPE_ZN;
6555 goto vector_reg_index;
6556
6557 case AARCH64_OPND_Ed:
6558 case AARCH64_OPND_En:
6559 case AARCH64_OPND_Em:
6560 case AARCH64_OPND_Em16:
6561 case AARCH64_OPND_SM3_IMM2:
6562 reg_type = REG_TYPE_VN;
6563 vector_reg_index:
6564 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6565 if (val == PARSE_FAIL)
6566 {
6567 first_error (_(get_reg_expected_msg (reg_type)));
6568 goto failure;
6569 }
6570 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6571 goto failure;
6572
6573 info->reglane.regno = val;
6574 info->reglane.index = vectype.index;
6575 info->qualifier = vectype_to_qualifier (&vectype);
6576 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6577 goto failure;
6578 break;
6579
6580 case AARCH64_OPND_SVE_ZnxN:
6581 case AARCH64_OPND_SVE_ZtxN:
6582 reg_type = REG_TYPE_ZN;
6583 goto vector_reg_list;
6584
6585 case AARCH64_OPND_LVn:
6586 case AARCH64_OPND_LVt:
6587 case AARCH64_OPND_LVt_AL:
6588 case AARCH64_OPND_LEt:
6589 reg_type = REG_TYPE_VN;
6590 vector_reg_list:
6591 if (reg_type == REG_TYPE_ZN
6592 && get_opcode_dependent_value (opcode) == 1
6593 && *str != '{')
6594 {
6595 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
6596 if (val == PARSE_FAIL)
6597 {
6598 first_error (_(get_reg_expected_msg (reg_type)));
6599 goto failure;
6600 }
6601 info->reglist.first_regno = val;
6602 info->reglist.num_regs = 1;
6603 }
6604 else
6605 {
6606 val = parse_vector_reg_list (&str, reg_type, &vectype);
6607 if (val == PARSE_FAIL)
6608 goto failure;
6609
6610 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6611 {
6612 set_fatal_syntax_error (_("invalid register list"));
6613 goto failure;
6614 }
6615
6616 if (vectype.width != 0 && *str != ',')
6617 {
6618 set_fatal_syntax_error
6619 (_("expected element type rather than vector type"));
6620 goto failure;
6621 }
6622
6623 info->reglist.first_regno = (val >> 2) & 0x1f;
6624 info->reglist.num_regs = (val & 0x3) + 1;
6625 }
6626 if (operands[i] == AARCH64_OPND_LEt)
6627 {
6628 if (!(vectype.defined & NTA_HASINDEX))
6629 goto failure;
6630 info->reglist.has_index = 1;
6631 info->reglist.index = vectype.index;
6632 }
6633 else
6634 {
6635 if (vectype.defined & NTA_HASINDEX)
6636 goto failure;
6637 if (!(vectype.defined & NTA_HASTYPE))
6638 {
6639 if (reg_type == REG_TYPE_ZN)
6640 set_fatal_syntax_error (_("missing type suffix"));
6641 goto failure;
6642 }
6643 }
6644 info->qualifier = vectype_to_qualifier (&vectype);
6645 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6646 goto failure;
6647 break;
6648
6649 case AARCH64_OPND_CRn:
6650 case AARCH64_OPND_CRm:
6651 {
6652 char prefix = *(str++);
6653 if (prefix != 'c' && prefix != 'C')
6654 goto failure;
6655
6656 po_imm_nc_or_fail ();
6657 if (val > 15)
6658 {
6659 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6660 goto failure;
6661 }
6662 info->qualifier = AARCH64_OPND_QLF_CR;
6663 info->imm.value = val;
6664 break;
6665 }
6666
6667 case AARCH64_OPND_SHLL_IMM:
6668 case AARCH64_OPND_IMM_VLSR:
6669 po_imm_or_fail (1, 64);
6670 info->imm.value = val;
6671 break;
6672
6673 case AARCH64_OPND_CCMP_IMM:
6674 case AARCH64_OPND_SIMM5:
6675 case AARCH64_OPND_FBITS:
6676 case AARCH64_OPND_TME_UIMM16:
6677 case AARCH64_OPND_UIMM4:
6678 case AARCH64_OPND_UIMM4_ADDG:
6679 case AARCH64_OPND_UIMM10:
6680 case AARCH64_OPND_UIMM3_OP1:
6681 case AARCH64_OPND_UIMM3_OP2:
6682 case AARCH64_OPND_IMM_VLSL:
6683 case AARCH64_OPND_IMM:
6684 case AARCH64_OPND_IMM_2:
6685 case AARCH64_OPND_WIDTH:
6686 case AARCH64_OPND_SVE_INV_LIMM:
6687 case AARCH64_OPND_SVE_LIMM:
6688 case AARCH64_OPND_SVE_LIMM_MOV:
6689 case AARCH64_OPND_SVE_SHLIMM_PRED:
6690 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6691 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6692 case AARCH64_OPND_SVE_SHRIMM_PRED:
6693 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6694 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6695 case AARCH64_OPND_SVE_SIMM5:
6696 case AARCH64_OPND_SVE_SIMM5B:
6697 case AARCH64_OPND_SVE_SIMM6:
6698 case AARCH64_OPND_SVE_SIMM8:
6699 case AARCH64_OPND_SVE_UIMM3:
6700 case AARCH64_OPND_SVE_UIMM7:
6701 case AARCH64_OPND_SVE_UIMM8:
6702 case AARCH64_OPND_SVE_UIMM8_53:
6703 case AARCH64_OPND_IMM_ROT1:
6704 case AARCH64_OPND_IMM_ROT2:
6705 case AARCH64_OPND_IMM_ROT3:
6706 case AARCH64_OPND_SVE_IMM_ROT1:
6707 case AARCH64_OPND_SVE_IMM_ROT2:
6708 case AARCH64_OPND_SVE_IMM_ROT3:
6709 case AARCH64_OPND_CSSC_SIMM8:
6710 case AARCH64_OPND_CSSC_UIMM8:
6711 po_imm_nc_or_fail ();
6712 info->imm.value = val;
6713 break;
6714
6715 case AARCH64_OPND_SVE_AIMM:
6716 case AARCH64_OPND_SVE_ASIMM:
6717 po_imm_nc_or_fail ();
6718 info->imm.value = val;
6719 skip_whitespace (str);
6720 if (skip_past_comma (&str))
6721 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6722 else
6723 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6724 break;
6725
6726 case AARCH64_OPND_SVE_PATTERN:
6727 po_enum_or_fail (aarch64_sve_pattern_array);
6728 info->imm.value = val;
6729 break;
6730
6731 case AARCH64_OPND_SVE_PATTERN_SCALED:
6732 po_enum_or_fail (aarch64_sve_pattern_array);
6733 info->imm.value = val;
6734 if (skip_past_comma (&str)
6735 && !parse_shift (&str, info, SHIFTED_MUL))
6736 goto failure;
6737 if (!info->shifter.operator_present)
6738 {
6739 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6740 info->shifter.kind = AARCH64_MOD_MUL;
6741 info->shifter.amount = 1;
6742 }
6743 break;
6744
6745 case AARCH64_OPND_SVE_PRFOP:
6746 po_enum_or_fail (aarch64_sve_prfop_array);
6747 info->imm.value = val;
6748 break;
6749
6750 case AARCH64_OPND_UIMM7:
6751 po_imm_or_fail (0, 127);
6752 info->imm.value = val;
6753 break;
6754
6755 case AARCH64_OPND_IDX:
6756 case AARCH64_OPND_MASK:
6757 case AARCH64_OPND_BIT_NUM:
6758 case AARCH64_OPND_IMMR:
6759 case AARCH64_OPND_IMMS:
6760 po_imm_or_fail (0, 63);
6761 info->imm.value = val;
6762 break;
6763
6764 case AARCH64_OPND_IMM0:
6765 po_imm_nc_or_fail ();
6766 if (val != 0)
6767 {
6768 set_fatal_syntax_error (_("immediate zero expected"));
6769 goto failure;
6770 }
6771 info->imm.value = 0;
6772 break;
6773
6774 case AARCH64_OPND_FPIMM0:
6775 {
6776 int qfloat;
6777 bool res1 = false, res2 = false;
6778 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6779 it is probably not worth the effort to support it. */
6780 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6781 imm_reg_type))
6782 && (error_p ()
6783 || !(res2 = parse_constant_immediate (&str, &val,
6784 imm_reg_type))))
6785 goto failure;
6786 if ((res1 && qfloat == 0) || (res2 && val == 0))
6787 {
6788 info->imm.value = 0;
6789 info->imm.is_fp = 1;
6790 break;
6791 }
6792 set_fatal_syntax_error (_("immediate zero expected"));
6793 goto failure;
6794 }
6795
6796 case AARCH64_OPND_IMM_MOV:
6797 {
6798 char *saved = str;
6799 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6800 reg_name_p (str, REG_TYPE_VN))
6801 goto failure;
6802 str = saved;
6803 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6804 GE_OPT_PREFIX, REJECT_ABSENT));
6805 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6806 later. fix_mov_imm_insn will try to determine a machine
6807 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6808 message if the immediate cannot be moved by a single
6809 instruction. */
6810 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6811 inst.base.operands[i].skip = 1;
6812 }
6813 break;
6814
6815 case AARCH64_OPND_SIMD_IMM:
6816 case AARCH64_OPND_SIMD_IMM_SFT:
6817 if (! parse_big_immediate (&str, &val, imm_reg_type))
6818 goto failure;
6819 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6820 /* addr_off_p */ 0,
6821 /* need_libopcodes_p */ 1,
6822 /* skip_p */ 1);
6823 /* Parse shift.
6824 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6825 shift, we don't check it here; we leave the checking to
6826 the libopcodes (operand_general_constraint_met_p). By
6827 doing this, we achieve better diagnostics. */
6828 if (skip_past_comma (&str)
6829 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6830 goto failure;
6831 if (!info->shifter.operator_present
6832 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6833 {
6834 /* Default to LSL if not present. Libopcodes prefers shifter
6835 kind to be explicit. */
6836 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6837 info->shifter.kind = AARCH64_MOD_LSL;
6838 }
6839 break;
6840
6841 case AARCH64_OPND_FPIMM:
6842 case AARCH64_OPND_SIMD_FPIMM:
6843 case AARCH64_OPND_SVE_FPIMM8:
6844 {
6845 int qfloat;
6846 bool dp_p;
6847
6848 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6849 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6850 || !aarch64_imm_float_p (qfloat))
6851 {
6852 if (!error_p ())
6853 set_fatal_syntax_error (_("invalid floating-point"
6854 " constant"));
6855 goto failure;
6856 }
6857 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6858 inst.base.operands[i].imm.is_fp = 1;
6859 }
6860 break;
6861
6862 case AARCH64_OPND_SVE_I1_HALF_ONE:
6863 case AARCH64_OPND_SVE_I1_HALF_TWO:
6864 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6865 {
6866 int qfloat;
6867 bool dp_p;
6868
6869 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6870 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6871 {
6872 if (!error_p ())
6873 set_fatal_syntax_error (_("invalid floating-point"
6874 " constant"));
6875 goto failure;
6876 }
6877 inst.base.operands[i].imm.value = qfloat;
6878 inst.base.operands[i].imm.is_fp = 1;
6879 }
6880 break;
6881
6882 case AARCH64_OPND_LIMM:
6883 po_misc_or_fail (parse_shifter_operand (&str, info,
6884 SHIFTED_LOGIC_IMM));
6885 if (info->shifter.operator_present)
6886 {
6887 set_fatal_syntax_error
6888 (_("shift not allowed for bitmask immediate"));
6889 goto failure;
6890 }
6891 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6892 /* addr_off_p */ 0,
6893 /* need_libopcodes_p */ 1,
6894 /* skip_p */ 1);
6895 break;
6896
6897 case AARCH64_OPND_AIMM:
6898 if (opcode->op == OP_ADD)
6899 /* ADD may have relocation types. */
6900 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6901 SHIFTED_ARITH_IMM));
6902 else
6903 po_misc_or_fail (parse_shifter_operand (&str, info,
6904 SHIFTED_ARITH_IMM));
6905 switch (inst.reloc.type)
6906 {
6907 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6908 info->shifter.amount = 12;
6909 break;
6910 case BFD_RELOC_UNUSED:
6911 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6912 if (info->shifter.kind != AARCH64_MOD_NONE)
6913 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6914 inst.reloc.pc_rel = 0;
6915 break;
6916 default:
6917 break;
6918 }
6919 info->imm.value = 0;
6920 if (!info->shifter.operator_present)
6921 {
6922 /* Default to LSL if not present. Libopcodes prefers shifter
6923 kind to be explicit. */
6924 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6925 info->shifter.kind = AARCH64_MOD_LSL;
6926 }
6927 break;
6928
6929 case AARCH64_OPND_HALF:
6930 {
6931 /* #<imm16> or relocation. */
6932 int internal_fixup_p;
6933 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6934 if (internal_fixup_p)
6935 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6936 skip_whitespace (str);
6937 if (skip_past_comma (&str))
6938 {
6939 /* {, LSL #<shift>} */
6940 if (! aarch64_gas_internal_fixup_p ())
6941 {
6942 set_fatal_syntax_error (_("can't mix relocation modifier "
6943 "with explicit shift"));
6944 goto failure;
6945 }
6946 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6947 }
6948 else
6949 inst.base.operands[i].shifter.amount = 0;
6950 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6951 inst.base.operands[i].imm.value = 0;
6952 if (! process_movw_reloc_info ())
6953 goto failure;
6954 }
6955 break;
6956
6957 case AARCH64_OPND_EXCEPTION:
6958 case AARCH64_OPND_UNDEFINED:
6959 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6960 imm_reg_type));
6961 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6962 /* addr_off_p */ 0,
6963 /* need_libopcodes_p */ 0,
6964 /* skip_p */ 1);
6965 break;
6966
6967 case AARCH64_OPND_NZCV:
6968 {
6969 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6970 if (nzcv != NULL)
6971 {
6972 str += 4;
6973 info->imm.value = nzcv->value;
6974 break;
6975 }
6976 po_imm_or_fail (0, 15);
6977 info->imm.value = val;
6978 }
6979 break;
6980
6981 case AARCH64_OPND_COND:
6982 case AARCH64_OPND_COND1:
6983 {
6984 char *start = str;
6985 do
6986 str++;
6987 while (ISALPHA (*str));
6988 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6989 if (info->cond == NULL)
6990 {
6991 set_syntax_error (_("invalid condition"));
6992 goto failure;
6993 }
6994 else if (operands[i] == AARCH64_OPND_COND1
6995 && (info->cond->value & 0xe) == 0xe)
6996 {
6997 /* Do not allow AL or NV. */
6998 set_default_error ();
6999 goto failure;
7000 }
7001 }
7002 break;
7003
7004 case AARCH64_OPND_ADDR_ADRP:
7005 po_misc_or_fail (parse_adrp (&str));
7006 /* Clear the value as operand needs to be relocated. */
7007 info->imm.value = 0;
7008 break;
7009
7010 case AARCH64_OPND_ADDR_PCREL14:
7011 case AARCH64_OPND_ADDR_PCREL19:
7012 case AARCH64_OPND_ADDR_PCREL21:
7013 case AARCH64_OPND_ADDR_PCREL26:
7014 po_misc_or_fail (parse_address (&str, info));
7015 if (!info->addr.pcrel)
7016 {
7017 set_syntax_error (_("invalid pc-relative address"));
7018 goto failure;
7019 }
7020 if (inst.gen_lit_pool
7021 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
7022 {
7023 /* Only permit "=value" in the literal load instructions.
7024 The literal will be generated by programmer_friendly_fixup. */
7025 set_syntax_error (_("invalid use of \"=immediate\""));
7026 goto failure;
7027 }
7028 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
7029 {
7030 set_syntax_error (_("unrecognized relocation suffix"));
7031 goto failure;
7032 }
7033 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
7034 {
7035 info->imm.value = inst.reloc.exp.X_add_number;
7036 inst.reloc.type = BFD_RELOC_UNUSED;
7037 }
7038 else
7039 {
7040 info->imm.value = 0;
7041 if (inst.reloc.type == BFD_RELOC_UNUSED)
7042 switch (opcode->iclass)
7043 {
7044 case compbranch:
7045 case condbranch:
7046 /* e.g. CBZ or B.COND */
7047 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7048 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
7049 break;
7050 case testbranch:
7051 /* e.g. TBZ */
7052 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
7053 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
7054 break;
7055 case branch_imm:
7056 /* e.g. B or BL */
7057 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
7058 inst.reloc.type =
7059 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
7060 : BFD_RELOC_AARCH64_JUMP26;
7061 break;
7062 case loadlit:
7063 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
7064 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
7065 break;
7066 case pcreladdr:
7067 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
7068 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
7069 break;
7070 default:
7071 gas_assert (0);
7072 abort ();
7073 }
7074 inst.reloc.pc_rel = 1;
7075 }
7076 break;
7077
7078 case AARCH64_OPND_ADDR_SIMPLE:
7079 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7080 {
7081 /* [<Xn|SP>{, #<simm>}] */
7082 char *start = str;
7083 /* First use the normal address-parsing routines, to get
7084 the usual syntax errors. */
7085 po_misc_or_fail (parse_address (&str, info));
7086 if (info->addr.pcrel || info->addr.offset.is_reg
7087 || !info->addr.preind || info->addr.postind
7088 || info->addr.writeback)
7089 {
7090 set_syntax_error (_("invalid addressing mode"));
7091 goto failure;
7092 }
7093
7094 /* Then retry, matching the specific syntax of these addresses. */
7095 str = start;
7096 po_char_or_fail ('[');
7097 po_reg_or_fail (REG_TYPE_R64_SP);
7098 /* Accept optional ", #0". */
7099 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7100 && skip_past_char (&str, ','))
7101 {
7102 skip_past_char (&str, '#');
7103 if (! skip_past_char (&str, '0'))
7104 {
7105 set_fatal_syntax_error
7106 (_("the optional immediate offset can only be 0"));
7107 goto failure;
7108 }
7109 }
7110 po_char_or_fail (']');
7111 break;
7112 }
7113
7114 case AARCH64_OPND_ADDR_REGOFF:
7115 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7116 po_misc_or_fail (parse_address (&str, info));
7117 regoff_addr:
7118 if (info->addr.pcrel || !info->addr.offset.is_reg
7119 || !info->addr.preind || info->addr.postind
7120 || info->addr.writeback)
7121 {
7122 set_syntax_error (_("invalid addressing mode"));
7123 goto failure;
7124 }
7125 if (!info->shifter.operator_present)
7126 {
7127 /* Default to LSL if not present. Libopcodes prefers shifter
7128 kind to be explicit. */
7129 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7130 info->shifter.kind = AARCH64_MOD_LSL;
7131 }
7132 /* Qualifier to be deduced by libopcodes. */
7133 break;
7134
7135 case AARCH64_OPND_ADDR_SIMM7:
7136 po_misc_or_fail (parse_address (&str, info));
7137 if (info->addr.pcrel || info->addr.offset.is_reg
7138 || (!info->addr.preind && !info->addr.postind))
7139 {
7140 set_syntax_error (_("invalid addressing mode"));
7141 goto failure;
7142 }
7143 if (inst.reloc.type != BFD_RELOC_UNUSED)
7144 {
7145 set_syntax_error (_("relocation not allowed"));
7146 goto failure;
7147 }
7148 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7149 /* addr_off_p */ 1,
7150 /* need_libopcodes_p */ 1,
7151 /* skip_p */ 0);
7152 break;
7153
7154 case AARCH64_OPND_ADDR_SIMM9:
7155 case AARCH64_OPND_ADDR_SIMM9_2:
7156 case AARCH64_OPND_ADDR_SIMM11:
7157 case AARCH64_OPND_ADDR_SIMM13:
7158 po_misc_or_fail (parse_address (&str, info));
7159 if (info->addr.pcrel || info->addr.offset.is_reg
7160 || (!info->addr.preind && !info->addr.postind)
7161 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7162 && info->addr.writeback))
7163 {
7164 set_syntax_error (_("invalid addressing mode"));
7165 goto failure;
7166 }
7167 if (inst.reloc.type != BFD_RELOC_UNUSED)
7168 {
7169 set_syntax_error (_("relocation not allowed"));
7170 goto failure;
7171 }
7172 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7173 /* addr_off_p */ 1,
7174 /* need_libopcodes_p */ 1,
7175 /* skip_p */ 0);
7176 break;
7177
7178 case AARCH64_OPND_ADDR_SIMM10:
7179 case AARCH64_OPND_ADDR_OFFSET:
7180 po_misc_or_fail (parse_address (&str, info));
7181 if (info->addr.pcrel || info->addr.offset.is_reg
7182 || !info->addr.preind || info->addr.postind)
7183 {
7184 set_syntax_error (_("invalid addressing mode"));
7185 goto failure;
7186 }
7187 if (inst.reloc.type != BFD_RELOC_UNUSED)
7188 {
7189 set_syntax_error (_("relocation not allowed"));
7190 goto failure;
7191 }
7192 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7193 /* addr_off_p */ 1,
7194 /* need_libopcodes_p */ 1,
7195 /* skip_p */ 0);
7196 break;
7197
7198 case AARCH64_OPND_ADDR_UIMM12:
7199 po_misc_or_fail (parse_address (&str, info));
7200 if (info->addr.pcrel || info->addr.offset.is_reg
7201 || !info->addr.preind || info->addr.writeback)
7202 {
7203 set_syntax_error (_("invalid addressing mode"));
7204 goto failure;
7205 }
7206 if (inst.reloc.type == BFD_RELOC_UNUSED)
7207 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7208 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7209 || (inst.reloc.type
7210 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7211 || (inst.reloc.type
7212 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7213 || (inst.reloc.type
7214 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7215 || (inst.reloc.type
7216 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7217 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7218 /* Leave qualifier to be determined by libopcodes. */
7219 break;
7220
7221 case AARCH64_OPND_SIMD_ADDR_POST:
7222 /* [<Xn|SP>], <Xm|#<amount>> */
7223 po_misc_or_fail (parse_address (&str, info));
7224 if (!info->addr.postind || !info->addr.writeback)
7225 {
7226 set_syntax_error (_("invalid addressing mode"));
7227 goto failure;
7228 }
7229 if (!info->addr.offset.is_reg)
7230 {
7231 if (inst.reloc.exp.X_op == O_constant)
7232 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7233 else
7234 {
7235 set_fatal_syntax_error
7236 (_("writeback value must be an immediate constant"));
7237 goto failure;
7238 }
7239 }
7240 /* No qualifier. */
7241 break;
7242
7243 case AARCH64_OPND_SME_SM_ZA:
7244 /* { SM | ZA } */
7245 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7246 {
7247 set_syntax_error (_("unknown or missing PSTATE field name"));
7248 goto failure;
7249 }
7250 info->reg.regno = val;
7251 break;
7252
7253 case AARCH64_OPND_SME_PnT_Wm_imm:
7254 /* <Pn>.<T>[<Wm>, #<imm>] */
7255 {
7256 int index_base_reg;
7257 int imm;
7258 val = parse_sme_pred_reg_with_index (&str,
7259 &index_base_reg,
7260 &imm,
7261 &qualifier);
7262 if (val == PARSE_FAIL)
7263 goto failure;
7264
7265 info->za_tile_vector.regno = val;
7266 info->za_tile_vector.index.regno = index_base_reg;
7267 info->za_tile_vector.index.imm = imm;
7268 info->qualifier = qualifier;
7269 break;
7270 }
7271
7272 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7273 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7274 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7275 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7276 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7277 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7278 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7279 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7280 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7281 case AARCH64_OPND_SVE_ADDR_RI_U6:
7282 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7283 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7284 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7285 /* [X<n>{, #imm, MUL VL}]
7286 [X<n>{, #imm}]
7287 but recognizing SVE registers. */
7288 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7289 &offset_qualifier));
7290 if (base_qualifier != AARCH64_OPND_QLF_X)
7291 {
7292 set_syntax_error (_("invalid addressing mode"));
7293 goto failure;
7294 }
7295 sve_regimm:
7296 if (info->addr.pcrel || info->addr.offset.is_reg
7297 || !info->addr.preind || info->addr.writeback)
7298 {
7299 set_syntax_error (_("invalid addressing mode"));
7300 goto failure;
7301 }
7302 if (inst.reloc.type != BFD_RELOC_UNUSED
7303 || inst.reloc.exp.X_op != O_constant)
7304 {
7305 /* Make sure this has priority over
7306 "invalid addressing mode". */
7307 set_fatal_syntax_error (_("constant offset required"));
7308 goto failure;
7309 }
7310 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7311 break;
7312
7313 case AARCH64_OPND_SVE_ADDR_R:
7314 /* [<Xn|SP>{, <R><m>}]
7315 but recognizing SVE registers. */
7316 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7317 &offset_qualifier));
7318 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7319 {
7320 offset_qualifier = AARCH64_OPND_QLF_X;
7321 info->addr.offset.is_reg = 1;
7322 info->addr.offset.regno = 31;
7323 }
7324 else if (base_qualifier != AARCH64_OPND_QLF_X
7325 || offset_qualifier != AARCH64_OPND_QLF_X)
7326 {
7327 set_syntax_error (_("invalid addressing mode"));
7328 goto failure;
7329 }
7330 goto regoff_addr;
7331
7332 case AARCH64_OPND_SVE_ADDR_RR:
7333 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7334 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7335 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7336 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7337 case AARCH64_OPND_SVE_ADDR_RX:
7338 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7339 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7340 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7341 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7342 but recognizing SVE registers. */
7343 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7344 &offset_qualifier));
7345 if (base_qualifier != AARCH64_OPND_QLF_X
7346 || offset_qualifier != AARCH64_OPND_QLF_X)
7347 {
7348 set_syntax_error (_("invalid addressing mode"));
7349 goto failure;
7350 }
7351 goto regoff_addr;
7352
7353 case AARCH64_OPND_SVE_ADDR_RZ:
7354 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7355 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7356 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7357 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7358 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7359 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7360 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7361 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7362 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7363 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7364 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7365 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7366 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7367 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7368 &offset_qualifier));
7369 if (base_qualifier != AARCH64_OPND_QLF_X
7370 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7371 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7372 {
7373 set_syntax_error (_("invalid addressing mode"));
7374 goto failure;
7375 }
7376 info->qualifier = offset_qualifier;
7377 goto regoff_addr;
7378
7379 case AARCH64_OPND_SVE_ADDR_ZX:
7380 /* [Zn.<T>{, <Xm>}]. */
7381 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7382 &offset_qualifier));
7383 /* Things to check:
7384 base_qualifier either S_S or S_D
7385 offset_qualifier must be X
7386 */
7387 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7388 && base_qualifier != AARCH64_OPND_QLF_S_D)
7389 || offset_qualifier != AARCH64_OPND_QLF_X)
7390 {
7391 set_syntax_error (_("invalid addressing mode"));
7392 goto failure;
7393 }
7394 info->qualifier = base_qualifier;
7395 if (!info->addr.offset.is_reg || info->addr.pcrel
7396 || !info->addr.preind || info->addr.writeback
7397 || info->shifter.operator_present != 0)
7398 {
7399 set_syntax_error (_("invalid addressing mode"));
7400 goto failure;
7401 }
7402 info->shifter.kind = AARCH64_MOD_LSL;
7403 break;
7404
7405
7406 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7407 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7408 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7409 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7410 /* [Z<n>.<T>{, #imm}] */
7411 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7412 &offset_qualifier));
7413 if (base_qualifier != AARCH64_OPND_QLF_S_S
7414 && base_qualifier != AARCH64_OPND_QLF_S_D)
7415 {
7416 set_syntax_error (_("invalid addressing mode"));
7417 goto failure;
7418 }
7419 info->qualifier = base_qualifier;
7420 goto sve_regimm;
7421
7422 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7423 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7424 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7425 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7426 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7427
7428 We don't reject:
7429
7430 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7431
7432 here since we get better error messages by leaving it to
7433 the qualifier checking routines. */
7434 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7435 &offset_qualifier));
7436 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7437 && base_qualifier != AARCH64_OPND_QLF_S_D)
7438 || offset_qualifier != base_qualifier)
7439 {
7440 set_syntax_error (_("invalid addressing mode"));
7441 goto failure;
7442 }
7443 info->qualifier = base_qualifier;
7444 goto regoff_addr;
7445
7446 case AARCH64_OPND_SYSREG:
7447 {
7448 uint32_t sysreg_flags;
7449 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7450 &sysreg_flags)) == PARSE_FAIL)
7451 {
7452 set_syntax_error (_("unknown or missing system register name"));
7453 goto failure;
7454 }
7455 inst.base.operands[i].sysreg.value = val;
7456 inst.base.operands[i].sysreg.flags = sysreg_flags;
7457 break;
7458 }
7459
7460 case AARCH64_OPND_PSTATEFIELD:
7461 {
7462 uint32_t sysreg_flags;
7463 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7464 &sysreg_flags)) == PARSE_FAIL)
7465 {
7466 set_syntax_error (_("unknown or missing PSTATE field name"));
7467 goto failure;
7468 }
7469 inst.base.operands[i].pstatefield = val;
7470 inst.base.operands[i].sysreg.flags = sysreg_flags;
7471 break;
7472 }
7473
7474 case AARCH64_OPND_SYSREG_IC:
7475 inst.base.operands[i].sysins_op =
7476 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7477 goto sys_reg_ins;
7478
7479 case AARCH64_OPND_SYSREG_DC:
7480 inst.base.operands[i].sysins_op =
7481 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7482 goto sys_reg_ins;
7483
7484 case AARCH64_OPND_SYSREG_AT:
7485 inst.base.operands[i].sysins_op =
7486 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7487 goto sys_reg_ins;
7488
7489 case AARCH64_OPND_SYSREG_SR:
7490 inst.base.operands[i].sysins_op =
7491 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7492 goto sys_reg_ins;
7493
7494 case AARCH64_OPND_SYSREG_TLBI:
7495 inst.base.operands[i].sysins_op =
7496 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7497 sys_reg_ins:
7498 if (inst.base.operands[i].sysins_op == NULL)
7499 {
7500 set_fatal_syntax_error ( _("unknown or missing operation name"));
7501 goto failure;
7502 }
7503 break;
7504
7505 case AARCH64_OPND_BARRIER:
7506 case AARCH64_OPND_BARRIER_ISB:
7507 val = parse_barrier (&str);
7508 if (val != PARSE_FAIL
7509 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7510 {
7511 /* ISB only accepts options name 'sy'. */
7512 set_syntax_error
7513 (_("the specified option is not accepted in ISB"));
7514 /* Turn off backtrack as this optional operand is present. */
7515 backtrack_pos = 0;
7516 goto failure;
7517 }
7518 if (val != PARSE_FAIL
7519 && operands[i] == AARCH64_OPND_BARRIER)
7520 {
7521 /* Regular barriers accept options CRm (C0-C15).
7522 DSB nXS barrier variant accepts values > 15. */
7523 if (val < 0 || val > 15)
7524 {
7525 set_syntax_error (_("the specified option is not accepted in DSB"));
7526 goto failure;
7527 }
7528 }
7529 /* This is an extension to accept a 0..15 immediate. */
7530 if (val == PARSE_FAIL)
7531 po_imm_or_fail (0, 15);
7532 info->barrier = aarch64_barrier_options + val;
7533 break;
7534
7535 case AARCH64_OPND_BARRIER_DSB_NXS:
7536 val = parse_barrier (&str);
7537 if (val != PARSE_FAIL)
7538 {
7539 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7540 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7541 {
7542 set_syntax_error (_("the specified option is not accepted in DSB"));
7543 /* Turn off backtrack as this optional operand is present. */
7544 backtrack_pos = 0;
7545 goto failure;
7546 }
7547 }
7548 else
7549 {
7550 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7551 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7552 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7553 goto failure;
7554 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7555 {
7556 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7557 goto failure;
7558 }
7559 }
7560 /* Option index is encoded as 2-bit value in val<3:2>. */
7561 val = (val >> 2) - 4;
7562 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7563 break;
7564
7565 case AARCH64_OPND_PRFOP:
7566 val = parse_pldop (&str);
7567 /* This is an extension to accept a 0..31 immediate. */
7568 if (val == PARSE_FAIL)
7569 po_imm_or_fail (0, 31);
7570 inst.base.operands[i].prfop = aarch64_prfops + val;
7571 break;
7572
7573 case AARCH64_OPND_BARRIER_PSB:
7574 val = parse_barrier_psb (&str, &(info->hint_option));
7575 if (val == PARSE_FAIL)
7576 goto failure;
7577 break;
7578
7579 case AARCH64_OPND_BTI_TARGET:
7580 val = parse_bti_operand (&str, &(info->hint_option));
7581 if (val == PARSE_FAIL)
7582 goto failure;
7583 break;
7584
7585 case AARCH64_OPND_SME_ZAda_2b:
7586 case AARCH64_OPND_SME_ZAda_3b:
7587 val = parse_sme_zada_operand (&str, &qualifier);
7588 if (val == PARSE_FAIL)
7589 goto failure;
7590 info->reg.regno = val;
7591 info->qualifier = qualifier;
7592 break;
7593
7594 case AARCH64_OPND_SME_ZA_HV_idx_src:
7595 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7596 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7597 {
7598 enum sme_hv_slice slice_indicator;
7599 int vector_select_register;
7600 int imm;
7601
7602 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7603 val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7604 &slice_indicator,
7605 &vector_select_register,
7606 &imm,
7607 &qualifier);
7608 else
7609 val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7610 &vector_select_register,
7611 &imm,
7612 &qualifier);
7613 if (val == PARSE_FAIL)
7614 goto failure;
7615 info->za_tile_vector.regno = val;
7616 info->za_tile_vector.index.regno = vector_select_register;
7617 info->za_tile_vector.index.imm = imm;
7618 info->za_tile_vector.v = slice_indicator;
7619 info->qualifier = qualifier;
7620 break;
7621 }
7622
7623 case AARCH64_OPND_SME_list_of_64bit_tiles:
7624 val = parse_sme_list_of_64bit_tiles (&str);
7625 if (val == PARSE_FAIL)
7626 goto failure;
7627 info->imm.value = val;
7628 break;
7629
7630 case AARCH64_OPND_SME_ZA_array:
7631 {
7632 int imm;
7633 val = parse_sme_za_array (&str, &imm);
7634 if (val == PARSE_FAIL)
7635 goto failure;
7636 info->za_tile_vector.index.regno = val;
7637 info->za_tile_vector.index.imm = imm;
7638 break;
7639 }
7640
7641 case AARCH64_OPND_MOPS_ADDR_Rd:
7642 case AARCH64_OPND_MOPS_ADDR_Rs:
7643 po_char_or_fail ('[');
7644 if (!parse_x0_to_x30 (&str, info))
7645 goto failure;
7646 po_char_or_fail (']');
7647 po_char_or_fail ('!');
7648 break;
7649
7650 case AARCH64_OPND_MOPS_WB_Rn:
7651 if (!parse_x0_to_x30 (&str, info))
7652 goto failure;
7653 po_char_or_fail ('!');
7654 break;
7655
7656 default:
7657 as_fatal (_("unhandled operand code %d"), operands[i]);
7658 }
7659
7660 /* If we get here, this operand was successfully parsed. */
7661 inst.base.operands[i].present = 1;
7662 continue;
7663
7664 failure:
7665 /* The parse routine should already have set the error, but in case
7666 not, set a default one here. */
7667 if (! error_p ())
7668 set_default_error ();
7669
7670 if (! backtrack_pos)
7671 goto parse_operands_return;
7672
7673 {
7674 /* We reach here because this operand is marked as optional, and
7675 either no operand was supplied or the operand was supplied but it
7676 was syntactically incorrect. In the latter case we report an
7677 error. In the former case we perform a few more checks before
7678 dropping through to the code to insert the default operand. */
7679
7680 char *tmp = backtrack_pos;
7681 char endchar = END_OF_INSN;
7682
7683 if (i != (aarch64_num_of_operands (opcode) - 1))
7684 endchar = ',';
7685 skip_past_char (&tmp, ',');
7686
7687 if (*tmp != endchar)
7688 /* The user has supplied an operand in the wrong format. */
7689 goto parse_operands_return;
7690
7691 /* Make sure there is not a comma before the optional operand.
7692 For example the fifth operand of 'sys' is optional:
7693
7694 sys #0,c0,c0,#0, <--- wrong
7695 sys #0,c0,c0,#0 <--- correct. */
7696 if (comma_skipped_p && i && endchar == END_OF_INSN)
7697 {
7698 set_fatal_syntax_error
7699 (_("unexpected comma before the omitted optional operand"));
7700 goto parse_operands_return;
7701 }
7702 }
7703
7704 /* Reaching here means we are dealing with an optional operand that is
7705 omitted from the assembly line. */
7706 gas_assert (optional_operand_p (opcode, i));
7707 info->present = 0;
7708 process_omitted_operand (operands[i], opcode, i, info);
7709
7710 /* Try again, skipping the optional operand at backtrack_pos. */
7711 str = backtrack_pos;
7712 backtrack_pos = 0;
7713
7714 /* Clear any error record after the omitted optional operand has been
7715 successfully handled. */
7716 clear_error ();
7717 }
7718
7719 /* Check if we have parsed all the operands. */
7720 if (*str != '\0' && ! error_p ())
7721 {
7722 /* Set I to the index of the last present operand; this is
7723 for the purpose of diagnostics. */
7724 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7725 ;
7726 set_fatal_syntax_error
7727 (_("unexpected characters following instruction"));
7728 }
7729
7730 parse_operands_return:
7731
7732 if (error_p ())
7733 {
7734 DEBUG_TRACE ("parsing FAIL: %s - %s",
7735 operand_mismatch_kind_names[get_error_kind ()],
7736 get_error_message ());
7737 /* Record the operand error properly; this is useful when there
7738 are multiple instruction templates for a mnemonic name, so that
7739 later on, we can select the error that most closely describes
7740 the problem. */
7741 record_operand_error (opcode, i, get_error_kind (),
7742 get_error_message ());
7743 return false;
7744 }
7745 else
7746 {
7747 DEBUG_TRACE ("parsing SUCCESS");
7748 return true;
7749 }
7750 }
7751
/* It does some fix-up to provide some programmer friendly feature while
   keeping the libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.

   On failure the error is recorded via record_operand_error* so that
   md_assemble can report the most relevant diagnostic after all opcode
   templates have been tried.  */

static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* With a W register spelling only bits 0-31 are addressable.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  /* The instruction itself is encoded with an X register; the W
	     spelling is purely a disassembly convention.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW always loads a 32-bit literal regardless of the
	     destination register width.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
7859
/* Check for loads and stores that will cause unpredictable behavior.

   Emits as_warn diagnostics (never errors) for register combinations the
   architecture documents as CONSTRAINED UNPREDICTABLE, e.g. writeback to
   a transfer register or load pairs with identical destinations.  STR is
   the original assembly line, used verbatim in the warning text.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.  */
      /* Bit 22 distinguishes the load form in this instruction class.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7955
/* If the current segment still has an open architecturally constrained
   instruction sequence, warn (pointing at the location that opened it)
   and reset the sequence state.  Called when a section is about to be
   left or finalized.  */

static void
force_automatic_sequence_close (void)
{
  struct aarch64_segment_info_type *tc_seg_info;

  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  if (tc_seg_info->insn_sequence.instr)
    {
      /* Report against the location recorded when the sequence began.  */
      as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
		     _("previous `%s' sequence has not been closed"),
		     tc_seg_info->insn_sequence.instr->opcode->name);
      init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
    }
}
7970
7971 /* A wrapper function to interface with libopcodes on encoding and
7972 record the error message if there is any.
7973
7974 Return TRUE on success; otherwise return FALSE. */
7975
7976 static bool
7977 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7978 aarch64_insn *code)
7979 {
7980 aarch64_operand_error error_info;
7981 memset (&error_info, '\0', sizeof (error_info));
7982 error_info.kind = AARCH64_OPDE_NIL;
7983 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7984 && !error_info.non_fatal)
7985 return true;
7986
7987 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7988 record_operand_error_info (opcode, &error_info);
7989 return error_info.non_fatal;
7990 }
7991
#ifdef DEBUG_AARCH64
/* Debug helper: print every operand of OPCODE, preferring the short
   operand name and falling back to the long description.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  for (int i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *name = aarch64_get_operand_name (opcode->operands[i]);

      if (name[0] == '\0')
	name = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, name);
    }
}
#endif /* DEBUG_AARCH64 */
8007
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.

   Flow: scan the mnemonic, look up all opcode templates sharing that
   name, then try parse/fixup/encode for each template in turn; the
   first template that succeeds is emitted.  If none succeeds, the most
   relevant recorded operand error is reported.  */

void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  The first '.' (if any) marks a condition
     suffix such as "b.eq".  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* Lines like "foo .req x0" define register aliases rather than
     instructions.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* reset_aarch64_instruction clears inst.cond, but opcode_lookup has
     already parsed the condition suffix into it, so preserve it.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; reset parse state before trying the next
	 one with the same mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8163
/* Various frobbings of labels and their addresses.  */

/* Called at the start of every input line; forget the label seen on the
   previous line so md_assemble does not re-align it.  */
void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
8171
/* Called whenever a label is defined.  Remember it so md_assemble can
   later align it to the following instruction, and emit DWARF line
   information for it.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8179
/* Called when a section is changed/closed.  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
8186
8187 int
8188 aarch64_data_in_code (void)
8189 {
8190 if (startswith (input_line_pointer + 1, "data:"))
8191 {
8192 *input_line_pointer = '/';
8193 input_line_pointer += 5;
8194 *input_line_pointer = 0;
8195 return 1;
8196 }
8197
8198 return 0;
8199 }
8200
/* Canonicalize NAME in place by stripping a trailing "/data" suffix,
   if any (the inverse of the marker handled by aarch64_data_in_code).
   Return NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  /* Only strip when something precedes the suffix; a bare "/data"
     is left untouched.  */
  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8211 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF defines one register entry; REGDEF_ALIAS differs only in its
   final boolean field (presumably marking whether the name is the
   canonical spelling -- TODO confirm against the reg_entry type).
   REGNUM/REGNUMS paste a numeric suffix onto a prefix, and the REGSET*
   macros expand to whole banks of 16, 31 or 32 numbered registers.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET16S(p,s,t) \
  REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
  REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
  REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
  REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  Note: x31/w31 are deliberately absent; register
     31 is only reachable via the sp/wsp and xzr/wzr names below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  /* Procedure-call-standard aliases for selected integer registers.  */
  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  */
  REGSET16 (p, PN), REGSET16 (P, PN),

  /* SME ZA tile registers.  */
  REGSET16 (za, ZA), REGSET16 (ZA, ZA),

  /* SME ZA tile registers (horizontal slice).  */
  REGSET16S (za, h, ZAH), REGSET16S (ZA, H, ZAH),

  /* SME ZA tile registers (vertical slice).  */
  REGSET16S (za, v, ZAV), REGSET16S (ZA, V, ZAV)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
8296
/* Operand table for the NZCV flag-set operand: every case-combination of
   the letters n/z/c/v spells out which of the four condition flags is
   set (uppercase) or clear (lowercase).  B packs them into a 4-bit
   immediate with N as the most significant bit.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8334 \f
8335 /* MD interface: bits in the object file. */
8336
8337 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8338 for use in the a.out file, and stores them in the array pointed to by buf.
8339 This knows about the endian-ness of the target machine and does
8340 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8341 2 (short) and 4 (long) Floating numbers are put out as a series of
8342 LITTLENUMS (shorts, here at least). */
8343
8344 void
8345 md_number_to_chars (char *buf, valueT val, int n)
8346 {
8347 if (target_big_endian)
8348 number_to_chars_bigendian (buf, val, n);
8349 else
8350 number_to_chars_littleendian (buf, val, n);
8351 }
8352
8353 /* MD interface: Sections. */
8354
/* Estimate the size of a frag before relaxing.  Assume everything fits in
   4 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* All AArch64 instructions are 4 bytes; no relaxation growth needed.  */
  fragp->fr_var = 4;
  return 4;
}
8364
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No extra rounding beyond what the linker/BFD already applies.  */
  return size;
}
8372
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding required, and the write position inside the frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach a 4-byte instruction boundary; these are
     zero-filled and folded into the fixed part of the frag.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* Store one NOP as the repeating (fr_var) pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8430
8431 /* Perform target specific initialisation of a frag.
8432 Note - despite the name this initialisation is not done when the frag
8433 is created, but only when its type is assigned. A frag can be created
8434 and used a long time before its type is set, so beware of assuming that
8435 this initialisation is performed first. */
8436
#ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag mapping state; nothing to do.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}
8443
8444 #else /* OBJ_ELF is defined. */
8445 void
8446 aarch64_init_frag (fragS * fragP, int max_chars)
8447 {
8448 /* Record a mapping symbol for alignment frags. We will delete this
8449 later if the alignment ends up empty. */
8450 if (!fragP->tc_frag_data.recorded)
8451 fragP->tc_frag_data.recorded = 1;
8452
8453 /* PR 21809: Do not set a mapping state for debug sections
8454 - it just confuses other tools. */
8455 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
8456 return;
8457
8458 switch (fragP->fr_type)
8459 {
8460 case rs_align_test:
8461 case rs_fill:
8462 mapping_state_2 (MAP_DATA, max_chars);
8463 break;
8464 case rs_align:
8465 /* PR 20364: We can get alignment frags in code sections,
8466 so do not just assume that we should use the MAP_DATA state. */
8467 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
8468 break;
8469 case rs_align_code:
8470 mapping_state_2 (MAP_INSN, max_chars);
8471 break;
8472 default:
8473 break;
8474 }
8475 }
8476
/* Whether SFrame unwind info is supported.  */

bool
aarch64_support_sframe_p (void)
{
  /* At this time, SFrame is supported for aarch64 only.  */
  /* Restricted to the LP64 ABI; ILP32/LLP64 are excluded.  */
  return (aarch64_abi == AARCH64_ABI_LP64);
}
8485
/* Specify if RA tracking is needed.  */

bool
aarch64_sframe_ra_tracking_p (void)
{
  /* AArch64 always tracks the return address register for SFrame.  */
  return true;
}
8493
/* Specify the fixed offset to recover RA from CFA.
   (useful only when RA tracking is not needed).  */

offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  /* Unused here, because aarch64_sframe_ra_tracking_p is true.  */
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8502
8503 /* Get the abi/arch indentifier for SFrame. */
8504
8505 unsigned char
8506 aarch64_sframe_get_abi_arch (void)
8507 {
8508 unsigned char sframe_abi_arch = 0;
8509
8510 if (aarch64_support_sframe_p ())
8511 {
8512 sframe_abi_arch = target_big_endian
8513 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8514 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8515 }
8516
8517 return sframe_abi_arch;
8518 }
8519
8520 #endif /* OBJ_ELF */
8521 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* On function entry the CFA is SP with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8529
8530 /* Convert REGNAME to a DWARF-2 register number. */
8531
8532 int
8533 tc_aarch64_regname_to_dw2regnum (char *regname)
8534 {
8535 const reg_entry *reg = parse_reg (&regname);
8536 if (reg == NULL)
8537 return -1;
8538
8539 switch (reg->type)
8540 {
8541 case REG_TYPE_SP_32:
8542 case REG_TYPE_SP_64:
8543 case REG_TYPE_R_32:
8544 case REG_TYPE_R_64:
8545 return reg->number;
8546
8547 case REG_TYPE_FP_B:
8548 case REG_TYPE_FP_H:
8549 case REG_TYPE_FP_S:
8550 case REG_TYPE_FP_D:
8551 case REG_TYPE_FP_Q:
8552 return reg->number + 64;
8553
8554 default:
8555 break;
8556 }
8557 return -1;
8558 }
8559
8560 /* Implement DWARF2_ADDR_SIZE. */
8561
8562 int
8563 aarch64_dwarf2_addr_size (void)
8564 {
8565 if (ilp32_p)
8566 return 4;
8567 else if (llp64_p)
8568 return 8;
8569 return bfd_arch_bits_per_address (stdoutput) / 8;
8570 }
8571
8572 /* MD interface: Symbol and relocation handling. */
8573
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For AArch64 PC-relative fixups applied to instructions
   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
8594
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap prefix check before the full string comparison.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  /* A user-defined symbol of this name would conflict with the
	     synthesized one.  */
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
8620
8621 /* Return non-zero if the indicated VALUE has overflowed the maximum
8622 range expressible by a unsigned number with the indicated number of
8623 BITS. */
8624
8625 static bool
8626 unsigned_overflow (valueT value, unsigned bits)
8627 {
8628 valueT lim;
8629 if (bits >= sizeof (valueT) * 8)
8630 return false;
8631 lim = (valueT) 1 << bits;
8632 return (value >= lim);
8633 }
8634
8635
8636 /* Return non-zero if the indicated VALUE has overflowed the maximum
8637 range expressible by an signed number with the indicated number of
8638 BITS. */
8639
8640 static bool
8641 signed_overflow (offsetT value, unsigned bits)
8642 {
8643 offsetT lim;
8644 if (bits >= sizeof (offsetT) * 8)
8645 return false;
8646 lim = (offsetT) 1 << (bits - 1);
8647 return (value < -lim || value >= lim);
8648 }
8649
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; OP_NIL
     marks opcodes with no such counterpart.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure leaves the caller to
     report the original diagnostic.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8712
8713 /* Called by fix_insn to fix a MOV immediate alias instruction.
8714
8715 Operand for a generic move immediate instruction, which is an alias
8716 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
8717 a 32-bit/64-bit immediate value into general register. An assembler error
8718 shall result if the immediate cannot be created by a single one of these
8719 instructions. If there is a choice, then to ensure reversability an
8720 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
8721
static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode, which alters the operand class
     information held in *INSTR.  A MOVZ/MOVN destination may be ZR but not
     SP; an ORR (bitmask-immediate) destination may be SP but not ZR.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  /* Install the now-resolved immediate and make sure the operand is no
     longer skipped during encoding.  */
  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias first; preferring MOVZ over MOVN (and either
	 over ORR) keeps the encoding reversible.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR (bitmask immediate) alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* No single-instruction encoding exists for VALUE.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
8773
8774 /* An instruction operand which is immediate related may have symbol used
8775 in the assembly, e.g.
8776
8777 mov w0, u32
8778 .set u32, 0x00ffff00
8779
8780 At the time when the assembly instruction is parsed, a referenced symbol,
8781 like 'u32' in the above example may not have been seen; a fixS is created
8782 in such a case and is handled here after symbols have been resolved.
8783 Instruction is fixed up with VALUE using the information in *FIXP plus
8784 extra information in FLAGS.
8785
8786 This function is called by md_apply_fix to fix up instructions that need
8787 a fix-up described above but does not involve any linker-time relocation. */
8788
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  /* Dispatch on the kind of operand the deferred immediate belongs to.  */
  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate for SVC/HLT-style exception generation or UDF.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
	 3 322|2222|2 2 2 21111 111111
	 1 098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
	 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
	 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
	 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
	 ->
	 3 322|2222|2 2 221111111111
	 1 098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12 Rn Rd ADD
	 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
	 51000000 sf 101|0001|shift imm12 Rn Rd SUB
	 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* A negative immediate: flip add <-> sub and negate the value.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit (the immediate
	     field accepts imm12 optionally shifted left by 12).  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate: re-run the full encoder, since validity of a
	 logical immediate cannot be checked by simple range tests.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate (MOVZ/MOVN/MOVK hw field payload).  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to load a 32-bit/64-bit immediate value into a general
	 register.  An assembler error shall result if the immediate cannot
	 be created by a single one of these instructions.  If there is a
	 choice, then to ensure reversibility an assembler must prefer a
	 MOVZ to MOVN, and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  If a scaled (ldst_pos) offset does not encode,
	 fall back to the equivalent unscaled (LDUR/STUR) form.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8952
8953 /* Apply a fixup (fixP) to segment data, once it has been determined
8954 by our caller that we have all the info we need to fix it up.
8955
8956 Parameter valP is the pointer to the value of the bits. */
8957
void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  A fully resolved,
     non-PC-relative fixup that is not forced out by the target can be
     applied here and dropped from the object file.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* LDR (literal): 19-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* ADR: 21-bit signed byte offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* B.cond / CBZ / CBNZ: 19-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* TBZ / TBNZ: 14-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* BL / B: 26-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVW group: SCALE selects which 16-bit slice of the value the
       instruction's hw field addresses (G0 = bits 0-15, G1 = 16-31,
       G2 = 32-47, G3 = 48-63).  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Narrow the generic reloc to the 32-bit or 64-bit ABI variant.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Narrow the generic reloc to the 32-bit or 64-bit ABI variant.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    case BFD_RELOC_RVA:
    case BFD_RELOC_32_SECREL:
    case BFD_RELOC_16_SECIDX:
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated struct aarch64_inst.
     N.B. currently only a very limited number of fix-up types actually use
     this field, so the impact on performance should be minimal.  */
  free (fixP->tc_fix_data.inst);

  return;
}
9345
9346 /* Translate internal representation of relocation info to BFD target
9347 format. */
9348
9349 arelent *
9350 tc_gen_reloc (asection * section, fixS * fixp)
9351 {
9352 arelent *reloc;
9353 bfd_reloc_code_real_type code;
9354
9355 reloc = XNEW (arelent);
9356
9357 reloc->sym_ptr_ptr = XNEW (asymbol *);
9358 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9359 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9360
9361 if (fixp->fx_pcrel)
9362 {
9363 if (section->use_rela_p)
9364 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9365 else
9366 fixp->fx_offset = reloc->address;
9367 }
9368 reloc->addend = fixp->fx_offset;
9369
9370 code = fixp->fx_r_type;
9371 switch (code)
9372 {
9373 case BFD_RELOC_16:
9374 if (fixp->fx_pcrel)
9375 code = BFD_RELOC_16_PCREL;
9376 break;
9377
9378 case BFD_RELOC_32:
9379 if (fixp->fx_pcrel)
9380 code = BFD_RELOC_32_PCREL;
9381 break;
9382
9383 case BFD_RELOC_64:
9384 if (fixp->fx_pcrel)
9385 code = BFD_RELOC_64_PCREL;
9386 break;
9387
9388 default:
9389 break;
9390 }
9391
9392 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9393 if (reloc->howto == NULL)
9394 {
9395 as_bad_where (fixp->fx_file, fixp->fx_line,
9396 _
9397 ("cannot represent %s relocation in this object file format"),
9398 bfd_get_reloc_code_name (code));
9399 return NULL;
9400 }
9401
9402 return reloc;
9403 }
9404
9405 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9406
void
cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
{
  bfd_reloc_code_real_type type;
  int pcrel = 0;

#ifdef TE_PE
  /* PE-specific operators: .secrel32 / .secidx expressions carry their own
     dedicated relocation types regardless of SIZE.  */
  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_32_SECREL;
    }
  else if (exp->X_op == O_secidx)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_16_SECIDX;
    }
  else
    {
#endif
      /* Pick a reloc.
	 FIXME: @@ Should look at CPU word size.  */
      switch (size)
	{
	case 1:
	  type = BFD_RELOC_8;
	  break;
	case 2:
	  type = BFD_RELOC_16;
	  break;
	case 4:
	  type = BFD_RELOC_32;
	  break;
	case 8:
	  type = BFD_RELOC_64;
	  break;
	default:
	  as_bad (_("cannot do %u-byte relocation"), size);
	  type = BFD_RELOC_UNUSED;
	  break;
	}
#ifdef TE_PE
    }
#endif

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}
9454
9455 /* Implement md_after_parse_args. This is the earliest time we need to decide
9456 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9457
9458 void
9459 aarch64_after_parse_args (void)
9460 {
9461 if (aarch64_abi != AARCH64_ABI_NONE)
9462 return;
9463
9464 #ifdef OBJ_ELF
9465 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9466 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9467 aarch64_abi = AARCH64_ABI_ILP32;
9468 else
9469 aarch64_abi = AARCH64_ABI_LP64;
9470 #else
9471 aarch64_abi = AARCH64_ABI_LLP64;
9472 #endif
9473 }
9474
9475 #ifdef OBJ_ELF
9476 const char *
9477 elf64_aarch64_target_format (void)
9478 {
9479 #ifdef TE_CLOUDABI
9480 /* FIXME: What to do for ilp32_p ? */
9481 if (target_big_endian)
9482 return "elf64-bigaarch64-cloudabi";
9483 else
9484 return "elf64-littleaarch64-cloudabi";
9485 #else
9486 if (target_big_endian)
9487 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9488 else
9489 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9490 #endif
9491 }
9492
/* Target hook run over each symbol at write time; delegate to the
   generic ELF symbol frobbing.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9498 #elif defined OBJ_COFF
/* Return the BFD target name used for PE/COFF AArch64 output.  */
const char *
coff_aarch64_target_format (void)
{
  static const char target_name[] = "pe-aarch64-little";

  return target_name;
}
9504 #endif
9505
9506 /* MD interface: Finalization. */
9507
9508 /* A good place to do this, although this was probably not intended
9509 for this kind of use. We need to dump the literal pool before
9510 references are made to a null symbol pointer. */
9511
9512 void
9513 aarch64_cleanup (void)
9514 {
9515 literal_pool *pool;
9516
9517 for (pool = list_of_pools; pool; pool = pool->next)
9518 {
9519 /* Put it at the end of the relevant section. */
9520 subseg_set (pool->section, pool->sub_section);
9521 s_ltorg (0);
9522 }
9523 }
9524
9525 #ifdef OBJ_ELF
9526 /* Remove any excess mapping symbols generated for alignment frags in
9527 SEC. We may have created a mapping symbol before a zero byte
9528 alignment; remove it if there's a mapping symbol after the
9529 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag; FRAGP's last mapping symbol is redundant when the
     very next code starts with a mapping symbol of its own, or when it
     sits at the end of the section.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward through empty frags to decide whether SYM is
	 actually needed.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9589 #endif
9590
9591 /* Adjust the symbol table. */
9592
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9603
/* Insert KEY -> VALUE into TABLE; an existing entry for KEY is kept
   (the REPLACE argument to str_hash_insert is 0).  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9609
/* Insert a system-register entry KEY -> VALUE into TABLE, asserting
   that the name is shorter than AARCH64_MAX_SYSREG_NAME_LEN
   (presumably the size of the buffer used when parsing system-register
   operands — verify against the operand parser).  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9616
9617 static void
9618 fill_instruction_hash_table (void)
9619 {
9620 const aarch64_opcode *opcode = aarch64_opcode_table;
9621
9622 while (opcode->name != NULL)
9623 {
9624 templates *templ, *new_templ;
9625 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9626
9627 new_templ = XNEW (templates);
9628 new_templ->opcode = opcode;
9629 new_templ->next = NULL;
9630
9631 if (!templ)
9632 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9633 else
9634 {
9635 new_templ->next = templ->next;
9636 templ->next = new_templ;
9637 }
9638 ++opcode;
9639 }
9640 }
9641
/* Copy at most NUM characters of the NUL-terminated string SRC into DST,
   converting each to upper case, and NUL-terminate DST.  DST must have
   room for NUM + 1 characters.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  /* Use size_t for the index so it matches NUM's type; the previous
     unsigned int counter could wrap before reaching a NUM larger than
     UINT_MAX on 64-bit hosts.  */
  size_t i;
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
9650
9651 /* Assume STR point to a lower-case string, allocate, convert and return
9652 the corresponding upper-case string. */
9653 static inline const char*
9654 get_upper_str (const char *str)
9655 {
9656 char *ret;
9657 size_t len = strlen (str);
9658 ret = XNEWVEC (char, len + 1);
9659 convert_to_upper (ret, str, len);
9660 return ret;
9661 }
9662
9663 /* MD interface: Initialization. */
9664
9665 void
9666 md_begin (void)
9667 {
9668 unsigned mach;
9669 unsigned int i;
9670
9671 aarch64_ops_hsh = str_htab_create ();
9672 aarch64_cond_hsh = str_htab_create ();
9673 aarch64_shift_hsh = str_htab_create ();
9674 aarch64_sys_regs_hsh = str_htab_create ();
9675 aarch64_pstatefield_hsh = str_htab_create ();
9676 aarch64_sys_regs_ic_hsh = str_htab_create ();
9677 aarch64_sys_regs_dc_hsh = str_htab_create ();
9678 aarch64_sys_regs_at_hsh = str_htab_create ();
9679 aarch64_sys_regs_tlbi_hsh = str_htab_create ();
9680 aarch64_sys_regs_sr_hsh = str_htab_create ();
9681 aarch64_reg_hsh = str_htab_create ();
9682 aarch64_barrier_opt_hsh = str_htab_create ();
9683 aarch64_nzcv_hsh = str_htab_create ();
9684 aarch64_pldop_hsh = str_htab_create ();
9685 aarch64_hint_opt_hsh = str_htab_create ();
9686
9687 fill_instruction_hash_table ();
9688
9689 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
9690 sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
9691 (void *) (aarch64_sys_regs + i));
9692
9693 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
9694 sysreg_hash_insert (aarch64_pstatefield_hsh,
9695 aarch64_pstatefields[i].name,
9696 (void *) (aarch64_pstatefields + i));
9697
9698 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
9699 sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
9700 aarch64_sys_regs_ic[i].name,
9701 (void *) (aarch64_sys_regs_ic + i));
9702
9703 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
9704 sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
9705 aarch64_sys_regs_dc[i].name,
9706 (void *) (aarch64_sys_regs_dc + i));
9707
9708 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
9709 sysreg_hash_insert (aarch64_sys_regs_at_hsh,
9710 aarch64_sys_regs_at[i].name,
9711 (void *) (aarch64_sys_regs_at + i));
9712
9713 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
9714 sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
9715 aarch64_sys_regs_tlbi[i].name,
9716 (void *) (aarch64_sys_regs_tlbi + i));
9717
9718 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
9719 sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
9720 aarch64_sys_regs_sr[i].name,
9721 (void *) (aarch64_sys_regs_sr + i));
9722
9723 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
9724 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
9725 (void *) (reg_names + i));
9726
9727 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
9728 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
9729 (void *) (nzcv_names + i));
9730
9731 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
9732 {
9733 const char *name = aarch64_operand_modifiers[i].name;
9734 checked_hash_insert (aarch64_shift_hsh, name,
9735 (void *) (aarch64_operand_modifiers + i));
9736 /* Also hash the name in the upper case. */
9737 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
9738 (void *) (aarch64_operand_modifiers + i));
9739 }
9740
9741 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
9742 {
9743 unsigned int j;
9744 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
9745 the same condition code. */
9746 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
9747 {
9748 const char *name = aarch64_conds[i].names[j];
9749 if (name == NULL)
9750 break;
9751 checked_hash_insert (aarch64_cond_hsh, name,
9752 (void *) (aarch64_conds + i));
9753 /* Also hash the name in the upper case. */
9754 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
9755 (void *) (aarch64_conds + i));
9756 }
9757 }
9758
9759 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
9760 {
9761 const char *name = aarch64_barrier_options[i].name;
9762 /* Skip xx00 - the unallocated values of option. */
9763 if ((i & 0x3) == 0)
9764 continue;
9765 checked_hash_insert (aarch64_barrier_opt_hsh, name,
9766 (void *) (aarch64_barrier_options + i));
9767 /* Also hash the name in the upper case. */
9768 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
9769 (void *) (aarch64_barrier_options + i));
9770 }
9771
9772 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
9773 {
9774 const char *name = aarch64_barrier_dsb_nxs_options[i].name;
9775 checked_hash_insert (aarch64_barrier_opt_hsh, name,
9776 (void *) (aarch64_barrier_dsb_nxs_options + i));
9777 /* Also hash the name in the upper case. */
9778 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
9779 (void *) (aarch64_barrier_dsb_nxs_options + i));
9780 }
9781
9782 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
9783 {
9784 const char* name = aarch64_prfops[i].name;
9785 /* Skip the unallocated hint encodings. */
9786 if (name == NULL)
9787 continue;
9788 checked_hash_insert (aarch64_pldop_hsh, name,
9789 (void *) (aarch64_prfops + i));
9790 /* Also hash the name in the upper case. */
9791 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
9792 (void *) (aarch64_prfops + i));
9793 }
9794
9795 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
9796 {
9797 const char* name = aarch64_hint_options[i].name;
9798 const char* upper_name = get_upper_str(name);
9799
9800 checked_hash_insert (aarch64_hint_opt_hsh, name,
9801 (void *) (aarch64_hint_options + i));
9802
9803 /* Also hash the name in the upper case if not the same. */
9804 if (strcmp (name, upper_name) != 0)
9805 checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
9806 (void *) (aarch64_hint_options + i));
9807 }
9808
9809 /* Set the cpu variant based on the command-line options. */
9810 if (!mcpu_cpu_opt)
9811 mcpu_cpu_opt = march_cpu_opt;
9812
9813 if (!mcpu_cpu_opt)
9814 mcpu_cpu_opt = &cpu_default;
9815
9816 cpu_variant = *mcpu_cpu_opt;
9817
9818 /* Record the CPU type. */
9819 if(ilp32_p)
9820 mach = bfd_mach_aarch64_ilp32;
9821 else if (llp64_p)
9822 mach = bfd_mach_aarch64_llp64;
9823 else
9824 mach = bfd_mach_aarch64;
9825
9826 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
9827 #ifdef OBJ_ELF
9828 /* FIXME - is there a better way to do it ? */
9829 aarch64_sframe_cfa_sp_reg = 31;
9830 aarch64_sframe_cfa_fp_reg = 29; /* x29. */
9831 aarch64_sframe_cfa_ra_reg = 30;
9832 #endif
9833 }
9834
/* Command line processing.  */

/* Target-specific short options; "m:" makes getopt hand every "-m<...>"
   option (with its argument) to md_parse_option below.  */
const char *md_shortopts = "m:";

/* Define OPTION_EB / OPTION_EL only for the endiannesses this
   configuration can actually produce: both for a bi-endian build,
   otherwise just the one matching the default byte order.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

/* Target-specific long options: only the endianness selectors here;
   everything else is table-driven via aarch64_opts/aarch64_long_opts.  */
struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9861
/* A simple on/off command-line option: when OPTION matches, VALUE is
   stored into *VAR (if VAR is non-NULL).  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Flag-style "-m..." options recognized by md_parse_option.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9884
/* An entry in the -mcpu= / .cpu table: a CPU name, the feature set the
   CPU implies, and an optional pretty-printed canonical name.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Each VALUE is a base architecture plus the
   optional extensions that the part implements.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
				  "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
				    "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
				   "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
				   "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
				   "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
			    "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				 "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
			      "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
				   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
				   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
				   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
			       "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
			       "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				"Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
			      "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
				 "Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
				 "Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
10053
/* An entry in the -march= / .arch table: an architecture name and the
   feature set it denotes.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {"armv9-a", AARCH64_ARCH_V9},
  {"armv9.1-a", AARCH64_ARCH_V9_1},
  {"armv9.2-a", AARCH64_ARCH_V9_2},
  {"armv9.3-a", AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
10080
/* ISA extensions.  */

/* An entry in the "+ext" extension table: the extension NAME, the
   feature bits it adds (VALUE), and the features it depends on
   (REQUIRE).  The REQUIRE column drives the transitive enable/disable
   closures computed by aarch64_feature_enable_set and
   aarch64_feature_disable_set.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME
					 | AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME
					 | AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_ARCH_NONE},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_ARCH_NONE},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {"mops",		AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
			AARCH64_ARCH_NONE},
  {"hbc",		AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
			AARCH64_ARCH_NONE},
  {"cssc",		AARCH64_FEATURE (AARCH64_FEATURE_CSSC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
10192
/* A prefix-style option such as "-mcpu=": OPTION is matched as a prefix
   of the command-line argument and FUNC is handed the text that follows
   the matched prefix.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
10200
10201 /* Transitive closure of features depending on set. */
10202 static aarch64_feature_set
10203 aarch64_feature_disable_set (aarch64_feature_set set)
10204 {
10205 const struct aarch64_option_cpu_value_table *opt;
10206 aarch64_feature_set prev = 0;
10207
10208 while (prev != set) {
10209 prev = set;
10210 for (opt = aarch64_features; opt->name != NULL; opt++)
10211 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10212 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10213 }
10214 return set;
10215 }
10216
10217 /* Transitive closure of dependencies of set. */
10218 static aarch64_feature_set
10219 aarch64_feature_enable_set (aarch64_feature_set set)
10220 {
10221 const struct aarch64_option_cpu_value_table *opt;
10222 aarch64_feature_set prev = 0;
10223
10224 while (prev != set) {
10225 prev = set;
10226 for (opt = aarch64_features; opt->name != NULL; opt++)
10227 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10228 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10229 }
10230 return set;
10231 }
10232
10233 static int
10234 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10235 bool ext_only)
10236 {
10237 /* We insist on extensions being added before being removed. We achieve
10238 this by using the ADDING_VALUE variable to indicate whether we are
10239 adding an extension (1) or removing it (0) and only allowing it to
10240 change in the order -1 -> 1 -> 0. */
10241 int adding_value = -1;
10242 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10243
10244 /* Copy the feature set, so that we can modify it. */
10245 *ext_set = **opt_p;
10246 *opt_p = ext_set;
10247
10248 while (str != NULL && *str != 0)
10249 {
10250 const struct aarch64_option_cpu_value_table *opt;
10251 const char *ext = NULL;
10252 int optlen;
10253
10254 if (!ext_only)
10255 {
10256 if (*str != '+')
10257 {
10258 as_bad (_("invalid architectural extension"));
10259 return 0;
10260 }
10261
10262 ext = strchr (++str, '+');
10263 }
10264
10265 if (ext != NULL)
10266 optlen = ext - str;
10267 else
10268 optlen = strlen (str);
10269
10270 if (optlen >= 2 && startswith (str, "no"))
10271 {
10272 if (adding_value != 0)
10273 adding_value = 0;
10274 optlen -= 2;
10275 str += 2;
10276 }
10277 else if (optlen > 0)
10278 {
10279 if (adding_value == -1)
10280 adding_value = 1;
10281 else if (adding_value != 1)
10282 {
10283 as_bad (_("must specify extensions to add before specifying "
10284 "those to remove"));
10285 return false;
10286 }
10287 }
10288
10289 if (optlen == 0)
10290 {
10291 as_bad (_("missing architectural extension"));
10292 return 0;
10293 }
10294
10295 gas_assert (adding_value != -1);
10296
10297 for (opt = aarch64_features; opt->name != NULL; opt++)
10298 if (strncmp (opt->name, str, optlen) == 0)
10299 {
10300 aarch64_feature_set set;
10301
10302 /* Add or remove the extension. */
10303 if (adding_value)
10304 {
10305 set = aarch64_feature_enable_set (opt->value);
10306 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10307 }
10308 else
10309 {
10310 set = aarch64_feature_disable_set (opt->value);
10311 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10312 }
10313 break;
10314 }
10315
10316 if (opt->name == NULL)
10317 {
10318 as_bad (_("unknown architectural extension `%s'"), str);
10319 return 0;
10320 }
10321
10322 str = ext;
10323 };
10324
10325 return 1;
10326 }
10327
10328 static int
10329 aarch64_parse_cpu (const char *str)
10330 {
10331 const struct aarch64_cpu_option_table *opt;
10332 const char *ext = strchr (str, '+');
10333 size_t optlen;
10334
10335 if (ext != NULL)
10336 optlen = ext - str;
10337 else
10338 optlen = strlen (str);
10339
10340 if (optlen == 0)
10341 {
10342 as_bad (_("missing cpu name `%s'"), str);
10343 return 0;
10344 }
10345
10346 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10347 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10348 {
10349 mcpu_cpu_opt = &opt->value;
10350 if (ext != NULL)
10351 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10352
10353 return 1;
10354 }
10355
10356 as_bad (_("unknown cpu `%s'"), str);
10357 return 0;
10358 }
10359
10360 static int
10361 aarch64_parse_arch (const char *str)
10362 {
10363 const struct aarch64_arch_option_table *opt;
10364 const char *ext = strchr (str, '+');
10365 size_t optlen;
10366
10367 if (ext != NULL)
10368 optlen = ext - str;
10369 else
10370 optlen = strlen (str);
10371
10372 if (optlen == 0)
10373 {
10374 as_bad (_("missing architecture name `%s'"), str);
10375 return 0;
10376 }
10377
10378 for (opt = aarch64_archs; opt->name != NULL; opt++)
10379 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10380 {
10381 march_cpu_opt = &opt->value;
10382 if (ext != NULL)
10383 return aarch64_parse_features (ext, &march_cpu_opt, false);
10384
10385 return 1;
10386 }
10387
10388 as_bad (_("unknown architecture `%s'\n"), str);
10389 return 0;
10390 }
10391
/* ABIs.  */

/* An entry in the -mabi= table: an ABI name and its internal code.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* ABIs selectable on the command line; which ones exist depends on the
   object format (ELF offers ilp32/lp64, otherwise llp64).  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
#else
  {"llp64", AARCH64_ABI_LLP64},
#endif
};
10407
10408 static int
10409 aarch64_parse_abi (const char *str)
10410 {
10411 unsigned int i;
10412
10413 if (str[0] == '\0')
10414 {
10415 as_bad (_("missing abi name `%s'"), str);
10416 return 0;
10417 }
10418
10419 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10420 if (strcmp (str, aarch64_abis[i].name) == 0)
10421 {
10422 aarch64_abi = aarch64_abis[i].value;
10423 return 1;
10424 }
10425
10426 as_bad (_("unknown abi `%s'\n"), str);
10427 return 0;
10428 }
10429
/* Prefix-matched "-m...=" options; the text after the '=' is handed to
   the entry's parser function.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10439
10440 int
10441 md_parse_option (int c, const char *arg)
10442 {
10443 struct aarch64_option_table *opt;
10444 struct aarch64_long_option_table *lopt;
10445
10446 switch (c)
10447 {
10448 #ifdef OPTION_EB
10449 case OPTION_EB:
10450 target_big_endian = 1;
10451 break;
10452 #endif
10453
10454 #ifdef OPTION_EL
10455 case OPTION_EL:
10456 target_big_endian = 0;
10457 break;
10458 #endif
10459
10460 case 'a':
10461 /* Listing option. Just ignore these, we don't support additional
10462 ones. */
10463 return 0;
10464
10465 default:
10466 for (opt = aarch64_opts; opt->option != NULL; opt++)
10467 {
10468 if (c == opt->option[0]
10469 && ((arg == NULL && opt->option[1] == 0)
10470 || streq (arg, opt->option + 1)))
10471 {
10472 /* If the option is deprecated, tell the user. */
10473 if (opt->deprecated != NULL)
10474 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
10475 arg ? arg : "", _(opt->deprecated));
10476
10477 if (opt->var != NULL)
10478 *opt->var = opt->value;
10479
10480 return 1;
10481 }
10482 }
10483
10484 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10485 {
10486 /* These options are expected to have an argument. */
10487 if (c == lopt->option[0]
10488 && arg != NULL
10489 && startswith (arg, lopt->option + 1))
10490 {
10491 /* If the option is deprecated, tell the user. */
10492 if (lopt->deprecated != NULL)
10493 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
10494 _(lopt->deprecated));
10495
10496 /* Call the sup-option parser. */
10497 return lopt->func (arg + strlen (lopt->option) - 1);
10498 }
10499 }
10500
10501 return 0;
10502 }
10503
10504 return 1;
10505 }
10506
10507 void
10508 md_show_usage (FILE * fp)
10509 {
10510 struct aarch64_option_table *opt;
10511 struct aarch64_long_option_table *lopt;
10512
10513 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10514
10515 for (opt = aarch64_opts; opt->option != NULL; opt++)
10516 if (opt->help != NULL)
10517 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10518
10519 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10520 if (lopt->help != NULL)
10521 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10522
10523 #ifdef OPTION_EB
10524 fprintf (fp, _("\
10525 -EB assemble code for a big-endian cpu\n"));
10526 #endif
10527
10528 #ifdef OPTION_EL
10529 fprintf (fp, _("\
10530 -EL assemble code for a little-endian cpu\n"));
10531 #endif
10532 }
10533
10534 /* Parse a .cpu directive. */
10535
10536 static void
10537 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10538 {
10539 const struct aarch64_cpu_option_table *opt;
10540 char saved_char;
10541 char *name;
10542 char *ext;
10543 size_t optlen;
10544
10545 name = input_line_pointer;
10546 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10547 saved_char = *input_line_pointer;
10548 *input_line_pointer = 0;
10549
10550 ext = strchr (name, '+');
10551
10552 if (ext != NULL)
10553 optlen = ext - name;
10554 else
10555 optlen = strlen (name);
10556
10557 /* Skip the first "all" entry. */
10558 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10559 if (strlen (opt->name) == optlen
10560 && strncmp (name, opt->name, optlen) == 0)
10561 {
10562 mcpu_cpu_opt = &opt->value;
10563 if (ext != NULL)
10564 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10565 return;
10566
10567 cpu_variant = *mcpu_cpu_opt;
10568
10569 *input_line_pointer = saved_char;
10570 demand_empty_rest_of_line ();
10571 return;
10572 }
10573 as_bad (_("unknown cpu `%s'"), name);
10574 *input_line_pointer = saved_char;
10575 ignore_rest_of_line ();
10576 }
10577
10578
10579 /* Parse a .arch directive. */
10580
10581 static void
10582 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10583 {
10584 const struct aarch64_arch_option_table *opt;
10585 char saved_char;
10586 char *name;
10587 char *ext;
10588 size_t optlen;
10589
10590 name = input_line_pointer;
10591 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10592 saved_char = *input_line_pointer;
10593 *input_line_pointer = 0;
10594
10595 ext = strchr (name, '+');
10596
10597 if (ext != NULL)
10598 optlen = ext - name;
10599 else
10600 optlen = strlen (name);
10601
10602 /* Skip the first "all" entry. */
10603 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10604 if (strlen (opt->name) == optlen
10605 && strncmp (name, opt->name, optlen) == 0)
10606 {
10607 mcpu_cpu_opt = &opt->value;
10608 if (ext != NULL)
10609 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10610 return;
10611
10612 cpu_variant = *mcpu_cpu_opt;
10613
10614 *input_line_pointer = saved_char;
10615 demand_empty_rest_of_line ();
10616 return;
10617 }
10618
10619 as_bad (_("unknown architecture `%s'\n"), name);
10620 *input_line_pointer = saved_char;
10621 ignore_rest_of_line ();
10622 }
10623
10624 /* Parse a .arch_extension directive. */
10625
10626 static void
10627 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10628 {
10629 char saved_char;
10630 char *ext = input_line_pointer;
10631
10632 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10633 saved_char = *input_line_pointer;
10634 *input_line_pointer = 0;
10635
10636 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10637 return;
10638
10639 cpu_variant = *mcpu_cpu_opt;
10640
10641 *input_line_pointer = saved_char;
10642 demand_empty_rest_of_line ();
10643 }
10644
/* Copy symbol information: propagate the AArch64-specific per-symbol
   flag word from SRC to DEST.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10652
10653 #ifdef OBJ_ELF
10654 /* Same as elf_copy_symbol_attributes, but without copying st_other.
10655 This is needed so AArch64 specific st_other values can be independently
10656 specified for an IFUNC resolver (that is called by the dynamic linker)
10657 and the symbol it resolves (aliased to the resolver). In particular,
10658 if a function symbol has special st_other value set via directives,
10659 then attaching an IFUNC resolver to that symbol should not override
10660 the st_other setting. Requiring the directive on the IFUNC resolver
10661 symbol would be unexpected and problematic in C code, where the two
10662 symbols appear as two independent function declarations. */
10663
10664 void
10665 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
10666 {
10667 struct elf_obj_sy *srcelf = symbol_get_obj (src);
10668 struct elf_obj_sy *destelf = symbol_get_obj (dest);
10669 /* If size is unset, copy size from src. Because we don't track whether
10670 .size has been used, we can't differentiate .size dest, 0 from the case
10671 where dest's size is unset. */
10672 if (!destelf->size && S_GET_SIZE (dest) == 0)
10673 {
10674 if (srcelf->size)
10675 {
10676 destelf->size = XNEW (expressionS);
10677 *destelf->size = *srcelf->size;
10678 }
10679 S_SET_SIZE (dest, S_GET_SIZE (src));
10680 }
10681 }
10682 #endif