]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
aarch64: Treat ZA as a register
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2023 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #include "sframe.h"
35 #include "gen-sframe.h"
36 #endif
37
38 #include "dw2gencfi.h"
39 #include "dwarf2dbg.h"
40
/* Types of processor to assemble for.  */
#ifndef CPU_DEFAULT
#define CPU_DEFAULT AARCH64_ARCH_V8
#endif

/* Return non-zero if strings A and B are identical.  */
#define streq(a, b)	  (strcmp (a, b) == 0)

/* Character that marks the end of an instruction's operand string.  */
#define END_OF_INSN '\0'

/* The feature set in effect while assembling.  */
static aarch64_feature_set cpu_variant;

/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const aarch64_feature_set *mcpu_cpu_opt = NULL;
static const aarch64_feature_set *march_cpu_opt = NULL;

/* Constants for known architecture features.  */
static const aarch64_feature_set cpu_default = CPU_DEFAULT;

/* Currently active instruction sequence.  */
static aarch64_instr_sequence *insn_sequence = NULL;
63
#ifdef OBJ_ELF
/* Pre-defined "_GLOBAL_OFFSET_TABLE_".  */
static symbolS *GOT_symbol;
#endif

/* Which ABI to use.  */
enum aarch64_abi_type
{
  AARCH64_ABI_NONE = 0,		/* No ABI selected.  */
  AARCH64_ABI_LP64 = 1,		/* int is 32-bit; long and pointers are
				   64-bit.  */
  AARCH64_ABI_ILP32 = 2,	/* int, long and pointers are all 32-bit.  */
  AARCH64_ABI_LLP64 = 3		/* int and long are 32-bit; pointers are
				   64-bit.  */
};

/* CFA base register numbers for SFrame stack trace info.  */
unsigned int aarch64_sframe_cfa_sp_reg;
/* The other CFA base register for SFrame stack trace info.  */
unsigned int aarch64_sframe_cfa_fp_reg;
/* The return-address register for SFrame stack trace info.  */
unsigned int aarch64_sframe_cfa_ra_reg;

#ifndef DEFAULT_ARCH
#define DEFAULT_ARCH "aarch64"
#endif

#ifdef OBJ_ELF
/* DEFAULT_ARCH is initialized in gas/configure.tgt.  */
static const char *default_arch = DEFAULT_ARCH;
#endif

/* AArch64 ABI for the output file.  */
static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;

/* When non-zero, program to a 32-bit model, in which the C data types
   int, long and all pointer types are 32-bit objects (ILP32); or to a
   64-bit model, in which the C int type is 32-bits but the C long type
   and all pointer types are 64-bit objects (LP64).  */
#define ilp32_p	(aarch64_abi == AARCH64_ABI_ILP32)

/* When non-zero, C types int and long are 32 bit;
   pointers, however, are 64 bit (LLP64).  */
#define llp64_p (aarch64_abi == AARCH64_ABI_LLP64)
104
/* Classification of a vector element type suffix, or of an SVE
   predication qualifier ("/z" or "/m").  */
enum vector_el_type
{
  NT_invtype = -1,	/* No valid type parsed.  */
  NT_b,			/* 8-bit elements.  */
  NT_h,			/* 16-bit elements.  */
  NT_s,			/* 32-bit elements.  */
  NT_d,			/* 64-bit elements.  */
  NT_q,			/* 128-bit elements.  */
  NT_zero,		/* SVE "/z" (zeroing) predication.  */
  NT_merge		/* SVE "/m" (merging) predication.  */
};
116
/* SME horizontal or vertical slice indicator, encoded in "V".
   Values:
     0 - Horizontal
     1 - Vertical.  */
enum sme_hv_slice
{
  HV_horizontal = 0,
  HV_vertical = 1
};
127
/* Bits for DEFINED field in vector_type_el.  */
#define NTA_HASTYPE     1	/* An element type was parsed.  */
#define NTA_HASINDEX    2	/* An element index was (or must be) parsed.  */
#define NTA_HASVARWIDTH 4	/* Variable-width register, e.g. SVE "z0.s".  */

/* Shape and index information parsed alongside a register name.  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type (NT_*).  */
  unsigned char defined;	/* Mask of the NTA_* bits that are valid.  */
  unsigned element_size;	/* Element size in bits (8 .. 128).  */
  unsigned width;		/* Number of elements; 0 if unspecified.  */
  int64_t index;		/* Element index, valid when NTA_HASINDEX
				   is set.  */
};
141
/* Flag bit for the FLAGS field of struct reloc: set when an explicit
   shift accompanied the relocation operator — NOTE(review): confirm at
   the points where this flag is tested.  */
#define FIXUP_F_HAS_EXPLICIT_SHIFT	0x00000001

/* Relocation information attached to the instruction being parsed.  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code.  */
  expressionS exp;			/* Expression being relocated.  */
  int pc_rel;				/* Presumably non-zero for
					   PC-relative relocs — verify at
					   fixup emission.  */
  enum aarch64_opnd opnd;		/* Operand the reloc applies to.  */
  uint32_t flags;			/* FIXUP_F_* flags.  */
  unsigned need_libopcodes_p : 1;	/* Whether libopcodes is needed to
					   finish the fixup — TODO
					   confirm.  */
};
153
/* The assembler's working state for the instruction currently being
   parsed and encoded.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  aarch64_operand_error parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};

typedef struct aarch64_instruction aarch64_instruction;

/* The instruction currently being assembled; cleared and refilled for
   each assembly line.  */
static aarch64_instruction inst;

/* Forward declarations.  */
static bool parse_operands (char *, const aarch64_opcode *);
static bool programmer_friendly_fixup (aarch64_instruction *);
174
175 /* Diagnostics inline function utilities.
176
177 These are lightweight utilities which should only be called by parse_operands
178 and other parsers. GAS processes each assembly line by parsing it against
179 instruction template(s), in the case of multiple templates (for the same
180 mnemonic name), those templates are tried one by one until one succeeds or
181 all fail. An assembly line may fail a few templates before being
182 successfully parsed; an error saved here in most cases is not a user error
183 but an error indicating the current template is not the right template.
184 Therefore it is very important that errors can be saved at a low cost during
185 the parsing; we don't want to slow down the whole parsing by recording
186 non-user errors in detail.
187
188 Remember that the objective is to help GAS pick up the most appropriate
189 error message in the case of multiple templates, e.g. FMOV which has 8
190 templates. */
191
192 static inline void
193 clear_error (void)
194 {
195 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
196 inst.parsing_error.kind = AARCH64_OPDE_NIL;
197 }
198
199 static inline bool
200 error_p (void)
201 {
202 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
203 }
204
205 static inline void
206 set_error (enum aarch64_operand_error_kind kind, const char *error)
207 {
208 memset (&inst.parsing_error, 0, sizeof (inst.parsing_error));
209 inst.parsing_error.index = -1;
210 inst.parsing_error.kind = kind;
211 inst.parsing_error.error = error;
212 }
213
/* Record a recoverable error: one that does not by itself rule out the
   current instruction template.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}

/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}

/* Record a syntax error with message ERROR, overwriting any earlier
   recorded error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}

/* Record a syntax error with message ERROR, but only if no error has
   been recorded yet.  */
static inline void
set_first_syntax_error (const char *error)
{
  if (! error_p ())
    set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}

/* Record a fatal syntax error — presumably one that ends template
   matching for this line; see the AARCH64_OPDE_* documentation in
   libopcodes.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
246 \f
/* Return value for certain parsers when the parsing fails; those parsers
   return the information of the parsed result, e.g. register number, on
   success.  */
#define PARSE_FAIL -1

/* This is an invalid condition code that means no conditional field is
   present.  */
#define COND_ALWAYS 0x10

/* Maps a flags-name spelling to its encoded value — presumably the NZCV
   flag-set names; see the uses of aarch64_nzcv_hsh.  */
typedef struct
{
  const char *template;	/* Name as written in assembly.  */
  uint32_t value;	/* Encoded value.  */
} asm_nzcv;

/* Maps a relocation operator name to its BFD relocation code.  */
struct reloc_entry
{
  char *name;
  bfd_reloc_code_real_type reloc;
};
267
/* Macros to define the register types and masks for the purpose
   of parsing.  This is an X-macro: AARCH64_REG_TYPES is expanded twice
   below, once to build the aarch64_reg_type enumeration and once to
   build the reg_type_masks[] bitmask table.  */

#undef AARCH64_REG_TYPES
#define AARCH64_REG_TYPES	\
  BASIC_REG_TYPE(R_32)	/* w[0-30] */	\
  BASIC_REG_TYPE(R_64)	/* x[0-30] */	\
  BASIC_REG_TYPE(SP_32)	/* wsp     */	\
  BASIC_REG_TYPE(SP_64)	/* sp      */	\
  BASIC_REG_TYPE(Z_32)	/* wzr     */	\
  BASIC_REG_TYPE(Z_64)	/* xzr     */	\
  BASIC_REG_TYPE(FP_B)	/* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
  BASIC_REG_TYPE(FP_H)	/* h[0-31] */	\
  BASIC_REG_TYPE(FP_S)	/* s[0-31] */	\
  BASIC_REG_TYPE(FP_D)	/* d[0-31] */	\
  BASIC_REG_TYPE(FP_Q)	/* q[0-31] */	\
  BASIC_REG_TYPE(VN)	/* v[0-31] */	\
  BASIC_REG_TYPE(ZN)	/* z[0-31] */	\
  BASIC_REG_TYPE(PN)	/* p[0-15] */	\
  BASIC_REG_TYPE(ZA)	/* za */	\
  BASIC_REG_TYPE(ZAT)	/* za[0-15] (ZA tile) */			\
  BASIC_REG_TYPE(ZATH)	/* za[0-15]h (ZA tile horizontal slice) */	\
  BASIC_REG_TYPE(ZATV)	/* za[0-15]v (ZA tile vertical slice) */	\
  /* Typecheck: any 64-bit int reg         (inc SP exc XZR).  */	\
  MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64))		\
  /* Typecheck: same, plus SVE registers.  */				\
  MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64)		\
		 | REG_TYPE(ZN))					\
  /* Typecheck: x[0-30], w[0-30] or [xw]zr.  */				\
  MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* Typecheck: same, plus SVE registers.  */				\
  MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64)		\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64)			\
		 | REG_TYPE(ZN))					\
  /* Typecheck: x[0-30], w[0-30] or {w}sp.  */				\
  MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64)			\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64))			\
  /* Typecheck: any int                    (inc {W}SP inc [WX]ZR).  */	\
  MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64)		\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* Typecheck: any [BHSDQ]P FP.  */					\
  MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
  /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR).  */ \
  MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64)		\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
  /* Typecheck: as above, but also Zn, Pn, and {W}SP.  This should only	\
     be used for SVE instructions, since Zn and Pn are valid symbols	\
     in other contexts.  */						\
  MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64)	\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)	\
		 | REG_TYPE(ZN) | REG_TYPE(PN))				\
  /* Any integer register; used for error messages only.  */		\
  MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64)			\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* The whole of ZA or a single tile.  */				\
  MULTI_REG_TYPE(ZA_ZAT, REG_TYPE(ZA) | REG_TYPE(ZAT))			\
  /* A horizontal or vertical slice of a ZA tile.  */			\
  MULTI_REG_TYPE(ZATHV, REG_TYPE(ZATH) | REG_TYPE(ZATV))		\
  /* Pseudo type to mark the end of the enumerator sequence.  */	\
  BASIC_REG_TYPE(MAX)

/* First expansion: every entry becomes a REG_TYPE_* enumerator.  */
#undef BASIC_REG_TYPE
#define BASIC_REG_TYPE(T)	REG_TYPE_##T,
#undef MULTI_REG_TYPE
#define MULTI_REG_TYPE(T,V)	BASIC_REG_TYPE(T)

/* Register type enumerators.  */
typedef enum aarch64_reg_type_
{
  /* A list of REG_TYPE_*.  */
  AARCH64_REG_TYPES
} aarch64_reg_type;

/* Second expansion: every entry becomes the bitmask of basic register
   types it accepts (a basic type accepts only itself).  */
#undef BASIC_REG_TYPE
#define BASIC_REG_TYPE(T)	1 << REG_TYPE_##T,
#undef REG_TYPE
#define REG_TYPE(T)	(1 << REG_TYPE_##T)
#undef MULTI_REG_TYPE
#define MULTI_REG_TYPE(T,V)	V,

/* Structure for a hash table entry for a register.  */
typedef struct
{
  const char *name;		/* Register name as written in assembly.  */
  unsigned char number;		/* Register number.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;	/* REG_TYPE_* of the reg.  */
  unsigned char builtin;	/* NOTE(review): presumably non-zero for
				   predefined registers — confirm where the
				   tables are built.  */
} reg_entry;

/* Values indexed by aarch64_reg_type to assist the type checking.  */
static const unsigned reg_type_masks[] =
{
  AARCH64_REG_TYPES
};

#undef BASIC_REG_TYPE
#undef REG_TYPE
#undef MULTI_REG_TYPE
#undef AARCH64_REG_TYPES
376
377 /* Diagnostics used when we don't get a register of the expected type.
378 Note: this has to synchronized with aarch64_reg_type definitions
379 above. */
380 static const char *
381 get_reg_expected_msg (aarch64_reg_type reg_type)
382 {
383 const char *msg;
384
385 switch (reg_type)
386 {
387 case REG_TYPE_R_32:
388 msg = N_("integer 32-bit register expected");
389 break;
390 case REG_TYPE_R_64:
391 msg = N_("integer 64-bit register expected");
392 break;
393 case REG_TYPE_R_N:
394 msg = N_("integer register expected");
395 break;
396 case REG_TYPE_R64_SP:
397 msg = N_("64-bit integer or SP register expected");
398 break;
399 case REG_TYPE_SVE_BASE:
400 msg = N_("base register expected");
401 break;
402 case REG_TYPE_R_Z:
403 msg = N_("integer or zero register expected");
404 break;
405 case REG_TYPE_SVE_OFFSET:
406 msg = N_("offset register expected");
407 break;
408 case REG_TYPE_R_SP:
409 msg = N_("integer or SP register expected");
410 break;
411 case REG_TYPE_R_Z_SP:
412 msg = N_("integer, zero or SP register expected");
413 break;
414 case REG_TYPE_FP_B:
415 msg = N_("8-bit SIMD scalar register expected");
416 break;
417 case REG_TYPE_FP_H:
418 msg = N_("16-bit SIMD scalar or floating-point half precision "
419 "register expected");
420 break;
421 case REG_TYPE_FP_S:
422 msg = N_("32-bit SIMD scalar or floating-point single precision "
423 "register expected");
424 break;
425 case REG_TYPE_FP_D:
426 msg = N_("64-bit SIMD scalar or floating-point double precision "
427 "register expected");
428 break;
429 case REG_TYPE_FP_Q:
430 msg = N_("128-bit SIMD scalar or floating-point quad precision "
431 "register expected");
432 break;
433 case REG_TYPE_R_Z_BHSDQ_V:
434 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
435 msg = N_("register expected");
436 break;
437 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
438 msg = N_("SIMD scalar or floating-point register expected");
439 break;
440 case REG_TYPE_VN: /* any V reg */
441 msg = N_("vector register expected");
442 break;
443 case REG_TYPE_ZN:
444 msg = N_("SVE vector register expected");
445 break;
446 case REG_TYPE_PN:
447 msg = N_("SVE predicate register expected");
448 break;
449 default:
450 as_fatal (_("invalid register type %d"), reg_type);
451 }
452 return msg;
453 }
454
/* Some well known registers that we refer to directly elsewhere.  */
#define REG_SP	31
#define REG_ZR	31

/* Instructions take 4 bytes in the object file.  */
#define INSN_SIZE	4

/* Hash tables used by the parsers; each maps a spelling to its
   descriptor.  The name suffix indicates the namespace it covers
   (opcodes, condition codes, shift operators, system registers,
   PSTATE fields, IC/DC/AT/TLBI/SR operations, registers, barrier
   options, NZCV flag sets, prefetch operations, hint options).  */
static htab_t aarch64_ops_hsh;
static htab_t aarch64_cond_hsh;
static htab_t aarch64_shift_hsh;
static htab_t aarch64_sys_regs_hsh;
static htab_t aarch64_pstatefield_hsh;
static htab_t aarch64_sys_regs_ic_hsh;
static htab_t aarch64_sys_regs_dc_hsh;
static htab_t aarch64_sys_regs_at_hsh;
static htab_t aarch64_sys_regs_tlbi_hsh;
static htab_t aarch64_sys_regs_sr_hsh;
static htab_t aarch64_reg_hsh;
static htab_t aarch64_barrier_opt_hsh;
static htab_t aarch64_nzcv_hsh;
static htab_t aarch64_pldop_hsh;
static htab_t aarch64_hint_opt_hsh;
477
/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
	      <insn>  */

static symbolS *last_label_seen;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE	1024
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum
     value.  */
  LITTLENUM_TYPE * bignum;
} literal_pool_entry_t;

typedef struct literal_pool
{
  /* The pending literal expressions.  */
  literal_expression literals[MAX_LITERAL_POOL_SIZE];
  /* Index of the first unused entry in LITERALS.  */
  unsigned int next_free_entry;
  /* Pool identifier — presumably used to build the pool's label name;
     TODO confirm.  */
  unsigned int id;
  /* Presumably the label at which the pool is emitted — TODO confirm.  */
  symbolS *symbol;
  /* Section and sub-section this pool belongs to.  */
  segT section;
  subsegT sub_section;
  /* Entry size in bytes — NOTE(review): confirm against the code that
     emits the pool.  */
  int size;
  /* Next pool in the LIST_OF_POOLS chain.  */
  struct literal_pool *next;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
static literal_pool *list_of_pools = NULL;
514 \f
/* Pure syntax.  */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

/* Characters that separate statements on a single line.  */
const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456 */
/* or    0d1.2345e12 */

const char FLT_CHARS[] = "rRsSfFdDxXeEpPhHb";

/* Prefix character that indicates the start of an immediate value.  */
#define is_immediate_prefix(C) ((C) == '#')

/* Separator character handling.  */

/* Skip at most one space.  (Presumably runs of whitespace have already
   been collapsed earlier in the GAS pipeline — verify in app.c.)  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
548
/* If *STR points at character C, advance *STR past it and return TRUE;
   otherwise leave *STR unchanged and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;

  ++*str;
  return true;
}
560
/* Shorthand for stepping past an optional comma.  */
#define skip_past_comma(str) skip_past_char (str, ',')

/* Arithmetic expressions (possibly involving symbols).  */

/* True while expression () is being run on behalf of
   aarch64_get_expression; tested by md_operand below.  */
static bool in_aarch64_get_expression = false;

/* Third argument to aarch64_get_expression.  */
#define GE_NO_PREFIX false
#define GE_OPT_PREFIX true

/* Fourth argument to aarch64_get_expression.  */
#define ALLOW_ABSENT false
#define REJECT_ABSENT true
574
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.  On return *STR points
   past the parsed text.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.  */

static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* Hand the string to the generic expression parser;
     in_aarch64_get_expression lets md_operand flag bad expressions
     back to us as O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_aarch64_get_expression = true;
  seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* After an explicit '#' the operand must be an expression, so
	 report a fatal error rather than a first/recoverable one.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
641
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Defer entirely to the generic IEEE encoder, honouring the target
     endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
652
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * exp)
{
  /* Only intervene when parsing on behalf of aarch64_get_expression;
     marking the expression O_illegal lets that caller report the error
     against the whole instruction.  */
  if (in_aarch64_get_expression)
    exp->X_op = O_illegal;
}
661
662 /* Immediate values. */
663
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is
   set will be the most meaningful.  Avoid overwriting it with later
   (cascading) errors by calling this function.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;

  set_syntax_error (error);
}
675
/* Similar to first_error, but this function accepts formatted error
   message; FORMAT and the trailing arguments are printf-style.  Does
   nothing if an error has already been recorded.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message must fit in BUFFER.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
700
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL
   (and records an error) if no qualifier matches.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Smallest vector qualifier for each element type; the final qualifier
     is found at an offset from this base.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predication suffixes map directly to their qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
775
776 /* Register parsing. */
777
778 /* Generic register parser which is called by other specialized
779 register parsers.
780 CCP points to what should be the beginning of a register name.
781 If it is indeed a valid register name, advance CCP over it and
782 return the reg_entry structure; otherwise return NULL.
783 It does not issue diagnostics. */
784
785 static reg_entry *
786 parse_reg (char **ccp)
787 {
788 char *start = *ccp;
789 char *p;
790 reg_entry *reg;
791
792 #ifdef REGISTER_PREFIX
793 if (*start != REGISTER_PREFIX)
794 return NULL;
795 start++;
796 #endif
797
798 p = start;
799 if (!ISALPHA (*p) || !is_name_beginner (*p))
800 return NULL;
801
802 do
803 p++;
804 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
805
806 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
807
808 if (!reg)
809 return NULL;
810
811 *ccp = p;
812 return reg;
813 }
814
815 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
816 return FALSE. */
817 static bool
818 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
819 {
820 return (reg_type_masks[type] & (1 << reg->type)) != 0;
821 }
822
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit integer registers, WSP and WZR.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit integer registers, SP and XZR.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* SVE registers are only accepted when REG_TYPE allows them, and
	 they must carry an explicit ".s" or ".d" qualifier here.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
880
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept any W/X integer register, {W}SP or [WX]ZR, but no SVE
     registers.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
892
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* Only Advanced SIMD (Vn) registers take an integer element count;
     everything else goes straight to the element size.  */
  if (reg_type != REG_TYPE_VN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* For Vn registers 'q' requires an explicit count of 1 (".1q");
	 other register types take a bare ".q".  */
      if (reg_type != REG_TYPE_VN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* Reject totals other than a full 64-bit or 128-bit vector, with the
     exception of the 4b and 2h short forms.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;
  parsed_type->element_size = element_size;

  *str = ptr;

  return true;
}
979
980 /* *STR contains an SVE zero/merge predication suffix. Parse it into
981 *PARSED_TYPE and point *STR at the end of the suffix. */
982
983 static bool
984 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
985 {
986 char *ptr = *str;
987
988 /* Skip '/'. */
989 gas_assert (*ptr == '/');
990 ptr++;
991 switch (TOLOWER (*ptr))
992 {
993 case 'z':
994 parsed_type->type = NT_zero;
995 break;
996 case 'm':
997 parsed_type->type = NT_merge;
998 break;
999 default:
1000 if (*ptr != '\0' && *ptr != ',')
1001 first_error_fmt (_("unexpected character `%c' in predication type"),
1002 *ptr);
1003 else
1004 first_error (_("missing predication type"));
1005 return false;
1006 }
1007 parsed_type->width = 0;
1008 *str = ptr + 1;
1009 return true;
1010 }
1011
1012 /* Return true if CH is a valid suffix character for registers of
1013 type TYPE. */
1014
1015 static bool
1016 aarch64_valid_suffix_char_p (aarch64_reg_type type, char ch)
1017 {
1018 switch (type)
1019 {
1020 case REG_TYPE_VN:
1021 case REG_TYPE_ZN:
1022 case REG_TYPE_ZA:
1023 case REG_TYPE_ZAT:
1024 case REG_TYPE_ZATH:
1025 case REG_TYPE_ZATV:
1026 return ch == '.';
1027
1028 case REG_TYPE_PN:
1029 return ch == '.' || ch == '/';
1030
1031 default:
1032 return false;
1033 }
1034 }
1035
/* Parse a register of the type TYPE.

   Return null if the string pointed to by *CCP is not a valid register
   name or the parsed register is not of TYPE; an error is recorded in
   that case.

   Otherwise return the register, and optionally return the register
   shape and element index information in *TYPEINFO.

   FLAGS includes PTR_IN_REGLIST if the caller is parsing a register list.

   FLAGS includes PTR_FULL_REG if the function should ignore any potential
   register index.  */

#define PTR_IN_REGLIST (1U << 0)
#define PTR_FULL_REG (1U << 1)

static const reg_entry *
parse_typed_reg (char **ccp, aarch64_reg_type type,
		 struct vector_type_el *typeinfo, unsigned int flags)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start with no shape or index information.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.element_size = 0;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return NULL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return NULL;
    }
  /* Narrow TYPE to the basic type actually parsed.  */
  type = reg->type;

  if (aarch64_valid_suffix_char_p (reg->type, *str))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return NULL;
	  /* ZA tile za<n> with element size E bits requires n*8 < E;
	     e.g. ".b" has tile 0 only, ".h" tiles 0-1, ... ".q" tiles
	     0-15.  */
	  if ((reg->type == REG_TYPE_ZAT
	       || reg->type == REG_TYPE_ZATH
	       || reg->type == REG_TYPE_ZATV)
	      && reg->number * 8 >= parsetype.element_size)
	    {
	      set_syntax_error (_("ZA tile number out of range"));
	      return NULL;
	    }
	}
      else
	{
	  /* A '/' introduces an SVE predication suffix ("/z" or "/m").  */
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return NULL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type != REG_TYPE_VN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (!(flags & PTR_FULL_REG) && skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return NULL;
	}

      if (flags & PTR_IN_REGLIST)
	{
	  first_error (_("index not allowed inside register list"));
	  return NULL;
	}

      atype.defined |= NTA_HASINDEX;

      /* Parse the index expression; it must be a constant.  */
      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return NULL;
	}

      if (! skip_past_char (&str, ']'))
	return NULL;

      atype.index = exp.X_add_number;
    }
  else if (!(flags & PTR_IN_REGLIST) && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return NULL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  *ccp = str;

  return reg;
}
1180
/* Parse register.

   Return the register on success; return null otherwise.

   If this is a NEON vector register with additional type information, fill
   in the struct pointed to by VECTYPE (if non-NULL).

   This parser does not handle register lists.  */

static const reg_entry *
aarch64_reg_parse (char **ccp, aarch64_reg_type type,
		   struct vector_type_el *vectype)
{
  /* Flags are zero: an index is permitted and the register-list
     restrictions (PTR_IN_REGLIST) do not apply.  */
  return parse_typed_reg (ccp, type, vectype, 0);
}
1196
1197 static inline bool
1198 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1199 {
1200 return (e1.type == e2.type
1201 && e1.defined == e2.defined
1202 && e1.width == e2.width
1203 && e1.element_size == e2.element_size
1204 && e1.index == e2.index);
1205 }
1206
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  /* TYPEINFO_FIRST records the shape of the first register; every
     subsequent register (and any trailing [index]) must match it.  */
  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.element_size = 0;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  /* Parse one register (or one "-" range continuation) per iteration.
     IN_RANGE is set by the loop condition when a '-' follows a comma-less
     register, making VAL_RANGE..VAL an inclusive range.  */
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      const reg_entry *reg = parse_typed_reg (&str, type, &typeinfo,
					      PTR_IN_REGLIST);
      if (!reg)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      val = reg->number;
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  /* Skip the range start itself; it was already accumulated on
	     the previous iteration.  */
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Accumulate each register number, 5 bits per register.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* A list of indexed registers takes a single shared index after the
     closing brace, e.g. { v0.s, v1.s }[2].  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1369
1370 /* Directives: register aliases. */
1371
1372 static reg_entry *
1373 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1374 {
1375 reg_entry *new;
1376 const char *name;
1377
1378 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1379 {
1380 if (new->builtin)
1381 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1382 str);
1383
1384 /* Only warn about a redefinition if it's not defined as the
1385 same register. */
1386 else if (new->number != number || new->type != type)
1387 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1388
1389 return NULL;
1390 }
1391
1392 name = xstrdup (str);
1393 new = XNEW (reg_entry);
1394
1395 new->name = name;
1396 new->number = number;
1397 new->type = type;
1398 new->builtin = false;
1399
1400 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1401
1402 return new;
1403 }
1404
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Still TRUE: this was recognizably a .req line, so the caller
	 should not try to parse it as anything else.  */
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  /* Work on a NUL-terminated copy so we can case-convert in place.  */
  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  /* insert_reg_alias copies the name, so the local buffer can go.  */
  free (nbuf);
  return true;
}
1484
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reaching here means ".req" appeared in directive position with no
     alias name before it; the real handling is in create_register_alias.  */
  as_bad (_("invalid syntax for .req directive"));
}
1492
1493 /* The .unreq directive deletes an alias which was previously defined
1494 by .req. For example:
1495
1496 my_alias .req r11
1497 .unreq my_alias */
1498
1499 static void
1500 s_unreq (int a ATTRIBUTE_UNUSED)
1501 {
1502 char *name;
1503 char saved_char;
1504
1505 name = input_line_pointer;
1506 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
1507 saved_char = *input_line_pointer;
1508 *input_line_pointer = 0;
1509
1510 if (!*name)
1511 as_bad (_("invalid syntax for .unreq directive"));
1512 else
1513 {
1514 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1515
1516 if (!reg)
1517 as_bad (_("unknown register alias '%s'"), name);
1518 else if (reg->builtin)
1519 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1520 name);
1521 else
1522 {
1523 char *p;
1524 char *nbuf;
1525
1526 str_hash_delete (aarch64_reg_hsh, name);
1527 free ((char *) reg->name);
1528 free (reg);
1529
1530 /* Also locate the all upper case and all lower case versions.
1531 Do not complain if we cannot find one or the other as it
1532 was probably deleted above. */
1533
1534 nbuf = strdup (name);
1535 for (p = nbuf; *p; p++)
1536 *p = TOUPPER (*p);
1537 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1538 if (reg)
1539 {
1540 str_hash_delete (aarch64_reg_hsh, nbuf);
1541 free ((char *) reg->name);
1542 free (reg);
1543 }
1544
1545 for (p = nbuf; *p; p++)
1546 *p = TOLOWER (*p);
1547 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1548 if (reg)
1549 {
1550 str_hash_delete (aarch64_reg_hsh, nbuf);
1551 free ((char *) reg->name);
1552 free (reg);
1553 }
1554
1555 free (nbuf);
1556 }
1557 }
1558
1559 *input_line_pointer = saved_char;
1560 demand_empty_rest_of_line ();
1561 }
1562
/* Directives: Instruction set selection.  */

#if defined OBJ_ELF || defined OBJ_COFF
/* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
   spec.  (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
   Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
   and $d has type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */

/* Create a new mapping symbol for the transition to STATE.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* $d marks data, $x marks AArch64 instructions.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag must be emitted in address order.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1626
1627 /* We must sometimes convert a region marked as code to data during
1628 code alignment, if an odd number of bytes have to be padded. The
1629 code mapping symbol is pushed to an aligned address. */
1630
1631 static void
1632 insert_data_mapping_symbol (enum mstate state,
1633 valueT value, fragS * frag, offsetT bytes)
1634 {
1635 /* If there was already a mapping symbol, remove it. */
1636 if (frag->tc_frag_data.last_map != NULL
1637 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1638 frag->fr_address + value)
1639 {
1640 symbolS *symp = frag->tc_frag_data.last_map;
1641
1642 if (value == 0)
1643 {
1644 know (frag->tc_frag_data.first_map == symp);
1645 frag->tc_frag_data.first_map = NULL;
1646 }
1647 frag->tc_frag_data.last_map = NULL;
1648 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1649 }
1650
1651 make_mapping_symbol (MAP_DATA, value, frag);
1652 make_mapping_symbol (state, value + bytes, frag);
1653 }
1654
static void mapping_state_2 (enum mstate state, int max_chars);

/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* If instructions appear after initial data in a text section, a
	 $d symbol must be inserted retroactively at the section start.
	 Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1698
/* Same as mapping_state, but MAX_CHARS bytes have already been
   allocated.  Put the mapping symbol that far back.  */

static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only normal (loadable) sections get mapping symbols.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
#else
#define mapping_state(x)	/* nothing */
#define mapping_state_2(x, y)	/* nothing */
#endif
1722
/* Directives: sectioning and alignment.  */

/* Handle the .bss directive: switch to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1734
/* Handle the .even directive: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1746
1747 /* Directives: Literal pools. */
1748
1749 static literal_pool *
1750 find_literal_pool (int size)
1751 {
1752 literal_pool *pool;
1753
1754 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1755 {
1756 if (pool->section == now_seg
1757 && pool->sub_section == now_subseg && pool->size == size)
1758 break;
1759 }
1760
1761 return pool;
1762 }
1763
/* Return the literal pool of entry size SIZE for the current
   (sub)section, creating it if necessary.  Ensure the pool has a
   label symbol and an ID.  */

static literal_pool *
find_or_make_literal_pool (int size)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *pool;

  pool = find_literal_pool (size);

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = XNEW (literal_pool);
      /* NOTE(review): XNEW is presumably xmalloc-based and aborts on
	 failure, which would make this check dead — confirm before
	 relying on the NULL return.  */
      if (!pool)
	return NULL;

      /* Currently we always put the literal pool in the current text
	 section.  If we were generating "small" model code where we
	 knew that all code and initialised data was within 1MB then
	 we could output literals to mergeable, read-only data
	 sections.  */

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->size = size;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    &zero_address_frag, 0);
      pool->id = latest_pool_num++;
    }

  /* Done.  */
  return pool;
}
1808
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   On return, *EXP is rewritten as a symbol reference to the pool
   entry (pool symbol plus byte offset).
   Return TRUE on success, otherwise return FALSE.  */

static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  Constants
     and symbol references are deduplicated; other expression kinds
     (e.g. O_big) always get a fresh entry.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP to refer to the pool entry's future address.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1868
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

/* Give the previously-created symbol SYMBOLP its NAME, SEGMENT, value
   VALU and owning FRAG, and append it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns stable storage.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  /* Run the object-format and target hooks for new symbols.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1919
1920
1921 static void
1922 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1923 {
1924 unsigned int entry;
1925 literal_pool *pool;
1926 char sym_name[20];
1927 int align;
1928
1929 for (align = 2; align <= 4; align++)
1930 {
1931 int size = 1 << align;
1932
1933 pool = find_literal_pool (size);
1934 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1935 continue;
1936
1937 /* Align pool as you have word accesses.
1938 Only make a frag if we have to. */
1939 if (!need_pass_2)
1940 frag_align (align, 0, 0);
1941
1942 mapping_state (MAP_DATA);
1943
1944 record_alignment (now_seg, align);
1945
1946 sprintf (sym_name, "$$lit_\002%x", pool->id);
1947
1948 symbol_locate (pool->symbol, sym_name, now_seg,
1949 (valueT) frag_now_fix (), frag_now);
1950 symbol_table_insert (pool->symbol);
1951
1952 for (entry = 0; entry < pool->next_free_entry; entry++)
1953 {
1954 expressionS * exp = & pool->literals[entry].exp;
1955
1956 if (exp->X_op == O_big)
1957 {
1958 /* PR 16688: Restore the global bignum value. */
1959 gas_assert (pool->literals[entry].bignum != NULL);
1960 memcpy (generic_bignum, pool->literals[entry].bignum,
1961 CHARS_PER_LITTLENUM * exp->X_add_number);
1962 }
1963
1964 /* First output the expression in the instruction to the pool. */
1965 emit_expr (exp, size); /* .word|.xword */
1966
1967 if (exp->X_op == O_big)
1968 {
1969 free (pool->literals[entry].bignum);
1970 pool->literals[entry].bignum = NULL;
1971 }
1972 }
1973
1974 /* Mark the pool as empty. */
1975 pool->next_free_entry = 0;
1976 pool->symbol = NULL;
1977 }
1978 }
1979
#if defined(OBJ_ELF) || defined(OBJ_COFF)
/* Forward declarations for functions below, in the MD interface
   section.  */
static struct reloc_table_entry * find_reloc_table_entry (char **);

/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Handle .word/.long/.xword/.dword: emit one or more NBYTES-sized
   data values, rejecting (for now) any :reloc_op: suffix.  */

static void
s_aarch64_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:reloc_op:" suffix; it is
	     recognized but not yet supported here.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
#endif
2041
#ifdef OBJ_ELF
/* Forward declarations for functions below, in the MD interface
   section.  */
static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);

/* Mark symbol that it follows a variant PCS convention.  */

static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();
  /* Record the convention in the ELF symbol's st_other bits.  */
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
#endif /* OBJ_ELF */
2070
/* Output a 32-bit word, but mark as an instruction.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  unsigned n = 0;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instructions are always little-endian; on a big-endian target
	 the emitted word must be byte-swapped.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, INSN_SIZE);
      ++n;
    }
  while (*input_line_pointer++ == ',');

  /* Record the emitted instructions for DWARF line-number info.  */
  dwarf2_emit_insn (n * INSN_SIZE);

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2127
/* Handle .cfi_b_key_frame: record that the current CFI FDE uses the
   B pointer-authentication key.  */

static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2135
#ifdef OBJ_ELF
/* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.  */

static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the instruction that follows
     this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}

/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}

/* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.  */

static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the instruction that follows
     this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
#endif /* OBJ_ELF */
2187
#ifdef TE_PE
/* Handle .secrel32: emit 32-bit section-relative values for each
   expression in the comma-separated list.  */

static void
s_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      /* Turn plain symbol references into section-relative ones.  */
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put the terminator back into the stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}

/* Emit a SIZE-byte section-relative offset to SYMBOL, used by the
   DWARF machinery on PE targets.  */

void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}

/* Handle .secidx: emit 16-bit section-index values for each
   expression in the comma-separated list.  */

static void
s_secidx (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      /* Turn plain symbol references into section-index ones.  */
      if (exp.X_op == O_symbol)
	exp.X_op = O_secidx;

      emit_expr (&exp, 2);
    }
  while (*input_line_pointer++ == ',');

  /* Put the terminator back into the stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
#endif /* TE_PE */
2238
static void s_aarch64_arch (int);
static void s_aarch64_cpu (int);
static void s_aarch64_arch_extension (int);

/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},		/* Alias for .ltorg (same handler).  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  {"variant_pcs", s_variant_pcs, 0},
#endif
#if defined(OBJ_ELF) || defined(OBJ_COFF)
  /* The argument is the element size in bytes.  */
  {"word", s_aarch64_cons, 4},
  {"long", s_aarch64_cons, 4},
  {"xword", s_aarch64_cons, 8},
  {"dword", s_aarch64_cons, 8},
#endif
#ifdef TE_PE
  {"secrel32", s_secrel, 0},
  {"secidx", s_secidx, 0},
#endif
  /* The argument selects the float_cons format character.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2282 \f
2283
2284 /* Check whether STR points to a register name followed by a comma or the
2285 end of line; REG_TYPE indicates which register types are checked
2286 against. Return TRUE if STR is such a register name; otherwise return
2287 FALSE. The function does not intend to produce any diagnostics, but since
2288 the register parser aarch64_reg_parse, which is called by this function,
2289 does produce diagnostics, we call clear_error to clear any diagnostics
2290 that may be generated by aarch64_reg_parse.
2291 Also, the function returns FALSE directly if there is any user error
2292 present at the function entry. This prevents the existing diagnostics
2293 state from being spoiled.
2294 The function currently serves parse_constant_immediate and
2295 parse_big_immediate only. */
2296 static bool
2297 reg_name_p (char *str, aarch64_reg_type reg_type)
2298 {
2299 const reg_entry *reg;
2300
2301 /* Prevent the diagnostics state from being spoiled. */
2302 if (error_p ())
2303 return false;
2304
2305 reg = aarch64_reg_parse (&str, reg_type, NULL);
2306
2307 /* Clear the parsing error that may be set by the reg parser. */
2308 clear_error ();
2309
2310 if (!reg)
2311 return false;
2312
2313 skip_whitespace (str);
2314 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2315 return true;
2316
2317 return false;
2318 }
2319
2320 /* Parser functions used exclusively in instruction operands. */
2321
2322 /* Parse an immediate expression which may not be constant.
2323
2324 To prevent the expression parser from pushing a register name
2325 into the symbol table as an undefined symbol, firstly a check is
2326 done to find out whether STR is a register of type REG_TYPE followed
2327 by a comma or the end of line. Return FALSE if STR is such a string. */
2328
2329 static bool
2330 parse_immediate_expression (char **str, expressionS *exp,
2331 aarch64_reg_type reg_type)
2332 {
2333 if (reg_name_p (*str, reg_type))
2334 {
2335 set_recoverable_error (_("immediate operand required"));
2336 return false;
2337 }
2338
2339 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT);
2340
2341 if (exp->X_op == O_absent)
2342 {
2343 set_fatal_syntax_error (_("missing immediate expression"));
2344 return false;
2345 }
2346
2347 return true;
2348 }
2349
2350 /* Constant immediate-value read function for use in insn parsing.
2351 STR points to the beginning of the immediate (with the optional
2352 leading #); *VAL receives the value. REG_TYPE says which register
2353 names should be treated as registers rather than as symbolic immediates.
2354
2355 Return TRUE on success; otherwise return FALSE. */
2356
2357 static bool
2358 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2359 {
2360 expressionS exp;
2361
2362 if (! parse_immediate_expression (str, &exp, reg_type))
2363 return false;
2364
2365 if (exp.X_op != O_constant)
2366 {
2367 set_syntax_error (_("constant expression required"));
2368 return false;
2369 }
2370
2371 *val = exp.X_add_number;
2372 return true;
2373 }
2374
/* Compress the IEEE single-precision encoding IMM into the 8-bit
   floating-point immediate field: the sign bit plus the seven bits
   below the exponent MSB.  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t payload = (imm >> 19) & 0x7f;   /* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;      /* b[31] -> b[7].  */

  return sign | payload;
}
2381
/* Return TRUE if the single-precision value encoded in IMM fits the
   AArch64 8-bit signed floating-point format (3-bit exponent, 4
   normalized precision bits); i.e. the value can be written as
   (+/-) n / 16 * power (2, r) with 16 <= n <= 31 and -3 <= r <= 4.

   Such a value has the bit pattern

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

   where n, e and each x vary freely and E == ~e.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* The low 19 mantissa bits must be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 25-29 must all equal the inverse of bit 30.  */
  uint32_t expected = ((imm >> 30) & 0x1) ? 0x40000000 : 0x3e000000;

  return (imm & 0x7e000000) == expected;
}
2414
/* Return TRUE if the IEEE double value encoded in IMM is exactly
   representable as an IEEE float, storing the float encoding in
   *FPWORD if so.

   Such a double has the bit pattern

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
     if Eeee_eeee != 1111_1111

   where n, e, s and S vary freely and ~ is the inverse of E.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  uint32_t top = imm >> 32;
  uint32_t bottom = (uint32_t) imm;

  /* The 29 mantissa bits that a float cannot hold must be zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* The three bits below the sign must be copies of the inverted
     exponent MSB (i.e. the exponent must fit a float's bias).  */
  uint32_t expected = ((top >> 30) & 0x1) ? 0x40000000 : 0x38000000;
  if ((top & 0x78000000) != expected)
    return false;

  /* An exponent that would map to all-ones in single precision
     (Inf/NaN territory) is rejected.  */
  if ((top & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (top & 0xc0000000)            /* Sign bit and exponent MSB.  */
	    | ((top << 3) & 0x3ffffff8)   /* 7 exponent and 20 mantissa bits.  */
	    | (bottom >> 29);             /* Low 3 mantissa bits.  */
  return true;
}
2462
2463 /* Return true if we should treat OPERAND as a double-precision
2464 floating-point operand rather than a single-precision one. */
2465 static bool
2466 double_precision_operand_p (const aarch64_opnd_info *operand)
2467 {
2468 /* Check for unsuffixed SVE registers, which are allowed
2469 for LDR and STR but not in instructions that require an
2470 immediate. We get better error messages if we arbitrarily
2471 pick one size, parse the immediate normally, and then
2472 report the match failure in the normal way. */
2473 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2474 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2475 }
2476
2477 /* Parse a floating-point immediate. Return TRUE on success and return the
2478 value in *IMMED in the format of IEEE754 single-precision encoding.
2479 *CCP points to the start of the string; DP_P is TRUE when the immediate
2480 is expected to be in double-precision (N.B. this only matters when
2481 hexadecimal representation is involved). REG_TYPE says which register
2482 names should be treated as registers rather than as symbolic immediates.
2483
2484 This routine accepts any IEEE float; it is up to the callers to reject
2485 invalid ones. */
2486
static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* The '#' immediate prefix is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit encoding to a 32-bit one; fail when the
	     double has no exact single-precision equivalent.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* Too wide for a single-precision encoding.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A bare register name can never be a floating-point immediate.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Parse a decimal floating-point literal into LITTLENUM words.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2552
2553 /* Less-generic immediate-value read function with the possibility of loading
2554 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2555 instructions.
2556
2557 To prevent the expression parser from pushing a register name into the
2558 symbol table as an undefined symbol, a check is firstly done to find
2559 out whether STR is a register of type REG_TYPE followed by a comma or
2560 the end of line. Return FALSE if STR is such a register. */
2561
2562 static bool
2563 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2564 {
2565 char *ptr = *str;
2566
2567 if (reg_name_p (ptr, reg_type))
2568 {
2569 set_syntax_error (_("immediate operand required"));
2570 return false;
2571 }
2572
2573 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT);
2574
2575 if (inst.reloc.exp.X_op == O_constant)
2576 *imm = inst.reloc.exp.X_add_number;
2577
2578 *str = ptr;
2579
2580 return true;
2581 }
2582
2583 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2584 if NEED_LIBOPCODES is non-zero, the fixup will need
2585 assistance from the libopcodes. */
2586
2587 static inline void
2588 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2589 const aarch64_opnd_info *operand,
2590 int need_libopcodes_p)
2591 {
2592 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2593 reloc->opnd = operand->type;
2594 if (need_libopcodes_p)
2595 reloc->need_libopcodes_p = 1;
2596 };
2597
/* Return TRUE if the instruction needs to be fixed up later internally by
   the GAS; otherwise return FALSE.  */

static inline bool
aarch64_gas_internal_fixup_p (void)
{
  /* inst.reloc.type is set to this marker value by
     aarch64_set_gas_internal_fixup.  */
  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
2606
2607 /* Assign the immediate value to the relevant field in *OPERAND if
2608 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2609 needs an internal fixup in a later stage.
2610 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2611 IMM.VALUE that may get assigned with the constant. */
2612 static inline void
2613 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2614 aarch64_opnd_info *operand,
2615 int addr_off_p,
2616 int need_libopcodes_p,
2617 int skip_p)
2618 {
2619 if (reloc->exp.X_op == O_constant)
2620 {
2621 if (addr_off_p)
2622 operand->addr.offset.imm = reloc->exp.X_add_number;
2623 else
2624 operand->imm.value = reloc->exp.X_add_number;
2625 reloc->type = BFD_RELOC_UNUSED;
2626 }
2627 else
2628 {
2629 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2630 /* Tell libopcodes to ignore this operand or not. This is helpful
2631 when one of the operands needs to be fixed up later but we need
2632 libopcodes to check the other operands. */
2633 operand->skip = skip_p;
2634 }
2635 }
2636
2637 /* Relocation modifiers. Each entry in the table contains the textual
2638 name for the relocation which may be placed before a symbol used as
2639 a load/store offset, or add immediate. It must be surrounded by a
2640 leading and trailing colon, for example:
2641
2642 ldr x0, [x1, #:rello:varsym]
2643 add x0, x1, #:rello:varsym */
2644
struct reloc_table_entry
{
  const char *name;		/* Modifier name, without the colons.  */
  int pc_rel;			/* Non-zero if the relocation is PC-relative.  */
  /* Relocation code to use for each instruction class that can carry
     this modifier; 0 where the combination is not allowed.  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};
2656
2657 static struct reloc_table_entry reloc_table[] =
2658 {
2659 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2660 {"lo12", 0,
2661 0, /* adr_type */
2662 0,
2663 0,
2664 BFD_RELOC_AARCH64_ADD_LO12,
2665 BFD_RELOC_AARCH64_LDST_LO12,
2666 0},
2667
2668 /* Higher 21 bits of pc-relative page offset: ADRP */
2669 {"pg_hi21", 1,
2670 0, /* adr_type */
2671 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2672 0,
2673 0,
2674 0,
2675 0},
2676
2677 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2678 {"pg_hi21_nc", 1,
2679 0, /* adr_type */
2680 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2681 0,
2682 0,
2683 0,
2684 0},
2685
2686 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2687 {"abs_g0", 0,
2688 0, /* adr_type */
2689 0,
2690 BFD_RELOC_AARCH64_MOVW_G0,
2691 0,
2692 0,
2693 0},
2694
2695 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2696 {"abs_g0_s", 0,
2697 0, /* adr_type */
2698 0,
2699 BFD_RELOC_AARCH64_MOVW_G0_S,
2700 0,
2701 0,
2702 0},
2703
2704 /* Less significant bits 0-15 of address/value: MOVK, no check */
2705 {"abs_g0_nc", 0,
2706 0, /* adr_type */
2707 0,
2708 BFD_RELOC_AARCH64_MOVW_G0_NC,
2709 0,
2710 0,
2711 0},
2712
2713 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2714 {"abs_g1", 0,
2715 0, /* adr_type */
2716 0,
2717 BFD_RELOC_AARCH64_MOVW_G1,
2718 0,
2719 0,
2720 0},
2721
2722 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2723 {"abs_g1_s", 0,
2724 0, /* adr_type */
2725 0,
2726 BFD_RELOC_AARCH64_MOVW_G1_S,
2727 0,
2728 0,
2729 0},
2730
2731 /* Less significant bits 16-31 of address/value: MOVK, no check */
2732 {"abs_g1_nc", 0,
2733 0, /* adr_type */
2734 0,
2735 BFD_RELOC_AARCH64_MOVW_G1_NC,
2736 0,
2737 0,
2738 0},
2739
2740 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2741 {"abs_g2", 0,
2742 0, /* adr_type */
2743 0,
2744 BFD_RELOC_AARCH64_MOVW_G2,
2745 0,
2746 0,
2747 0},
2748
2749 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2750 {"abs_g2_s", 0,
2751 0, /* adr_type */
2752 0,
2753 BFD_RELOC_AARCH64_MOVW_G2_S,
2754 0,
2755 0,
2756 0},
2757
2758 /* Less significant bits 32-47 of address/value: MOVK, no check */
2759 {"abs_g2_nc", 0,
2760 0, /* adr_type */
2761 0,
2762 BFD_RELOC_AARCH64_MOVW_G2_NC,
2763 0,
2764 0,
2765 0},
2766
2767 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2768 {"abs_g3", 0,
2769 0, /* adr_type */
2770 0,
2771 BFD_RELOC_AARCH64_MOVW_G3,
2772 0,
2773 0,
2774 0},
2775
2776 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2777 {"prel_g0", 1,
2778 0, /* adr_type */
2779 0,
2780 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2781 0,
2782 0,
2783 0},
2784
2785 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2786 {"prel_g0_nc", 1,
2787 0, /* adr_type */
2788 0,
2789 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2790 0,
2791 0,
2792 0},
2793
2794 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2795 {"prel_g1", 1,
2796 0, /* adr_type */
2797 0,
2798 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2799 0,
2800 0,
2801 0},
2802
2803 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2804 {"prel_g1_nc", 1,
2805 0, /* adr_type */
2806 0,
2807 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2808 0,
2809 0,
2810 0},
2811
2812 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2813 {"prel_g2", 1,
2814 0, /* adr_type */
2815 0,
2816 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2817 0,
2818 0,
2819 0},
2820
2821 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2822 {"prel_g2_nc", 1,
2823 0, /* adr_type */
2824 0,
2825 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2826 0,
2827 0,
2828 0},
2829
2830 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2831 {"prel_g3", 1,
2832 0, /* adr_type */
2833 0,
2834 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2835 0,
2836 0,
2837 0},
2838
2839 /* Get to the page containing GOT entry for a symbol. */
2840 {"got", 1,
2841 0, /* adr_type */
2842 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2843 0,
2844 0,
2845 0,
2846 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2847
2848 /* 12 bit offset into the page containing GOT entry for that symbol. */
2849 {"got_lo12", 0,
2850 0, /* adr_type */
2851 0,
2852 0,
2853 0,
2854 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2855 0},
2856
2857 /* 0-15 bits of address/value: MOVk, no check. */
2858 {"gotoff_g0_nc", 0,
2859 0, /* adr_type */
2860 0,
2861 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2862 0,
2863 0,
2864 0},
2865
2866 /* Most significant bits 16-31 of address/value: MOVZ. */
2867 {"gotoff_g1", 0,
2868 0, /* adr_type */
2869 0,
2870 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2871 0,
2872 0,
2873 0},
2874
2875 /* 15 bit offset into the page containing GOT entry for that symbol. */
2876 {"gotoff_lo15", 0,
2877 0, /* adr_type */
2878 0,
2879 0,
2880 0,
2881 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2882 0},
2883
2884 /* Get to the page containing GOT TLS entry for a symbol */
2885 {"gottprel_g0_nc", 0,
2886 0, /* adr_type */
2887 0,
2888 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2889 0,
2890 0,
2891 0},
2892
2893 /* Get to the page containing GOT TLS entry for a symbol */
2894 {"gottprel_g1", 0,
2895 0, /* adr_type */
2896 0,
2897 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2898 0,
2899 0,
2900 0},
2901
2902 /* Get to the page containing GOT TLS entry for a symbol */
2903 {"tlsgd", 0,
2904 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2905 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2906 0,
2907 0,
2908 0,
2909 0},
2910
2911 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2912 {"tlsgd_lo12", 0,
2913 0, /* adr_type */
2914 0,
2915 0,
2916 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2917 0,
2918 0},
2919
2920 /* Lower 16 bits address/value: MOVk. */
2921 {"tlsgd_g0_nc", 0,
2922 0, /* adr_type */
2923 0,
2924 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2925 0,
2926 0,
2927 0},
2928
2929 /* Most significant bits 16-31 of address/value: MOVZ. */
2930 {"tlsgd_g1", 0,
2931 0, /* adr_type */
2932 0,
2933 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2934 0,
2935 0,
2936 0},
2937
2938 /* Get to the page containing GOT TLS entry for a symbol */
2939 {"tlsdesc", 0,
2940 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2941 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2942 0,
2943 0,
2944 0,
2945 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2946
2947 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2948 {"tlsdesc_lo12", 0,
2949 0, /* adr_type */
2950 0,
2951 0,
2952 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2953 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2954 0},
2955
2956 /* Get to the page containing GOT TLS entry for a symbol.
2957 The same as GD, we allocate two consecutive GOT slots
2958 for module index and module offset, the only difference
2959 with GD is the module offset should be initialized to
2960 zero without any outstanding runtime relocation. */
2961 {"tlsldm", 0,
2962 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2963 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2964 0,
2965 0,
2966 0,
2967 0},
2968
2969 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2970 {"tlsldm_lo12_nc", 0,
2971 0, /* adr_type */
2972 0,
2973 0,
2974 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2975 0,
2976 0},
2977
2978 /* 12 bit offset into the module TLS base address. */
2979 {"dtprel_lo12", 0,
2980 0, /* adr_type */
2981 0,
2982 0,
2983 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2984 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2985 0},
2986
2987 /* Same as dtprel_lo12, no overflow check. */
2988 {"dtprel_lo12_nc", 0,
2989 0, /* adr_type */
2990 0,
2991 0,
2992 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2993 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2994 0},
2995
2996 /* bits[23:12] of offset to the module TLS base address. */
2997 {"dtprel_hi12", 0,
2998 0, /* adr_type */
2999 0,
3000 0,
3001 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
3002 0,
3003 0},
3004
3005 /* bits[15:0] of offset to the module TLS base address. */
3006 {"dtprel_g0", 0,
3007 0, /* adr_type */
3008 0,
3009 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
3010 0,
3011 0,
3012 0},
3013
3014 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
3015 {"dtprel_g0_nc", 0,
3016 0, /* adr_type */
3017 0,
3018 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
3019 0,
3020 0,
3021 0},
3022
3023 /* bits[31:16] of offset to the module TLS base address. */
3024 {"dtprel_g1", 0,
3025 0, /* adr_type */
3026 0,
3027 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
3028 0,
3029 0,
3030 0},
3031
3032 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
3033 {"dtprel_g1_nc", 0,
3034 0, /* adr_type */
3035 0,
3036 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
3037 0,
3038 0,
3039 0},
3040
3041 /* bits[47:32] of offset to the module TLS base address. */
3042 {"dtprel_g2", 0,
3043 0, /* adr_type */
3044 0,
3045 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
3046 0,
3047 0,
3048 0},
3049
3050 /* Lower 16 bit offset into GOT entry for a symbol */
3051 {"tlsdesc_off_g0_nc", 0,
3052 0, /* adr_type */
3053 0,
3054 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
3055 0,
3056 0,
3057 0},
3058
3059 /* Higher 16 bit offset into GOT entry for a symbol */
3060 {"tlsdesc_off_g1", 0,
3061 0, /* adr_type */
3062 0,
3063 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
3064 0,
3065 0,
3066 0},
3067
3068 /* Get to the page containing GOT TLS entry for a symbol */
3069 {"gottprel", 0,
3070 0, /* adr_type */
3071 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
3072 0,
3073 0,
3074 0,
3075 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
3076
3077 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
3078 {"gottprel_lo12", 0,
3079 0, /* adr_type */
3080 0,
3081 0,
3082 0,
3083 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
3084 0},
3085
3086 /* Get tp offset for a symbol. */
3087 {"tprel", 0,
3088 0, /* adr_type */
3089 0,
3090 0,
3091 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3092 0,
3093 0},
3094
3095 /* Get tp offset for a symbol. */
3096 {"tprel_lo12", 0,
3097 0, /* adr_type */
3098 0,
3099 0,
3100 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
3101 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
3102 0},
3103
3104 /* Get tp offset for a symbol. */
3105 {"tprel_hi12", 0,
3106 0, /* adr_type */
3107 0,
3108 0,
3109 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
3110 0,
3111 0},
3112
3113 /* Get tp offset for a symbol. */
3114 {"tprel_lo12_nc", 0,
3115 0, /* adr_type */
3116 0,
3117 0,
3118 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
3119 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
3120 0},
3121
3122 /* Most significant bits 32-47 of address/value: MOVZ. */
3123 {"tprel_g2", 0,
3124 0, /* adr_type */
3125 0,
3126 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
3127 0,
3128 0,
3129 0},
3130
3131 /* Most significant bits 16-31 of address/value: MOVZ. */
3132 {"tprel_g1", 0,
3133 0, /* adr_type */
3134 0,
3135 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3136 0,
3137 0,
3138 0},
3139
3140 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
3141 {"tprel_g1_nc", 0,
3142 0, /* adr_type */
3143 0,
3144 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3145 0,
3146 0,
3147 0},
3148
3149 /* Most significant bits 0-15 of address/value: MOVZ. */
3150 {"tprel_g0", 0,
3151 0, /* adr_type */
3152 0,
3153 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3154 0,
3155 0,
3156 0},
3157
3158 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3159 {"tprel_g0_nc", 0,
3160 0, /* adr_type */
3161 0,
3162 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3163 0,
3164 0,
3165 0},
3166
3167 /* 15bit offset from got entry to base address of GOT table. */
3168 {"gotpage_lo15", 0,
3169 0,
3170 0,
3171 0,
3172 0,
3173 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3174 0},
3175
3176 /* 14bit offset from got entry to base address of GOT table. */
3177 {"gotpage_lo14", 0,
3178 0,
3179 0,
3180 0,
3181 0,
3182 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3183 0},
3184 };
3185
3186 /* Given the address of a pointer pointing to the textual name of a
3187 relocation as may appear in assembler source, attempt to find its
3188 details in reloc_table. The pointer will be updated to the character
3189 after the trailing colon. On failure, NULL will be returned;
3190 otherwise return the reloc_table_entry. */
3191
3192 static struct reloc_table_entry *
3193 find_reloc_table_entry (char **str)
3194 {
3195 unsigned int i;
3196 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3197 {
3198 int length = strlen (reloc_table[i].name);
3199
3200 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3201 && (*str)[length] == ':')
3202 {
3203 *str += (length + 1);
3204 return &reloc_table[i];
3205 }
3206 }
3207
3208 return NULL;
3209 }
3210
/* Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK.  TYPE is a BFD_RELOC_* code (see
   aarch64_force_relocation, which passes fixp->fx_r_type).  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 1;

    /* GOT, TLS and PC-relative page relocations: always left for the
       linker to resolve.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_LDST_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      return -1;
    }
}
3313
3314 int
3315 aarch64_force_relocation (struct fix *fixp)
3316 {
3317 int res = aarch64_force_reloc (fixp->fx_r_type);
3318
3319 if (res == -1)
3320 return generic_force_reloc (fixp);
3321 return res;
3322 }
3323
/* Mode argument to parse_shift and parse_shifter_operand: which shift
   or extend syntaxes are acceptable in the current context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3338
/* Parse a <shift> operator on an AArch64 data processing instruction.
   MODE restricts which shift/extend modifiers are acceptable and whether
   an amount is required.  On success the modifier kind and amount are
   recorded in OPERAND->shifter and *STR is advanced past the operator.
   Return TRUE on success; otherwise return FALSE.  */
static bool
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan past the alphabetic operator name (e.g. "lsl", "uxtw").  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return false;
    }

  /* Look the name up in the table of known shift/extend operators.  */
  shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return false;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only meaningful where the caller explicitly allows it.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return false;
    }

  /* Likewise MUL is only valid in the SVE "mul #n" / "mul vl" modes.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return false;
    }

  /* Reject modifiers that MODE does not permit.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return false;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return false;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return false;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return false;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return false;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  A closing ']' in register-offset mode, or a
     "MUL VL" operator, means there is no explicit amount.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* Only a bare extend operator (e.g. "uxtw") may omit the amount,
	 which then defaults to 0; a lone '#' is an error.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return false;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return false;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return false;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return true;
}
3511
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the arithmetic- and logical-immediate forms are handled here.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  The value lands in inst.reloc.exp
     for later fixup rather than in OPERAND.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Not accept any shifter for logical immediate values.  */
  /* NOTE(review): if the comma is consumed but parse_shift then fails,
     this condition is false and we fall through and return true with P
     already past the comma — presumably the caller later rejects the
     leftover text; confirm.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3553
3554 /* Parse a <shifter_operand> for a data processing instruction:
3555
3556 <Rm>
3557 <Rm>, <shift>
3558 #<immediate>
3559 #<immediate>, LSL #imm
3560
3561 where <shift> is handled by parse_shift above, and the last two
3562 cases are handled by the function above.
3563
3564 Validation of immediate operands is deferred to md_apply_fix.
3565
3566 Return TRUE on success; otherwise return FALSE. */
3567
3568 static bool
3569 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3570 enum parse_shift_mode mode)
3571 {
3572 const reg_entry *reg;
3573 aarch64_opnd_qualifier_t qualifier;
3574 enum aarch64_operand_class opd_class
3575 = aarch64_get_operand_class (operand->type);
3576
3577 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3578 if (reg)
3579 {
3580 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3581 {
3582 set_syntax_error (_("unexpected register in the immediate operand"));
3583 return false;
3584 }
3585
3586 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3587 {
3588 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3589 return false;
3590 }
3591
3592 operand->reg.regno = reg->number;
3593 operand->qualifier = qualifier;
3594
3595 /* Accept optional shift operation on register. */
3596 if (! skip_past_comma (str))
3597 return true;
3598
3599 if (! parse_shift (str, operand, mode))
3600 return false;
3601
3602 return true;
3603 }
3604 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3605 {
3606 set_syntax_error
3607 (_("integer register expected in the extended/shifted operand "
3608 "register"));
3609 return false;
3610 }
3611
3612 /* We have a shifted immediate variable. */
3613 return parse_shifter_operand_imm (str, operand, mode);
3614 }
3615
/* Parse a <shifter_operand> that may be introduced by a relocation
   modifier such as ":lo12:".  Anything without a modifier prefix is
   punted to parse_shifter_operand.
   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step over "#:" or ":" to the modifier name.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3677
/* Parse all forms of an address expression.  Information is written
   to *OPERAND and/or inst.reloc.

   The A64 instruction set has the following addressing modes:

   Offset
     [base]			// in SIMD ld/st structure
     [base{,#0}]		// in ld/st exclusive
     [base{,#imm}]
     [base,Xm{,LSL #imm}]
     [base,Xm,SXTX {#imm}]
     [base,Wm,(S|U)XTW {#imm}]
   Pre-indexed
     [base]!			// in ldraa/ldrab exclusive
     [base,#imm]!
   Post-indexed
     [base],#imm
     [base],Xm			// in SIMD ld/st structure
   PC-relative (literal)
     label
   SVE:
     [base,#imm,MUL VL]
     [base,Zm.D{,LSL #imm}]
     [base,Zm.S,(S|U)XTW {#imm}]
     [base,Zm.D,(S|U)XTW {#imm}]	// ignores top 32 bits of Zm.D elements
     [Zn.S,#imm]
     [Zn.D,#imm]
     [Zn.S{, Xm}]
     [Zn.S,Zm.S{,LSL #imm}]	// in ADR
     [Zn.D,Zm.D{,LSL #imm}]	// in ADR
     [Zn.D,Zm.D,(S|U)XTW {#imm}]	// in ADR

   (As a convenience, the notation "=immediate" is permitted in conjunction
   with the pc-relative literal load instructions to automatically place an
   immediate value or symbolic address in a nearby literal pool and generate
   a hidden label which references it.)

   Upon a successful parsing, the address structure in *OPERAND will be
   filled in the following way:

     .base_regno = <base>
     .offset.is_reg	// 1 if the offset is a register
     .offset.imm = <imm>
     .offset.regno = <Rm>

   For different addressing modes defined in the A64 ISA:

   Offset
     .pcrel=0; .preind=1; .postind=0; .writeback=0
   Pre-indexed
     .pcrel=0; .preind=1; .postind=0; .writeback=1
   Post-indexed
     .pcrel=0; .preind=0; .postind=1; .writeback=1
   PC-relative (literal)
     .pcrel=1; .preind=1; .postind=0; .writeback=0

   The shift/extension information, if any, will be stored in .shifter.
   The base and offset qualifiers will be stored in *BASE_QUALIFIER and
   *OFFSET_QUALIFIER respectively, with NIL being used if there's no
   corresponding register.

   BASE_TYPE says which types of base register should be accepted and
   OFFSET_TYPE says the same for offset registers.  IMM_SHIFT_MODE
   is the type of shifter that is allowed for immediate offsets,
   or SHIFTED_NONE if none.

   In all other respects, it is the caller's responsibility to check
   for addressing modes not supported by the instruction, and to set
   inst.reloc.type.  */

static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is the PC-relative form.  */
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* Choose the reloc variant by the operand being parsed.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Parse the base register; BASE_TYPE controls what is acceptable.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifts require a 64-bit (X or Z.D) offset.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* SVE_ADDR_ZX (Z.S base with X offset) is the one sanctioned
		 mixed-size combination.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* The (S|U)XTW extends only make sense on a W offset.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* Treat [Rn] as [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
4046
4047 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
4048 on success. */
4049 static bool
4050 parse_address (char **str, aarch64_opnd_info *operand)
4051 {
4052 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
4053 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
4054 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
4055 }
4056
4057 /* Parse an address in which SVE vector registers and MUL VL are allowed.
4058 The arguments have the same meaning as for parse_address_main.
4059 Return TRUE on success. */
4060 static bool
4061 parse_sve_address (char **str, aarch64_opnd_info *operand,
4062 aarch64_opnd_qualifier_t *base_qualifier,
4063 aarch64_opnd_qualifier_t *offset_qualifier)
4064 {
4065 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
4066 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
4067 SHIFTED_MUL_VL);
4068 }
4069
4070 /* Parse a register X0-X30. The register must be 64-bit and register 31
4071 is unallocated. */
4072 static bool
4073 parse_x0_to_x30 (char **str, aarch64_opnd_info *operand)
4074 {
4075 const reg_entry *reg = parse_reg (str);
4076 if (!reg || !aarch64_check_reg_type (reg, REG_TYPE_R_64))
4077 {
4078 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
4079 return false;
4080 }
4081 operand->reg.regno = reg->number;
4082 operand->qualifier = AARCH64_OPND_QLF_X;
4083 return true;
4084 }
4085
4086 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
4087 Return TRUE on success; otherwise return FALSE. */
4088 static bool
4089 parse_half (char **str, int *internal_fixup_p)
4090 {
4091 char *p = *str;
4092
4093 skip_past_char (&p, '#');
4094
4095 gas_assert (internal_fixup_p);
4096 *internal_fixup_p = 0;
4097
4098 if (*p == ':')
4099 {
4100 struct reloc_table_entry *entry;
4101
4102 /* Try to parse a relocation. Anything else is an error. */
4103 ++p;
4104
4105 if (!(entry = find_reloc_table_entry (&p)))
4106 {
4107 set_syntax_error (_("unknown relocation modifier"));
4108 return false;
4109 }
4110
4111 if (entry->movw_type == 0)
4112 {
4113 set_syntax_error
4114 (_("this relocation modifier is not allowed on this instruction"));
4115 return false;
4116 }
4117
4118 inst.reloc.type = entry->movw_type;
4119 }
4120 else
4121 *internal_fixup_p = 1;
4122
4123 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4124 return false;
4125
4126 *str = p;
4127 return true;
4128 }
4129
4130 /* Parse an operand for an ADRP instruction:
4131 ADRP <Xd>, <label>
4132 Return TRUE on success; otherwise return FALSE. */
4133
4134 static bool
4135 parse_adrp (char **str)
4136 {
4137 char *p;
4138
4139 p = *str;
4140 if (*p == ':')
4141 {
4142 struct reloc_table_entry *entry;
4143
4144 /* Try to parse a relocation. Anything else is an error. */
4145 ++p;
4146 if (!(entry = find_reloc_table_entry (&p)))
4147 {
4148 set_syntax_error (_("unknown relocation modifier"));
4149 return false;
4150 }
4151
4152 if (entry->adrp_type == 0)
4153 {
4154 set_syntax_error
4155 (_("this relocation modifier is not allowed on this instruction"));
4156 return false;
4157 }
4158
4159 inst.reloc.type = entry->adrp_type;
4160 }
4161 else
4162 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
4163
4164 inst.reloc.pc_rel = 1;
4165 if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT))
4166 return false;
4167 *str = p;
4168 return true;
4169 }
4170
4171 /* Miscellaneous. */
4172
4173 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4174 of SIZE tokens in which index I gives the token for field value I,
4175 or is null if field value I is invalid. REG_TYPE says which register
4176 names should be treated as registers rather than as symbolic immediates.
4177
4178 Return true on success, moving *STR past the operand and storing the
4179 field value in *VAL. */
4180
4181 static int
4182 parse_enum_string (char **str, int64_t *val, const char *const *array,
4183 size_t size, aarch64_reg_type reg_type)
4184 {
4185 expressionS exp;
4186 char *p, *q;
4187 size_t i;
4188
4189 /* Match C-like tokens. */
4190 p = q = *str;
4191 while (ISALNUM (*q))
4192 q++;
4193
4194 for (i = 0; i < size; ++i)
4195 if (array[i]
4196 && strncasecmp (array[i], p, q - p) == 0
4197 && array[i][q - p] == 0)
4198 {
4199 *val = i;
4200 *str = q;
4201 return true;
4202 }
4203
4204 if (!parse_immediate_expression (&p, &exp, reg_type))
4205 return false;
4206
4207 if (exp.X_op == O_constant
4208 && (uint64_t) exp.X_add_number < size)
4209 {
4210 *val = exp.X_add_number;
4211 *str = p;
4212 return true;
4213 }
4214
4215 /* Use the default error for this operand. */
4216 return false;
4217 }
4218
4219 /* Parse an option for a preload instruction. Returns the encoding for the
4220 option, or PARSE_FAIL. */
4221
4222 static int
4223 parse_pldop (char **str)
4224 {
4225 char *p, *q;
4226 const struct aarch64_name_value_pair *o;
4227
4228 p = q = *str;
4229 while (ISALNUM (*q))
4230 q++;
4231
4232 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4233 if (!o)
4234 return PARSE_FAIL;
4235
4236 *str = q;
4237 return o->value;
4238 }
4239
4240 /* Parse an option for a barrier instruction. Returns the encoding for the
4241 option, or PARSE_FAIL. */
4242
4243 static int
4244 parse_barrier (char **str)
4245 {
4246 char *p, *q;
4247 const struct aarch64_name_value_pair *o;
4248
4249 p = q = *str;
4250 while (ISALPHA (*q))
4251 q++;
4252
4253 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4254 if (!o)
4255 return PARSE_FAIL;
4256
4257 *str = q;
4258 return o->value;
4259 }
4260
4261 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4262 return 0 if successful. Otherwise return PARSE_FAIL. */
4263
4264 static int
4265 parse_barrier_psb (char **str,
4266 const struct aarch64_name_value_pair ** hint_opt)
4267 {
4268 char *p, *q;
4269 const struct aarch64_name_value_pair *o;
4270
4271 p = q = *str;
4272 while (ISALPHA (*q))
4273 q++;
4274
4275 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4276 if (!o)
4277 {
4278 set_fatal_syntax_error
4279 ( _("unknown or missing option to PSB/TSB"));
4280 return PARSE_FAIL;
4281 }
4282
4283 if (o->value != 0x11)
4284 {
4285 /* PSB only accepts option name 'CSYNC'. */
4286 set_syntax_error
4287 (_("the specified option is not accepted for PSB/TSB"));
4288 return PARSE_FAIL;
4289 }
4290
4291 *str = q;
4292 *hint_opt = o;
4293 return 0;
4294 }
4295
4296 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4297 return 0 if successful. Otherwise return PARSE_FAIL. */
4298
4299 static int
4300 parse_bti_operand (char **str,
4301 const struct aarch64_name_value_pair ** hint_opt)
4302 {
4303 char *p, *q;
4304 const struct aarch64_name_value_pair *o;
4305
4306 p = q = *str;
4307 while (ISALPHA (*q))
4308 q++;
4309
4310 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4311 if (!o)
4312 {
4313 set_fatal_syntax_error
4314 ( _("unknown option to BTI"));
4315 return PARSE_FAIL;
4316 }
4317
4318 switch (o->value)
4319 {
4320 /* Valid BTI operands. */
4321 case HINT_OPD_C:
4322 case HINT_OPD_J:
4323 case HINT_OPD_JC:
4324 break;
4325
4326 default:
4327 set_syntax_error
4328 (_("unknown option to BTI"));
4329 return PARSE_FAIL;
4330 }
4331
4332 *str = q;
4333 *hint_opt = o;
4334 return 0;
4335 }
4336
4337 /* Parse STR for reg of REG_TYPE and following '.' and QUALIFIER.
4338 Function returns REG_ENTRY struct and QUALIFIER [bhsdq] or NULL
4339 on failure. Format:
4340
4341 REG_TYPE.QUALIFIER
4342
4343 Side effect: Update STR with current parse position of success.
4344 */
4345
4346 static const reg_entry *
4347 parse_reg_with_qual (char **str, aarch64_reg_type reg_type,
4348 aarch64_opnd_qualifier_t *qualifier)
4349 {
4350 struct vector_type_el vectype;
4351 const reg_entry *reg = parse_typed_reg (str, reg_type, &vectype,
4352 PTR_FULL_REG);
4353 if (!reg)
4354 return NULL;
4355
4356 if (vectype.type == NT_invtype)
4357 *qualifier = AARCH64_OPND_QLF_NIL;
4358 else
4359 {
4360 *qualifier = vectype_to_qualifier (&vectype);
4361 if (*qualifier == AARCH64_OPND_QLF_NIL)
4362 return NULL;
4363 }
4364
4365 return reg;
4366 }
4367
4368 /* Parse STR for unsigned, immediate (1-2 digits) in format:
4369
4370 #<imm>
4371 <imm>
4372
4373 Function return TRUE if immediate was found, or FALSE.
4374 */
4375 static bool
4376 parse_sme_immediate (char **str, int64_t *imm)
4377 {
4378 int64_t val;
4379 if (! parse_constant_immediate (str, &val, REG_TYPE_R_N))
4380 return false;
4381
4382 *imm = val;
4383 return true;
4384 }
4385
/* Parse index with vector select register and immediate:

     [<Wv>, <imm>]
     [<Wv>, #<imm>]
   where <Wv> is in W12-W15 range and # is optional for immediate.

   Function performs extra check for mandatory immediate value if REQUIRE_IMM
   is set to true.

   On success function returns TRUE and populated VECTOR_SELECT_REGISTER and
   IMM output.
*/
static bool
parse_sme_za_hv_tiles_operand_index (char **str,
				     int *vector_select_register,
				     int64_t *imm)
{
  const reg_entry *reg;

  /* The whole index must be bracketed: "[<Wv>, <imm>]".  */
  if (!skip_past_char (str, '['))
    {
      set_syntax_error (_("expected '['"));
      return false;
    }

  /* Vector select register W12-W15 encoded in the 2-bit Rv field.  */
  reg = parse_reg (str);
  if (reg == NULL || reg->type != REG_TYPE_R_32
      || reg->number < 12 || reg->number > 15)
    {
      set_syntax_error (_("expected vector select register W12-W15"));
      return false;
    }
  *vector_select_register = reg->number;

  if (!skip_past_char (str, ',')) /* Optional index offset immediate.  */
    {
      set_syntax_error (_("expected ','"));
      return false;
    }

  /* The immediate itself; range checking is left to the caller.  */
  if (!parse_sme_immediate (str, imm))
    {
      set_syntax_error (_("index offset immediate expected"));
      return false;
    }

  if (!skip_past_char (str, ']'))
    {
      set_syntax_error (_("expected ']'"));
      return false;
    }

  return true;
}
4441
/* Parse SME ZA horizontal or vertical vector access to tiles.
   Function extracts from STR to SLICE_INDICATOR <HV> horizontal (0) or
   vertical (1) ZA tile vector orientation. VECTOR_SELECT_REGISTER
   contains <Wv> select register and corresponding optional IMMEDIATE.
   In addition QUALIFIER is extracted.

   Field format examples:

     ZA0<HV>.B[<Wv>, #<imm>]
     <ZAn><HV>.H[<Wv>, #<imm>]
     <ZAn><HV>.S[<Wv>, #<imm>]
     <ZAn><HV>.D[<Wv>, #<imm>]
     <ZAn><HV>.Q[<Wv>, #<imm>]

   Function returns <ZAda> register number or PARSE_FAIL.
*/
static int
parse_sme_za_hv_tiles_operand (char **str,
			       enum sme_hv_slice *slice_indicator,
			       int *vector_select_register,
			       int *imm,
			       aarch64_opnd_qualifier_t *qualifier)
{
  int regno;
  int64_t imm_limit;
  int64_t imm_value;
  const reg_entry *reg;

  /* Parse "<ZAn><HV>.<qual>"; REG_TYPE_ZATHV accepts both orientations.  */
  reg = parse_reg_with_qual (str, REG_TYPE_ZATHV, qualifier);
  if (!reg)
    return PARSE_FAIL;

  /* Horizontal if the register is of the "H" flavour, else vertical.  */
  *slice_indicator = (aarch64_check_reg_type (reg, REG_TYPE_ZATH)
		      ? HV_horizontal
		      : HV_vertical);
  regno = reg->number;

  /* Maximum index offset for each element size (b:15 ... q:0).  */
  switch (*qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      imm_limit = 15;
      break;
    case AARCH64_OPND_QLF_S_H:
      imm_limit = 7;
      break;
    case AARCH64_OPND_QLF_S_S:
      imm_limit = 3;
      break;
    case AARCH64_OPND_QLF_S_D:
      imm_limit = 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      imm_limit = 0;
      break;
    default:
      set_syntax_error (_("invalid ZA tile element size, allowed b, h, s, d and q"));
      return PARSE_FAIL;
    }

  if (!parse_sme_za_hv_tiles_operand_index (str, vector_select_register,
					    &imm_value))
    return PARSE_FAIL;

  /* Check if optional index offset is in the range for instruction
     variant.  */
  if (imm_value < 0 || imm_value > imm_limit)
    {
      set_syntax_error (_("index offset out of range"));
      return PARSE_FAIL;
    }

  *imm = imm_value;

  return regno;
}
4517
4518
4519 static int
4520 parse_sme_za_hv_tiles_operand_with_braces (char **str,
4521 enum sme_hv_slice *slice_indicator,
4522 int *vector_select_register,
4523 int *imm,
4524 aarch64_opnd_qualifier_t *qualifier)
4525 {
4526 int regno;
4527
4528 if (!skip_past_char (str, '{'))
4529 {
4530 set_syntax_error (_("expected '{'"));
4531 return PARSE_FAIL;
4532 }
4533
4534 regno = parse_sme_za_hv_tiles_operand (str, slice_indicator,
4535 vector_select_register, imm,
4536 qualifier);
4537
4538 if (regno == PARSE_FAIL)
4539 return PARSE_FAIL;
4540
4541 if (!skip_past_char (str, '}'))
4542 {
4543 set_syntax_error (_("expected '}'"));
4544 return PARSE_FAIL;
4545 }
4546
4547 return regno;
4548 }
4549
4550 /* Parse list of up to eight 64-bit element tile names separated by commas in
4551 SME's ZERO instruction:
4552
4553 ZERO { <mask> }
4554
4555 Function returns <mask>:
4556
4557 an 8-bit list of 64-bit element tiles named ZA0.D to ZA7.D.
4558 */
4559 static int
4560 parse_sme_zero_mask(char **str)
4561 {
4562 char *q;
4563 int mask;
4564 aarch64_opnd_qualifier_t qualifier;
4565
4566 mask = 0x00;
4567 q = *str;
4568 do
4569 {
4570 const reg_entry *reg = parse_reg_with_qual (&q, REG_TYPE_ZA_ZAT,
4571 &qualifier);
4572 if (!reg)
4573 return PARSE_FAIL;
4574
4575 if (reg->type == REG_TYPE_ZA)
4576 {
4577 if (qualifier != AARCH64_OPND_QLF_NIL)
4578 {
4579 set_syntax_error ("ZA should not have a size suffix");
4580 return PARSE_FAIL;
4581 }
4582 /* { ZA } is assembled as all-ones immediate. */
4583 mask = 0xff;
4584 }
4585 else
4586 {
4587 int regno = reg->number;
4588 if (qualifier == AARCH64_OPND_QLF_S_B)
4589 {
4590 /* { ZA0.B } is assembled as all-ones immediate. */
4591 mask = 0xff;
4592 }
4593 else if (qualifier == AARCH64_OPND_QLF_S_H)
4594 mask |= 0x55 << regno;
4595 else if (qualifier == AARCH64_OPND_QLF_S_S)
4596 mask |= 0x11 << regno;
4597 else if (qualifier == AARCH64_OPND_QLF_S_D)
4598 mask |= 0x01 << regno;
4599 else if (qualifier == AARCH64_OPND_QLF_S_Q)
4600 {
4601 set_syntax_error (_("ZA tile masks do not operate at .Q"
4602 " granularity"));
4603 return PARSE_FAIL;
4604 }
4605 else if (qualifier == AARCH64_OPND_QLF_NIL)
4606 {
4607 set_syntax_error (_("missing ZA tile size"));
4608 return PARSE_FAIL;
4609 }
4610 else
4611 {
4612 set_syntax_error (_("invalid ZA tile"));
4613 return PARSE_FAIL;
4614 }
4615 }
4616 }
4617 while (skip_past_char (&q, ','));
4618
4619 *str = q;
4620 return mask;
4621 }
4622
4623 /* Wraps in curly braces <mask> operand ZERO instruction:
4624
4625 ZERO { <mask> }
4626
4627 Function returns value of <mask> bit-field.
4628 */
4629 static int
4630 parse_sme_list_of_64bit_tiles (char **str)
4631 {
4632 int regno;
4633
4634 if (!skip_past_char (str, '{'))
4635 {
4636 set_syntax_error (_("expected '{'"));
4637 return PARSE_FAIL;
4638 }
4639
4640 /* Empty <mask> list is an all-zeros immediate. */
4641 if (!skip_past_char (str, '}'))
4642 {
4643 regno = parse_sme_zero_mask (str);
4644 if (regno == PARSE_FAIL)
4645 return PARSE_FAIL;
4646
4647 if (!skip_past_char (str, '}'))
4648 {
4649 set_syntax_error (_("expected '}'"));
4650 return PARSE_FAIL;
4651 }
4652 }
4653 else
4654 regno = 0x00;
4655
4656 return regno;
4657 }
4658
4659 /* Parse ZA array operand used in e.g. STR and LDR instruction.
4660 Operand format:
4661
4662 ZA[<Wv>, <imm>]
4663 ZA[<Wv>, #<imm>]
4664
4665 Function returns <Wv> or PARSE_FAIL.
4666 */
4667 static int
4668 parse_sme_za_array (char **str, int *imm)
4669 {
4670 char *q;
4671 int regno;
4672 int64_t imm_value;
4673
4674 q = *str;
4675 const reg_entry *reg = parse_reg (&q);
4676 if (!reg || reg->type != REG_TYPE_ZA)
4677 {
4678 set_syntax_error (_("expected ZA array"));
4679 return PARSE_FAIL;
4680 }
4681
4682 if (! parse_sme_za_hv_tiles_operand_index (&q, &regno, &imm_value))
4683 return PARSE_FAIL;
4684
4685 if (imm_value < 0 || imm_value > 15)
4686 {
4687 set_syntax_error (_("offset out of range"));
4688 return PARSE_FAIL;
4689 }
4690
4691 *imm = imm_value;
4692 *str = q;
4693 return regno;
4694 }
4695
/* Parse streaming mode operand for SMSTART and SMSTOP.

     {SM | ZA}

   Function returns 's' if SM or 'z' if ZA is parsed.  Otherwise PARSE_FAIL.
*/
static int
parse_sme_sm_za (char **str)
{
  char *p, *q;

  /* Scan over the alphabetic run starting at *str.  */
  p = q = *str;
  while (ISALPHA (*q))
    q++;

  /* The operand must be exactly the two letters "sm" or "za",
     case-insensitively.  */
  if ((q - p != 2)
      || (strncasecmp ("sm", p, 2) != 0 && strncasecmp ("za", p, 2) != 0))
    {
      set_syntax_error (_("expected SM or ZA operand"));
      return PARSE_FAIL;
    }

  *str = q;
  /* The lower-cased first letter ('s' or 'z') uniquely identifies which
     operand was parsed.  */
  return TOLOWER (p[0]);
}
4721
4722 /* Parse the name of the source scalable predicate register, the index base
4723 register W12-W15 and the element index. Function performs element index
4724 limit checks as well as qualifier type checks.
4725
4726 <Pn>.<T>[<Wv>, <imm>]
4727 <Pn>.<T>[<Wv>, #<imm>]
4728
4729 On success function sets <Wv> to INDEX_BASE_REG, <T> to QUALIFIER and
4730 <imm> to IMM.
4731 Function returns <Pn>, or PARSE_FAIL.
4732 */
4733 static int
4734 parse_sme_pred_reg_with_index(char **str,
4735 int *index_base_reg,
4736 int *imm,
4737 aarch64_opnd_qualifier_t *qualifier)
4738 {
4739 int regno;
4740 int64_t imm_limit;
4741 int64_t imm_value;
4742 const reg_entry *reg = parse_reg_with_qual (str, REG_TYPE_PN, qualifier);
4743
4744 if (reg == NULL)
4745 return PARSE_FAIL;
4746 regno = reg->number;
4747
4748 switch (*qualifier)
4749 {
4750 case AARCH64_OPND_QLF_S_B:
4751 imm_limit = 15;
4752 break;
4753 case AARCH64_OPND_QLF_S_H:
4754 imm_limit = 7;
4755 break;
4756 case AARCH64_OPND_QLF_S_S:
4757 imm_limit = 3;
4758 break;
4759 case AARCH64_OPND_QLF_S_D:
4760 imm_limit = 1;
4761 break;
4762 default:
4763 set_syntax_error (_("wrong predicate register element size, allowed b, h, s and d"));
4764 return PARSE_FAIL;
4765 }
4766
4767 if (! parse_sme_za_hv_tiles_operand_index (str, index_base_reg, &imm_value))
4768 return PARSE_FAIL;
4769
4770 if (imm_value < 0 || imm_value > imm_limit)
4771 {
4772 set_syntax_error (_("element index out of range for given variant"));
4773 return PARSE_FAIL;
4774 }
4775
4776 *imm = imm_value;
4777
4778 return regno;
4779 }
4780
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.
*/

static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased view of the identifier into BUF, truncating at the
     buffer limit but continuing to scan the input.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the MSR/MRS encoding:
	     op0 [15:14], op1 [13:11], CRn [10:7], CRm [6:3], op2 [2:0].  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: diagnose unsupported or deprecated registers but still
	 return the encoding so assembly can continue.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4854
4855 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4856 for the option, or NULL. */
4857
4858 static const aarch64_sys_ins_reg *
4859 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4860 {
4861 char *p, *q;
4862 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4863 const aarch64_sys_ins_reg *o;
4864
4865 p = buf;
4866 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4867 if (p < buf + (sizeof (buf) - 1))
4868 *p++ = TOLOWER (*q);
4869 *p = '\0';
4870
4871 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4872 valid system register. This is enforced by construction of the hash
4873 table. */
4874 if (p - buf != q - *str)
4875 return NULL;
4876
4877 o = str_hash_find (sys_ins_regs, buf);
4878 if (!o)
4879 return NULL;
4880
4881 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4882 o->name, o->value, o->flags, 0))
4883 as_bad (_("selected processor does not support system register "
4884 "name '%s'"), buf);
4885 if (aarch64_sys_reg_deprecated_p (o->flags))
4886 as_warn (_("system register name '%s' is deprecated and may be "
4887 "removed in a future release"), buf);
4888
4889 *str = q;
4890 return o;
4891 }
4892 \f
/* Parsing helper macros used inside parse_operands.  They assume the
   caller's locals `str', `reg', `val', `qualifier', `info' and
   `imm_reg_type' are in scope, and that a `failure' label exists to
   jump to on any parse error.  */

/* Consume the single character CHR or bail out.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into `reg' or bail out.  */
#define po_reg_or_fail(regtype) do {				\
    reg = aarch64_reg_parse (&str, regtype, NULL);		\
    if (!reg)							\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register, storing its number and qualifier
   into the current operand `info', or bail out.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into `val' without range checking.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into `val' and require MIN <= val <= MAX.
   MIN and MAX are stringized into the diagnostic, so pass literals.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse one of the strings in ARRAY, storing its index in `val'.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; bail out if it is false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4944 \f
/* encode the 12-bit imm field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  /* imm12 occupies instruction bits [21:10].  */
  uint32_t field = imm;
  return field << 10;
}
4951
/* encode the shift amount field of Add/sub immediate */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  /* The shift field sits at instruction bit 22.  */
  uint32_t field = cnt;
  return field << 22;
}
4958
4959
/* encode the imm field of Adr instruction */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  /* ADR splits its immediate: the low two bits (immlo) land in
     instruction bits [30:29], bits [20:2] (immhi) in bits [23:5].  */
  uint32_t immlo = (imm & 0x3) << 29;
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;
  return immlo | immhi;
}
4967
/* encode the immediate field of Move wide immediate */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  /* imm16 occupies instruction bits [20:5].  */
  uint32_t field = imm;
  return field << 5;
}
4974
/* encode the 26-bit offset of unconditional branch */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t mask26 = (1u << 26) - 1;
  return ofs & mask26;
}
4981
/* encode the 19-bit offset of conditional branch and compare & branch */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t mask19 = (1u << 19) - 1;
  return (ofs & mask19) << 5;
}
4988
/* encode the 19-bit offset of ld literal */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t mask19 = (1u << 19) - 1;
  return (ofs & mask19) << 5;
}
4995
/* Encode the 14-bit offset of test & branch.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t mask14 = (1u << 14) - 1;
  return (ofs & mask14) << 5;
}
5002
/* Encode the 16-bit imm field of svc/hvc/smc.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  /* imm16 occupies instruction bits [20:5].  */
  uint32_t field = imm;
  return field << 5;
}
5009
/* Reencode add(s) to sub(s), or sub(s) to add(s).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  /* Instruction bit 30 selects between ADD and SUB; flip it.  */
  return opcode ^ (1u << 30);
}
5016
/* Force a MOVZ/MOVN-family opcode to its MOVZ form by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;
  return opcode | opc_bit;
}
5022
/* Force a MOVZ/MOVN-family opcode to its MOVN form by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;
  return opcode & ~opc_bit;
}
5028
5029 /* Overall per-instruction processing. */
5030
5031 /* We need to be able to fix up arbitrary expressions in some statements.
5032 This is so that we can handle symbols that are an arbitrary distance from
5033 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
5034 which returns part of an address in a form which will be valid for
5035 a data instruction. We do this by pushing the expression into a symbol
5036 in the expr_section, and creating a fix for that. */
5037
5038 static fixS *
5039 fix_new_aarch64 (fragS * frag,
5040 int where,
5041 short int size,
5042 expressionS * exp,
5043 int pc_rel,
5044 int reloc)
5045 {
5046 fixS *new_fix;
5047
5048 switch (exp->X_op)
5049 {
5050 case O_constant:
5051 case O_symbol:
5052 case O_add:
5053 case O_subtract:
5054 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
5055 break;
5056
5057 default:
5058 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
5059 pc_rel, reloc);
5060 break;
5061 }
5062 return new_fix;
5063 }
5064 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.
   When set, AARCH64_OPDE_INVALID_VARIANT diagnostics also print the
   "did you mean this?" hint with the corrected instruction and other
   valid variants.  */
static int verbose_error_p = 1;
5070
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.
   This array is indexed directly by enum aarch64_operand_error_kind
   (see DEBUG_TRACE in add_operand_error_record), so it must list one
   name per enumerator, in enum order; the UNTIED entries were missing,
   which misaligned every name after AARCH64_OPDE_INVALID_VARIANT.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_A_SHOULD_FOLLOW_B",
  "AARCH64_OPDE_EXPECTED_A_AFTER_B",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_UNTIED_IMMS",
  "AARCH64_OPDE_UNTIED_OPERAND",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
5088
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* Severity is simply the numeric order of the enumerators; these
     asserts document (and check at runtime) the ordering this function
     relies on.  NOTE(review): AARCH64_OPDE_UNTIED_IMMS and
     AARCH64_OPDE_UNTIED_OPERAND are not covered by these checks --
     confirm their intended position in the enum.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_A_SHOULD_FOLLOW_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_EXPECTED_A_AFTER_B > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_A_SHOULD_FOLLOW_B);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_EXPECTED_A_AFTER_B);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
5112
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.

   Returns a pointer to a static buffer, so the result is only valid
   until the next call and the function is not reentrant.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
5141
5142 static void
5143 reset_aarch64_instruction (aarch64_instruction *instruction)
5144 {
5145 memset (instruction, '\0', sizeof (aarch64_instruction));
5146 instruction->reloc.type = BFD_RELOC_UNUSED;
5147 }
5148
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode template this error was
					   recorded against.  */
  aarch64_operand_error detail;		/* The error information itself.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

struct operand_errors
{
  operand_error_record *head;	/* First record of the list.  */
  operand_error_record *tail;	/* Last record; used to splice the whole
				   list onto the free list at once.  */
};

typedef struct operand_errors operand_errors;
5168
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled from previous lines' reports to avoid
   repeated allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
5180
5181 /* Initialize the data structure that stores the operand mismatch
5182 information on assembling one line of the assembly code. */
5183 static void
5184 init_operand_error_report (void)
5185 {
5186 if (operand_error_report.head != NULL)
5187 {
5188 gas_assert (operand_error_report.tail != NULL);
5189 operand_error_report.tail->next = free_opnd_error_record_nodes;
5190 free_opnd_error_record_nodes = operand_error_report.head;
5191 operand_error_report.head = NULL;
5192 operand_error_report.tail = NULL;
5193 return;
5194 }
5195 gas_assert (operand_error_report.tail == NULL);
5196 }
5197
5198 /* Return TRUE if some operand error has been recorded during the
5199 parsing of the current assembly line using the opcode *OPCODE;
5200 otherwise return FALSE. */
5201 static inline bool
5202 opcode_has_operand_error_p (const aarch64_opcode *opcode)
5203 {
5204 operand_error_record *record = operand_error_report.head;
5205 return record && record->opcode == opcode;
5206 }
5207
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Recycle from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* At this point RECORD is the head entry for OPCODE -- either freshly
     linked above or the pre-existing one -- so overwrite its payload.  */
  record->detail = new_record->detail;
}
5259
5260 static inline void
5261 record_operand_error_info (const aarch64_opcode *opcode,
5262 aarch64_operand_error *error_info)
5263 {
5264 operand_error_record record;
5265 record.opcode = opcode;
5266 record.detail = *error_info;
5267 add_operand_error_record (&record);
5268 }
5269
5270 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
5271 error message *ERROR, for operand IDX (count from 0). */
5272
5273 static void
5274 record_operand_error (const aarch64_opcode *opcode, int idx,
5275 enum aarch64_operand_error_kind kind,
5276 const char* error)
5277 {
5278 aarch64_operand_error info;
5279 memset(&info, 0, sizeof (info));
5280 info.index = idx;
5281 info.kind = kind;
5282 info.error = error;
5283 info.non_fatal = false;
5284 record_operand_error_info (opcode, &info);
5285 }
5286
5287 static void
5288 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
5289 enum aarch64_operand_error_kind kind,
5290 const char* error, const int *extra_data)
5291 {
5292 aarch64_operand_error info;
5293 info.index = idx;
5294 info.kind = kind;
5295 info.error = error;
5296 info.data[0].i = extra_data[0];
5297 info.data[1].i = extra_data[1];
5298 info.data[2].i = extra_data[2];
5299 info.non_fatal = false;
5300 record_operand_error_info (opcode, &info);
5301 }
5302
5303 static void
5304 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
5305 const char* error, int lower_bound,
5306 int upper_bound)
5307 {
5308 int data[3] = {lower_bound, upper_bound, 0};
5309 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
5310 error, data);
5311 }
5312
5313 /* Remove the operand error record for *OPCODE. */
5314 static void ATTRIBUTE_UNUSED
5315 remove_operand_error_record (const aarch64_opcode *opcode)
5316 {
5317 if (opcode_has_operand_error_p (opcode))
5318 {
5319 operand_error_record* record = operand_error_report.head;
5320 gas_assert (record != NULL && operand_error_report.tail != NULL);
5321 operand_error_report.head = record->next;
5322 record->next = free_opnd_error_record_nodes;
5323 free_opnd_error_record_nodes = record;
5324 if (operand_error_report.head == NULL)
5325 {
5326 gas_assert (operand_error_report.tail == record);
5327 operand_error_report.tail = NULL;
5328 }
5329 }
5330 }
5331
5332 /* Given the instruction in *INSTR, return the index of the best matched
5333 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
5334
5335 Return -1 if there is no qualifier sequence; return the first match
5336 if there is multiple matches found. */
5337
5338 static int
5339 find_best_match (const aarch64_inst *instr,
5340 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
5341 {
5342 int i, num_opnds, max_num_matched, idx;
5343
5344 num_opnds = aarch64_num_of_operands (instr->opcode);
5345 if (num_opnds == 0)
5346 {
5347 DEBUG_TRACE ("no operand");
5348 return -1;
5349 }
5350
5351 max_num_matched = 0;
5352 idx = 0;
5353
5354 /* For each pattern. */
5355 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
5356 {
5357 int j, num_matched;
5358 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
5359
5360 /* Most opcodes has much fewer patterns in the list. */
5361 if (empty_qualifier_sequence_p (qualifiers))
5362 {
5363 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
5364 break;
5365 }
5366
5367 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
5368 if (*qualifiers == instr->operands[j].qualifier)
5369 ++num_matched;
5370
5371 if (num_matched > max_num_matched)
5372 {
5373 max_num_matched = num_matched;
5374 idx = i;
5375 }
5376 }
5377
5378 DEBUG_TRACE ("return with %d", idx);
5379 return idx;
5380 }
5381
5382 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
5383 corresponding operands in *INSTR. */
5384
5385 static inline void
5386 assign_qualifier_sequence (aarch64_inst *instr,
5387 const aarch64_opnd_qualifier_t *qualifiers)
5388 {
5389 int i = 0;
5390 int num_opnds = aarch64_num_of_operands (instr->opcode);
5391 gas_assert (num_opnds);
5392 for (i = 0; i < num_opnds; ++i, ++qualifiers)
5393 instr->operands[i].qualifier = *qualifiers;
5394 }
5395
5396 /* Callback used by aarch64_print_operand to apply STYLE to the
5397 disassembler output created from FMT and ARGS. The STYLER object holds
5398 any required state. Must return a pointer to a string (created from FMT
5399 and ARGS) that will continue to be valid until the complete disassembled
5400 instruction has been printed.
5401
5402 We don't currently add any styling to the output of the disassembler as
5403 used within assembler error messages, and so STYLE is ignored here. A
5404 new string is allocated on the obstack help within STYLER and returned
5405 to the caller. */
5406
5407 static const char *aarch64_apply_style
5408 (struct aarch64_styler *styler,
5409 enum disassembler_style style ATTRIBUTE_UNUSED,
5410 const char *fmt, va_list args)
5411 {
5412 int res;
5413 char *ptr;
5414 struct obstack *stack = (struct obstack *) styler->state;
5415 va_list ap;
5416
5417 /* Calculate the required space. */
5418 va_copy (ap, args);
5419 res = vsnprintf (NULL, 0, fmt, ap);
5420 va_end (ap);
5421 gas_assert (res >= 0);
5422
5423 /* Allocate space on the obstack and format the result. */
5424 ptr = (char *) obstack_alloc (stack, res + 1);
5425 res = vsnprintf (ptr, (res + 1), fmt, args);
5426 gas_assert (res >= 0);
5427
5428 return ptr;
5429 }
5430
5431 /* Print operands for the diagnosis purpose. */
5432
5433 static void
5434 print_operands (char *buf, const aarch64_opcode *opcode,
5435 const aarch64_opnd_info *opnds)
5436 {
5437 int i;
5438 struct aarch64_styler styler;
5439 struct obstack content;
5440 obstack_init (&content);
5441
5442 styler.apply_style = aarch64_apply_style;
5443 styler.state = (void *) &content;
5444
5445 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
5446 {
5447 char str[128];
5448 char cmt[128];
5449
5450 /* We regard the opcode operand info more, however we also look into
5451 the inst->operands to support the disassembling of the optional
5452 operand.
5453 The two operand code should be the same in all cases, apart from
5454 when the operand can be optional. */
5455 if (opcode->operands[i] == AARCH64_OPND_NIL
5456 || opnds[i].type == AARCH64_OPND_NIL)
5457 break;
5458
5459 /* Generate the operand string in STR. */
5460 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
5461 NULL, cmt, sizeof (cmt), cpu_variant, &styler);
5462
5463 /* Delimiter. */
5464 if (str[0] != '\0')
5465 strcat (buf, i == 0 ? " " : ", ");
5466
5467 /* Append the operand string. */
5468 strcat (buf, str);
5469
5470 /* Append a comment. This works because only the last operand ever
5471 adds a comment. If that ever changes then we'll need to be
5472 smarter here. */
5473 if (cmt[0] != '\0')
5474 {
5475 strcat (buf, "\t// ");
5476 strcat (buf, cmt);
5477 }
5478 }
5479
5480 obstack_free (&content, NULL);
5481 }
5482
/* Send to stderr a string as information.  */

static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file = as_where (&line);
  va_list args;

  /* Prefix with the current source location when known.  */
  if (file != NULL)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
5506
/* Output one operand error record.  Non-fatal records are reported via
   as_warn, everything else via as_bad.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX < 0 means the error is not tied to a particular operand.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;

    case AARCH64_OPDE_A_SHOULD_FOLLOW_B:
      handler (_("this `%s' should have an immediately preceding `%s'"
		 " -- `%s'"),
	       detail->data[0].s, detail->data[1].s, str);
      break;

    case AARCH64_OPDE_EXPECTED_A_AFTER_B:
      handler (_("the preceding `%s' should be followed by `%s` rather"
		 " than `%s` -- `%s'"),
	       detail->data[1].s, detail->data[0].s, opcode->name, str);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parse the operands after the mnemonic; parsing
	     is expected to succeed (the mismatch is in the qualifiers,
	     not the syntax), while encoding is expected to fail.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_(" did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_(" %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_(" other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_(" %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_IMMS:
      handler (_("operand %d must have the same immediate value "
		 "as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* Equal bounds mean only a single value is acceptable.  */
      if (detail->data[0].i != detail->data[1].i)
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, detail->data[1].i, idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0].i == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0].i, idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0].i, idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5702
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.

   STR is the assembly line being diagnosed; NON_FATAL_ONLY selects
   whether only non-fatal (warning-like) records are considered.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* First pass: find the error kind of the highest severity among all
     collected records (optionally restricted to non-fatal ones).  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Second pass: pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5800 \f
/* Store the 32-bit instruction word INSN into BUF, always in
   little-endian byte order regardless of host endianness.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  unsigned int i;

  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
5811
/* Read a 32-bit little-endian instruction word back from BUF.
   Inverse of put_aarch64_insn.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Accumulate from the most significant byte down so each step is a
     simple shift-and-or.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
5821
/* Emit the instruction currently held in the global INST into the
   output frag: write its encoded value, create any pending relocation
   fix-up, and record DWARF line information.  NEW_INST, if non-NULL,
   is attached to the fix-up so the instruction can be re-encoded when
   the fix-up is applied.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  to = frag_more (INSN_SIZE);

  /* NOTE(review): presumably marks this frag as containing code for
     later per-frag processing -- confirm against tc_frag_data users.  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  /* BFD_RELOC_UNUSED means the operand parser queued no relocation.  */
  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      /* For GAS-internal (pseudo) fix-ups, stash the operand kind and
	 flags so md_apply_fix can finish the encoding itself.  */
      if (aarch64_gas_internal_fixup_p ())
	{
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
5855
/* Link together opcodes of the same name.  */

struct templates
{
  /* One opcode entry sharing this mnemonic.  */
  const aarch64_opcode *opcode;
  /* Next entry with the same mnemonic, or NULL at the end of the chain.  */
  struct templates *next;
};

typedef struct templates templates;
5865
5866 static templates *
5867 lookup_mnemonic (const char *start, int len)
5868 {
5869 templates *templ = NULL;
5870
5871 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5872 return templ;
5873 }
5874
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  BASE points to the beginning
   of the mnemonic, DOT points to the first '.' within the mnemonic
   (if any) and END points to the end of the mnemonic.

   Side effect: sets the global inst.cond to the parsed condition, or
   COND_ALWAYS when the mnemonic carries no condition suffix.  */

static templates *
opcode_lookup (char *base, char *dot, char *end)
{
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* A trailing '.' with nothing after it cannot be a valid mnemonic.  */
  if (dot == end)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (!cond)
	return 0;
      inst.cond = cond->value;
      len = dot - base;
    }
  else
    len = end - base;

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* Append ".c" to mnemonic if conditional.  The 13 bound keeps
	 len + 2 within condname[16]; no NUL terminator is needed since
	 lookup_mnemonic takes an explicit length.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5921
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.

   The default value recorded in the opcode table determines what the
   omitted operand stands for; only the field of *OPERAND relevant to
   TYPE is written.  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element operands: the default is the element register.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* An omitted scaled pattern behaves like "<pattern>, MUL #1".  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* No operand to fill; just make sure no relocation is pending.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Option operands: the default indexes the corresponding table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
6020
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   Validates that the relocation queued in inst.reloc is legal for the
   current MOVZ/MOVN/MOVK instruction and register width, and derives
   the implicit LSL shift amount (0/16/32/48) encoded by the G0-G3
   relocation group, storing it in operand 1's shifter.  On failure a
   (possibly fatal) syntax error is recorded.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK only replaces 16 bits, so sign-extending (_S) and several
     PREL/TLS group relocations make no sense for it.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Map the relocation group (G0..G3) to its 16-bit-lane shift.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* G2/G3 address bits 32-63, which a W register does not have.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
6122
/* A primitive log calculator.  Return log2 of SIZE for SIZE in
   {1, 2, 4, 8, 16}; any other value trips an assertion.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as sizes above 16: with the old
     "size > 16" check alone, SIZE == 0 would make SIZE - 1 wrap to
     UINT_MAX and read far outside ls[].  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
6138
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The pseudo type (plain, TLSLD DTPREL, TLSLD DTPREL_NC, TLSLE TPREL,
   TLSLE TPREL_NC) selects a row of reloc_ldst_lo12 and the access size
   implied by operand 0's qualifier selects the column.  Returns
   BFD_RELOC_AARCH64_NONE (with a fatal syntax error recorded) if the
   access size is too large for the relocation family.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: pseudo reloc family; columns: log2 of the access size
     (8/16/32/64/128 bits).  The TLS families have no 128-bit form.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1 carried no qualifier, deduce the expected one from
     operand 0's qualifier via the opcode's qualifier lists.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS rows stop at 64-bit accesses (see the NONE entries).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
6226
/* Check whether a register list REGINFO is valid.  The registers must be
   numbered in increasing order (modulo 32), in increments of one, or in
   increments of two when ACCEPT_ALTERNATE is non-zero.

   REGINFO packs the count minus one in bits [1:0], followed by one
   5-bit register number per list entry, lowest-numbered entry first.

   Return FALSE if such a register list is invalid, otherwise return
   TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t count = (reginfo & 0x3) + 1;
  uint32_t step = accept_alternate ? 2 : 1;
  uint32_t expected, i;

  reginfo >>= 2;
  expected = reginfo & 0x1f;

  /* Walk the remaining entries, checking each against the register
     number the previous one implies.  */
  for (i = 1; i < count; i++)
    {
      reginfo >>= 5;
      expected = (expected + step) & 0x1f;
      if ((reginfo & 0x1f) != expected)
	return false;
    }

  return true;
}
6257
6258 /* Generic instruction operand parser. This does no encoding and no
6259 semantic validation; it merely squirrels values away in the inst
6260 structure. Returns TRUE or FALSE depending on whether the
6261 specified grammar matched. */
6262
6263 static bool
6264 parse_operands (char *str, const aarch64_opcode *opcode)
6265 {
6266 int i;
6267 char *backtrack_pos = 0;
6268 const enum aarch64_opnd *operands = opcode->operands;
6269 aarch64_reg_type imm_reg_type;
6270
6271 clear_error ();
6272 skip_whitespace (str);
6273
6274 if (AARCH64_CPU_HAS_ANY_FEATURES (*opcode->avariant,
6275 AARCH64_FEATURE_SVE
6276 | AARCH64_FEATURE_SVE2))
6277 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
6278 else
6279 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
6280
6281 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
6282 {
6283 int64_t val;
6284 const reg_entry *reg;
6285 int comma_skipped_p = 0;
6286 struct vector_type_el vectype;
6287 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
6288 aarch64_opnd_info *info = &inst.base.operands[i];
6289 aarch64_reg_type reg_type;
6290
6291 DEBUG_TRACE ("parse operand %d", i);
6292
6293 /* Assign the operand code. */
6294 info->type = operands[i];
6295
6296 if (optional_operand_p (opcode, i))
6297 {
6298 /* Remember where we are in case we need to backtrack. */
6299 gas_assert (!backtrack_pos);
6300 backtrack_pos = str;
6301 }
6302
6303 /* Expect comma between operands; the backtrack mechanism will take
6304 care of cases of omitted optional operand. */
6305 if (i > 0 && ! skip_past_char (&str, ','))
6306 {
6307 set_syntax_error (_("comma expected between operands"));
6308 goto failure;
6309 }
6310 else
6311 comma_skipped_p = 1;
6312
6313 switch (operands[i])
6314 {
6315 case AARCH64_OPND_Rd:
6316 case AARCH64_OPND_Rn:
6317 case AARCH64_OPND_Rm:
6318 case AARCH64_OPND_Rt:
6319 case AARCH64_OPND_Rt2:
6320 case AARCH64_OPND_Rs:
6321 case AARCH64_OPND_Ra:
6322 case AARCH64_OPND_Rt_LS64:
6323 case AARCH64_OPND_Rt_SYS:
6324 case AARCH64_OPND_PAIRREG:
6325 case AARCH64_OPND_SVE_Rm:
6326 po_int_reg_or_fail (REG_TYPE_R_Z);
6327
6328 /* In LS64 load/store instructions Rt register number must be even
6329 and <=22. */
6330 if (operands[i] == AARCH64_OPND_Rt_LS64)
6331 {
6332 /* We've already checked if this is valid register.
6333 This will check if register number (Rt) is not undefined for LS64
6334 instructions:
6335 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
6336 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
6337 {
6338 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
6339 goto failure;
6340 }
6341 }
6342 break;
6343
6344 case AARCH64_OPND_Rd_SP:
6345 case AARCH64_OPND_Rn_SP:
6346 case AARCH64_OPND_Rt_SP:
6347 case AARCH64_OPND_SVE_Rn_SP:
6348 case AARCH64_OPND_Rm_SP:
6349 po_int_reg_or_fail (REG_TYPE_R_SP);
6350 break;
6351
6352 case AARCH64_OPND_Rm_EXT:
6353 case AARCH64_OPND_Rm_SFT:
6354 po_misc_or_fail (parse_shifter_operand
6355 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
6356 ? SHIFTED_ARITH_IMM
6357 : SHIFTED_LOGIC_IMM)));
6358 if (!info->shifter.operator_present)
6359 {
6360 /* Default to LSL if not present. Libopcodes prefers shifter
6361 kind to be explicit. */
6362 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6363 info->shifter.kind = AARCH64_MOD_LSL;
6364 /* For Rm_EXT, libopcodes will carry out further check on whether
6365 or not stack pointer is used in the instruction (Recall that
6366 "the extend operator is not optional unless at least one of
6367 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
6368 }
6369 break;
6370
6371 case AARCH64_OPND_Fd:
6372 case AARCH64_OPND_Fn:
6373 case AARCH64_OPND_Fm:
6374 case AARCH64_OPND_Fa:
6375 case AARCH64_OPND_Ft:
6376 case AARCH64_OPND_Ft2:
6377 case AARCH64_OPND_Sd:
6378 case AARCH64_OPND_Sn:
6379 case AARCH64_OPND_Sm:
6380 case AARCH64_OPND_SVE_VZn:
6381 case AARCH64_OPND_SVE_Vd:
6382 case AARCH64_OPND_SVE_Vm:
6383 case AARCH64_OPND_SVE_Vn:
6384 reg = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, NULL);
6385 if (!reg)
6386 {
6387 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
6388 goto failure;
6389 }
6390 gas_assert (reg->type >= REG_TYPE_FP_B
6391 && reg->type <= REG_TYPE_FP_Q);
6392
6393 info->reg.regno = reg->number;
6394 info->qualifier = AARCH64_OPND_QLF_S_B + (reg->type - REG_TYPE_FP_B);
6395 break;
6396
6397 case AARCH64_OPND_SVE_Pd:
6398 case AARCH64_OPND_SVE_Pg3:
6399 case AARCH64_OPND_SVE_Pg4_5:
6400 case AARCH64_OPND_SVE_Pg4_10:
6401 case AARCH64_OPND_SVE_Pg4_16:
6402 case AARCH64_OPND_SVE_Pm:
6403 case AARCH64_OPND_SVE_Pn:
6404 case AARCH64_OPND_SVE_Pt:
6405 case AARCH64_OPND_SME_Pm:
6406 reg_type = REG_TYPE_PN;
6407 goto vector_reg;
6408
6409 case AARCH64_OPND_SVE_Za_5:
6410 case AARCH64_OPND_SVE_Za_16:
6411 case AARCH64_OPND_SVE_Zd:
6412 case AARCH64_OPND_SVE_Zm_5:
6413 case AARCH64_OPND_SVE_Zm_16:
6414 case AARCH64_OPND_SVE_Zn:
6415 case AARCH64_OPND_SVE_Zt:
6416 reg_type = REG_TYPE_ZN;
6417 goto vector_reg;
6418
6419 case AARCH64_OPND_Va:
6420 case AARCH64_OPND_Vd:
6421 case AARCH64_OPND_Vn:
6422 case AARCH64_OPND_Vm:
6423 reg_type = REG_TYPE_VN;
6424 vector_reg:
6425 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6426 if (!reg)
6427 {
6428 first_error (_(get_reg_expected_msg (reg_type)));
6429 goto failure;
6430 }
6431 if (vectype.defined & NTA_HASINDEX)
6432 goto failure;
6433
6434 info->reg.regno = reg->number;
6435 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
6436 && vectype.type == NT_invtype)
6437 /* Unqualified Pn and Zn registers are allowed in certain
6438 contexts. Rely on F_STRICT qualifier checking to catch
6439 invalid uses. */
6440 info->qualifier = AARCH64_OPND_QLF_NIL;
6441 else
6442 {
6443 info->qualifier = vectype_to_qualifier (&vectype);
6444 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6445 goto failure;
6446 }
6447 break;
6448
6449 case AARCH64_OPND_VdD1:
6450 case AARCH64_OPND_VnD1:
6451 reg = aarch64_reg_parse (&str, REG_TYPE_VN, &vectype);
6452 if (!reg)
6453 {
6454 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
6455 goto failure;
6456 }
6457 if (vectype.type != NT_d || vectype.index != 1)
6458 {
6459 set_fatal_syntax_error
6460 (_("the top half of a 128-bit FP/SIMD register is expected"));
6461 goto failure;
6462 }
6463 info->reg.regno = reg->number;
6464 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
6465 here; it is correct for the purpose of encoding/decoding since
6466 only the register number is explicitly encoded in the related
6467 instructions, although this appears a bit hacky. */
6468 info->qualifier = AARCH64_OPND_QLF_S_D;
6469 break;
6470
6471 case AARCH64_OPND_SVE_Zm3_INDEX:
6472 case AARCH64_OPND_SVE_Zm3_22_INDEX:
6473 case AARCH64_OPND_SVE_Zm3_11_INDEX:
6474 case AARCH64_OPND_SVE_Zm4_11_INDEX:
6475 case AARCH64_OPND_SVE_Zm4_INDEX:
6476 case AARCH64_OPND_SVE_Zn_INDEX:
6477 reg_type = REG_TYPE_ZN;
6478 goto vector_reg_index;
6479
6480 case AARCH64_OPND_Ed:
6481 case AARCH64_OPND_En:
6482 case AARCH64_OPND_Em:
6483 case AARCH64_OPND_Em16:
6484 case AARCH64_OPND_SM3_IMM2:
6485 reg_type = REG_TYPE_VN;
6486 vector_reg_index:
6487 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6488 if (!reg)
6489 {
6490 first_error (_(get_reg_expected_msg (reg_type)));
6491 goto failure;
6492 }
6493 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
6494 goto failure;
6495
6496 info->reglane.regno = reg->number;
6497 info->reglane.index = vectype.index;
6498 info->qualifier = vectype_to_qualifier (&vectype);
6499 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6500 goto failure;
6501 break;
6502
6503 case AARCH64_OPND_SVE_ZnxN:
6504 case AARCH64_OPND_SVE_ZtxN:
6505 reg_type = REG_TYPE_ZN;
6506 goto vector_reg_list;
6507
6508 case AARCH64_OPND_LVn:
6509 case AARCH64_OPND_LVt:
6510 case AARCH64_OPND_LVt_AL:
6511 case AARCH64_OPND_LEt:
6512 reg_type = REG_TYPE_VN;
6513 vector_reg_list:
6514 if (reg_type == REG_TYPE_ZN
6515 && get_opcode_dependent_value (opcode) == 1
6516 && *str != '{')
6517 {
6518 reg = aarch64_reg_parse (&str, reg_type, &vectype);
6519 if (!reg)
6520 {
6521 first_error (_(get_reg_expected_msg (reg_type)));
6522 goto failure;
6523 }
6524 info->reglist.first_regno = reg->number;
6525 info->reglist.num_regs = 1;
6526 }
6527 else
6528 {
6529 val = parse_vector_reg_list (&str, reg_type, &vectype);
6530 if (val == PARSE_FAIL)
6531 goto failure;
6532
6533 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
6534 {
6535 set_fatal_syntax_error (_("invalid register list"));
6536 goto failure;
6537 }
6538
6539 if (vectype.width != 0 && *str != ',')
6540 {
6541 set_fatal_syntax_error
6542 (_("expected element type rather than vector type"));
6543 goto failure;
6544 }
6545
6546 info->reglist.first_regno = (val >> 2) & 0x1f;
6547 info->reglist.num_regs = (val & 0x3) + 1;
6548 }
6549 if (operands[i] == AARCH64_OPND_LEt)
6550 {
6551 if (!(vectype.defined & NTA_HASINDEX))
6552 goto failure;
6553 info->reglist.has_index = 1;
6554 info->reglist.index = vectype.index;
6555 }
6556 else
6557 {
6558 if (vectype.defined & NTA_HASINDEX)
6559 goto failure;
6560 if (!(vectype.defined & NTA_HASTYPE))
6561 {
6562 if (reg_type == REG_TYPE_ZN)
6563 set_fatal_syntax_error (_("missing type suffix"));
6564 goto failure;
6565 }
6566 }
6567 info->qualifier = vectype_to_qualifier (&vectype);
6568 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6569 goto failure;
6570 break;
6571
6572 case AARCH64_OPND_CRn:
6573 case AARCH64_OPND_CRm:
6574 {
6575 char prefix = *(str++);
6576 if (prefix != 'c' && prefix != 'C')
6577 goto failure;
6578
6579 po_imm_nc_or_fail ();
6580 if (val > 15)
6581 {
6582 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6583 goto failure;
6584 }
6585 info->qualifier = AARCH64_OPND_QLF_CR;
6586 info->imm.value = val;
6587 break;
6588 }
6589
6590 case AARCH64_OPND_SHLL_IMM:
6591 case AARCH64_OPND_IMM_VLSR:
6592 po_imm_or_fail (1, 64);
6593 info->imm.value = val;
6594 break;
6595
6596 case AARCH64_OPND_CCMP_IMM:
6597 case AARCH64_OPND_SIMM5:
6598 case AARCH64_OPND_FBITS:
6599 case AARCH64_OPND_TME_UIMM16:
6600 case AARCH64_OPND_UIMM4:
6601 case AARCH64_OPND_UIMM4_ADDG:
6602 case AARCH64_OPND_UIMM10:
6603 case AARCH64_OPND_UIMM3_OP1:
6604 case AARCH64_OPND_UIMM3_OP2:
6605 case AARCH64_OPND_IMM_VLSL:
6606 case AARCH64_OPND_IMM:
6607 case AARCH64_OPND_IMM_2:
6608 case AARCH64_OPND_WIDTH:
6609 case AARCH64_OPND_SVE_INV_LIMM:
6610 case AARCH64_OPND_SVE_LIMM:
6611 case AARCH64_OPND_SVE_LIMM_MOV:
6612 case AARCH64_OPND_SVE_SHLIMM_PRED:
6613 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6614 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6615 case AARCH64_OPND_SVE_SHRIMM_PRED:
6616 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6617 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6618 case AARCH64_OPND_SVE_SIMM5:
6619 case AARCH64_OPND_SVE_SIMM5B:
6620 case AARCH64_OPND_SVE_SIMM6:
6621 case AARCH64_OPND_SVE_SIMM8:
6622 case AARCH64_OPND_SVE_UIMM3:
6623 case AARCH64_OPND_SVE_UIMM7:
6624 case AARCH64_OPND_SVE_UIMM8:
6625 case AARCH64_OPND_SVE_UIMM8_53:
6626 case AARCH64_OPND_IMM_ROT1:
6627 case AARCH64_OPND_IMM_ROT2:
6628 case AARCH64_OPND_IMM_ROT3:
6629 case AARCH64_OPND_SVE_IMM_ROT1:
6630 case AARCH64_OPND_SVE_IMM_ROT2:
6631 case AARCH64_OPND_SVE_IMM_ROT3:
6632 case AARCH64_OPND_CSSC_SIMM8:
6633 case AARCH64_OPND_CSSC_UIMM8:
6634 po_imm_nc_or_fail ();
6635 info->imm.value = val;
6636 break;
6637
6638 case AARCH64_OPND_SVE_AIMM:
6639 case AARCH64_OPND_SVE_ASIMM:
6640 po_imm_nc_or_fail ();
6641 info->imm.value = val;
6642 skip_whitespace (str);
6643 if (skip_past_comma (&str))
6644 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6645 else
6646 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6647 break;
6648
6649 case AARCH64_OPND_SVE_PATTERN:
6650 po_enum_or_fail (aarch64_sve_pattern_array);
6651 info->imm.value = val;
6652 break;
6653
6654 case AARCH64_OPND_SVE_PATTERN_SCALED:
6655 po_enum_or_fail (aarch64_sve_pattern_array);
6656 info->imm.value = val;
6657 if (skip_past_comma (&str)
6658 && !parse_shift (&str, info, SHIFTED_MUL))
6659 goto failure;
6660 if (!info->shifter.operator_present)
6661 {
6662 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6663 info->shifter.kind = AARCH64_MOD_MUL;
6664 info->shifter.amount = 1;
6665 }
6666 break;
6667
6668 case AARCH64_OPND_SVE_PRFOP:
6669 po_enum_or_fail (aarch64_sve_prfop_array);
6670 info->imm.value = val;
6671 break;
6672
6673 case AARCH64_OPND_UIMM7:
6674 po_imm_or_fail (0, 127);
6675 info->imm.value = val;
6676 break;
6677
6678 case AARCH64_OPND_IDX:
6679 case AARCH64_OPND_MASK:
6680 case AARCH64_OPND_BIT_NUM:
6681 case AARCH64_OPND_IMMR:
6682 case AARCH64_OPND_IMMS:
6683 po_imm_or_fail (0, 63);
6684 info->imm.value = val;
6685 break;
6686
6687 case AARCH64_OPND_IMM0:
6688 po_imm_nc_or_fail ();
6689 if (val != 0)
6690 {
6691 set_fatal_syntax_error (_("immediate zero expected"));
6692 goto failure;
6693 }
6694 info->imm.value = 0;
6695 break;
6696
6697 case AARCH64_OPND_FPIMM0:
6698 {
6699 int qfloat;
6700 bool res1 = false, res2 = false;
6701 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6702 it is probably not worth the effort to support it. */
6703 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6704 imm_reg_type))
6705 && (error_p ()
6706 || !(res2 = parse_constant_immediate (&str, &val,
6707 imm_reg_type))))
6708 goto failure;
6709 if ((res1 && qfloat == 0) || (res2 && val == 0))
6710 {
6711 info->imm.value = 0;
6712 info->imm.is_fp = 1;
6713 break;
6714 }
6715 set_fatal_syntax_error (_("immediate zero expected"));
6716 goto failure;
6717 }
6718
6719 case AARCH64_OPND_IMM_MOV:
6720 {
6721 char *saved = str;
6722 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6723 reg_name_p (str, REG_TYPE_VN))
6724 goto failure;
6725 str = saved;
6726 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6727 GE_OPT_PREFIX, REJECT_ABSENT));
6728 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6729 later. fix_mov_imm_insn will try to determine a machine
6730 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6731 message if the immediate cannot be moved by a single
6732 instruction. */
6733 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6734 inst.base.operands[i].skip = 1;
6735 }
6736 break;
6737
6738 case AARCH64_OPND_SIMD_IMM:
6739 case AARCH64_OPND_SIMD_IMM_SFT:
6740 if (! parse_big_immediate (&str, &val, imm_reg_type))
6741 goto failure;
6742 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6743 /* addr_off_p */ 0,
6744 /* need_libopcodes_p */ 1,
6745 /* skip_p */ 1);
6746 /* Parse shift.
6747 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6748 shift, we don't check it here; we leave the checking to
6749 the libopcodes (operand_general_constraint_met_p). By
6750 doing this, we achieve better diagnostics. */
6751 if (skip_past_comma (&str)
6752 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6753 goto failure;
6754 if (!info->shifter.operator_present
6755 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6756 {
6757 /* Default to LSL if not present. Libopcodes prefers shifter
6758 kind to be explicit. */
6759 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6760 info->shifter.kind = AARCH64_MOD_LSL;
6761 }
6762 break;
6763
6764 case AARCH64_OPND_FPIMM:
6765 case AARCH64_OPND_SIMD_FPIMM:
6766 case AARCH64_OPND_SVE_FPIMM8:
6767 {
6768 int qfloat;
6769 bool dp_p;
6770
6771 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6772 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6773 || !aarch64_imm_float_p (qfloat))
6774 {
6775 if (!error_p ())
6776 set_fatal_syntax_error (_("invalid floating-point"
6777 " constant"));
6778 goto failure;
6779 }
6780 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6781 inst.base.operands[i].imm.is_fp = 1;
6782 }
6783 break;
6784
6785 case AARCH64_OPND_SVE_I1_HALF_ONE:
6786 case AARCH64_OPND_SVE_I1_HALF_TWO:
6787 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6788 {
6789 int qfloat;
6790 bool dp_p;
6791
6792 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6793 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6794 {
6795 if (!error_p ())
6796 set_fatal_syntax_error (_("invalid floating-point"
6797 " constant"));
6798 goto failure;
6799 }
6800 inst.base.operands[i].imm.value = qfloat;
6801 inst.base.operands[i].imm.is_fp = 1;
6802 }
6803 break;
6804
6805 case AARCH64_OPND_LIMM:
6806 po_misc_or_fail (parse_shifter_operand (&str, info,
6807 SHIFTED_LOGIC_IMM));
6808 if (info->shifter.operator_present)
6809 {
6810 set_fatal_syntax_error
6811 (_("shift not allowed for bitmask immediate"));
6812 goto failure;
6813 }
6814 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6815 /* addr_off_p */ 0,
6816 /* need_libopcodes_p */ 1,
6817 /* skip_p */ 1);
6818 break;
6819
6820 case AARCH64_OPND_AIMM:
6821 if (opcode->op == OP_ADD)
6822 /* ADD may have relocation types. */
6823 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6824 SHIFTED_ARITH_IMM));
6825 else
6826 po_misc_or_fail (parse_shifter_operand (&str, info,
6827 SHIFTED_ARITH_IMM));
6828 switch (inst.reloc.type)
6829 {
6830 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6831 info->shifter.amount = 12;
6832 break;
6833 case BFD_RELOC_UNUSED:
6834 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6835 if (info->shifter.kind != AARCH64_MOD_NONE)
6836 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6837 inst.reloc.pc_rel = 0;
6838 break;
6839 default:
6840 break;
6841 }
6842 info->imm.value = 0;
6843 if (!info->shifter.operator_present)
6844 {
6845 /* Default to LSL if not present. Libopcodes prefers shifter
6846 kind to be explicit. */
6847 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6848 info->shifter.kind = AARCH64_MOD_LSL;
6849 }
6850 break;
6851
6852 case AARCH64_OPND_HALF:
6853 {
6854 /* #<imm16> or relocation. */
6855 int internal_fixup_p;
6856 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6857 if (internal_fixup_p)
6858 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6859 skip_whitespace (str);
6860 if (skip_past_comma (&str))
6861 {
6862 /* {, LSL #<shift>} */
6863 if (! aarch64_gas_internal_fixup_p ())
6864 {
6865 set_fatal_syntax_error (_("can't mix relocation modifier "
6866 "with explicit shift"));
6867 goto failure;
6868 }
6869 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6870 }
6871 else
6872 inst.base.operands[i].shifter.amount = 0;
6873 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6874 inst.base.operands[i].imm.value = 0;
6875 if (! process_movw_reloc_info ())
6876 goto failure;
6877 }
6878 break;
6879
6880 case AARCH64_OPND_EXCEPTION:
6881 case AARCH64_OPND_UNDEFINED:
6882 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6883 imm_reg_type));
6884 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6885 /* addr_off_p */ 0,
6886 /* need_libopcodes_p */ 0,
6887 /* skip_p */ 1);
6888 break;
6889
6890 case AARCH64_OPND_NZCV:
6891 {
6892 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6893 if (nzcv != NULL)
6894 {
6895 str += 4;
6896 info->imm.value = nzcv->value;
6897 break;
6898 }
6899 po_imm_or_fail (0, 15);
6900 info->imm.value = val;
6901 }
6902 break;
6903
6904 case AARCH64_OPND_COND:
6905 case AARCH64_OPND_COND1:
6906 {
6907 char *start = str;
6908 do
6909 str++;
6910 while (ISALPHA (*str));
6911 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6912 if (info->cond == NULL)
6913 {
6914 set_syntax_error (_("invalid condition"));
6915 goto failure;
6916 }
6917 else if (operands[i] == AARCH64_OPND_COND1
6918 && (info->cond->value & 0xe) == 0xe)
6919 {
6920 /* Do not allow AL or NV. */
6921 set_default_error ();
6922 goto failure;
6923 }
6924 }
6925 break;
6926
6927 case AARCH64_OPND_ADDR_ADRP:
6928 po_misc_or_fail (parse_adrp (&str));
6929 /* Clear the value as operand needs to be relocated. */
6930 info->imm.value = 0;
6931 break;
6932
6933 case AARCH64_OPND_ADDR_PCREL14:
6934 case AARCH64_OPND_ADDR_PCREL19:
6935 case AARCH64_OPND_ADDR_PCREL21:
6936 case AARCH64_OPND_ADDR_PCREL26:
6937 po_misc_or_fail (parse_address (&str, info));
6938 if (!info->addr.pcrel)
6939 {
6940 set_syntax_error (_("invalid pc-relative address"));
6941 goto failure;
6942 }
6943 if (inst.gen_lit_pool
6944 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6945 {
6946 /* Only permit "=value" in the literal load instructions.
6947 The literal will be generated by programmer_friendly_fixup. */
6948 set_syntax_error (_("invalid use of \"=immediate\""));
6949 goto failure;
6950 }
6951 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6952 {
6953 set_syntax_error (_("unrecognized relocation suffix"));
6954 goto failure;
6955 }
6956 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6957 {
6958 info->imm.value = inst.reloc.exp.X_add_number;
6959 inst.reloc.type = BFD_RELOC_UNUSED;
6960 }
6961 else
6962 {
6963 info->imm.value = 0;
6964 if (inst.reloc.type == BFD_RELOC_UNUSED)
6965 switch (opcode->iclass)
6966 {
6967 case compbranch:
6968 case condbranch:
6969 /* e.g. CBZ or B.COND */
6970 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6971 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6972 break;
6973 case testbranch:
6974 /* e.g. TBZ */
6975 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6976 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6977 break;
6978 case branch_imm:
6979 /* e.g. B or BL */
6980 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6981 inst.reloc.type =
6982 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6983 : BFD_RELOC_AARCH64_JUMP26;
6984 break;
6985 case loadlit:
6986 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6987 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6988 break;
6989 case pcreladdr:
6990 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6991 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6992 break;
6993 default:
6994 gas_assert (0);
6995 abort ();
6996 }
6997 inst.reloc.pc_rel = 1;
6998 }
6999 break;
7000
7001 case AARCH64_OPND_ADDR_SIMPLE:
7002 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
7003 {
7004 /* [<Xn|SP>{, #<simm>}] */
7005 char *start = str;
7006 /* First use the normal address-parsing routines, to get
7007 the usual syntax errors. */
7008 po_misc_or_fail (parse_address (&str, info));
7009 if (info->addr.pcrel || info->addr.offset.is_reg
7010 || !info->addr.preind || info->addr.postind
7011 || info->addr.writeback)
7012 {
7013 set_syntax_error (_("invalid addressing mode"));
7014 goto failure;
7015 }
7016
7017 /* Then retry, matching the specific syntax of these addresses. */
7018 str = start;
7019 po_char_or_fail ('[');
7020 po_reg_or_fail (REG_TYPE_R64_SP);
7021 /* Accept optional ", #0". */
7022 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
7023 && skip_past_char (&str, ','))
7024 {
7025 skip_past_char (&str, '#');
7026 if (! skip_past_char (&str, '0'))
7027 {
7028 set_fatal_syntax_error
7029 (_("the optional immediate offset can only be 0"));
7030 goto failure;
7031 }
7032 }
7033 po_char_or_fail (']');
7034 break;
7035 }
7036
7037 case AARCH64_OPND_ADDR_REGOFF:
7038 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
7039 po_misc_or_fail (parse_address (&str, info));
7040 regoff_addr:
7041 if (info->addr.pcrel || !info->addr.offset.is_reg
7042 || !info->addr.preind || info->addr.postind
7043 || info->addr.writeback)
7044 {
7045 set_syntax_error (_("invalid addressing mode"));
7046 goto failure;
7047 }
7048 if (!info->shifter.operator_present)
7049 {
7050 /* Default to LSL if not present. Libopcodes prefers shifter
7051 kind to be explicit. */
7052 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
7053 info->shifter.kind = AARCH64_MOD_LSL;
7054 }
7055 /* Qualifier to be deduced by libopcodes. */
7056 break;
7057
7058 case AARCH64_OPND_ADDR_SIMM7:
7059 po_misc_or_fail (parse_address (&str, info));
7060 if (info->addr.pcrel || info->addr.offset.is_reg
7061 || (!info->addr.preind && !info->addr.postind))
7062 {
7063 set_syntax_error (_("invalid addressing mode"));
7064 goto failure;
7065 }
7066 if (inst.reloc.type != BFD_RELOC_UNUSED)
7067 {
7068 set_syntax_error (_("relocation not allowed"));
7069 goto failure;
7070 }
7071 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7072 /* addr_off_p */ 1,
7073 /* need_libopcodes_p */ 1,
7074 /* skip_p */ 0);
7075 break;
7076
7077 case AARCH64_OPND_ADDR_SIMM9:
7078 case AARCH64_OPND_ADDR_SIMM9_2:
7079 case AARCH64_OPND_ADDR_SIMM11:
7080 case AARCH64_OPND_ADDR_SIMM13:
7081 po_misc_or_fail (parse_address (&str, info));
7082 if (info->addr.pcrel || info->addr.offset.is_reg
7083 || (!info->addr.preind && !info->addr.postind)
7084 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
7085 && info->addr.writeback))
7086 {
7087 set_syntax_error (_("invalid addressing mode"));
7088 goto failure;
7089 }
7090 if (inst.reloc.type != BFD_RELOC_UNUSED)
7091 {
7092 set_syntax_error (_("relocation not allowed"));
7093 goto failure;
7094 }
7095 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7096 /* addr_off_p */ 1,
7097 /* need_libopcodes_p */ 1,
7098 /* skip_p */ 0);
7099 break;
7100
7101 case AARCH64_OPND_ADDR_SIMM10:
7102 case AARCH64_OPND_ADDR_OFFSET:
7103 po_misc_or_fail (parse_address (&str, info));
7104 if (info->addr.pcrel || info->addr.offset.is_reg
7105 || !info->addr.preind || info->addr.postind)
7106 {
7107 set_syntax_error (_("invalid addressing mode"));
7108 goto failure;
7109 }
7110 if (inst.reloc.type != BFD_RELOC_UNUSED)
7111 {
7112 set_syntax_error (_("relocation not allowed"));
7113 goto failure;
7114 }
7115 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
7116 /* addr_off_p */ 1,
7117 /* need_libopcodes_p */ 1,
7118 /* skip_p */ 0);
7119 break;
7120
7121 case AARCH64_OPND_ADDR_UIMM12:
7122 po_misc_or_fail (parse_address (&str, info));
7123 if (info->addr.pcrel || info->addr.offset.is_reg
7124 || !info->addr.preind || info->addr.writeback)
7125 {
7126 set_syntax_error (_("invalid addressing mode"));
7127 goto failure;
7128 }
7129 if (inst.reloc.type == BFD_RELOC_UNUSED)
7130 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
7131 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
7132 || (inst.reloc.type
7133 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
7134 || (inst.reloc.type
7135 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
7136 || (inst.reloc.type
7137 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
7138 || (inst.reloc.type
7139 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
7140 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
7141 /* Leave qualifier to be determined by libopcodes. */
7142 break;
7143
7144 case AARCH64_OPND_SIMD_ADDR_POST:
7145 /* [<Xn|SP>], <Xm|#<amount>> */
7146 po_misc_or_fail (parse_address (&str, info));
7147 if (!info->addr.postind || !info->addr.writeback)
7148 {
7149 set_syntax_error (_("invalid addressing mode"));
7150 goto failure;
7151 }
7152 if (!info->addr.offset.is_reg)
7153 {
7154 if (inst.reloc.exp.X_op == O_constant)
7155 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7156 else
7157 {
7158 set_fatal_syntax_error
7159 (_("writeback value must be an immediate constant"));
7160 goto failure;
7161 }
7162 }
7163 /* No qualifier. */
7164 break;
7165
7166 case AARCH64_OPND_SME_SM_ZA:
7167 /* { SM | ZA } */
7168 if ((val = parse_sme_sm_za (&str)) == PARSE_FAIL)
7169 {
7170 set_syntax_error (_("unknown or missing PSTATE field name"));
7171 goto failure;
7172 }
7173 info->reg.regno = val;
7174 break;
7175
7176 case AARCH64_OPND_SME_PnT_Wm_imm:
7177 /* <Pn>.<T>[<Wm>, #<imm>] */
7178 {
7179 int index_base_reg;
7180 int imm;
7181 val = parse_sme_pred_reg_with_index (&str,
7182 &index_base_reg,
7183 &imm,
7184 &qualifier);
7185 if (val == PARSE_FAIL)
7186 goto failure;
7187
7188 info->za_tile_vector.regno = val;
7189 info->za_tile_vector.index.regno = index_base_reg;
7190 info->za_tile_vector.index.imm = imm;
7191 info->qualifier = qualifier;
7192 break;
7193 }
7194
7195 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
7196 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
7197 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
7198 case AARCH64_OPND_SME_ADDR_RI_U4xVL:
7199 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
7200 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
7201 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
7202 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
7203 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
7204 case AARCH64_OPND_SVE_ADDR_RI_U6:
7205 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
7206 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
7207 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
7208 /* [X<n>{, #imm, MUL VL}]
7209 [X<n>{, #imm}]
7210 but recognizing SVE registers. */
7211 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7212 &offset_qualifier));
7213 if (base_qualifier != AARCH64_OPND_QLF_X)
7214 {
7215 set_syntax_error (_("invalid addressing mode"));
7216 goto failure;
7217 }
7218 sve_regimm:
7219 if (info->addr.pcrel || info->addr.offset.is_reg
7220 || !info->addr.preind || info->addr.writeback)
7221 {
7222 set_syntax_error (_("invalid addressing mode"));
7223 goto failure;
7224 }
7225 if (inst.reloc.type != BFD_RELOC_UNUSED
7226 || inst.reloc.exp.X_op != O_constant)
7227 {
7228 /* Make sure this has priority over
7229 "invalid addressing mode". */
7230 set_fatal_syntax_error (_("constant offset required"));
7231 goto failure;
7232 }
7233 info->addr.offset.imm = inst.reloc.exp.X_add_number;
7234 break;
7235
7236 case AARCH64_OPND_SVE_ADDR_R:
7237 /* [<Xn|SP>{, <R><m>}]
7238 but recognizing SVE registers. */
7239 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7240 &offset_qualifier));
7241 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
7242 {
7243 offset_qualifier = AARCH64_OPND_QLF_X;
7244 info->addr.offset.is_reg = 1;
7245 info->addr.offset.regno = 31;
7246 }
7247 else if (base_qualifier != AARCH64_OPND_QLF_X
7248 || offset_qualifier != AARCH64_OPND_QLF_X)
7249 {
7250 set_syntax_error (_("invalid addressing mode"));
7251 goto failure;
7252 }
7253 goto regoff_addr;
7254
7255 case AARCH64_OPND_SVE_ADDR_RR:
7256 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
7257 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
7258 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
7259 case AARCH64_OPND_SVE_ADDR_RR_LSL4:
7260 case AARCH64_OPND_SVE_ADDR_RX:
7261 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
7262 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
7263 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
7264 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
7265 but recognizing SVE registers. */
7266 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7267 &offset_qualifier));
7268 if (base_qualifier != AARCH64_OPND_QLF_X
7269 || offset_qualifier != AARCH64_OPND_QLF_X)
7270 {
7271 set_syntax_error (_("invalid addressing mode"));
7272 goto failure;
7273 }
7274 goto regoff_addr;
7275
7276 case AARCH64_OPND_SVE_ADDR_RZ:
7277 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
7278 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
7279 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
7280 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
7281 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
7282 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
7283 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
7284 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
7285 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
7286 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
7287 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
7288 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
7289 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
7290 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7291 &offset_qualifier));
7292 if (base_qualifier != AARCH64_OPND_QLF_X
7293 || (offset_qualifier != AARCH64_OPND_QLF_S_S
7294 && offset_qualifier != AARCH64_OPND_QLF_S_D))
7295 {
7296 set_syntax_error (_("invalid addressing mode"));
7297 goto failure;
7298 }
7299 info->qualifier = offset_qualifier;
7300 goto regoff_addr;
7301
7302 case AARCH64_OPND_SVE_ADDR_ZX:
7303 /* [Zn.<T>{, <Xm>}]. */
7304 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7305 &offset_qualifier));
7306 /* Things to check:
7307 base_qualifier either S_S or S_D
7308 offset_qualifier must be X
7309 */
7310 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7311 && base_qualifier != AARCH64_OPND_QLF_S_D)
7312 || offset_qualifier != AARCH64_OPND_QLF_X)
7313 {
7314 set_syntax_error (_("invalid addressing mode"));
7315 goto failure;
7316 }
7317 info->qualifier = base_qualifier;
7318 if (!info->addr.offset.is_reg || info->addr.pcrel
7319 || !info->addr.preind || info->addr.writeback
7320 || info->shifter.operator_present != 0)
7321 {
7322 set_syntax_error (_("invalid addressing mode"));
7323 goto failure;
7324 }
7325 info->shifter.kind = AARCH64_MOD_LSL;
7326 break;
7327
7328
7329 case AARCH64_OPND_SVE_ADDR_ZI_U5:
7330 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
7331 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
7332 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
7333 /* [Z<n>.<T>{, #imm}] */
7334 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7335 &offset_qualifier));
7336 if (base_qualifier != AARCH64_OPND_QLF_S_S
7337 && base_qualifier != AARCH64_OPND_QLF_S_D)
7338 {
7339 set_syntax_error (_("invalid addressing mode"));
7340 goto failure;
7341 }
7342 info->qualifier = base_qualifier;
7343 goto sve_regimm;
7344
7345 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
7346 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
7347 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
7348 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
7349 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
7350
7351 We don't reject:
7352
7353 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
7354
7355 here since we get better error messages by leaving it to
7356 the qualifier checking routines. */
7357 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
7358 &offset_qualifier));
7359 if ((base_qualifier != AARCH64_OPND_QLF_S_S
7360 && base_qualifier != AARCH64_OPND_QLF_S_D)
7361 || offset_qualifier != base_qualifier)
7362 {
7363 set_syntax_error (_("invalid addressing mode"));
7364 goto failure;
7365 }
7366 info->qualifier = base_qualifier;
7367 goto regoff_addr;
7368
7369 case AARCH64_OPND_SYSREG:
7370 {
7371 uint32_t sysreg_flags;
7372 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
7373 &sysreg_flags)) == PARSE_FAIL)
7374 {
7375 set_syntax_error (_("unknown or missing system register name"));
7376 goto failure;
7377 }
7378 inst.base.operands[i].sysreg.value = val;
7379 inst.base.operands[i].sysreg.flags = sysreg_flags;
7380 break;
7381 }
7382
7383 case AARCH64_OPND_PSTATEFIELD:
7384 {
7385 uint32_t sysreg_flags;
7386 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1,
7387 &sysreg_flags)) == PARSE_FAIL)
7388 {
7389 set_syntax_error (_("unknown or missing PSTATE field name"));
7390 goto failure;
7391 }
7392 inst.base.operands[i].pstatefield = val;
7393 inst.base.operands[i].sysreg.flags = sysreg_flags;
7394 break;
7395 }
7396
7397 case AARCH64_OPND_SYSREG_IC:
7398 inst.base.operands[i].sysins_op =
7399 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
7400 goto sys_reg_ins;
7401
7402 case AARCH64_OPND_SYSREG_DC:
7403 inst.base.operands[i].sysins_op =
7404 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
7405 goto sys_reg_ins;
7406
7407 case AARCH64_OPND_SYSREG_AT:
7408 inst.base.operands[i].sysins_op =
7409 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
7410 goto sys_reg_ins;
7411
7412 case AARCH64_OPND_SYSREG_SR:
7413 inst.base.operands[i].sysins_op =
7414 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
7415 goto sys_reg_ins;
7416
7417 case AARCH64_OPND_SYSREG_TLBI:
7418 inst.base.operands[i].sysins_op =
7419 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
7420 sys_reg_ins:
7421 if (inst.base.operands[i].sysins_op == NULL)
7422 {
7423 set_fatal_syntax_error ( _("unknown or missing operation name"));
7424 goto failure;
7425 }
7426 break;
7427
7428 case AARCH64_OPND_BARRIER:
7429 case AARCH64_OPND_BARRIER_ISB:
7430 val = parse_barrier (&str);
7431 if (val != PARSE_FAIL
7432 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
7433 {
7434 /* ISB only accepts options name 'sy'. */
7435 set_syntax_error
7436 (_("the specified option is not accepted in ISB"));
7437 /* Turn off backtrack as this optional operand is present. */
7438 backtrack_pos = 0;
7439 goto failure;
7440 }
7441 if (val != PARSE_FAIL
7442 && operands[i] == AARCH64_OPND_BARRIER)
7443 {
7444 /* Regular barriers accept options CRm (C0-C15).
7445 DSB nXS barrier variant accepts values > 15. */
7446 if (val < 0 || val > 15)
7447 {
7448 set_syntax_error (_("the specified option is not accepted in DSB"));
7449 goto failure;
7450 }
7451 }
7452 /* This is an extension to accept a 0..15 immediate. */
7453 if (val == PARSE_FAIL)
7454 po_imm_or_fail (0, 15);
7455 info->barrier = aarch64_barrier_options + val;
7456 break;
7457
7458 case AARCH64_OPND_BARRIER_DSB_NXS:
7459 val = parse_barrier (&str);
7460 if (val != PARSE_FAIL)
7461 {
7462 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
7463 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7464 {
7465 set_syntax_error (_("the specified option is not accepted in DSB"));
7466 /* Turn off backtrack as this optional operand is present. */
7467 backtrack_pos = 0;
7468 goto failure;
7469 }
7470 }
7471 else
7472 {
7473 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
7474 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
7475 if (! parse_constant_immediate (&str, &val, imm_reg_type))
7476 goto failure;
7477 if (!(val == 16 || val == 20 || val == 24 || val == 28))
7478 {
7479 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
7480 goto failure;
7481 }
7482 }
7483 /* Option index is encoded as 2-bit value in val<3:2>. */
7484 val = (val >> 2) - 4;
7485 info->barrier = aarch64_barrier_dsb_nxs_options + val;
7486 break;
7487
7488 case AARCH64_OPND_PRFOP:
7489 val = parse_pldop (&str);
7490 /* This is an extension to accept a 0..31 immediate. */
7491 if (val == PARSE_FAIL)
7492 po_imm_or_fail (0, 31);
7493 inst.base.operands[i].prfop = aarch64_prfops + val;
7494 break;
7495
7496 case AARCH64_OPND_BARRIER_PSB:
7497 val = parse_barrier_psb (&str, &(info->hint_option));
7498 if (val == PARSE_FAIL)
7499 goto failure;
7500 break;
7501
7502 case AARCH64_OPND_BTI_TARGET:
7503 val = parse_bti_operand (&str, &(info->hint_option));
7504 if (val == PARSE_FAIL)
7505 goto failure;
7506 break;
7507
7508 case AARCH64_OPND_SME_ZAda_2b:
7509 case AARCH64_OPND_SME_ZAda_3b:
7510 reg = parse_reg_with_qual (&str, REG_TYPE_ZAT, &qualifier);
7511 if (!reg)
7512 goto failure;
7513 info->reg.regno = reg->number;
7514 info->qualifier = qualifier;
7515 break;
7516
7517 case AARCH64_OPND_SME_ZA_HV_idx_src:
7518 case AARCH64_OPND_SME_ZA_HV_idx_dest:
7519 case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
7520 {
7521 enum sme_hv_slice slice_indicator;
7522 int vector_select_register;
7523 int imm;
7524
7525 if (operands[i] == AARCH64_OPND_SME_ZA_HV_idx_ldstr)
7526 val = parse_sme_za_hv_tiles_operand_with_braces (&str,
7527 &slice_indicator,
7528 &vector_select_register,
7529 &imm,
7530 &qualifier);
7531 else
7532 val = parse_sme_za_hv_tiles_operand (&str, &slice_indicator,
7533 &vector_select_register,
7534 &imm,
7535 &qualifier);
7536 if (val == PARSE_FAIL)
7537 goto failure;
7538 info->za_tile_vector.regno = val;
7539 info->za_tile_vector.index.regno = vector_select_register;
7540 info->za_tile_vector.index.imm = imm;
7541 info->za_tile_vector.v = slice_indicator;
7542 info->qualifier = qualifier;
7543 break;
7544 }
7545
7546 case AARCH64_OPND_SME_list_of_64bit_tiles:
7547 val = parse_sme_list_of_64bit_tiles (&str);
7548 if (val == PARSE_FAIL)
7549 goto failure;
7550 info->imm.value = val;
7551 break;
7552
7553 case AARCH64_OPND_SME_ZA_array:
7554 {
7555 int imm;
7556 val = parse_sme_za_array (&str, &imm);
7557 if (val == PARSE_FAIL)
7558 goto failure;
7559 info->za_tile_vector.index.regno = val;
7560 info->za_tile_vector.index.imm = imm;
7561 break;
7562 }
7563
7564 case AARCH64_OPND_MOPS_ADDR_Rd:
7565 case AARCH64_OPND_MOPS_ADDR_Rs:
7566 po_char_or_fail ('[');
7567 if (!parse_x0_to_x30 (&str, info))
7568 goto failure;
7569 po_char_or_fail (']');
7570 po_char_or_fail ('!');
7571 break;
7572
7573 case AARCH64_OPND_MOPS_WB_Rn:
7574 if (!parse_x0_to_x30 (&str, info))
7575 goto failure;
7576 po_char_or_fail ('!');
7577 break;
7578
7579 default:
7580 as_fatal (_("unhandled operand code %d"), operands[i]);
7581 }
7582
7583 /* If we get here, this operand was successfully parsed. */
7584 inst.base.operands[i].present = 1;
7585 continue;
7586
7587 failure:
7588 /* The parse routine should already have set the error, but in case
7589 not, set a default one here. */
7590 if (! error_p ())
7591 set_default_error ();
7592
7593 if (! backtrack_pos)
7594 goto parse_operands_return;
7595
7596 {
7597 /* We reach here because this operand is marked as optional, and
7598 either no operand was supplied or the operand was supplied but it
7599 was syntactically incorrect. In the latter case we report an
7600 error. In the former case we perform a few more checks before
7601 dropping through to the code to insert the default operand. */
7602
7603 char *tmp = backtrack_pos;
7604 char endchar = END_OF_INSN;
7605
7606 if (i != (aarch64_num_of_operands (opcode) - 1))
7607 endchar = ',';
7608 skip_past_char (&tmp, ',');
7609
7610 if (*tmp != endchar)
7611 /* The user has supplied an operand in the wrong format. */
7612 goto parse_operands_return;
7613
7614 /* Make sure there is not a comma before the optional operand.
7615 For example the fifth operand of 'sys' is optional:
7616
7617 sys #0,c0,c0,#0, <--- wrong
7618 sys #0,c0,c0,#0 <--- correct. */
7619 if (comma_skipped_p && i && endchar == END_OF_INSN)
7620 {
7621 set_fatal_syntax_error
7622 (_("unexpected comma before the omitted optional operand"));
7623 goto parse_operands_return;
7624 }
7625 }
7626
7627 /* Reaching here means we are dealing with an optional operand that is
7628 omitted from the assembly line. */
7629 gas_assert (optional_operand_p (opcode, i));
7630 info->present = 0;
7631 process_omitted_operand (operands[i], opcode, i, info);
7632
7633 /* Try again, skipping the optional operand at backtrack_pos. */
7634 str = backtrack_pos;
7635 backtrack_pos = 0;
7636
7637 /* Clear any error record after the omitted optional operand has been
7638 successfully handled. */
7639 clear_error ();
7640 }
7641
7642 /* Check if we have parsed all the operands. */
7643 if (*str != '\0' && ! error_p ())
7644 {
7645 /* Set I to the index of the last present operand; this is
7646 for the purpose of diagnostics. */
7647 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
7648 ;
7649 set_fatal_syntax_error
7650 (_("unexpected characters following instruction"));
7651 }
7652
7653 parse_operands_return:
7654
7655 if (error_p ())
7656 {
7657 inst.parsing_error.index = i;
7658 DEBUG_TRACE ("parsing FAIL: %s - %s",
7659 operand_mismatch_kind_names[inst.parsing_error.kind],
7660 inst.parsing_error.error);
7661 /* Record the operand error properly; this is useful when there
7662 are multiple instruction templates for a mnemonic name, so that
7663 later on, we can select the error that most closely describes
7664 the problem. */
7665 record_operand_error_info (opcode, &inst.parsing_error);
7666 return false;
7667 }
7668 else
7669 {
7670 DEBUG_TRACE ("parsing SUCCESS");
7671 return true;
7672 }
7673 }
7674
7675 /* Apply fix-ups to accept programmer-friendly input while keeping
7676 libopcodes happy, i.e. libopcodes only accepts the preferred
7677 architectural syntax.
7678 Return FALSE if there is any failure; otherwise return TRUE. */
7679
7680 static bool
7681 programmer_friendly_fixup (aarch64_instruction *instr)
7682 {
7683 aarch64_inst *base = &instr->base;
7684 const aarch64_opcode *opcode = base->opcode;
7685 enum aarch64_op op = opcode->op;
7686 aarch64_opnd_info *operands = base->operands;
7687
7688 DEBUG_TRACE ("enter");
7689
7690 switch (opcode->iclass)
7691 {
7692 case testbranch:
7693 /* TBNZ Xn|Wn, #uimm6, label
7694 Test and Branch Not Zero: conditionally jumps to label if bit number
7695 uimm6 in register Xn is not zero. The bit number implies the width of
7696 the register, which may be written and should be disassembled as Wn if
7697 uimm is less than 32. */
7698 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
7699 {
7700 if (operands[1].imm.value >= 32)
7701 {
7702 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
7703 0, 31);
7704 return false;
7705 }
7706 operands[0].qualifier = AARCH64_OPND_QLF_X;
7707 }
7708 break;
7709 case loadlit:
7710 /* LDR Wt, label | =value
7711 As a convenience assemblers will typically permit the notation
7712 "=value" in conjunction with the pc-relative literal load instructions
7713 to automatically place an immediate value or symbolic address in a
7714 nearby literal pool and generate a hidden label which references it.
7715 ISREG has been set to 0 in the case of =value. */
7716 if (instr->gen_lit_pool
7717 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
7718 {
7719 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
7720 if (op == OP_LDRSW_LIT)
7721 size = 4;
7722 if (instr->reloc.exp.X_op != O_constant
7723 && instr->reloc.exp.X_op != O_big
7724 && instr->reloc.exp.X_op != O_symbol)
7725 {
7726 record_operand_error (opcode, 1,
7727 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
7728 _("constant expression expected"));
7729 return false;
7730 }
7731 if (! add_to_lit_pool (&instr->reloc.exp, size))
7732 {
7733 record_operand_error (opcode, 1,
7734 AARCH64_OPDE_OTHER_ERROR,
7735 _("literal pool insertion failed"));
7736 return false;
7737 }
7738 }
7739 break;
7740 case log_shift:
7741 case bitfield:
7742 /* UXT[BHW] Wd, Wn
7743 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
7744 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
7745 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
7746 A programmer-friendly assembler should accept a destination Xd in
7747 place of Wd, however that is not the preferred form for disassembly.
7748 */
7749 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
7750 && operands[1].qualifier == AARCH64_OPND_QLF_W
7751 && operands[0].qualifier == AARCH64_OPND_QLF_X)
7752 operands[0].qualifier = AARCH64_OPND_QLF_W;
7753 break;
7754
7755 case addsub_ext:
7756 {
7757 /* In the 64-bit form, the final register operand is written as Wm
7758 for all but the (possibly omitted) UXTX/LSL and SXTX
7759 operators.
7760 As a programmer-friendly assembler, we accept e.g.
7761 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
7762 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
7763 int idx = aarch64_operand_index (opcode->operands,
7764 AARCH64_OPND_Rm_EXT);
7765 gas_assert (idx == 1 || idx == 2);
7766 if (operands[0].qualifier == AARCH64_OPND_QLF_X
7767 && operands[idx].qualifier == AARCH64_OPND_QLF_X
7768 && operands[idx].shifter.kind != AARCH64_MOD_LSL
7769 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
7770 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
7771 operands[idx].qualifier = AARCH64_OPND_QLF_W;
7772 }
7773 break;
7774
7775 default:
7776 break;
7777 }
7778
7779 DEBUG_TRACE ("exit with SUCCESS");
7780 return true;
7781 }
7782
/* Check for loads and stores that will cause unpredictable behavior.
   Emits as_warn diagnostics only; never rejects the instruction.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 Bit 22 of the encoding distinguishes load from store in these
	 pair classes.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  /* Bit 22 set means a load-exclusive form, clear means
	     store-exclusive; bit 21 set means the pair variant.  */
	  if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7878
7879 static void
7880 force_automatic_sequence_close (void)
7881 {
7882 struct aarch64_segment_info_type *tc_seg_info;
7883
7884 tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
7885 if (tc_seg_info->insn_sequence.instr)
7886 {
7887 as_warn_where (tc_seg_info->last_file, tc_seg_info->last_line,
7888 _("previous `%s' sequence has not been closed"),
7889 tc_seg_info->insn_sequence.instr->opcode->name);
7890 init_insn_sequence (NULL, &tc_seg_info->insn_sequence);
7891 }
7892 }
7893
7894 /* A wrapper function to interface with libopcodes on encoding and
7895 record the error message if there is any.
7896
7897 Return TRUE on success; otherwise return FALSE. */
7898
7899 static bool
7900 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7901 aarch64_insn *code)
7902 {
7903 aarch64_operand_error error_info;
7904 memset (&error_info, '\0', sizeof (error_info));
7905 error_info.kind = AARCH64_OPDE_NIL;
7906 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7907 && !error_info.non_fatal)
7908 return true;
7909
7910 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7911 record_operand_error_info (opcode, &error_info);
7912 return error_info.non_fatal;
7913 }
7914
#ifdef DEBUG_AARCH64
/* Debug helper: print the operand list of OPCODE, preferring the
   operand's short name and falling back to its description.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  for (int i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *label = aarch64_get_operand_name (opcode->operands[i]);
      if (label[0] == '\0')
	label = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, label);
    }
}
#endif /* DEBUG_AARCH64 */
7930
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  templates *template;
  const aarch64_opcode *opcode;
  struct aarch64_segment_info_type *tc_seg_info;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  tc_seg_info = &seg_info (now_seg)->tc_segment_info_data;
  insn_sequence = &tc_seg_info->insn_sequence;
  tc_seg_info->last_file = as_where (&tc_seg_info->last_line);

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Scan up to the end of the mnemonic, which must end in whitespace,
     '.', or end of string.  Remember the first '.' for the b.cond
     style condition suffix.  */
  char *p = str;
  char *dot = 0;
  for (; is_part_of_name (*p); p++)
    if (*p == '.' && !dot)
      dot = p;

  if (p == str)
    {
      as_bad (_("unknown mnemonic -- `%s'"), str);
      return;
    }

  /* A ".req"-style register alias definition consumes the whole
     statement; nothing is assembled in that case.  */
  if (!dot && create_register_alias (str, p))
    return;

  template = opcode_lookup (str, dot, p);
  if (!template)
    {
      as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
	      str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* reset_aarch64_instruction clears the whole of INST; the condition
     parsed from the mnemonic suffix must survive the reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; restore a clean state before trying the
	 next opcode entry with the same mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
8086
/* Various frobbings of labels and their addresses.  */

/* Called at the start of every input line; forget any label seen on the
   previous line so md_assemble only aligns labels on its own line.  */
void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
8094
/* Record SYM as the most recently defined label (so md_assemble can
   re-anchor it to the instruction frag) and emit DWARF line info.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
8102
/* Section change hook: close any still-open instruction sequence
   before leaving the section.  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
8109
8110 int
8111 aarch64_data_in_code (void)
8112 {
8113 if (startswith (input_line_pointer + 1, "data:"))
8114 {
8115 *input_line_pointer = '/';
8116 input_line_pointer += 5;
8117 *input_line_pointer = 0;
8118 return 1;
8119 }
8120
8121 return 0;
8122 }
8123
/* Canonicalize NAME in place by stripping a trailing "/data" marker
   (added by aarch64_data_in_code).  Returns NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
8134 \f
8135 /* Table of all register names defined by default. The user can
8136 define additional names with .req. Note that all register names
8137 should appear in both upper and lowercase variants. Some registers
8138 also have mixed-case names. */
8139
8140 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
8141 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
8142 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
8143 #define REGNUMS(p,n,s,t) REGDEF(p##n##s, n, t)
8144 #define REGSET16(p,t) \
8145 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
8146 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
8147 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
8148 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
8149 #define REGSET16S(p,s,t) \
8150 REGNUMS(p, 0,s,t), REGNUMS(p, 1,s,t), REGNUMS(p, 2,s,t), REGNUMS(p, 3,s,t), \
8151 REGNUMS(p, 4,s,t), REGNUMS(p, 5,s,t), REGNUMS(p, 6,s,t), REGNUMS(p, 7,s,t), \
8152 REGNUMS(p, 8,s,t), REGNUMS(p, 9,s,t), REGNUMS(p,10,s,t), REGNUMS(p,11,s,t), \
8153 REGNUMS(p,12,s,t), REGNUMS(p,13,s,t), REGNUMS(p,14,s,t), REGNUMS(p,15,s,t)
8154 #define REGSET31(p,t) \
8155 REGSET16(p, t), \
8156 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
8157 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
8158 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
8159 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
8160 #define REGSET(p,t) \
8161 REGSET31(p,t), REGNUM(p,31,t)
8162
8163 /* These go into aarch64_reg_hsh hash-table. */
8164 static const reg_entry reg_names[] = {
8165 /* Integer registers. */
8166 REGSET31 (x, R_64), REGSET31 (X, R_64),
8167 REGSET31 (w, R_32), REGSET31 (W, R_32),
8168
8169 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
8170 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
8171 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
8172 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
8173 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
8174 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
8175
8176 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
8177 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
8178
8179 /* Floating-point single precision registers. */
8180 REGSET (s, FP_S), REGSET (S, FP_S),
8181
8182 /* Floating-point double precision registers. */
8183 REGSET (d, FP_D), REGSET (D, FP_D),
8184
8185 /* Floating-point half precision registers. */
8186 REGSET (h, FP_H), REGSET (H, FP_H),
8187
8188 /* Floating-point byte precision registers. */
8189 REGSET (b, FP_B), REGSET (B, FP_B),
8190
8191 /* Floating-point quad precision registers. */
8192 REGSET (q, FP_Q), REGSET (Q, FP_Q),
8193
8194 /* FP/SIMD registers. */
8195 REGSET (v, VN), REGSET (V, VN),
8196
8197 /* SVE vector registers. */
8198 REGSET (z, ZN), REGSET (Z, ZN),
8199
8200 /* SVE predicate registers. */
8201 REGSET16 (p, PN), REGSET16 (P, PN),
8202
8203 /* SME ZA. We model this as a register because it acts syntactically
8204 like ZA0H, supporting qualifier suffixes and indexing. */
8205 REGDEF (za, 0, ZA), REGDEF (ZA, 0, ZA),
8206
8207 /* SME ZA tile registers. */
8208 REGSET16 (za, ZAT), REGSET16 (ZA, ZAT),
8209
8210 /* SME ZA tile registers (horizontal slice). */
8211 REGSET16S (za, h, ZATH), REGSET16S (ZA, H, ZATH),
8212
8213 /* SME ZA tile registers (vertical slice). */
8214 REGSET16S (za, v, ZATV), REGSET16S (ZA, V, ZATV)
8215 };
8216
8217 #undef REGDEF
8218 #undef REGDEF_ALIAS
8219 #undef REGNUM
8220 #undef REGSET16
8221 #undef REGSET31
8222 #undef REGSET
8223
/* Table of the 16 spellings of the NZCV flags operand (e.g. for CCMP).
   Each single-letter macro is 1 when the flag letter is uppercase and 0
   when lowercase; B packs the four flag bits into the 4-bit immediate
   value N:Z:C:V (N is the most significant bit).  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

/* The single-letter macros above would otherwise shadow common
   identifiers; drop them immediately.  */
#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
8261 \f
8262 /* MD interface: bits in the object file. */
8263
8264 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
8265 for use in the a.out file, and stores them in the array pointed to by buf.
8266 This knows about the endian-ness of the target machine and does
8267 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
8268 2 (short) and 4 (long) Floating numbers are put out as a series of
8269 LITTLENUMS (shorts, here at least). */
8270
8271 void
8272 md_number_to_chars (char *buf, valueT val, int n)
8273 {
8274 if (target_big_endian)
8275 number_to_chars_bigendian (buf, val, n);
8276 else
8277 number_to_chars_littleendian (buf, val, n);
8278 }
8279
8280 /* MD interface: Sections. */
8281
8282 /* Estimate the size of a frag before relaxing. Assume everything fits in
8283 4 bytes. */
8284
8285 int
8286 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
8287 {
8288 fragp->fr_var = 4;
8289 return 4;
8290 }
8291
/* Round up a section size to the appropriate boundary.
   No extra padding is required on AArch64, so return SIZE unchanged.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  return size;
}
8299
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding this frag must cover.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach a 4-byte instruction boundary; zero-filled
     because no instruction can start at a misaligned address.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#if defined OBJ_ELF || defined OBJ_COFF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating part is a single NOP word.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
8357
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag mapping state; nothing to do.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}

#else /* OBJ_ELF is defined.  */
/* ELF variant: record the frag and emit the appropriate mapping-symbol
   state ($x/$d) for MAX_CHARS bytes, depending on the frag type.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
8403
8404 /* Whether SFrame stack trace info is supported. */
8405
8406 bool
8407 aarch64_support_sframe_p (void)
8408 {
8409 /* At this time, SFrame is supported for aarch64 only. */
8410 return (aarch64_abi == AARCH64_ABI_LP64);
8411 }
8412
/* Specify if RA tracking is needed.  Always true on AArch64, where the
   return address lives in a register (x30) rather than on the stack.  */

bool
aarch64_sframe_ra_tracking_p (void)
{
  return true;
}
8420
/* Specify the fixed offset to recover RA from CFA.
   (useful only when RA tracking is not needed).  Since RA tracking is
   always on here, return the "invalid" sentinel.  */

offsetT
aarch64_sframe_cfa_ra_offset (void)
{
  return (offsetT) SFRAME_CFA_FIXED_RA_INVALID;
}
8429
8430 /* Get the abi/arch indentifier for SFrame. */
8431
8432 unsigned char
8433 aarch64_sframe_get_abi_arch (void)
8434 {
8435 unsigned char sframe_abi_arch = 0;
8436
8437 if (aarch64_support_sframe_p ())
8438 {
8439 sframe_abi_arch = target_big_endian
8440 ? SFRAME_ABI_AARCH64_ENDIAN_BIG
8441 : SFRAME_ABI_AARCH64_ENDIAN_LITTLE;
8442 }
8443
8444 return sframe_abi_arch;
8445 }
8446
8447 #endif /* OBJ_ELF */
8448 \f
/* Initialize the DWARF-2 unwind information for this procedure:
   the CFA starts at SP with offset 0.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
8456
8457 /* Convert REGNAME to a DWARF-2 register number. */
8458
8459 int
8460 tc_aarch64_regname_to_dw2regnum (char *regname)
8461 {
8462 const reg_entry *reg = parse_reg (&regname);
8463 if (reg == NULL)
8464 return -1;
8465
8466 switch (reg->type)
8467 {
8468 case REG_TYPE_SP_32:
8469 case REG_TYPE_SP_64:
8470 case REG_TYPE_R_32:
8471 case REG_TYPE_R_64:
8472 return reg->number;
8473
8474 case REG_TYPE_FP_B:
8475 case REG_TYPE_FP_H:
8476 case REG_TYPE_FP_S:
8477 case REG_TYPE_FP_D:
8478 case REG_TYPE_FP_Q:
8479 return reg->number + 64;
8480
8481 default:
8482 break;
8483 }
8484 return -1;
8485 }
8486
8487 /* Implement DWARF2_ADDR_SIZE. */
8488
8489 int
8490 aarch64_dwarf2_addr_size (void)
8491 {
8492 if (ilp32_p)
8493 return 4;
8494 else if (llp64_p)
8495 return 8;
8496 return bfd_arch_bits_per_address (stdoutput) / 8;
8497 }
8498
8499 /* MD interface: Symbol and relocation handling. */
8500
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For AArch64 PC-relative fixups applied to instructions
   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
8521
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap prefix test before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  /* Create the GOT symbol lazily, on first reference.  */
	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
8547
8548 /* Return non-zero if the indicated VALUE has overflowed the maximum
8549 range expressible by a unsigned number with the indicated number of
8550 BITS. */
8551
8552 static bool
8553 unsigned_overflow (valueT value, unsigned bits)
8554 {
8555 valueT lim;
8556 if (bits >= sizeof (valueT) * 8)
8557 return false;
8558 lim = (valueT) 1 << bits;
8559 return (value >= lim);
8560 }
8561
8562
8563 /* Return non-zero if the indicated VALUE has overflowed the maximum
8564 range expressible by an signed number with the indicated number of
8565 BITS. */
8566
8567 static bool
8568 signed_overflow (offsetT value, unsigned bits)
8569 {
8570 offsetT lim;
8571 if (bits >= sizeof (offsetT) * 8)
8572 return false;
8573 lim = (offsetT) 1 << (bits - 1);
8574 return (value < -lim || value >= lim);
8575 }
8576
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; OP_NIL
     marks opcodes with no such counterpart.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
8639
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  /* The operand was deferred at parse time; re-enable it now that the
     final VALUE is known.  */
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN (wide inverted immediate) alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR (bitmask immediate) alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
8700
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate; OR the (possibly shifted, for EXCEPTION) value
	 into the instruction word already in the frag buffer.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
	 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
	 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
	 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12 Rn Rd ADD
	 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
	 51000000 sf 101|0001|shift imm12 Rn Rd SUB
	 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only possible
	     when the low 12 bits are clear and the rest fits in the
	     shifted 12-bit field.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction rather than
	 patching a field, since logical immediates are not a simple
	 bitfield insertion.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* A scaled offset that did not fit may still be representable
	     in the unscaled (LDUR/STUR-style) form.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8879
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  A fully-resolved,
     non-pc-relative fixup with no forced relocation is done here and
     emits nothing into the object file.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel
      && aarch64_force_reloc (fixP->fx_r_type) <= 0)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* The MOVW family share one insertion path; SCALE selects which
       16-bit group of the value goes into the instruction.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Narrow the generic reloc to the LP64 or ILP32 variant.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Narrow the generic reloc to the LP64 or ILP32 variant.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* All remaining TLS relocations: just mark the symbol thread-local
       and leave the relocation for the linker.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    case BFD_RELOC_RVA:
    case BFD_RELOC_32_SECREL:
    case BFD_RELOC_16_SECIDX:
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated struct aarch64_inst.
     N.B. currently there are a very limited number of fix-up types that
     actually use this field, so the impact on performance should be
     minimal.  */
  free (fixP->tc_fix_data.inst);

  return;
}
9272
9273 /* Translate internal representation of relocation info to BFD target
9274 format. */
9275
9276 arelent *
9277 tc_gen_reloc (asection * section, fixS * fixp)
9278 {
9279 arelent *reloc;
9280 bfd_reloc_code_real_type code;
9281
9282 reloc = XNEW (arelent);
9283
9284 reloc->sym_ptr_ptr = XNEW (asymbol *);
9285 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9286 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
9287
9288 if (fixp->fx_pcrel)
9289 {
9290 if (section->use_rela_p)
9291 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
9292 else
9293 fixp->fx_offset = reloc->address;
9294 }
9295 reloc->addend = fixp->fx_offset;
9296
9297 code = fixp->fx_r_type;
9298 switch (code)
9299 {
9300 case BFD_RELOC_16:
9301 if (fixp->fx_pcrel)
9302 code = BFD_RELOC_16_PCREL;
9303 break;
9304
9305 case BFD_RELOC_32:
9306 if (fixp->fx_pcrel)
9307 code = BFD_RELOC_32_PCREL;
9308 break;
9309
9310 case BFD_RELOC_64:
9311 if (fixp->fx_pcrel)
9312 code = BFD_RELOC_64_PCREL;
9313 break;
9314
9315 default:
9316 break;
9317 }
9318
9319 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
9320 if (reloc->howto == NULL)
9321 {
9322 as_bad_where (fixp->fx_file, fixp->fx_line,
9323 _
9324 ("cannot represent %s relocation in this object file format"),
9325 bfd_get_reloc_code_name (code));
9326 return NULL;
9327 }
9328
9329 return reloc;
9330 }
9331
9332 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
9333
9334 void
9335 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
9336 {
9337 bfd_reloc_code_real_type type;
9338 int pcrel = 0;
9339
9340 #ifdef TE_PE
9341 if (exp->X_op == O_secrel)
9342 {
9343 exp->X_op = O_symbol;
9344 type = BFD_RELOC_32_SECREL;
9345 }
9346 else if (exp->X_op == O_secidx)
9347 {
9348 exp->X_op = O_symbol;
9349 type = BFD_RELOC_16_SECIDX;
9350 }
9351 else
9352 {
9353 #endif
9354 /* Pick a reloc.
9355 FIXME: @@ Should look at CPU word size. */
9356 switch (size)
9357 {
9358 case 1:
9359 type = BFD_RELOC_8;
9360 break;
9361 case 2:
9362 type = BFD_RELOC_16;
9363 break;
9364 case 4:
9365 type = BFD_RELOC_32;
9366 break;
9367 case 8:
9368 type = BFD_RELOC_64;
9369 break;
9370 default:
9371 as_bad (_("cannot do %u-byte relocation"), size);
9372 type = BFD_RELOC_UNUSED;
9373 break;
9374 }
9375 #ifdef TE_PE
9376 }
9377 #endif
9378
9379 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
9380 }
9381
9382 /* Implement md_after_parse_args. This is the earliest time we need to decide
9383 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
9384
9385 void
9386 aarch64_after_parse_args (void)
9387 {
9388 if (aarch64_abi != AARCH64_ABI_NONE)
9389 return;
9390
9391 #ifdef OBJ_ELF
9392 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
9393 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
9394 aarch64_abi = AARCH64_ABI_ILP32;
9395 else
9396 aarch64_abi = AARCH64_ABI_LP64;
9397 #else
9398 aarch64_abi = AARCH64_ABI_LLP64;
9399 #endif
9400 }
9401
9402 #ifdef OBJ_ELF
9403 const char *
9404 elf64_aarch64_target_format (void)
9405 {
9406 #ifdef TE_CLOUDABI
9407 /* FIXME: What to do for ilp32_p ? */
9408 if (target_big_endian)
9409 return "elf64-bigaarch64-cloudabi";
9410 else
9411 return "elf64-littleaarch64-cloudabi";
9412 #else
9413 if (target_big_endian)
9414 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
9415 else
9416 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
9417 #endif
9418 }
9419
/* Symbol-frobbing hook; defer to the generic ELF handling.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
9425 #elif defined OBJ_COFF
/* Return the BFD target name used for COFF/PE output.  */
const char *
coff_aarch64_target_format (void)
{
  return "pe-aarch64-little";
}
9431 #endif
9432
9433 /* MD interface: Finalization. */
9434
9435 /* A good place to do this, although this was probably not intended
9436 for this kind of use. We need to dump the literal pool before
9437 references are made to a null symbol pointer. */
9438
9439 void
9440 aarch64_cleanup (void)
9441 {
9442 literal_pool *pool;
9443
9444 for (pool = list_of_pools; pool; pool = pool->next)
9445 {
9446 /* Put it at the end of the relevant section. */
9447 subseg_set (pool->section, pool->sub_section);
9448 s_ltorg (0);
9449 }
9450 }
9451
9452 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* The last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* The symbol sits exactly on the boundary with the next frag;
	 decide whether a later mapping symbol makes it redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
9516 #endif
9517
/* Adjust the symbol table.  Called late in assembly, before writing
   the object file.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
9530
/* Insert the KEY -> VALUE mapping into TABLE, passing 0 as the final
   ("replace") argument of str_hash_insert.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
9536
/* Insert a system-register KEY -> VALUE mapping into TABLE, asserting
   that the name fits within AARCH64_MAX_SYSREG_NAME_LEN.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
9543
9544 static void
9545 fill_instruction_hash_table (void)
9546 {
9547 const aarch64_opcode *opcode = aarch64_opcode_table;
9548
9549 while (opcode->name != NULL)
9550 {
9551 templates *templ, *new_templ;
9552 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
9553
9554 new_templ = XNEW (templates);
9555 new_templ->opcode = opcode;
9556 new_templ->next = NULL;
9557
9558 if (!templ)
9559 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
9560 else
9561 {
9562 new_templ->next = templ->next;
9563 templ->next = new_templ;
9564 }
9565 ++opcode;
9566 }
9567 }
9568
9569 static inline void
9570 convert_to_upper (char *dst, const char *src, size_t num)
9571 {
9572 unsigned int i;
9573 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
9574 *dst = TOUPPER (*src);
9575 *dst = '\0';
9576 }
9577
9578 /* Assume STR point to a lower-case string, allocate, convert and return
9579 the corresponding upper-case string. */
9580 static inline const char*
9581 get_upper_str (const char *str)
9582 {
9583 char *ret;
9584 size_t len = strlen (str);
9585 ret = XNEWVEC (char, len + 1);
9586 convert_to_upper (ret, str, len);
9587 return ret;
9588 }
9589
/* MD interface: Initialization.  */

/* Build every lookup table the parser needs: one hash table per name
   space (mnemonics, condition codes, shift/extend modifiers, the
   system-register groups, register names, barrier/prefetch/hint options
   and NZCV flag sets), then settle the CPU variant and record the BFD
   machine type.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* The sysreg-style tables are NULL-name terminated.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options:
     -mcpu wins over -march; fall back to the configured default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  if(ilp32_p)
    mach = bfd_mach_aarch64_ilp32;
  else if (llp64_p)
    mach = bfd_mach_aarch64_llp64;
  else
    mach = bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
#ifdef OBJ_ELF
  /* FIXME - is there a better way to do it ?  */
  aarch64_sframe_cfa_sp_reg = 31;
  aarch64_sframe_cfa_fp_reg = 29;	/* x29.  */
  aarch64_sframe_cfa_ra_reg = 30;
#endif
}
9761
/* Command line processing.  */

/* Short options understood by this backend; "m:" covers every -m<...>
   variant, which md_parse_option then dispatches by name.  */
const char *md_shortopts = "m:";

/* Only define the endianness-selection options that make sense for the
   configured target: both for a bi-endian target, otherwise only the
   one matching the default byte order.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);

/* Describes one simple flag-setting -m<option>: matching OPTION sets
   *VAR to VALUE (see the generic loop in md_parse_option).  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
9797
/* Simple flag-setting -m options, handled generically by
   md_parse_option.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9811
/* Describes one -mcpu= target: the feature set it enables and its
   display name.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
		  "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
		   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
		  "Cortex-A78C"},
  {"cortex-a510", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
		  "Cortex-A510"},
  {"cortex-a710", AARCH64_FEATURE (AARCH64_ARCH_V9,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_SVE2_BITPERM),
		  "Cortex-A710"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
			    "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				"Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
	     "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
				   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
				   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
		  "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			    AARCH64_FEATURE_PROFILE
			    | AARCH64_FEATURE_CVADP
			    | AARCH64_FEATURE_SVE
			    | AARCH64_FEATURE_SSBS
			    | AARCH64_FEATURE_RNG
			    | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_BFLOAT16
			    | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
	      "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
	      "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
	       "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
	    "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
		"Cortex-X1"},
  {"cortex-x2", AARCH64_FEATURE (AARCH64_ARCH_V9,
				 AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_SVE2_BITPERM),
		"Cortex-X2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9980
/* Describes one -march= target: an architecture name and the feature
   set it implies.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8.8-a", AARCH64_ARCH_V8_8},
  {"armv8-r",	AARCH64_ARCH_V8_R},
  {"armv9-a",	AARCH64_ARCH_V9},
  {"armv9.1-a",	AARCH64_ARCH_V9_1},
  {"armv9.2-a",	AARCH64_ARCH_V9_2},
  {"armv9.3-a",	AARCH64_ARCH_V9_3},
  {NULL, AARCH64_ARCH_NONE}
};
10007
/* ISA extensions.  */
/* Describes one "+ext" architectural-extension modifier: the features
   it enables and the features it depends on.  REQUIRE is used by
   aarch64_feature_enable_set/aarch64_feature_disable_set to compute
   transitive closures when adding or removing an extension.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"sme",		AARCH64_FEATURE (AARCH64_FEATURE_SME, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_BFLOAT16, 0)},
  {"sme-f64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-f64f64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_F64F64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i64",		AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"sme-i16i64",	AARCH64_FEATURE (AARCH64_FEATURE_SME_I16I64, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SME, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {"mops",		AARCH64_FEATURE (AARCH64_FEATURE_MOPS, 0),
			AARCH64_ARCH_NONE},
  {"hbc",		AARCH64_FEATURE (AARCH64_FEATURE_HBC, 0),
			AARCH64_ARCH_NONE},
  {"cssc",		AARCH64_FEATURE (AARCH64_FEATURE_CSSC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
10116
/* Describes one long option that takes an argument, e.g. "mcpu=";
   OPTION includes the trailing '=' and FUNC parses what follows it.  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
10124
10125 /* Transitive closure of features depending on set. */
10126 static aarch64_feature_set
10127 aarch64_feature_disable_set (aarch64_feature_set set)
10128 {
10129 const struct aarch64_option_cpu_value_table *opt;
10130 aarch64_feature_set prev = 0;
10131
10132 while (prev != set) {
10133 prev = set;
10134 for (opt = aarch64_features; opt->name != NULL; opt++)
10135 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
10136 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
10137 }
10138 return set;
10139 }
10140
10141 /* Transitive closure of dependencies of set. */
10142 static aarch64_feature_set
10143 aarch64_feature_enable_set (aarch64_feature_set set)
10144 {
10145 const struct aarch64_option_cpu_value_table *opt;
10146 aarch64_feature_set prev = 0;
10147
10148 while (prev != set) {
10149 prev = set;
10150 for (opt = aarch64_features; opt->name != NULL; opt++)
10151 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
10152 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
10153 }
10154 return set;
10155 }
10156
10157 static int
10158 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
10159 bool ext_only)
10160 {
10161 /* We insist on extensions being added before being removed. We achieve
10162 this by using the ADDING_VALUE variable to indicate whether we are
10163 adding an extension (1) or removing it (0) and only allowing it to
10164 change in the order -1 -> 1 -> 0. */
10165 int adding_value = -1;
10166 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
10167
10168 /* Copy the feature set, so that we can modify it. */
10169 *ext_set = **opt_p;
10170 *opt_p = ext_set;
10171
10172 while (str != NULL && *str != 0)
10173 {
10174 const struct aarch64_option_cpu_value_table *opt;
10175 const char *ext = NULL;
10176 int optlen;
10177
10178 if (!ext_only)
10179 {
10180 if (*str != '+')
10181 {
10182 as_bad (_("invalid architectural extension"));
10183 return 0;
10184 }
10185
10186 ext = strchr (++str, '+');
10187 }
10188
10189 if (ext != NULL)
10190 optlen = ext - str;
10191 else
10192 optlen = strlen (str);
10193
10194 if (optlen >= 2 && startswith (str, "no"))
10195 {
10196 if (adding_value != 0)
10197 adding_value = 0;
10198 optlen -= 2;
10199 str += 2;
10200 }
10201 else if (optlen > 0)
10202 {
10203 if (adding_value == -1)
10204 adding_value = 1;
10205 else if (adding_value != 1)
10206 {
10207 as_bad (_("must specify extensions to add before specifying "
10208 "those to remove"));
10209 return false;
10210 }
10211 }
10212
10213 if (optlen == 0)
10214 {
10215 as_bad (_("missing architectural extension"));
10216 return 0;
10217 }
10218
10219 gas_assert (adding_value != -1);
10220
10221 for (opt = aarch64_features; opt->name != NULL; opt++)
10222 if (strncmp (opt->name, str, optlen) == 0)
10223 {
10224 aarch64_feature_set set;
10225
10226 /* Add or remove the extension. */
10227 if (adding_value)
10228 {
10229 set = aarch64_feature_enable_set (opt->value);
10230 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
10231 }
10232 else
10233 {
10234 set = aarch64_feature_disable_set (opt->value);
10235 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
10236 }
10237 break;
10238 }
10239
10240 if (opt->name == NULL)
10241 {
10242 as_bad (_("unknown architectural extension `%s'"), str);
10243 return 0;
10244 }
10245
10246 str = ext;
10247 };
10248
10249 return 1;
10250 }
10251
10252 static int
10253 aarch64_parse_cpu (const char *str)
10254 {
10255 const struct aarch64_cpu_option_table *opt;
10256 const char *ext = strchr (str, '+');
10257 size_t optlen;
10258
10259 if (ext != NULL)
10260 optlen = ext - str;
10261 else
10262 optlen = strlen (str);
10263
10264 if (optlen == 0)
10265 {
10266 as_bad (_("missing cpu name `%s'"), str);
10267 return 0;
10268 }
10269
10270 for (opt = aarch64_cpus; opt->name != NULL; opt++)
10271 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10272 {
10273 mcpu_cpu_opt = &opt->value;
10274 if (ext != NULL)
10275 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
10276
10277 return 1;
10278 }
10279
10280 as_bad (_("unknown cpu `%s'"), str);
10281 return 0;
10282 }
10283
10284 static int
10285 aarch64_parse_arch (const char *str)
10286 {
10287 const struct aarch64_arch_option_table *opt;
10288 const char *ext = strchr (str, '+');
10289 size_t optlen;
10290
10291 if (ext != NULL)
10292 optlen = ext - str;
10293 else
10294 optlen = strlen (str);
10295
10296 if (optlen == 0)
10297 {
10298 as_bad (_("missing architecture name `%s'"), str);
10299 return 0;
10300 }
10301
10302 for (opt = aarch64_archs; opt->name != NULL; opt++)
10303 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
10304 {
10305 march_cpu_opt = &opt->value;
10306 if (ext != NULL)
10307 return aarch64_parse_features (ext, &march_cpu_opt, false);
10308
10309 return 1;
10310 }
10311
10312 as_bad (_("unknown architecture `%s'\n"), str);
10313 return 0;
10314 }
10315
/* ABIs.  */
/* Maps an -mabi= argument to its aarch64_abi_type value.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* Only the ABIs meaningful for the object format are offered:
   ilp32/lp64 for ELF, llp64 otherwise (COFF/PE).  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
#ifdef OBJ_ELF
  {"ilp32",	AARCH64_ABI_ILP32},
  {"lp64",	AARCH64_ABI_LP64},
#else
  {"llp64",	AARCH64_ABI_LLP64},
#endif
};
10331
10332 static int
10333 aarch64_parse_abi (const char *str)
10334 {
10335 unsigned int i;
10336
10337 if (str[0] == '\0')
10338 {
10339 as_bad (_("missing abi name `%s'"), str);
10340 return 0;
10341 }
10342
10343 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
10344 if (strcmp (str, aarch64_abis[i].name) == 0)
10345 {
10346 aarch64_abi = aarch64_abis[i].value;
10347 return 1;
10348 }
10349
10350 as_bad (_("unknown abi `%s'\n"), str);
10351 return 0;
10352 }
10353
/* Long options with arguments; each entry's func parses the text after
   the '=' (md_parse_option strips the leading "m<name>=" prefix).  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
10363
/* Handle command-line option C (with argument ARG, or NULL) on behalf of
   the generic GAS driver.  Return non-zero if the option was recognised
   and consumed, zero to let the caller report it as unknown.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple flag-setting options.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then try the long options with arguments (e.g. -mcpu=...).  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
10430
10431 void
10432 md_show_usage (FILE * fp)
10433 {
10434 struct aarch64_option_table *opt;
10435 struct aarch64_long_option_table *lopt;
10436
10437 fprintf (fp, _(" AArch64-specific assembler options:\n"));
10438
10439 for (opt = aarch64_opts; opt->option != NULL; opt++)
10440 if (opt->help != NULL)
10441 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
10442
10443 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
10444 if (lopt->help != NULL)
10445 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
10446
10447 #ifdef OPTION_EB
10448 fprintf (fp, _("\
10449 -EB assemble code for a big-endian cpu\n"));
10450 #endif
10451
10452 #ifdef OPTION_EL
10453 fprintf (fp, _("\
10454 -EL assemble code for a little-endian cpu\n"));
10455 #endif
10456 }
10457
10458 /* Parse a .cpu directive. */
10459
10460 static void
10461 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
10462 {
10463 const struct aarch64_cpu_option_table *opt;
10464 char saved_char;
10465 char *name;
10466 char *ext;
10467 size_t optlen;
10468
10469 name = input_line_pointer;
10470 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10471 saved_char = *input_line_pointer;
10472 *input_line_pointer = 0;
10473
10474 ext = strchr (name, '+');
10475
10476 if (ext != NULL)
10477 optlen = ext - name;
10478 else
10479 optlen = strlen (name);
10480
10481 /* Skip the first "all" entry. */
10482 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
10483 if (strlen (opt->name) == optlen
10484 && strncmp (name, opt->name, optlen) == 0)
10485 {
10486 mcpu_cpu_opt = &opt->value;
10487 if (ext != NULL)
10488 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10489 return;
10490
10491 cpu_variant = *mcpu_cpu_opt;
10492
10493 *input_line_pointer = saved_char;
10494 demand_empty_rest_of_line ();
10495 return;
10496 }
10497 as_bad (_("unknown cpu `%s'"), name);
10498 *input_line_pointer = saved_char;
10499 ignore_rest_of_line ();
10500 }
10501
10502
10503 /* Parse a .arch directive. */
10504
10505 static void
10506 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
10507 {
10508 const struct aarch64_arch_option_table *opt;
10509 char saved_char;
10510 char *name;
10511 char *ext;
10512 size_t optlen;
10513
10514 name = input_line_pointer;
10515 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10516 saved_char = *input_line_pointer;
10517 *input_line_pointer = 0;
10518
10519 ext = strchr (name, '+');
10520
10521 if (ext != NULL)
10522 optlen = ext - name;
10523 else
10524 optlen = strlen (name);
10525
10526 /* Skip the first "all" entry. */
10527 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
10528 if (strlen (opt->name) == optlen
10529 && strncmp (name, opt->name, optlen) == 0)
10530 {
10531 mcpu_cpu_opt = &opt->value;
10532 if (ext != NULL)
10533 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
10534 return;
10535
10536 cpu_variant = *mcpu_cpu_opt;
10537
10538 *input_line_pointer = saved_char;
10539 demand_empty_rest_of_line ();
10540 return;
10541 }
10542
10543 as_bad (_("unknown architecture `%s'\n"), name);
10544 *input_line_pointer = saved_char;
10545 ignore_rest_of_line ();
10546 }
10547
10548 /* Parse a .arch_extension directive. */
10549
10550 static void
10551 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
10552 {
10553 char saved_char;
10554 char *ext = input_line_pointer;
10555
10556 input_line_pointer = find_end_of_line (input_line_pointer, flag_m68k_mri);
10557 saved_char = *input_line_pointer;
10558 *input_line_pointer = 0;
10559
10560 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
10561 return;
10562
10563 cpu_variant = *mcpu_cpu_opt;
10564
10565 *input_line_pointer = saved_char;
10566 demand_empty_rest_of_line ();
10567 }
10568
/* Copy symbol information.  */

/* Copy the AArch64-specific symbol flag word from SRC to DEST
   (used by the generic COPY_SYMBOL_ATTRIBUTES hook).  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
10576
#ifdef OBJ_ELF
/* Same as elf_copy_symbol_attributes, but without copying st_other.
   This is needed so AArch64 specific st_other values can be independently
   specified for an IFUNC resolver (that is called by the dynamic linker)
   and the symbol it resolves (aliased to the resolver).  In particular,
   if a function symbol has special st_other value set via directives,
   then attaching an IFUNC resolver to that symbol should not override
   the st_other setting.  Requiring the directive on the IFUNC resolver
   symbol would be unexpected and problematic in C code, where the two
   symbols appear as two independent function declarations.  */

void
aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  struct elf_obj_sy *dest_obj = symbol_get_obj (dest);
  struct elf_obj_sy *src_obj = symbol_get_obj (src);

  /* If size is unset, copy size from SRC.  Because we don't track whether
     .size has been used, we can't differentiate ".size dest, 0" from the
     case where DEST's size is unset.  */
  if (dest_obj->size || S_GET_SIZE (dest) != 0)
    return;

  if (src_obj->size)
    {
      dest_obj->size = XNEW (expressionS);
      *dest_obj->size = *src_obj->size;
    }
  S_SET_SIZE (dest, S_GET_SIZE (src));
}
#endif