]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
opcodes: constify aarch64_opcode_tables
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2021 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
/* Element types for vector/SVE register suffixes and predication
   qualifiers.  */
enum vector_el_type
{
  NT_invtype = -1,	/* No/invalid element type.  */
  NT_b,			/* 8-bit element.  */
  NT_h,			/* 16-bit element.  */
  NT_s,			/* 32-bit element.  */
  NT_d,			/* 64-bit element.  */
  NT_q,			/* 128-bit element.  */
  NT_zero,		/* SVE "/z" (zeroing) predication suffix.  */
  NT_merge		/* SVE "/m" (merging) predication suffix.  */
};
101
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
106
/* Shape and index information parsed from a vector register suffix,
   e.g. the ".4s" or "[2]" part of an operand.  */
struct vector_type_el
{
  /* Element type (one of the NT_* values).  */
  enum vector_el_type type;
  /* Mask of NTA_HAS* bits saying which of the fields are meaningful.  */
  unsigned char defined;
  /* Number of elements; 0 when the width is variable (SVE) or absent.  */
  unsigned width;
  /* Element index (meaningful when NTA_HASINDEX is set in DEFINED).  */
  int64_t index;
};
114
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
116
/* Relocation/fixup information collected while parsing one instruction.  */
struct reloc
{
  /* BFD relocation code.  */
  bfd_reloc_code_real_type type;
  /* Expression the relocation applies to.  */
  expressionS exp;
  /* Non-zero if the relocation is PC-relative.  */
  int pc_rel;
  /* Operand the relocation is against.  */
  enum aarch64_opnd opnd;
  /* FIXUP_F_* flags, e.g. FIXUP_F_HAS_EXPLICIT_SHIFT.  */
  uint32_t flags;
  /* NOTE(review): presumably set when libopcodes is needed to apply the
     fixup -- confirm against the fixup-application code.  */
  unsigned need_libopcodes_p : 1;
};
126
127 struct aarch64_instruction
128 {
129 /* libopcodes structure for instruction intermediate representation. */
130 aarch64_inst base;
131 /* Record assembly errors found during the parsing. */
132 struct
133 {
134 enum aarch64_operand_error_kind kind;
135 const char *error;
136 } parsing_error;
137 /* The condition that appears in the assembly line. */
138 int cond;
139 /* Relocation information (including the GAS internal fixup). */
140 struct reloc reloc;
141 /* Need to generate an immediate in the literal pool. */
142 unsigned gen_lit_pool : 1;
143 };
144
145 typedef struct aarch64_instruction aarch64_instruction;
146
147 static aarch64_instruction inst;
148
149 static bool parse_operands (char *, const aarch64_opcode *);
150 static bool programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
183 static inline bool
184 error_p (void)
185 {
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188
189 static inline const char *
190 get_error_message (void)
191 {
192 return inst.parsing_error.error;
193 }
194
195 static inline enum aarch64_operand_error_kind
196 get_error_kind (void)
197 {
198 return inst.parsing_error.kind;
199 }
200
201 static inline void
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
203 {
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
206 }
207
208 static inline void
209 set_recoverable_error (const char *error)
210 {
211 set_error (AARCH64_OPDE_RECOVERABLE, error);
212 }
213
214 /* Use the DESC field of the corresponding aarch64_operand entry to compose
215 the error message. */
216 static inline void
217 set_default_error (void)
218 {
219 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
220 }
221
222 static inline void
223 set_syntax_error (const char *error)
224 {
225 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
226 }
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
235 static inline void
236 set_fatal_syntax_error (const char *error)
237 {
238 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
239 }
240 \f
241 /* Return value for certain parsers when the parsing fails; those parsers
242 return the information of the parsed result, e.g. register number, on
243 success. */
244 #define PARSE_FAIL -1
245
246 /* This is an invalid condition code that means no conditional field is
247 present. */
248 #define COND_ALWAYS 0x10
249
250 typedef struct
251 {
252 const char *template;
253 uint32_t value;
254 } asm_nzcv;
255
256 struct reloc_entry
257 {
258 char *name;
259 bfd_reloc_code_real_type reloc;
260 };
261
262 /* Macros to define the register types and masks for the purpose
263 of parsing. */
264
265 #undef AARCH64_REG_TYPES
266 #define AARCH64_REG_TYPES \
267 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
268 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
269 BASIC_REG_TYPE(SP_32) /* wsp */ \
270 BASIC_REG_TYPE(SP_64) /* sp */ \
271 BASIC_REG_TYPE(Z_32) /* wzr */ \
272 BASIC_REG_TYPE(Z_64) /* xzr */ \
273 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
274 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
275 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
276 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
277 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
278 BASIC_REG_TYPE(VN) /* v[0-31] */ \
279 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
280 BASIC_REG_TYPE(PN) /* p[0-15] */ \
281 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
282 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
283 /* Typecheck: same, plus SVE registers. */ \
284 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
285 | REG_TYPE(ZN)) \
286 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
287 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
289 /* Typecheck: same, plus SVE registers. */ \
290 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
291 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
292 | REG_TYPE(ZN)) \
293 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
294 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
295 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
296 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
297 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
298 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
299 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
300 /* Typecheck: any [BHSDQ]P FP. */ \
301 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
302 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
303 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
304 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
306 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
307 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
308 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
309 be used for SVE instructions, since Zn and Pn are valid symbols \
310 in other contexts. */ \
311 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
313 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
314 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
315 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
316 | REG_TYPE(ZN) | REG_TYPE(PN)) \
317 /* Any integer register; used for error messages only. */ \
318 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
320 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
321 /* Pseudo type to mark the end of the enumerator sequence. */ \
322 BASIC_REG_TYPE(MAX)
323
324 #undef BASIC_REG_TYPE
325 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
326 #undef MULTI_REG_TYPE
327 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
328
329 /* Register type enumerators. */
330 typedef enum aarch64_reg_type_
331 {
332 /* A list of REG_TYPE_*. */
333 AARCH64_REG_TYPES
334 } aarch64_reg_type;
335
336 #undef BASIC_REG_TYPE
337 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
338 #undef REG_TYPE
339 #define REG_TYPE(T) (1 << REG_TYPE_##T)
340 #undef MULTI_REG_TYPE
341 #define MULTI_REG_TYPE(T,V) V,
342
343 /* Structure for a hash table entry for a register. */
/* Structure for a hash table entry for a register.  */
typedef struct
{
  /* Register name as written in assembly source.  */
  const char *name;
  /* Register number encoded into the instruction.  */
  unsigned char number;
  /* REG_TYPE_* classification, stored in 8 bits.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;
  /* NOTE(review): presumably non-zero for predefined registers as
     opposed to user-defined aliases -- confirm against .req handling.  */
  unsigned char builtin;
} reg_entry;
351
352 /* Values indexed by aarch64_reg_type to assist the type checking. */
353 static const unsigned reg_type_masks[] =
354 {
355 AARCH64_REG_TYPES
356 };
357
358 #undef BASIC_REG_TYPE
359 #undef REG_TYPE
360 #undef MULTI_REG_TYPE
361 #undef AARCH64_REG_TYPES
362
/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be kept synchronized with the aarch64_reg_type
   definitions above.  */
/* Return the (untranslated) diagnostic string to use when a register of
   type REG_TYPE was expected but something else was parsed.  Calls
   as_fatal (which does not return) for types without a dedicated
   message.  */
static const char *
get_reg_expected_msg (aarch64_reg_type reg_type)
{
  const char *msg;

  switch (reg_type)
    {
    case REG_TYPE_R_32:
      msg = N_("integer 32-bit register expected");
      break;
    case REG_TYPE_R_64:
      msg = N_("integer 64-bit register expected");
      break;
    case REG_TYPE_R_N:
      msg = N_("integer register expected");
      break;
    case REG_TYPE_R64_SP:
      msg = N_("64-bit integer or SP register expected");
      break;
    case REG_TYPE_SVE_BASE:
      msg = N_("base register expected");
      break;
    case REG_TYPE_R_Z:
      msg = N_("integer or zero register expected");
      break;
    case REG_TYPE_SVE_OFFSET:
      msg = N_("offset register expected");
      break;
    case REG_TYPE_R_SP:
      msg = N_("integer or SP register expected");
      break;
    case REG_TYPE_R_Z_SP:
      msg = N_("integer, zero or SP register expected");
      break;
    case REG_TYPE_FP_B:
      msg = N_("8-bit SIMD scalar register expected");
      break;
    case REG_TYPE_FP_H:
      msg = N_("16-bit SIMD scalar or floating-point half precision "
	       "register expected");
      break;
    case REG_TYPE_FP_S:
      msg = N_("32-bit SIMD scalar or floating-point single precision "
	       "register expected");
      break;
    case REG_TYPE_FP_D:
      msg = N_("64-bit SIMD scalar or floating-point double precision "
	       "register expected");
      break;
    case REG_TYPE_FP_Q:
      msg = N_("128-bit SIMD scalar or floating-point quad precision "
	       "register expected");
      break;
    case REG_TYPE_R_Z_BHSDQ_V:
    case REG_TYPE_R_Z_SP_BHSDQ_VZP:
      msg = N_("register expected");
      break;
    case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
      msg = N_("SIMD scalar or floating-point register expected");
      break;
    case REG_TYPE_VN:		/* any V reg  */
      msg = N_("vector register expected");
      break;
    case REG_TYPE_ZN:
      msg = N_("SVE vector register expected");
      break;
    case REG_TYPE_PN:
      msg = N_("SVE predicate register expected");
      break;
    default:
      as_fatal (_("invalid register type %d"), reg_type);
    }
  return msg;
}
440
441 /* Some well known registers that we refer to directly elsewhere. */
442 #define REG_SP 31
443 #define REG_ZR 31
444
445 /* Instructions take 4 bytes in the object file. */
446 #define INSN_SIZE 4
447
448 static htab_t aarch64_ops_hsh;
449 static htab_t aarch64_cond_hsh;
450 static htab_t aarch64_shift_hsh;
451 static htab_t aarch64_sys_regs_hsh;
452 static htab_t aarch64_pstatefield_hsh;
453 static htab_t aarch64_sys_regs_ic_hsh;
454 static htab_t aarch64_sys_regs_dc_hsh;
455 static htab_t aarch64_sys_regs_at_hsh;
456 static htab_t aarch64_sys_regs_tlbi_hsh;
457 static htab_t aarch64_sys_regs_sr_hsh;
458 static htab_t aarch64_reg_hsh;
459 static htab_t aarch64_barrier_opt_hsh;
460 static htab_t aarch64_nzcv_hsh;
461 static htab_t aarch64_pldop_hsh;
462 static htab_t aarch64_hint_opt_hsh;
463
464 /* Stuff needed to resolve the label ambiguity
465 As:
466 ...
467 label: <insn>
468 may differ from:
469 ...
470 label:
471 <insn> */
472
473 static symbolS *last_label_seen;
474
475 /* Literal pool structure. Held on a per-section
476 and per-sub-section basis. */
477
478 #define MAX_LITERAL_POOL_SIZE 1024
479 typedef struct literal_expression
480 {
481 expressionS exp;
482 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
483 LITTLENUM_TYPE * bignum;
484 } literal_expression;
485
486 typedef struct literal_pool
487 {
488 literal_expression literals[MAX_LITERAL_POOL_SIZE];
489 unsigned int next_free_entry;
490 unsigned int id;
491 symbolS *symbol;
492 segT section;
493 subsegT sub_section;
494 int size;
495 struct literal_pool *next;
496 } literal_pool;
497
498 /* Pointer to a linked list of literal pools. */
499 static literal_pool *list_of_pools = NULL;
500 \f
501 /* Pure syntax. */
502
503 /* This array holds the chars that always start a comment. If the
504 pre-processor is disabled, these aren't very useful. */
505 const char comment_chars[] = "";
506
507 /* This array holds the chars that only start a comment at the beginning of
508 a line. If the line seems to have the form '# 123 filename'
509 .line and .file directives will appear in the pre-processed output. */
510 /* Note that input_file.c hand checks for '#' at the beginning of the
511 first line of the input file. This is because the compiler outputs
512 #NO_APP at the beginning of its output. */
513 /* Also note that comments like this one will always work. */
514 const char line_comment_chars[] = "#";
515
516 const char line_separator_chars[] = ";";
517
518 /* Chars that can be used to separate mant
519 from exp in floating point numbers. */
520 const char EXP_CHARS[] = "eE";
521
522 /* Chars that mean this number is a floating point constant. */
523 /* As in 0f12.456 */
524 /* or 0d1.2345e12 */
525
526 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhH";
527
528 /* Prefix character that indicates the start of an immediate value. */
529 #define is_immediate_prefix(C) ((C) == '#')
530
531 /* Separator character handling. */
532
533 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
534
/* If **STR is the character C, advance *STR past it and return TRUE;
   otherwise leave *STR unchanged and return FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;

  ++*str;
  return true;
}
546
547 #define skip_past_comma(str) skip_past_char (str, ',')
548
549 /* Arithmetic expressions (possibly involving symbols). */
550
551 static bool in_aarch64_get_expression = false;
552
553 /* Third argument to aarch64_get_expression. */
554 #define GE_NO_PREFIX false
555 #define GE_OPT_PREFIX true
556
557 /* Fourth argument to aarch64_get_expression. */
558 #define ALLOW_ABSENT false
559 #define REJECT_ABSENT true
560
561 /* Fifth argument to aarch64_get_expression. */
562 #define NORMAL_RESOLUTION false
563
/* Return TRUE if the string pointed to by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.
   If DEFER_RESOLUTION is true, then do not resolve expressions against
   constant symbols.  Necessary if the expression is part of a fixup
   that uses a reloc that must be emitted.  */
573
static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent,
			bool defer_resolution)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression parser reads from input_line_pointer, so
     point it at our string for the duration of the call and restore it
     afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Let md_operand know that it should mark bad expressions as
     O_illegal rather than ignoring them.  */
  in_aarch64_get_expression = true;
  if (defer_resolution)
    seg = deferred_expression (ep);
  else
    seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#' prefix means an immediate was definitely intended, so a
	 bad expression is fatal rather than a mere template mismatch.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
637
638 /* Turn a string in input_line_pointer into a floating point constant
639 of type TYPE, and store the appropriate bytes in *LITP. The number
640 of LITTLENUMS emitted is stored in *SIZEP. An error message is
641 returned, or NULL on OK. */
642
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* If this is a bfloat16 type, then parse it slightly differently -
     as it does not follow the IEEE standard exactly.  */
  if (type == 'b')
    {
      char * t;
      LITTLENUM_TYPE words[MAX_LITTLENUMS];
      FLONUM_TYPE generic_float;

      /* Parse with a precision of 1 littlenum and an 8-bit exponent.  */
      t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float);

      if (t)
	input_line_pointer = t;
      else
	return _("invalid floating point number");

      switch (generic_float.sign)
	{
	/* Is +Inf.  */
	case 'P':
	  words[0] = 0x7f80;
	  break;

	/* Is -Inf.  */
	case 'N':
	  words[0] = 0xff80;
	  break;

	/* Is NaN.  */
	/* bfloat16 has two types of NaN - quiet and signalling.
	   Quiet NaN has bit[6] == 1 && fraction != 0, whereas
	   signalling NaN's have bit[6] == 0 && fraction != 0.
	   Chose this specific encoding as it is the same form
	   as used by other IEEE 754 encodings in GAS.  */
	case 0:
	  words[0] = 0x7fff;
	  break;

	default:
	  break;
	}

      /* A bfloat16 constant occupies one 16-bit littlenum.  */
      *sizeP = 2;

      md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));

      return NULL;
    }

  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
696
697 /* We handle all bad expressions here, so that we can report the faulty
698 instruction in the error message. */
699 void
700 md_operand (expressionS * exp)
701 {
702 if (in_aarch64_get_expression)
703 exp->X_op = O_illegal;
704 }
705
706 /* Immediate values. */
707
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which
   is set will be the most meaningful.  Avoid overwriting it with later
   (cascading) errors by calling this function.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;

  set_syntax_error (error);
}
719
/* Similar to first_error, but this function accepts a formatted error
   message.  The formatted result must fit in the 100-byte buffer.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* A truncated or failed format indicates a caller bug; checked
	 only in asserting builds via know().  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
744
745 /* Register parsing. */
746
747 /* Generic register parser which is called by other specialized
748 register parsers.
749 CCP points to what should be the beginning of a register name.
750 If it is indeed a valid register name, advance CCP over it and
751 return the reg_entry structure; otherwise return NULL.
752 It does not issue diagnostics. */
753
754 static reg_entry *
755 parse_reg (char **ccp)
756 {
757 char *start = *ccp;
758 char *p;
759 reg_entry *reg;
760
761 #ifdef REGISTER_PREFIX
762 if (*start != REGISTER_PREFIX)
763 return NULL;
764 start++;
765 #endif
766
767 p = start;
768 if (!ISALPHA (*p) || !is_name_beginner (*p))
769 return NULL;
770
771 do
772 p++;
773 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
774
775 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
776
777 if (!reg)
778 return NULL;
779
780 *ccp = p;
781 return reg;
782 }
783
784 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
785 return FALSE. */
786 static bool
787 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
788 {
789 return (reg_type_masks[type] & (1 << reg->type)) != 0;
790 }
791
792 /* Try to parse a base or offset register. Allow SVE base and offset
793 registers if REG_TYPE includes SVE registers. Return the register
794 entry on success, setting *QUALIFIER to the register qualifier.
795 Return null otherwise.
796
797 Note that this function does not issue any diagnostics. */
798
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit registers (including WSP and WZR) take the W qualifier.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit registers (including SP and XZR) take the X qualifier.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    /* SVE registers are only accepted when REG_TYPE allows them, and
       must carry an explicit ".s" or ".d" element-size suffix.  */
    case REG_TYPE_ZN:
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Skip the two-character suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
849
850 /* Try to parse a base or offset register. Return the register entry
851 on success, setting *QUALIFIER to the register qualifier. Return null
852 otherwise.
853
854 Note that this function does not issue any diagnostics. */
855
856 static const reg_entry *
857 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
858 {
859 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
860 }
861
862 /* Parse the qualifier of a vector register or vector element of type
863 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
864 succeeds; otherwise return FALSE.
865
866 Accept only one occurrence of:
867 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
868 b h s d q */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector/predicate registers, and suffixes without a leading
     digit, have no explicit element count.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are only valid for SVE registers or as "1q".  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* Only 64-bit and 128-bit total vector sizes are accepted, plus the
     special half-width 2h and 4b forms.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      /* NOTE(review): WIDTH is the element count and *PTR the element-size
	 letter, which reads oddly against the message wording ("element
	 size %d ... combination %c") -- confirm the intended argument
	 order before changing the message.  */
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return true;
}
947
948 /* *STR contains an SVE zero/merge predication suffix. Parse it into
949 *PARSED_TYPE and point *STR at the end of the suffix. */
950
951 static bool
952 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
953 {
954 char *ptr = *str;
955
956 /* Skip '/'. */
957 gas_assert (*ptr == '/');
958 ptr++;
959 switch (TOLOWER (*ptr))
960 {
961 case 'z':
962 parsed_type->type = NT_zero;
963 break;
964 case 'm':
965 parsed_type->type = NT_merge;
966 break;
967 default:
968 if (*ptr != '\0' && *ptr != ',')
969 first_error_fmt (_("unexpected character `%c' in predication type"),
970 *ptr);
971 else
972 first_error (_("missing predication type"));
973 return false;
974 }
975 parsed_type->width = 0;
976 *str = ptr + 1;
977 return true;
978 }
979
980 /* Parse a register of the type TYPE.
981
982 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
983 name or the parsed register is not of TYPE.
984
985 Otherwise return the register number, and optionally fill in the actual
986 type of the register in *RTYPE when multiple alternatives were given, and
987 return the register shape and element index information in *TYPEINFO.
988
989 IN_REG_LIST should be set with TRUE if the caller is parsing a register
990 list. */
991
static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bool in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start with an empty qualifier: no type, no index.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the actual type of the register that was found.  */
  type = reg->type;

  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  /* Predicate registers may instead carry a /z or /m suffix.  */
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* The index must be a constant expression.  */
      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
			      NORMAL_RESOLUTION);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  Note that this only
     records the error; the register number is still returned below.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1116
1117 /* Parse register.
1118
1119 Return the register number on success; return PARSE_FAIL otherwise.
1120
1121 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1122 the register (e.g. NEON double or quad reg when either has been requested).
1123
1124 If this is a NEON vector register with additional type information, fill
1125 in the struct pointed to by VECTYPE (if non-NULL).
1126
1127 This parser does not handle register list. */
1128
1129 static int
1130 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1131 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1132 {
1133 struct vector_type_el atype;
1134 char *str = *ccp;
1135 int reg = parse_typed_reg (&str, type, rtype, &atype,
1136 /*in_reg_list= */ false);
1137
1138 if (reg == PARSE_FAIL)
1139 return PARSE_FAIL;
1140
1141 if (vectype)
1142 *vectype = atype;
1143
1144 *ccp = str;
1145
1146 return reg;
1147 }
1148
1149 static inline bool
1150 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1151 {
1152 return
1153 e1.type == e2.type
1154 && e1.defined == e2.defined
1155 && e1.width == e2.width && e1.index == e2.index;
1156 }
1157
1158 /* This function parses a list of vector registers of type TYPE.
1159 On success, it returns the parsed register list information in the
1160 following encoded format:
1161
1162 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1163 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1164
1165 The information of the register shape and/or index is returned in
1166 *VECTYPE.
1167
1168 It returns PARSE_FAIL if the register list is invalid.
1169
1170 The list contains one to four registers.
1171 Each register can be one of:
1172 <Vt>.<T>[<index>]
1173 <Vt>.<T>
1174 All <T> should be identical.
1175 All <index> should be identical.
1176 There are restrictions on <Vt> numbers which are checked later
1177 (by reg_list_valid_p). */
1178
static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;

  /* The list must open with '{'.  */
  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  /* TYPEINFO_FIRST records the shape of the first register; all later
     registers must match it exactly (checked with eq_vector_type_el).  */
  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;	/* previous register starts the range.  */
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ true);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      /* Any element with an index obliges the whole list to carry a
	 trailing [index]; see the expect_index block below.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* Second half of "Va-Vb": the range must not go backwards.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Pack each register number of the (possibly single-element) range
	 into the next free 5-bit field of RET_VAL.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue after a comma; a '-' (no comma) instead starts a range.
     Note the comma operator: IN_RANGE is set whenever we test for '-'.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* If any element was written with an index, the whole list takes one
     trailing "[imm]" which must be a constant expression.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
				  NORMAL_RESOLUTION);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  /* Architectural limit: one to four registers per list.  */
  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Encoding documented above: regnos in bits 2.., count-1 in bits 0-1.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1319
1320 /* Directives: register aliases. */
1321
1322 static reg_entry *
1323 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1324 {
1325 reg_entry *new;
1326 const char *name;
1327
1328 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1329 {
1330 if (new->builtin)
1331 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1332 str);
1333
1334 /* Only warn about a redefinition if it's not defined as the
1335 same register. */
1336 else if (new->number != number || new->type != type)
1337 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1338
1339 return NULL;
1340 }
1341
1342 name = xstrdup (str);
1343 new = XNEW (reg_entry);
1344
1345 new->name = name;
1346 new->number = number;
1347 new->type = type;
1348 new->builtin = false;
1349
1350 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1351
1352 return new;
1353 }
1354
1355 /* Look for the .req directive. This is of the form:
1356
1357 new_register_name .req existing_register_name
1358
1359 If we find one, or if it looks sufficiently like one that we want to
1360 handle any error here, return TRUE. Otherwise return FALSE. */
1361
static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;			/* Step past " .req " to the old name.  */
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Return TRUE: the line looked like a .req, so it has been
	 consumed even though no alias was created.  */
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the upper-case variant if it differs from the
	 name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	     foo .req r0
	     Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise for the all-lower-case variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1434
1435 /* Should never be called, as .req goes between the alias and the
1436 register name, not at the beginning of the line. */
/* Handler for a stand-alone ".req" pseudo-op.  Should never be called,
   as .req goes between the alias and the register name, not at the
   beginning of the line (see create_register_alias); reaching here
   therefore means the directive was malformed.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1442
1443 /* The .unreq directive deletes an alias which was previously defined
1444 by .req. For example:
1445
1446 my_alias .req r11
1447 .unreq my_alias */
1448
1449 static void
1450 s_unreq (int a ATTRIBUTE_UNUSED)
1451 {
1452 char *name;
1453 char saved_char;
1454
1455 name = input_line_pointer;
1456
1457 while (*input_line_pointer != 0
1458 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1459 ++input_line_pointer;
1460
1461 saved_char = *input_line_pointer;
1462 *input_line_pointer = 0;
1463
1464 if (!*name)
1465 as_bad (_("invalid syntax for .unreq directive"));
1466 else
1467 {
1468 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1469
1470 if (!reg)
1471 as_bad (_("unknown register alias '%s'"), name);
1472 else if (reg->builtin)
1473 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1474 name);
1475 else
1476 {
1477 char *p;
1478 char *nbuf;
1479
1480 str_hash_delete (aarch64_reg_hsh, name);
1481 free ((char *) reg->name);
1482 free (reg);
1483
1484 /* Also locate the all upper case and all lower case versions.
1485 Do not complain if we cannot find one or the other as it
1486 was probably deleted above. */
1487
1488 nbuf = strdup (name);
1489 for (p = nbuf; *p; p++)
1490 *p = TOUPPER (*p);
1491 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1492 if (reg)
1493 {
1494 str_hash_delete (aarch64_reg_hsh, nbuf);
1495 free ((char *) reg->name);
1496 free (reg);
1497 }
1498
1499 for (p = nbuf; *p; p++)
1500 *p = TOLOWER (*p);
1501 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1502 if (reg)
1503 {
1504 str_hash_delete (aarch64_reg_hsh, nbuf);
1505 free ((char *) reg->name);
1506 free (reg);
1507 }
1508
1509 free (nbuf);
1510 }
1511 }
1512
1513 *input_line_pointer = saved_char;
1514 demand_empty_rest_of_line ();
1515 }
1516
1517 /* Directives: Instruction set selection. */
1518
1519 #ifdef OBJ_ELF
1520 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1521 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1522 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1523 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1524
1525 /* Create a new mapping symbol for the transition to STATE. */
1526
/* Create a new mapping symbol ("$d" for data, "$x" for code) marking a
   transition to STATE at offset VALUE within FRAG, and record it in the
   frag's first_map/last_map bookkeeping.  */
static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Discard the superseded symbol at offset 0.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within one frag are created in address order.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Same-offset symbol: the new one replaces the old.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1580
1581 /* We must sometimes convert a region marked as code to data during
1582 code alignment, if an odd number of bytes have to be padded. The
1583 code mapping symbol is pushed to an aligned address. */
1584
/* Mark BYTES bytes of padding at offset VALUE in FRAG as data: emit a
   "$d" mapping symbol at VALUE and a STATE mapping symbol after the
   padding, replacing any mapping symbol already at VALUE.  */
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* If it was also the first symbol in the frag, clear that too.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* $d at the start of the padding, then back to STATE after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1608
1609 static void mapping_state_2 (enum mstate state, int max_chars);
1610
1611 /* Set the mapping state to STATE. Only call this when about to
1612 emit some STATE bytes to the file. */
1613
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* Helper to make the transition table below readable.  */
#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
         if we're at the first frag, check it's size > 0;
         if we're not at the first frag, then for sure
         the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Emit the STATE mapping symbol at the current position.  */
  mapping_state_2 (state, 0);
}
1652
1653 /* Same as mapping_state, but MAX_CHARS bytes have already been
1654 allocated. Put the mapping symbol that far back. */
1655
1656 static void
1657 mapping_state_2 (enum mstate state, int max_chars)
1658 {
1659 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1660
1661 if (!SEG_NORMAL (now_seg))
1662 return;
1663
1664 if (mapstate == state)
1665 /* The mapping symbol has already been emitted.
1666 There is nothing else to do. */
1667 return;
1668
1669 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1670 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1671 }
1672 #else
1673 #define mapping_state(x) /* nothing */
1674 #define mapping_state_2(x, y) /* nothing */
1675 #endif
1676
1677 /* Directives: sectioning and alignment. */
1678
/* Handler for the ".bss" directive: switch output to the BSS section
   and mark the current position as data for mapping-symbol purposes.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1688
/* Handler for the ".even" directive: align the current position to a
   2-byte boundary and record that alignment for the section.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);	/* Alignment expressed as a power of 2.  */

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1700
1701 /* Directives: Literal pools. */
1702
1703 static literal_pool *
1704 find_literal_pool (int size)
1705 {
1706 literal_pool *pool;
1707
1708 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1709 {
1710 if (pool->section == now_seg
1711 && pool->sub_section == now_subseg && pool->size == size)
1712 break;
1713 }
1714
1715 return pool;
1716 }
1717
1718 static literal_pool *
1719 find_or_make_literal_pool (int size)
1720 {
1721 /* Next literal pool ID number. */
1722 static unsigned int latest_pool_num = 1;
1723 literal_pool *pool;
1724
1725 pool = find_literal_pool (size);
1726
1727 if (pool == NULL)
1728 {
1729 /* Create a new pool. */
1730 pool = XNEW (literal_pool);
1731 if (!pool)
1732 return NULL;
1733
1734 /* Currently we always put the literal pool in the current text
1735 section. If we were generating "small" model code where we
1736 knew that all code and initialised data was within 1MB then
1737 we could output literals to mergeable, read-only data
1738 sections. */
1739
1740 pool->next_free_entry = 0;
1741 pool->section = now_seg;
1742 pool->sub_section = now_subseg;
1743 pool->size = size;
1744 pool->next = list_of_pools;
1745 pool->symbol = NULL;
1746
1747 /* Add it to the list. */
1748 list_of_pools = pool;
1749 }
1750
1751 /* New pools, and emptied pools, will have a NULL symbol. */
1752 if (pool->symbol == NULL)
1753 {
1754 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1755 &zero_address_frag, 0);
1756 pool->id = latest_pool_num++;
1757 }
1758
1759 /* Done. */
1760 return pool;
1761 }
1762
1763 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1764 Return TRUE on success, otherwise return FALSE. */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match on symbol(s) plus addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP in place to reference the pool slot: the pool's label
     symbol plus the byte offset of this entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1822
1823 /* Can't use symbol_new here, so have to create a symbol and then at
1824 a later date assign it a value. That's what these functions do. */
1825
/* Give the pre-created symbol SYMBOLP its name, segment, value and
   frag, then append it to the global symbol chain.  Used for literal
   pool labels, which are created before their address is known.  */
static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns stable storage.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1873
1874
/* Handler for the ".ltorg"/".pool" directives: emit every non-empty
   literal pool (4-byte and 8-byte entry sizes) at the current position
   and mark the pools as empty for reuse.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Pools exist for entry sizes 4 (align=2) and 8 (align=3) bytes;
     align 4 (16-byte) is covered for completeness.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* Pool contents are data, not instructions.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 byte in the name makes the label impossible to write
	 in user source, avoiding collisions with user symbols.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Bind the pool's placeholder symbol to the current address.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  /* The saved bignum copy is no longer needed once emitted.  */
	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1933
1934 #ifdef OBJ_ELF
1935 /* Forward declarations for functions below, in the MD interface
1936 section. */
1937 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1938 static struct reloc_table_entry * find_reloc_table_entry (char **);
1939
1940 /* Directives: Data. */
1941 /* N.B. the support for relocation suffix in this directive needs to be
1942 implemented properly. */
1943
/* Handler for the ".word"/".long" (NBYTES==4) and ".xword"/".dword"
   (NBYTES==8) directives: emit a comma-separated list of expressions
   as NBYTES-wide data.  Relocation suffixes (":suffix:") are parsed
   but not yet supported (see the N.B. above).  */
static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* A bare directive with no operands emits nothing.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbol operand may carry an optional "#:suffix:" style
	     relocation specifier.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1995
1996 /* Mark symbol that it follows a variant PCS convention. */
1997
1998 static void
1999 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
2000 {
2001 char *name;
2002 char c;
2003 symbolS *sym;
2004 asymbol *bfdsym;
2005 elf_symbol_type *elfsym;
2006
2007 c = get_symbol_name (&name);
2008 if (!*name)
2009 as_bad (_("Missing symbol name in directive"));
2010 sym = symbol_find_or_make (name);
2011 restore_line_pointer (c);
2012 demand_empty_rest_of_line ();
2013 bfdsym = symbol_get_bfdsym (sym);
2014 elfsym = elf_symbol_from (bfdsym);
2015 gas_assert (elfsym);
2016 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2017 }
2018 #endif /* OBJ_ELF */
2019
2020 /* Output a 32-bit word, but mark as an instruction. */
2021
/* Handler for the ".inst" directive: emit a comma-separated list of
   constant expressions as 32-bit instruction words (marked as code,
   not data, for mapping-symbol purposes).  */
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  /* A bare ".inst" with no operands emits nothing.  */
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* AArch64 instructions are always little-endian words; byte-swap
	 when assembling for a big-endian target so emit_expr writes the
	 correct byte order.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2072
2073 static void
2074 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2075 {
2076 demand_empty_rest_of_line ();
2077 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2078 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2079 }
2080
2081 #ifdef OBJ_ELF
2082 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2083
2084 static void
2085 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2086 {
2087 expressionS exp;
2088
2089 expression (&exp);
2090 frag_grow (4);
2091 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2092 BFD_RELOC_AARCH64_TLSDESC_ADD);
2093
2094 demand_empty_rest_of_line ();
2095 }
2096
2097 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2098
2099 static void
2100 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2101 {
2102 expressionS exp;
2103
2104 /* Since we're just labelling the code, there's no need to define a
2105 mapping symbol. */
2106 expression (&exp);
2107 /* Make sure there is enough room in this frag for the following
2108 blr. This trick only works if the blr follows immediately after
2109 the .tlsdesc directive. */
2110 frag_grow (4);
2111 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2112 BFD_RELOC_AARCH64_TLSDESC_CALL);
2113
2114 demand_empty_rest_of_line ();
2115 }
2116
2117 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2118
2119 static void
2120 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2121 {
2122 expressionS exp;
2123
2124 expression (&exp);
2125 frag_grow (4);
2126 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2127 BFD_RELOC_AARCH64_TLSDESC_LDR);
2128
2129 demand_empty_rest_of_line ();
2130 }
2131 #endif /* OBJ_ELF */
2132
2133 static void s_aarch64_arch (int);
2134 static void s_aarch64_cpu (int);
2135 static void s_aarch64_arch_extension (int);
2136
2137 /* This table describes all the machine specific pseudo-ops the assembler
2138 has to support. The fields are:
2139 pseudo-op name without dot
2140 function to call to execute this pseudo-op
2141 Integer arg to pass to the function. */
2142
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".ltorg" and ".pool" are synonyms: both flush the literal pools.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The integer argument is the data width in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* The char argument selects the float format for float_cons.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2170 \f
2171
2172 /* Check whether STR points to a register name followed by a comma or the
2173 end of line; REG_TYPE indicates which register types are checked
2174 against. Return TRUE if STR is such a register name; otherwise return
2175 FALSE. The function does not intend to produce any diagnostics, but since
2176 the register parser aarch64_reg_parse, which is called by this function,
2177 does produce diagnostics, we call clear_error to clear any diagnostics
2178 that may be generated by aarch64_reg_parse.
2179 Also, the function returns FALSE directly if there is any user error
2180 present at the function entry. This prevents the existing diagnostics
2181 state from being spoiled.
2182 The function currently serves parse_constant_immediate and
2183 parse_big_immediate only. */
2184 static bool
2185 reg_name_p (char *str, aarch64_reg_type reg_type)
2186 {
2187 int reg;
2188
2189 /* Prevent the diagnostics state from being spoiled. */
2190 if (error_p ())
2191 return false;
2192
2193 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2194
2195 /* Clear the parsing error that may be set by the reg parser. */
2196 clear_error ();
2197
2198 if (reg == PARSE_FAIL)
2199 return false;
2200
2201 skip_whitespace (str);
2202 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2203 return true;
2204
2205 return false;
2206 }
2207
2208 /* Parser functions used exclusively in instruction operands. */
2209
2210 /* Parse an immediate expression which may not be constant.
2211
2212 To prevent the expression parser from pushing a register name
2213 into the symbol table as an undefined symbol, firstly a check is
2214 done to find out whether STR is a register of type REG_TYPE followed
2215 by a comma or the end of line. Return FALSE if STR is such a string. */
2216
2217 static bool
2218 parse_immediate_expression (char **str, expressionS *exp,
2219 aarch64_reg_type reg_type)
2220 {
2221 if (reg_name_p (*str, reg_type))
2222 {
2223 set_recoverable_error (_("immediate operand required"));
2224 return false;
2225 }
2226
2227 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT,
2228 NORMAL_RESOLUTION);
2229
2230 if (exp->X_op == O_absent)
2231 {
2232 set_fatal_syntax_error (_("missing immediate expression"));
2233 return false;
2234 }
2235
2236 return true;
2237 }
2238
2239 /* Constant immediate-value read function for use in insn parsing.
2240 STR points to the beginning of the immediate (with the optional
2241 leading #); *VAL receives the value. REG_TYPE says which register
2242 names should be treated as registers rather than as symbolic immediates.
2243
2244 Return TRUE on success; otherwise return FALSE. */
2245
2246 static bool
2247 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2248 {
2249 expressionS exp;
2250
2251 if (! parse_immediate_expression (str, &exp, reg_type))
2252 return false;
2253
2254 if (exp.X_op != O_constant)
2255 {
2256 set_syntax_error (_("constant expression required"));
2257 return false;
2258 }
2259
2260 *val = exp.X_add_number;
2261 return true;
2262 }
2263
/* Extract the AArch64 8-bit floating-point immediate encoding from the
   IEEE754 single-precision word IMM: the sign bit and the top seven
   bits below the exponent MSB.  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31] -> b[7].  */
  return sign | low7;
}
2270
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.

   Such a value has the bit pattern

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

   where n, e and each x are either 0 or 1 independently, with
   E == ~e.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  uint32_t expected;

  /* The low 19 fraction bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  /* Bits 25-29 must each equal the complement of bit 30.  */
  expected = (imm & 0x40000000) ? 0x40000000 : 0x3e000000;
  return (imm & 0x7e000000) == expected;
}
2303
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.

   A convertible double has the bit pattern

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
     if Eeee_eeee != 1111_1111

   where n, e, s and S are either 0 or 1 independently and where ~ is the
   inverse of E.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  uint32_t hi = imm >> 32;
  uint32_t lo = (uint32_t) imm;
  uint32_t expected;

  /* The low 29 significand bits would be discarded; require them zero.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* The three bits below the exponent MSB must mirror its complement
     (i.e. the exponent fits in the float's 8-bit field).  */
  expected = (hi & 0x40000000) ? 0x40000000 : 0x38000000;
  if ((hi & 0x78000000) != expected)
    return false;

  /* An exponent of Eeee_eeee == 1111_1111 would collide with the float
     Inf/NaN encoding.  */
  if ((hi & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (hi & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	    | ((hi << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	    | (lo >> 29);		/* 3 S bits.  */
  return true;
}
2351
2352 /* Return true if we should treat OPERAND as a double-precision
2353 floating-point operand rather than a single-precision one. */
2354 static bool
2355 double_precision_operand_p (const aarch64_opnd_info *operand)
2356 {
2357 /* Check for unsuffixed SVE registers, which are allowed
2358 for LDR and STR but not in instructions that require an
2359 immediate. We get better error messages if we arbitrarily
2360 pick one size, parse the immediate normally, and then
2361 report the match failure in the normal way. */
2362 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2363 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2364 }
2365
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* The '#' prefix is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* A 64-bit pattern must convert to a float exactly, with no
	     loss of precision.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision pattern must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name here is only a recoverable error; the caller
	 may retry the operand as something else.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal notation: convert to IEEE754 single precision.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  The loop
	 assembles the littlenums most-significant first.  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2441
2442 /* Less-generic immediate-value read function with the possibility of loading
2443 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2444 instructions.
2445
2446 To prevent the expression parser from pushing a register name into the
2447 symbol table as an undefined symbol, a check is firstly done to find
2448 out whether STR is a register of type REG_TYPE followed by a comma or
2449 the end of line. Return FALSE if STR is such a register. */
2450
2451 static bool
2452 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2453 {
2454 char *ptr = *str;
2455
2456 if (reg_name_p (ptr, reg_type))
2457 {
2458 set_syntax_error (_("immediate operand required"));
2459 return false;
2460 }
2461
2462 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT,
2463 NORMAL_RESOLUTION);
2464
2465 if (inst.reloc.exp.X_op == O_constant)
2466 *imm = inst.reloc.exp.X_add_number;
2467
2468 *str = ptr;
2469
2470 return true;
2471 }
2472
2473 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2474 if NEED_LIBOPCODES is non-zero, the fixup will need
2475 assistance from the libopcodes. */
2476
2477 static inline void
2478 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2479 const aarch64_opnd_info *operand,
2480 int need_libopcodes_p)
2481 {
2482 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2483 reloc->opnd = operand->type;
2484 if (need_libopcodes_p)
2485 reloc->need_libopcodes_p = 1;
2486 };
2487
2488 /* Return TRUE if the instruction needs to be fixed up later internally by
2489 the GAS; otherwise return FALSE. */
2490
2491 static inline bool
2492 aarch64_gas_internal_fixup_p (void)
2493 {
2494 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2495 }
2496
2497 /* Assign the immediate value to the relevant field in *OPERAND if
2498 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2499 needs an internal fixup in a later stage.
2500 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2501 IMM.VALUE that may get assigned with the constant. */
2502 static inline void
2503 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2504 aarch64_opnd_info *operand,
2505 int addr_off_p,
2506 int need_libopcodes_p,
2507 int skip_p)
2508 {
2509 if (reloc->exp.X_op == O_constant)
2510 {
2511 if (addr_off_p)
2512 operand->addr.offset.imm = reloc->exp.X_add_number;
2513 else
2514 operand->imm.value = reloc->exp.X_add_number;
2515 reloc->type = BFD_RELOC_UNUSED;
2516 }
2517 else
2518 {
2519 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2520 /* Tell libopcodes to ignore this operand or not. This is helpful
2521 when one of the operands needs to be fixed up later but we need
2522 libopcodes to check the other operands. */
2523 operand->skip = skip_p;
2524 }
2525 }
2526
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;		/* Modifier name, without the colons.  */
  int pc_rel;			/* Non-zero if the relocation is PC-relative.  */
  /* BFD reloc to emit for each instruction context; 0 means the
     modifier is not supported in that context.  */
  bfd_reloc_code_real_type adr_type;		/* ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* ADRP.  */
  bfd_reloc_code_real_type movw_type;		/* MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;		/* Load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* Load literal.  */
};
2546
2547 static struct reloc_table_entry reloc_table[] =
2548 {
2549 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2550 {"lo12", 0,
2551 0, /* adr_type */
2552 0,
2553 0,
2554 BFD_RELOC_AARCH64_ADD_LO12,
2555 BFD_RELOC_AARCH64_LDST_LO12,
2556 0},
2557
2558 /* Higher 21 bits of pc-relative page offset: ADRP */
2559 {"pg_hi21", 1,
2560 0, /* adr_type */
2561 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2562 0,
2563 0,
2564 0,
2565 0},
2566
2567 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2568 {"pg_hi21_nc", 1,
2569 0, /* adr_type */
2570 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2571 0,
2572 0,
2573 0,
2574 0},
2575
2576 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2577 {"abs_g0", 0,
2578 0, /* adr_type */
2579 0,
2580 BFD_RELOC_AARCH64_MOVW_G0,
2581 0,
2582 0,
2583 0},
2584
2585 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2586 {"abs_g0_s", 0,
2587 0, /* adr_type */
2588 0,
2589 BFD_RELOC_AARCH64_MOVW_G0_S,
2590 0,
2591 0,
2592 0},
2593
2594 /* Less significant bits 0-15 of address/value: MOVK, no check */
2595 {"abs_g0_nc", 0,
2596 0, /* adr_type */
2597 0,
2598 BFD_RELOC_AARCH64_MOVW_G0_NC,
2599 0,
2600 0,
2601 0},
2602
2603 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2604 {"abs_g1", 0,
2605 0, /* adr_type */
2606 0,
2607 BFD_RELOC_AARCH64_MOVW_G1,
2608 0,
2609 0,
2610 0},
2611
2612 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2613 {"abs_g1_s", 0,
2614 0, /* adr_type */
2615 0,
2616 BFD_RELOC_AARCH64_MOVW_G1_S,
2617 0,
2618 0,
2619 0},
2620
2621 /* Less significant bits 16-31 of address/value: MOVK, no check */
2622 {"abs_g1_nc", 0,
2623 0, /* adr_type */
2624 0,
2625 BFD_RELOC_AARCH64_MOVW_G1_NC,
2626 0,
2627 0,
2628 0},
2629
2630 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2631 {"abs_g2", 0,
2632 0, /* adr_type */
2633 0,
2634 BFD_RELOC_AARCH64_MOVW_G2,
2635 0,
2636 0,
2637 0},
2638
2639 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2640 {"abs_g2_s", 0,
2641 0, /* adr_type */
2642 0,
2643 BFD_RELOC_AARCH64_MOVW_G2_S,
2644 0,
2645 0,
2646 0},
2647
2648 /* Less significant bits 32-47 of address/value: MOVK, no check */
2649 {"abs_g2_nc", 0,
2650 0, /* adr_type */
2651 0,
2652 BFD_RELOC_AARCH64_MOVW_G2_NC,
2653 0,
2654 0,
2655 0},
2656
2657 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2658 {"abs_g3", 0,
2659 0, /* adr_type */
2660 0,
2661 BFD_RELOC_AARCH64_MOVW_G3,
2662 0,
2663 0,
2664 0},
2665
2666 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2667 {"prel_g0", 1,
2668 0, /* adr_type */
2669 0,
2670 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2671 0,
2672 0,
2673 0},
2674
2675 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2676 {"prel_g0_nc", 1,
2677 0, /* adr_type */
2678 0,
2679 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2680 0,
2681 0,
2682 0},
2683
2684 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2685 {"prel_g1", 1,
2686 0, /* adr_type */
2687 0,
2688 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2689 0,
2690 0,
2691 0},
2692
2693 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2694 {"prel_g1_nc", 1,
2695 0, /* adr_type */
2696 0,
2697 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2698 0,
2699 0,
2700 0},
2701
2702 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2703 {"prel_g2", 1,
2704 0, /* adr_type */
2705 0,
2706 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2707 0,
2708 0,
2709 0},
2710
2711 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2712 {"prel_g2_nc", 1,
2713 0, /* adr_type */
2714 0,
2715 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2716 0,
2717 0,
2718 0},
2719
2720 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2721 {"prel_g3", 1,
2722 0, /* adr_type */
2723 0,
2724 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2725 0,
2726 0,
2727 0},
2728
2729 /* Get to the page containing GOT entry for a symbol. */
2730 {"got", 1,
2731 0, /* adr_type */
2732 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2733 0,
2734 0,
2735 0,
2736 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2737
2738 /* 12 bit offset into the page containing GOT entry for that symbol. */
2739 {"got_lo12", 0,
2740 0, /* adr_type */
2741 0,
2742 0,
2743 0,
2744 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2745 0},
2746
2747 /* 0-15 bits of address/value: MOVk, no check. */
2748 {"gotoff_g0_nc", 0,
2749 0, /* adr_type */
2750 0,
2751 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2752 0,
2753 0,
2754 0},
2755
2756 /* Most significant bits 16-31 of address/value: MOVZ. */
2757 {"gotoff_g1", 0,
2758 0, /* adr_type */
2759 0,
2760 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2761 0,
2762 0,
2763 0},
2764
2765 /* 15 bit offset into the page containing GOT entry for that symbol. */
2766 {"gotoff_lo15", 0,
2767 0, /* adr_type */
2768 0,
2769 0,
2770 0,
2771 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2772 0},
2773
2774 /* Get to the page containing GOT TLS entry for a symbol */
2775 {"gottprel_g0_nc", 0,
2776 0, /* adr_type */
2777 0,
2778 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2779 0,
2780 0,
2781 0},
2782
2783 /* Get to the page containing GOT TLS entry for a symbol */
2784 {"gottprel_g1", 0,
2785 0, /* adr_type */
2786 0,
2787 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2788 0,
2789 0,
2790 0},
2791
2792 /* Get to the page containing GOT TLS entry for a symbol */
2793 {"tlsgd", 0,
2794 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2795 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2796 0,
2797 0,
2798 0,
2799 0},
2800
2801 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2802 {"tlsgd_lo12", 0,
2803 0, /* adr_type */
2804 0,
2805 0,
2806 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2807 0,
2808 0},
2809
2810 /* Lower 16 bits address/value: MOVk. */
2811 {"tlsgd_g0_nc", 0,
2812 0, /* adr_type */
2813 0,
2814 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2815 0,
2816 0,
2817 0},
2818
2819 /* Most significant bits 16-31 of address/value: MOVZ. */
2820 {"tlsgd_g1", 0,
2821 0, /* adr_type */
2822 0,
2823 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2824 0,
2825 0,
2826 0},
2827
2828 /* Get to the page containing GOT TLS entry for a symbol */
2829 {"tlsdesc", 0,
2830 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2831 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2832 0,
2833 0,
2834 0,
2835 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2836
2837 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2838 {"tlsdesc_lo12", 0,
2839 0, /* adr_type */
2840 0,
2841 0,
2842 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2843 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2844 0},
2845
2846 /* Get to the page containing GOT TLS entry for a symbol.
2847 The same as GD, we allocate two consecutive GOT slots
2848 for module index and module offset, the only difference
2849 with GD is the module offset should be initialized to
2850 zero without any outstanding runtime relocation. */
2851 {"tlsldm", 0,
2852 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2853 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2854 0,
2855 0,
2856 0,
2857 0},
2858
2859 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2860 {"tlsldm_lo12_nc", 0,
2861 0, /* adr_type */
2862 0,
2863 0,
2864 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2865 0,
2866 0},
2867
2868 /* 12 bit offset into the module TLS base address. */
2869 {"dtprel_lo12", 0,
2870 0, /* adr_type */
2871 0,
2872 0,
2873 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2874 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2875 0},
2876
2877 /* Same as dtprel_lo12, no overflow check. */
2878 {"dtprel_lo12_nc", 0,
2879 0, /* adr_type */
2880 0,
2881 0,
2882 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2883 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2884 0},
2885
2886 /* bits[23:12] of offset to the module TLS base address. */
2887 {"dtprel_hi12", 0,
2888 0, /* adr_type */
2889 0,
2890 0,
2891 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2892 0,
2893 0},
2894
2895 /* bits[15:0] of offset to the module TLS base address. */
2896 {"dtprel_g0", 0,
2897 0, /* adr_type */
2898 0,
2899 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2900 0,
2901 0,
2902 0},
2903
2904 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2905 {"dtprel_g0_nc", 0,
2906 0, /* adr_type */
2907 0,
2908 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2909 0,
2910 0,
2911 0},
2912
2913 /* bits[31:16] of offset to the module TLS base address. */
2914 {"dtprel_g1", 0,
2915 0, /* adr_type */
2916 0,
2917 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2918 0,
2919 0,
2920 0},
2921
2922 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2923 {"dtprel_g1_nc", 0,
2924 0, /* adr_type */
2925 0,
2926 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2927 0,
2928 0,
2929 0},
2930
2931 /* bits[47:32] of offset to the module TLS base address. */
2932 {"dtprel_g2", 0,
2933 0, /* adr_type */
2934 0,
2935 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2936 0,
2937 0,
2938 0},
2939
2940 /* Lower 16 bit offset into GOT entry for a symbol */
2941 {"tlsdesc_off_g0_nc", 0,
2942 0, /* adr_type */
2943 0,
2944 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2945 0,
2946 0,
2947 0},
2948
2949 /* Higher 16 bit offset into GOT entry for a symbol */
2950 {"tlsdesc_off_g1", 0,
2951 0, /* adr_type */
2952 0,
2953 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2954 0,
2955 0,
2956 0},
2957
2958 /* Get to the page containing GOT TLS entry for a symbol */
2959 {"gottprel", 0,
2960 0, /* adr_type */
2961 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2962 0,
2963 0,
2964 0,
2965 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2966
2967 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2968 {"gottprel_lo12", 0,
2969 0, /* adr_type */
2970 0,
2971 0,
2972 0,
2973 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2974 0},
2975
2976 /* Get tp offset for a symbol. */
2977 {"tprel", 0,
2978 0, /* adr_type */
2979 0,
2980 0,
2981 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2982 0,
2983 0},
2984
2985 /* Get tp offset for a symbol. */
2986 {"tprel_lo12", 0,
2987 0, /* adr_type */
2988 0,
2989 0,
2990 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2991 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2992 0},
2993
2994 /* Get tp offset for a symbol. */
2995 {"tprel_hi12", 0,
2996 0, /* adr_type */
2997 0,
2998 0,
2999 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
3000 0,
3001 0},
3002
3003 /* Get tp offset for a symbol. */
3004 {"tprel_lo12_nc", 0,
3005 0, /* adr_type */
3006 0,
3007 0,
3008 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
3009 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
3010 0},
3011
3012 /* Most significant bits 32-47 of address/value: MOVZ. */
3013 {"tprel_g2", 0,
3014 0, /* adr_type */
3015 0,
3016 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
3017 0,
3018 0,
3019 0},
3020
3021 /* Most significant bits 16-31 of address/value: MOVZ. */
3022 {"tprel_g1", 0,
3023 0, /* adr_type */
3024 0,
3025 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3026 0,
3027 0,
3028 0},
3029
3030 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
3031 {"tprel_g1_nc", 0,
3032 0, /* adr_type */
3033 0,
3034 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3035 0,
3036 0,
3037 0},
3038
3039 /* Most significant bits 0-15 of address/value: MOVZ. */
3040 {"tprel_g0", 0,
3041 0, /* adr_type */
3042 0,
3043 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3044 0,
3045 0,
3046 0},
3047
3048 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3049 {"tprel_g0_nc", 0,
3050 0, /* adr_type */
3051 0,
3052 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3053 0,
3054 0,
3055 0},
3056
3057 /* 15bit offset from got entry to base address of GOT table. */
3058 {"gotpage_lo15", 0,
3059 0,
3060 0,
3061 0,
3062 0,
3063 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3064 0},
3065
3066 /* 14bit offset from got entry to base address of GOT table. */
3067 {"gotpage_lo14", 0,
3068 0,
3069 0,
3070 0,
3071 0,
3072 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3073 0},
3074 };
3075
3076 /* Given the address of a pointer pointing to the textual name of a
3077 relocation as may appear in assembler source, attempt to find its
3078 details in reloc_table. The pointer will be updated to the character
3079 after the trailing colon. On failure, NULL will be returned;
3080 otherwise return the reloc_table_entry. */
3081
3082 static struct reloc_table_entry *
3083 find_reloc_table_entry (char **str)
3084 {
3085 unsigned int i;
3086 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3087 {
3088 int length = strlen (reloc_table[i].name);
3089
3090 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3091 && (*str)[length] == ':')
3092 {
3093 *str += (length + 1);
3094 return &reloc_table[i];
3095 }
3096 }
3097
3098 return NULL;
3099 }
3100
/* Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK.  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    /* GOT-, page- and TLS-related relocations: their final values are
       only known at link time, so they must reach the linker.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* Neither mandatory nor forbidden; the caller decides.  */
      return -1;
    }
}
3198
3199 int
3200 aarch64_force_relocation (struct fix *fixp)
3201 {
3202 int res = aarch64_force_reloc (fixp->fx_r_type);
3203
3204 if (res == -1)
3205 return generic_force_reloc (fixp);
3206 return res;
3207 }
3208
/* Mode argument to parse_shift and parser_shifter_operand.  Selects
   which shift/extend operators are acceptable in the current context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3223
3224 /* Parse a <shift> operator on an AArch64 data processing instruction.
3225 Return TRUE on success; otherwise return FALSE. */
3226 static bool
3227 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3228 {
3229 const struct aarch64_name_value_pair *shift_op;
3230 enum aarch64_modifier_kind kind;
3231 expressionS exp;
3232 int exp_has_prefix;
3233 char *s = *str;
3234 char *p = s;
3235
3236 for (p = *str; ISALPHA (*p); p++)
3237 ;
3238
3239 if (p == *str)
3240 {
3241 set_syntax_error (_("shift expression expected"));
3242 return false;
3243 }
3244
3245 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3246
3247 if (shift_op == NULL)
3248 {
3249 set_syntax_error (_("shift operator expected"));
3250 return false;
3251 }
3252
3253 kind = aarch64_get_operand_modifier (shift_op);
3254
3255 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3256 {
3257 set_syntax_error (_("invalid use of 'MSL'"));
3258 return false;
3259 }
3260
3261 if (kind == AARCH64_MOD_MUL
3262 && mode != SHIFTED_MUL
3263 && mode != SHIFTED_MUL_VL)
3264 {
3265 set_syntax_error (_("invalid use of 'MUL'"));
3266 return false;
3267 }
3268
3269 switch (mode)
3270 {
3271 case SHIFTED_LOGIC_IMM:
3272 if (aarch64_extend_operator_p (kind))
3273 {
3274 set_syntax_error (_("extending shift is not permitted"));
3275 return false;
3276 }
3277 break;
3278
3279 case SHIFTED_ARITH_IMM:
3280 if (kind == AARCH64_MOD_ROR)
3281 {
3282 set_syntax_error (_("'ROR' shift is not permitted"));
3283 return false;
3284 }
3285 break;
3286
3287 case SHIFTED_LSL:
3288 if (kind != AARCH64_MOD_LSL)
3289 {
3290 set_syntax_error (_("only 'LSL' shift is permitted"));
3291 return false;
3292 }
3293 break;
3294
3295 case SHIFTED_MUL:
3296 if (kind != AARCH64_MOD_MUL)
3297 {
3298 set_syntax_error (_("only 'MUL' is permitted"));
3299 return false;
3300 }
3301 break;
3302
3303 case SHIFTED_MUL_VL:
3304 /* "MUL VL" consists of two separate tokens. Require the first
3305 token to be "MUL" and look for a following "VL". */
3306 if (kind == AARCH64_MOD_MUL)
3307 {
3308 skip_whitespace (p);
3309 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3310 {
3311 p += 2;
3312 kind = AARCH64_MOD_MUL_VL;
3313 break;
3314 }
3315 }
3316 set_syntax_error (_("only 'MUL VL' is permitted"));
3317 return false;
3318
3319 case SHIFTED_REG_OFFSET:
3320 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3321 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3322 {
3323 set_fatal_syntax_error
3324 (_("invalid shift for the register offset addressing mode"));
3325 return false;
3326 }
3327 break;
3328
3329 case SHIFTED_LSL_MSL:
3330 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3331 {
3332 set_syntax_error (_("invalid shift operator"));
3333 return false;
3334 }
3335 break;
3336
3337 default:
3338 abort ();
3339 }
3340
3341 /* Whitespace can appear here if the next thing is a bare digit. */
3342 skip_whitespace (p);
3343
3344 /* Parse shift amount. */
3345 exp_has_prefix = 0;
3346 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3347 exp.X_op = O_absent;
3348 else
3349 {
3350 if (is_immediate_prefix (*p))
3351 {
3352 p++;
3353 exp_has_prefix = 1;
3354 }
3355 (void) aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT,
3356 NORMAL_RESOLUTION);
3357 }
3358 if (kind == AARCH64_MOD_MUL_VL)
3359 /* For consistency, give MUL VL the same shift amount as an implicit
3360 MUL #1. */
3361 operand->shifter.amount = 1;
3362 else if (exp.X_op == O_absent)
3363 {
3364 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3365 {
3366 set_syntax_error (_("missing shift amount"));
3367 return false;
3368 }
3369 operand->shifter.amount = 0;
3370 }
3371 else if (exp.X_op != O_constant)
3372 {
3373 set_syntax_error (_("constant shift amount required"));
3374 return false;
3375 }
3376 /* For parsing purposes, MUL #n has no inherent range. The range
3377 depends on the operand and will be checked by operand-specific
3378 routines. */
3379 else if (kind != AARCH64_MOD_MUL
3380 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3381 {
3382 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3383 return false;
3384 }
3385 else
3386 {
3387 operand->shifter.amount = exp.X_add_number;
3388 operand->shifter.amount_present = 1;
3389 }
3390
3391 operand->shifter.operator_present = 1;
3392 operand->shifter.kind = kind;
3393
3394 *str = p;
3395 return true;
3396 }
3397
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the arithmetic- and logical-immediate modes take this form.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression; it is stored in inst.reloc.exp for
     later fixup rather than evaluated here.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
				REJECT_ABSENT, NORMAL_RESOLUTION))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Not accept any shifter for logical immediate values.  */
  /* NOTE(review): if the comma is consumed but parse_shift then fails,
     this condition is false and the function still returns TRUE with p
     past the comma — presumably the trailing junk is diagnosed by a
     later stage; confirm before restructuring.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3439
3440 /* Parse a <shifter_operand> for a data processing instruction:
3441
3442 <Rm>
3443 <Rm>, <shift>
3444 #<immediate>
3445 #<immediate>, LSL #imm
3446
3447 where <shift> is handled by parse_shift above, and the last two
3448 cases are handled by the function above.
3449
3450 Validation of immediate operands is deferred to md_apply_fix.
3451
3452 Return TRUE on success; otherwise return FALSE. */
3453
3454 static bool
3455 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3456 enum parse_shift_mode mode)
3457 {
3458 const reg_entry *reg;
3459 aarch64_opnd_qualifier_t qualifier;
3460 enum aarch64_operand_class opd_class
3461 = aarch64_get_operand_class (operand->type);
3462
3463 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3464 if (reg)
3465 {
3466 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3467 {
3468 set_syntax_error (_("unexpected register in the immediate operand"));
3469 return false;
3470 }
3471
3472 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3473 {
3474 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3475 return false;
3476 }
3477
3478 operand->reg.regno = reg->number;
3479 operand->qualifier = qualifier;
3480
3481 /* Accept optional shift operation on register. */
3482 if (! skip_past_comma (str))
3483 return true;
3484
3485 if (! parse_shift (str, operand, mode))
3486 return false;
3487
3488 return true;
3489 }
3490 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3491 {
3492 set_syntax_error
3493 (_("integer register expected in the extended/shifted operand "
3494 "register"));
3495 return false;
3496 }
3497
3498 /* We have a shifted immediate variable. */
3499 return parse_shifter_operand_imm (str, operand, mode);
3500 }
3501
3502 /* Return TRUE on success; return FALSE otherwise. */
3503
static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip the '#' (if present) and the ':'.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
				    REJECT_ABSENT,
				    aarch64_force_reloc (entry->add_type) == 1))
	return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.
	 Note that P still points at the start of the expression, saved
	 above, so the expression is re-parsed by the immediate parser.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  /* Not a relocation modifier: hand off to the generic parser.  */
  return parse_shifter_operand (str, operand, mode);
}
3564
3565 /* Parse all forms of an address expression. Information is written
3566 to *OPERAND and/or inst.reloc.
3567
3568 The A64 instruction set has the following addressing modes:
3569
3570 Offset
3571 [base] // in SIMD ld/st structure
3572 [base{,#0}] // in ld/st exclusive
3573 [base{,#imm}]
3574 [base,Xm{,LSL #imm}]
3575 [base,Xm,SXTX {#imm}]
3576 [base,Wm,(S|U)XTW {#imm}]
3577 Pre-indexed
3578 [base]! // in ldraa/ldrab exclusive
3579 [base,#imm]!
3580 Post-indexed
3581 [base],#imm
3582 [base],Xm // in SIMD ld/st structure
3583 PC-relative (literal)
3584 label
3585 SVE:
3586 [base,#imm,MUL VL]
3587 [base,Zm.D{,LSL #imm}]
3588 [base,Zm.S,(S|U)XTW {#imm}]
3589 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3590 [Zn.S,#imm]
3591 [Zn.D,#imm]
3592 [Zn.S{, Xm}]
3593 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3594 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3595 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3596
3597 (As a convenience, the notation "=immediate" is permitted in conjunction
3598 with the pc-relative literal load instructions to automatically place an
3599 immediate value or symbolic address in a nearby literal pool and generate
3600 a hidden label which references it.)
3601
3602 Upon a successful parsing, the address structure in *OPERAND will be
3603 filled in the following way:
3604
3605 .base_regno = <base>
3606 .offset.is_reg // 1 if the offset is a register
3607 .offset.imm = <imm>
3608 .offset.regno = <Rm>
3609
3610 For different addressing modes defined in the A64 ISA:
3611
3612 Offset
3613 .pcrel=0; .preind=1; .postind=0; .writeback=0
3614 Pre-indexed
3615 .pcrel=0; .preind=1; .postind=0; .writeback=1
3616 Post-indexed
3617 .pcrel=0; .preind=0; .postind=1; .writeback=1
3618 PC-relative (literal)
3619 .pcrel=1; .preind=1; .postind=0; .writeback=0
3620
3621 The shift/extension information, if any, will be stored in .shifter.
3622 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3623 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3624 corresponding register.
3625
3626 BASE_TYPE says which types of base register should be accepted and
3627 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3628 is the type of shifter that is allowed for immediate offsets,
3629 or SHIFTED_NONE if none.
3630
3631 In all other respects, it is the caller's responsibility to check
3632 for addressing modes not supported by the instruction, and to set
3633 inst.reloc.type. */
3634
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  /* Immediate offsets and literal addresses are parsed into the
     instruction's relocation expression.  */
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  /* No '[': the operand is a literal address ("=imm" or a label),
     possibly prefixed by a relocation modifier.  */
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return false;
	    }

	  /* ADR takes the adr_type variant; everything else here is a
	     load-literal.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return false;
	    }

	  /* #:<reloc_op>:  */
	  if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					aarch64_force_reloc (entry->add_type) == 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return false;
	    }
	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{
	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				       NORMAL_RESOLUTION))
	    {
	      set_syntax_error (_("invalid address"));
	      return false;
	    }
	}

      *str = p;
      return true;
    }

  /* [ */

  /* Parse the base register and its qualifier.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      /* A register offset, or an immediate/relocation offset.  */
      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return false;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return false;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return false;
		}
	      /* The SVE2 vector-plus-scalar form ([Zn.S, Xm]) is the one
		 allowed exception to the size-match rule.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return false;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return false;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return false;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
					    aarch64_force_reloc (entry->add_type) == 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return false;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					    NORMAL_RESOLUTION))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return false;
		}
	      /* [Xn,<expr>  */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter>  */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return false;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return false;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return false;
	}

      /* Post-index offset: a 64-bit register or an expression.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return false;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
					 NORMAL_RESOLUTION))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return false;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return false;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is shorthand for [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return true;
}
3938
3939 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3940 on success. */
3941 static bool
3942 parse_address (char **str, aarch64_opnd_info *operand)
3943 {
3944 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3945 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3946 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3947 }
3948
3949 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3950 The arguments have the same meaning as for parse_address_main.
3951 Return TRUE on success. */
static bool
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  /* Same as parse_address, but additionally allowing SVE vector
     registers as base/offset and the MUL VL immediate shifter.  */
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
3961
3962 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3963 Return TRUE on success; otherwise return FALSE. */
static bool
parse_half (char **str, int *internal_fixup_p)
{
  char *p = *str;

  /* The immediate's '#' prefix is optional.  */
  skip_past_char (&p, '#');

  gas_assert (internal_fixup_p);
  *internal_fixup_p = 0;

  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;

      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->movw_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      /* Record the MOVW variant of the relocation.  */
      inst.reloc.type = entry->movw_type;
    }
  else
    /* No relocation modifier: the fixup is handled internally.  */
    *internal_fixup_p = 1;

  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				aarch64_force_reloc (inst.reloc.type) == 1))
    return false;

  *str = p;
  return true;
}
4006
4007 /* Parse an operand for an ADRP instruction:
4008 ADRP <Xd>, <label>
4009 Return TRUE on success; otherwise return FALSE. */
4010
static bool
parse_adrp (char **str)
{
  char *p;

  p = *str;
  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return false;
	}

      if (entry->adrp_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return false;
	}

      inst.reloc.type = entry->adrp_type;
    }
  else
    /* No explicit modifier: use the default page-relative relocation.  */
    inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;

  /* ADRP is always PC-relative.  */
  inst.reloc.pc_rel = 1;
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
				aarch64_force_reloc (inst.reloc.type) == 1))
    return false;
  *str = p;
  return true;
}
4048
4049 /* Miscellaneous. */
4050
4051 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4052 of SIZE tokens in which index I gives the token for field value I,
4053 or is null if field value I is invalid. REG_TYPE says which register
4054 names should be treated as registers rather than as symbolic immediates.
4055
4056 Return true on success, moving *STR past the operand and storing the
4057 field value in *VAL. */
4058
4059 static int
4060 parse_enum_string (char **str, int64_t *val, const char *const *array,
4061 size_t size, aarch64_reg_type reg_type)
4062 {
4063 expressionS exp;
4064 char *p, *q;
4065 size_t i;
4066
4067 /* Match C-like tokens. */
4068 p = q = *str;
4069 while (ISALNUM (*q))
4070 q++;
4071
4072 for (i = 0; i < size; ++i)
4073 if (array[i]
4074 && strncasecmp (array[i], p, q - p) == 0
4075 && array[i][q - p] == 0)
4076 {
4077 *val = i;
4078 *str = q;
4079 return true;
4080 }
4081
4082 if (!parse_immediate_expression (&p, &exp, reg_type))
4083 return false;
4084
4085 if (exp.X_op == O_constant
4086 && (uint64_t) exp.X_add_number < size)
4087 {
4088 *val = exp.X_add_number;
4089 *str = p;
4090 return true;
4091 }
4092
4093 /* Use the default error for this operand. */
4094 return false;
4095 }
4096
4097 /* Parse an option for a preload instruction. Returns the encoding for the
4098 option, or PARSE_FAIL. */
4099
4100 static int
4101 parse_pldop (char **str)
4102 {
4103 char *p, *q;
4104 const struct aarch64_name_value_pair *o;
4105
4106 p = q = *str;
4107 while (ISALNUM (*q))
4108 q++;
4109
4110 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4111 if (!o)
4112 return PARSE_FAIL;
4113
4114 *str = q;
4115 return o->value;
4116 }
4117
4118 /* Parse an option for a barrier instruction. Returns the encoding for the
4119 option, or PARSE_FAIL. */
4120
4121 static int
4122 parse_barrier (char **str)
4123 {
4124 char *p, *q;
4125 const struct aarch64_name_value_pair *o;
4126
4127 p = q = *str;
4128 while (ISALPHA (*q))
4129 q++;
4130
4131 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4132 if (!o)
4133 return PARSE_FAIL;
4134
4135 *str = q;
4136 return o->value;
4137 }
4138
4139 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4140 return 0 if successful. Otherwise return PARSE_FAIL. */
4141
4142 static int
4143 parse_barrier_psb (char **str,
4144 const struct aarch64_name_value_pair ** hint_opt)
4145 {
4146 char *p, *q;
4147 const struct aarch64_name_value_pair *o;
4148
4149 p = q = *str;
4150 while (ISALPHA (*q))
4151 q++;
4152
4153 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4154 if (!o)
4155 {
4156 set_fatal_syntax_error
4157 ( _("unknown or missing option to PSB/TSB"));
4158 return PARSE_FAIL;
4159 }
4160
4161 if (o->value != 0x11)
4162 {
4163 /* PSB only accepts option name 'CSYNC'. */
4164 set_syntax_error
4165 (_("the specified option is not accepted for PSB/TSB"));
4166 return PARSE_FAIL;
4167 }
4168
4169 *str = q;
4170 *hint_opt = o;
4171 return 0;
4172 }
4173
4174 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4175 return 0 if successful. Otherwise return PARSE_FAIL. */
4176
4177 static int
4178 parse_bti_operand (char **str,
4179 const struct aarch64_name_value_pair ** hint_opt)
4180 {
4181 char *p, *q;
4182 const struct aarch64_name_value_pair *o;
4183
4184 p = q = *str;
4185 while (ISALPHA (*q))
4186 q++;
4187
4188 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4189 if (!o)
4190 {
4191 set_fatal_syntax_error
4192 ( _("unknown option to BTI"));
4193 return PARSE_FAIL;
4194 }
4195
4196 switch (o->value)
4197 {
4198 /* Valid BTI operands. */
4199 case HINT_OPD_C:
4200 case HINT_OPD_J:
4201 case HINT_OPD_JC:
4202 break;
4203
4204 default:
4205 set_syntax_error
4206 (_("unknown option to BTI"));
4207 return PARSE_FAIL;
4208 }
4209
4210 *str = q;
4211 *hint_opt = o;
4212 return 0;
4213 }
4214
4215 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4216 Returns the encoding for the option, or PARSE_FAIL.
4217
4218 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4219 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4220
4221 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4222 field, otherwise as a system register.
4223 */
4224
static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased, length-bounded version of the name into BUF;
     Q is left pointing at the first character not part of the name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields: op0 at bit 14, op1 at 11, Cn at 7, Cm at 3,
	     op2 at 0.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known register: diagnose uses the selected processor does not
	 support, but still return the encoding.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
					       o->value, o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4288
4289 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4290 for the option, or NULL. */
4291
4292 static const aarch64_sys_ins_reg *
4293 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4294 {
4295 char *p, *q;
4296 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4297 const aarch64_sys_ins_reg *o;
4298
4299 p = buf;
4300 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4301 if (p < buf + (sizeof (buf) - 1))
4302 *p++ = TOLOWER (*q);
4303 *p = '\0';
4304
4305 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4306 valid system register. This is enforced by construction of the hash
4307 table. */
4308 if (p - buf != q - *str)
4309 return NULL;
4310
4311 o = str_hash_find (sys_ins_regs, buf);
4312 if (!o)
4313 return NULL;
4314
4315 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4316 o->name, o->value, o->flags, 0))
4317 as_bad (_("selected processor does not support system register "
4318 "name '%s'"), buf);
4319 if (aarch64_sys_reg_deprecated_p (o->flags))
4320 as_warn (_("system register name '%s' is deprecated and may be "
4321 "removed in a future release"), buf);
4322
4323 *str = q;
4324 return o;
4325 }
4326 \f
/* Convenience macros used when parsing operands.  Each one expands to a
   statement and jumps to a local "failure" label on error; the names
   they reference (str, val, reg, rtype, qualifier, info, imm_reg_type)
   must all be in scope at the point of use.  */

#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4378 \f
/* Encode the 12-bit immediate field of an add/sub (immediate)
   instruction; the field starts at bit 10.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  uint32_t field = imm;
  return field << 10;
}
4385
/* Encode the shift-amount field of an add/sub (immediate) instruction;
   the field starts at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  uint32_t field = cnt;
  return field << 22;
}
4392
4393
/* Encode the immediate field of an ADR instruction, which is split
   between two fields of the instruction word.  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t lo = (imm & 0x3) << 29;		/* [1:0] -> [30:29] */
  uint32_t hi = (imm & (0x7ffff << 2)) << 3;	/* [20:2] -> [23:5] */
  return lo | hi;
}
4401
/* Encode the immediate field of a move-wide (MOVZ/MOVN/MOVK)
   instruction; the field starts at bit 5.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  uint32_t field = imm;
  return field << 5;
}
4408
/* Encode the 26-bit offset of an unconditional branch; the offset
   occupies the low 26 bits of the instruction word.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t mask = (1u << 26) - 1;
  return ofs & mask;
}
4415
/* Encode the 19-bit offset of a conditional branch or a
   compare-and-branch; the offset field starts at bit 5.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t mask = (1u << 19) - 1;
  return (ofs & mask) << 5;
}
4422
/* Encode the 19-bit offset of a load-literal; the offset field starts
   at bit 5.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t mask = (1u << 19) - 1;
  return (ofs & mask) << 5;
}
4429
/* Encode the 14-bit offset of a test-and-branch; the offset field
   starts at bit 5.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t mask = (1u << 14) - 1;
  return (ofs & mask) << 5;
}
4436
/* Encode the 16-bit immediate field of SVC/HVC/SMC; the field starts
   at bit 5.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  uint32_t field = imm;
  return field << 5;
}
4443
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling the
   opcode bit that distinguishes them (bit 30).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t bit30 = 1u << 30;
  return opcode ^ bit30;
}
4450
/* Force bit 30 on, turning a MOVN-form opcode into MOVZ.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t bit30 = 1u << 30;
  return opcode | bit30;
}
4456
/* Force bit 30 off, turning a MOVZ-form opcode into MOVN.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t bit30 = 1u << 30;
  return opcode & ~bit30;
}
4462
4463 /* Overall per-instruction processing. */
4464
4465 /* We need to be able to fix up arbitrary expressions in some statements.
4466 This is so that we can handle symbols that are an arbitrary distance from
4467 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4468 which returns part of an address in a form which will be valid for
4469 a data instruction. We do this by pushing the expression into a symbol
4470 in the expr_section, and creating a fix for that. */
4471
4472 static fixS *
4473 fix_new_aarch64 (fragS * frag,
4474 int where,
4475 short int size,
4476 expressionS * exp,
4477 int pc_rel,
4478 int reloc)
4479 {
4480 fixS *new_fix;
4481
4482 switch (exp->X_op)
4483 {
4484 case O_constant:
4485 case O_symbol:
4486 case O_add:
4487 case O_subtract:
4488 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4489 break;
4490
4491 default:
4492 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4493 pc_rel, reloc);
4494 break;
4495 }
4496 return new_fix;
4497 }
4498 \f
4499 /* Diagnostics on operands errors. */
4500
/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.
   Non-zero => emit detailed operand-error diagnostics.  */
static int verbose_error_p = 1;
4504
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  */
/* NOTE(review): the entries appear to mirror the values of
   enum aarch64_operand_error_kind and must stay in the same order --
   verify against the enum when adding a new kind.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4520
4521 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4522
4523 When multiple errors of different kinds are found in the same assembly
4524 line, only the error of the highest severity will be picked up for
4525 issuing the diagnostics. */
4526
static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enumerators being declared in
     ascending order of severity; the asserts document and enforce that
     assumption.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4541
4542 /* Helper routine to get the mnemonic name from the assembly instruction
4543 line; should only be called for the diagnosis purpose, as there is
4544 string copy operation involved, which may affect the runtime
4545 performance if used in elsewhere. */
4546
static const char*
get_mnemonic_name (const char *str)
{
  /* Returns a pointer to this static buffer, so the result is only
     valid until the next call.  */
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4570
4571 static void
4572 reset_aarch64_instruction (aarch64_instruction *instruction)
4573 {
4574 memset (instruction, '\0', sizeof (aarch64_instruction));
4575 instruction->reloc.type = BFD_RELOC_UNUSED;
4576 }
4577
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode template the error is for.  */
  aarch64_operand_error detail;		/* The recorded error details.  */
  struct operand_error_record *next;	/* Next record in the linked list.  */
};

typedef struct operand_error_record operand_error_record;

struct operand_errors
{
  operand_error_record *head;	/* First record, or NULL when empty.  */
  operand_error_record *tail;	/* Last record, or NULL when empty.  */
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.
   NOTE(review): not declared static although it looks file-local --
   confirm no other translation unit references it before narrowing.  */
operand_errors operand_error_report;

/* Free record nodes; init_operand_error_report and
   remove_operand_error_record recycle used records here so that
   add_operand_error_record can reuse them instead of allocating.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4609
4610 /* Initialize the data structure that stores the operand mismatch
4611 information on assembling one line of the assembly code. */
4612 static void
4613 init_operand_error_report (void)
4614 {
4615 if (operand_error_report.head != NULL)
4616 {
4617 gas_assert (operand_error_report.tail != NULL);
4618 operand_error_report.tail->next = free_opnd_error_record_nodes;
4619 free_opnd_error_record_nodes = operand_error_report.head;
4620 operand_error_report.head = NULL;
4621 operand_error_report.tail = NULL;
4622 return;
4623 }
4624 gas_assert (operand_error_report.tail == NULL);
4625 }
4626
4627 /* Return TRUE if some operand error has been recorded during the
4628 parsing of the current assembly line using the opcode *OPCODE;
4629 otherwise return FALSE. */
4630 static inline bool
4631 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4632 {
4633 operand_error_record *record = operand_error_report.head;
4634 return record && record->opcode == opcode;
4635 }
4636
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Pop a recycled node off the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  /* Here RECORD is still the list head, which the check above has
     established to be the existing record for OPCODE.  */
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Fill in (or overwrite) the details for this opcode's record.  */
  record->detail = new_record->detail;
}
4688
4689 static inline void
4690 record_operand_error_info (const aarch64_opcode *opcode,
4691 aarch64_operand_error *error_info)
4692 {
4693 operand_error_record record;
4694 record.opcode = opcode;
4695 record.detail = *error_info;
4696 add_operand_error_record (&record);
4697 }
4698
4699 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4700 error message *ERROR, for operand IDX (count from 0). */
4701
4702 static void
4703 record_operand_error (const aarch64_opcode *opcode, int idx,
4704 enum aarch64_operand_error_kind kind,
4705 const char* error)
4706 {
4707 aarch64_operand_error info;
4708 memset(&info, 0, sizeof (info));
4709 info.index = idx;
4710 info.kind = kind;
4711 info.error = error;
4712 info.non_fatal = false;
4713 record_operand_error_info (opcode, &info);
4714 }
4715
4716 static void
4717 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4718 enum aarch64_operand_error_kind kind,
4719 const char* error, const int *extra_data)
4720 {
4721 aarch64_operand_error info;
4722 info.index = idx;
4723 info.kind = kind;
4724 info.error = error;
4725 info.data[0] = extra_data[0];
4726 info.data[1] = extra_data[1];
4727 info.data[2] = extra_data[2];
4728 info.non_fatal = false;
4729 record_operand_error_info (opcode, &info);
4730 }
4731
4732 static void
4733 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4734 const char* error, int lower_bound,
4735 int upper_bound)
4736 {
4737 int data[3] = {lower_bound, upper_bound, 0};
4738 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4739 error, data);
4740 }
4741
4742 /* Remove the operand error record for *OPCODE. */
4743 static void ATTRIBUTE_UNUSED
4744 remove_operand_error_record (const aarch64_opcode *opcode)
4745 {
4746 if (opcode_has_operand_error_p (opcode))
4747 {
4748 operand_error_record* record = operand_error_report.head;
4749 gas_assert (record != NULL && operand_error_report.tail != NULL);
4750 operand_error_report.head = record->next;
4751 record->next = free_opnd_error_record_nodes;
4752 free_opnd_error_record_nodes = record;
4753 if (operand_error_report.head == NULL)
4754 {
4755 gas_assert (operand_error_report.tail == record);
4756 operand_error_report.tail = NULL;
4757 }
4758 }
4759 }
4760
4761 /* Given the instruction in *INSTR, return the index of the best matched
4762 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4763
4764 Return -1 if there is no qualifier sequence; return the first match
4765 if there is multiple matches found. */
4766
4767 static int
4768 find_best_match (const aarch64_inst *instr,
4769 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4770 {
4771 int i, num_opnds, max_num_matched, idx;
4772
4773 num_opnds = aarch64_num_of_operands (instr->opcode);
4774 if (num_opnds == 0)
4775 {
4776 DEBUG_TRACE ("no operand");
4777 return -1;
4778 }
4779
4780 max_num_matched = 0;
4781 idx = 0;
4782
4783 /* For each pattern. */
4784 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4785 {
4786 int j, num_matched;
4787 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4788
4789 /* Most opcodes has much fewer patterns in the list. */
4790 if (empty_qualifier_sequence_p (qualifiers))
4791 {
4792 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4793 break;
4794 }
4795
4796 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4797 if (*qualifiers == instr->operands[j].qualifier)
4798 ++num_matched;
4799
4800 if (num_matched > max_num_matched)
4801 {
4802 max_num_matched = num_matched;
4803 idx = i;
4804 }
4805 }
4806
4807 DEBUG_TRACE ("return with %d", idx);
4808 return idx;
4809 }
4810
4811 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4812 corresponding operands in *INSTR. */
4813
4814 static inline void
4815 assign_qualifier_sequence (aarch64_inst *instr,
4816 const aarch64_opnd_qualifier_t *qualifiers)
4817 {
4818 int i = 0;
4819 int num_opnds = aarch64_num_of_operands (instr->opcode);
4820 gas_assert (num_opnds);
4821 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4822 instr->operands[i].qualifier = *qualifiers;
4823 }
4824
4825 /* Print operands for the diagnosis purpose. */
4826
4827 static void
4828 print_operands (char *buf, const aarch64_opcode *opcode,
4829 const aarch64_opnd_info *opnds)
4830 {
4831 int i;
4832
4833 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4834 {
4835 char str[128];
4836
4837 /* We regard the opcode operand info more, however we also look into
4838 the inst->operands to support the disassembling of the optional
4839 operand.
4840 The two operand code should be the same in all cases, apart from
4841 when the operand can be optional. */
4842 if (opcode->operands[i] == AARCH64_OPND_NIL
4843 || opnds[i].type == AARCH64_OPND_NIL)
4844 break;
4845
4846 /* Generate the operand string in STR. */
4847 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4848 NULL, cpu_variant);
4849
4850 /* Delimiter. */
4851 if (str[0] != '\0')
4852 strcat (buf, i == 0 ? " " : ", ");
4853
4854 /* Append the operand string. */
4855 strcat (buf, str);
4856 }
4857 }
4858
/* Send to stderr a string as information.  The message is prefixed with
   the current file name (and line number when known), mirroring the
   format of ordinary assembler diagnostics.  */

static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file = as_where (&line);
  va_list args;

  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4882
/* Output one operand error record.  STR is the text of the assembly
   line, which is echoed at the end of each diagnostic.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Non-fatal errors are issued as warnings, everything else as hard
     errors.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parsing is expected to succeed: this record only
	     exists because the qualifier variant did not match.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail (the variant is invalid).  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the inclusive lower/upper bounds.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected number of registers.  */
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] holds the required alignment.  */
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5059
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.

   STR is the text of the assembly line being diagnosed.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5157 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *where = (unsigned char *) buf;
  int byte;

  /* Emit the four bytes least-significant first.  */
  for (byte = 0; byte < 4; byte++)
    where[byte] = (insn >> (8 * byte)) & 0xff;
}
5168
/* Read a 32-bit instruction back from BUF, which is always stored
   little-endian.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *where = (unsigned char *) buf;
  uint32_t result = 0;
  int byte;

  /* Fold the four bytes in from the most-significant end.  */
  for (byte = 3; byte >= 0; byte--)
    result = (result << 8) | where[byte];

  return result;
}
5178
/* Emit the instruction held in the global INST: write its 4-byte
   encoding into the current frag and, when a relocation was parsed,
   create the corresponding fix-up.  NEW_INST, if non-NULL, is recorded
   on the fix-up for later use (see tc_fix_data).  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room for one instruction in the current frag.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fix-ups carry the operand and flags so the value
	     can be applied in-house rather than via a BFD howto.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Keep DWARF line-number information in sync with the emitted bytes.  */
  dwarf2_emit_insn (INSN_SIZE);
}
5212
/* Link together opcodes of the same name.  */

struct templates
{
  const aarch64_opcode *opcode;	/* One opcode entry for this mnemonic.  */
  struct templates *next;	/* Next opcode sharing the mnemonic.  */
};

typedef struct templates templates;
5222
5223 static templates *
5224 lookup_mnemonic (const char *start, int len)
5225 {
5226 templates *templ = NULL;
5227
5228 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5229 return templ;
5230 }
5231
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.  On success *STR is advanced past the
   mnemonic (including any recognized condition suffix); on a bad
   condition suffix *STR is left at the '.'.  As a side effect,
   inst.cond is set to the parsed condition (or COND_ALWAYS).  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  Remember the first '.', which may introduce
     a condition suffix.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Reject an empty mnemonic or one that begins with '.'.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown condition name: leave *STR at the '.' and fail.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* 13 is the longest base mnemonic that still fits in CONDNAME
	 (16 bytes) together with the two-character ".c" suffix; no NUL
	 terminator is needed since lookup_mnemonic takes a length.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5295
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL
   (after recording a syntax error via first_error) on failure.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Narrowest full-vector qualifier for each element type; the width
     offset computed below is added to these.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
  };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predicate suffixes (zeroing / merging).  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  Relies on the S_B..S_Q qualifiers
	 being laid out in the same order as the NT_b..NT_q types.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 4-, 8- and 16-byte total vector widths are representable.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5370
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   The default value comes from the opcode entry itself; which field of
   *OPERAND receives it depends on the operand type.  */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Register-lane operands: the default is the lane register number.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      /* An omitted scaled pattern defaults to a "MUL #1" scaling.  */
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      /* Make sure no relocation remains pending for the omitted
	 exception immediate.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* The default selects an entry in the barrier option table.  */
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      /* The default selects an entry in the hint option table.  */
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      /* Other operand types have no omitted-operand default.  */
      break;
    }
}
5469
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.
   On success the 16-bit shift implied by the relocation's G0/G1/G2/G3
   group is recorded on operand 1's shifter.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* Non-zero when the destination is a 32-bit (W) register.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* These relocation variants are meaningless for MOVK; reject them.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Map each relocation group onto its shift amount.  */
  switch (inst.reloc.type)
    {
    /* G0 group: shift 0.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1 group: shift 16.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2 group: shift 32; only valid for 64-bit registers.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* G3 group: shift 48; only valid for 64-bit registers.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
5571
/* A primitive log calculator.  Return log2 of SIZE for SIZE in
   {1, 2, 4, 8, 16}; any other value is a caller bug and trips
   gas_assert.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* Maps (SIZE - 1) to log2 (SIZE); (unsigned char) -1 marks sizes that
     are not powers of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject 0 as well as values above 16: with SIZE == 0 the unsigned
     subtraction below would wrap around and index far out of bounds
     before the assertion could catch it.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5587
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The pseudo reloc (and its TLS LD/LE counterparts) cannot be resolved
   at parse time because the size-specific LDST reloc depends on the
   access size of the load/store; this routine derives that size from
   the qualifier of operand 1 (the address operand) and indexes the
   table below with it.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz, max_logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows are indexed by (inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12);
     columns by log2 of the access size in bytes (0 => 8-bit, ...,
     4 => 128-bit).  The TLS DTPREL/TPREL rows have no 128-bit variant,
     hence the BFD_RELOC_AARCH64_NONE entries.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  /* Only the five pseudo relocs covered by the table may reach here.  */
  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the qualifier of the address operand was left implicit by the
     parser, deduce it from operand 0's qualifier.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));

  /* The TLS rows of the table only go up to 64-bit accesses; reject
     128-bit qualifiers for them rather than returning the NONE entry.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    max_logsz = 3;
  else
    max_logsz = 4;

  if (logsz > max_logsz)
    {
      /* SEE PR 27904 for an example of this.  */
      set_fatal_syntax_error
	(_("relocation qualifier does not match instruction size"));
      return BFD_RELOC_AARCH64_NONE;
    }

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5675
/* Check whether a register list REGINFO is valid.  REGINFO packs the
   list as: bits [1:0] = number of registers minus one, followed by one
   5-bit register number per register, lowest bits first.  The register
   numbers must increase (modulo 32) in steps of one, or of two when
   ACCEPT_ALTERNATE is non-zero.

   Return FALSE if such a register list is invalid, otherwise return TRUE.  */

static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  uint32_t count, bits, expected, step;

  count = (reginfo & 0x3) + 1;
  step = accept_alternate ? 2 : 1;
  bits = reginfo >> 2;
  expected = bits & 0x1f;

  for (; count != 0; count--, bits >>= 5)
    {
      if ((bits & 0x1f) != expected)
	return false;
      expected = (expected + step) & 0x1f;
    }

  return true;
}
5706
5707 /* Generic instruction operand parser. This does no encoding and no
5708 semantic validation; it merely squirrels values away in the inst
5709 structure. Returns TRUE or FALSE depending on whether the
5710 specified grammar matched. */
5711
5712 static bool
5713 parse_operands (char *str, const aarch64_opcode *opcode)
5714 {
5715 int i;
5716 char *backtrack_pos = 0;
5717 const enum aarch64_opnd *operands = opcode->operands;
5718 aarch64_reg_type imm_reg_type;
5719
5720 clear_error ();
5721 skip_whitespace (str);
5722
5723 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5724 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5725 else
5726 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5727
5728 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5729 {
5730 int64_t val;
5731 const reg_entry *reg;
5732 int comma_skipped_p = 0;
5733 aarch64_reg_type rtype;
5734 struct vector_type_el vectype;
5735 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5736 aarch64_opnd_info *info = &inst.base.operands[i];
5737 aarch64_reg_type reg_type;
5738
5739 DEBUG_TRACE ("parse operand %d", i);
5740
5741 /* Assign the operand code. */
5742 info->type = operands[i];
5743
5744 if (optional_operand_p (opcode, i))
5745 {
5746 /* Remember where we are in case we need to backtrack. */
5747 gas_assert (!backtrack_pos);
5748 backtrack_pos = str;
5749 }
5750
5751 /* Expect comma between operands; the backtrack mechanism will take
5752 care of cases of omitted optional operand. */
5753 if (i > 0 && ! skip_past_char (&str, ','))
5754 {
5755 set_syntax_error (_("comma expected between operands"));
5756 goto failure;
5757 }
5758 else
5759 comma_skipped_p = 1;
5760
5761 switch (operands[i])
5762 {
5763 case AARCH64_OPND_Rd:
5764 case AARCH64_OPND_Rn:
5765 case AARCH64_OPND_Rm:
5766 case AARCH64_OPND_Rt:
5767 case AARCH64_OPND_Rt2:
5768 case AARCH64_OPND_Rs:
5769 case AARCH64_OPND_Ra:
5770 case AARCH64_OPND_Rt_LS64:
5771 case AARCH64_OPND_Rt_SYS:
5772 case AARCH64_OPND_PAIRREG:
5773 case AARCH64_OPND_SVE_Rm:
5774 po_int_reg_or_fail (REG_TYPE_R_Z);
5775
5776 /* In LS64 load/store instructions Rt register number must be even
5777 and <=22. */
5778 if (operands[i] == AARCH64_OPND_Rt_LS64)
5779 {
5780 /* We've already checked if this is valid register.
5781 This will check if register number (Rt) is not undefined for LS64
5782 instructions:
5783 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
5784 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
5785 {
5786 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
5787 goto failure;
5788 }
5789 }
5790 break;
5791
5792 case AARCH64_OPND_Rd_SP:
5793 case AARCH64_OPND_Rn_SP:
5794 case AARCH64_OPND_Rt_SP:
5795 case AARCH64_OPND_SVE_Rn_SP:
5796 case AARCH64_OPND_Rm_SP:
5797 po_int_reg_or_fail (REG_TYPE_R_SP);
5798 break;
5799
5800 case AARCH64_OPND_Rm_EXT:
5801 case AARCH64_OPND_Rm_SFT:
5802 po_misc_or_fail (parse_shifter_operand
5803 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5804 ? SHIFTED_ARITH_IMM
5805 : SHIFTED_LOGIC_IMM)));
5806 if (!info->shifter.operator_present)
5807 {
5808 /* Default to LSL if not present. Libopcodes prefers shifter
5809 kind to be explicit. */
5810 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5811 info->shifter.kind = AARCH64_MOD_LSL;
5812 /* For Rm_EXT, libopcodes will carry out further check on whether
5813 or not stack pointer is used in the instruction (Recall that
5814 "the extend operator is not optional unless at least one of
5815 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5816 }
5817 break;
5818
5819 case AARCH64_OPND_Fd:
5820 case AARCH64_OPND_Fn:
5821 case AARCH64_OPND_Fm:
5822 case AARCH64_OPND_Fa:
5823 case AARCH64_OPND_Ft:
5824 case AARCH64_OPND_Ft2:
5825 case AARCH64_OPND_Sd:
5826 case AARCH64_OPND_Sn:
5827 case AARCH64_OPND_Sm:
5828 case AARCH64_OPND_SVE_VZn:
5829 case AARCH64_OPND_SVE_Vd:
5830 case AARCH64_OPND_SVE_Vm:
5831 case AARCH64_OPND_SVE_Vn:
5832 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5833 if (val == PARSE_FAIL)
5834 {
5835 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5836 goto failure;
5837 }
5838 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5839
5840 info->reg.regno = val;
5841 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5842 break;
5843
5844 case AARCH64_OPND_SVE_Pd:
5845 case AARCH64_OPND_SVE_Pg3:
5846 case AARCH64_OPND_SVE_Pg4_5:
5847 case AARCH64_OPND_SVE_Pg4_10:
5848 case AARCH64_OPND_SVE_Pg4_16:
5849 case AARCH64_OPND_SVE_Pm:
5850 case AARCH64_OPND_SVE_Pn:
5851 case AARCH64_OPND_SVE_Pt:
5852 reg_type = REG_TYPE_PN;
5853 goto vector_reg;
5854
5855 case AARCH64_OPND_SVE_Za_5:
5856 case AARCH64_OPND_SVE_Za_16:
5857 case AARCH64_OPND_SVE_Zd:
5858 case AARCH64_OPND_SVE_Zm_5:
5859 case AARCH64_OPND_SVE_Zm_16:
5860 case AARCH64_OPND_SVE_Zn:
5861 case AARCH64_OPND_SVE_Zt:
5862 reg_type = REG_TYPE_ZN;
5863 goto vector_reg;
5864
5865 case AARCH64_OPND_Va:
5866 case AARCH64_OPND_Vd:
5867 case AARCH64_OPND_Vn:
5868 case AARCH64_OPND_Vm:
5869 reg_type = REG_TYPE_VN;
5870 vector_reg:
5871 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5872 if (val == PARSE_FAIL)
5873 {
5874 first_error (_(get_reg_expected_msg (reg_type)));
5875 goto failure;
5876 }
5877 if (vectype.defined & NTA_HASINDEX)
5878 goto failure;
5879
5880 info->reg.regno = val;
5881 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5882 && vectype.type == NT_invtype)
5883 /* Unqualified Pn and Zn registers are allowed in certain
5884 contexts. Rely on F_STRICT qualifier checking to catch
5885 invalid uses. */
5886 info->qualifier = AARCH64_OPND_QLF_NIL;
5887 else
5888 {
5889 info->qualifier = vectype_to_qualifier (&vectype);
5890 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5891 goto failure;
5892 }
5893 break;
5894
5895 case AARCH64_OPND_VdD1:
5896 case AARCH64_OPND_VnD1:
5897 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5898 if (val == PARSE_FAIL)
5899 {
5900 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5901 goto failure;
5902 }
5903 if (vectype.type != NT_d || vectype.index != 1)
5904 {
5905 set_fatal_syntax_error
5906 (_("the top half of a 128-bit FP/SIMD register is expected"));
5907 goto failure;
5908 }
5909 info->reg.regno = val;
5910 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5911 here; it is correct for the purpose of encoding/decoding since
5912 only the register number is explicitly encoded in the related
5913 instructions, although this appears a bit hacky. */
5914 info->qualifier = AARCH64_OPND_QLF_S_D;
5915 break;
5916
5917 case AARCH64_OPND_SVE_Zm3_INDEX:
5918 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5919 case AARCH64_OPND_SVE_Zm3_11_INDEX:
5920 case AARCH64_OPND_SVE_Zm4_11_INDEX:
5921 case AARCH64_OPND_SVE_Zm4_INDEX:
5922 case AARCH64_OPND_SVE_Zn_INDEX:
5923 reg_type = REG_TYPE_ZN;
5924 goto vector_reg_index;
5925
5926 case AARCH64_OPND_Ed:
5927 case AARCH64_OPND_En:
5928 case AARCH64_OPND_Em:
5929 case AARCH64_OPND_Em16:
5930 case AARCH64_OPND_SM3_IMM2:
5931 reg_type = REG_TYPE_VN;
5932 vector_reg_index:
5933 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5934 if (val == PARSE_FAIL)
5935 {
5936 first_error (_(get_reg_expected_msg (reg_type)));
5937 goto failure;
5938 }
5939 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5940 goto failure;
5941
5942 info->reglane.regno = val;
5943 info->reglane.index = vectype.index;
5944 info->qualifier = vectype_to_qualifier (&vectype);
5945 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5946 goto failure;
5947 break;
5948
5949 case AARCH64_OPND_SVE_ZnxN:
5950 case AARCH64_OPND_SVE_ZtxN:
5951 reg_type = REG_TYPE_ZN;
5952 goto vector_reg_list;
5953
5954 case AARCH64_OPND_LVn:
5955 case AARCH64_OPND_LVt:
5956 case AARCH64_OPND_LVt_AL:
5957 case AARCH64_OPND_LEt:
5958 reg_type = REG_TYPE_VN;
5959 vector_reg_list:
5960 if (reg_type == REG_TYPE_ZN
5961 && get_opcode_dependent_value (opcode) == 1
5962 && *str != '{')
5963 {
5964 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5965 if (val == PARSE_FAIL)
5966 {
5967 first_error (_(get_reg_expected_msg (reg_type)));
5968 goto failure;
5969 }
5970 info->reglist.first_regno = val;
5971 info->reglist.num_regs = 1;
5972 }
5973 else
5974 {
5975 val = parse_vector_reg_list (&str, reg_type, &vectype);
5976 if (val == PARSE_FAIL)
5977 goto failure;
5978
5979 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5980 {
5981 set_fatal_syntax_error (_("invalid register list"));
5982 goto failure;
5983 }
5984
5985 if (vectype.width != 0 && *str != ',')
5986 {
5987 set_fatal_syntax_error
5988 (_("expected element type rather than vector type"));
5989 goto failure;
5990 }
5991
5992 info->reglist.first_regno = (val >> 2) & 0x1f;
5993 info->reglist.num_regs = (val & 0x3) + 1;
5994 }
5995 if (operands[i] == AARCH64_OPND_LEt)
5996 {
5997 if (!(vectype.defined & NTA_HASINDEX))
5998 goto failure;
5999 info->reglist.has_index = 1;
6000 info->reglist.index = vectype.index;
6001 }
6002 else
6003 {
6004 if (vectype.defined & NTA_HASINDEX)
6005 goto failure;
6006 if (!(vectype.defined & NTA_HASTYPE))
6007 {
6008 if (reg_type == REG_TYPE_ZN)
6009 set_fatal_syntax_error (_("missing type suffix"));
6010 goto failure;
6011 }
6012 }
6013 info->qualifier = vectype_to_qualifier (&vectype);
6014 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6015 goto failure;
6016 break;
6017
6018 case AARCH64_OPND_CRn:
6019 case AARCH64_OPND_CRm:
6020 {
6021 char prefix = *(str++);
6022 if (prefix != 'c' && prefix != 'C')
6023 goto failure;
6024
6025 po_imm_nc_or_fail ();
6026 if (val > 15)
6027 {
6028 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6029 goto failure;
6030 }
6031 info->qualifier = AARCH64_OPND_QLF_CR;
6032 info->imm.value = val;
6033 break;
6034 }
6035
6036 case AARCH64_OPND_SHLL_IMM:
6037 case AARCH64_OPND_IMM_VLSR:
6038 po_imm_or_fail (1, 64);
6039 info->imm.value = val;
6040 break;
6041
6042 case AARCH64_OPND_CCMP_IMM:
6043 case AARCH64_OPND_SIMM5:
6044 case AARCH64_OPND_FBITS:
6045 case AARCH64_OPND_TME_UIMM16:
6046 case AARCH64_OPND_UIMM4:
6047 case AARCH64_OPND_UIMM4_ADDG:
6048 case AARCH64_OPND_UIMM10:
6049 case AARCH64_OPND_UIMM3_OP1:
6050 case AARCH64_OPND_UIMM3_OP2:
6051 case AARCH64_OPND_IMM_VLSL:
6052 case AARCH64_OPND_IMM:
6053 case AARCH64_OPND_IMM_2:
6054 case AARCH64_OPND_WIDTH:
6055 case AARCH64_OPND_SVE_INV_LIMM:
6056 case AARCH64_OPND_SVE_LIMM:
6057 case AARCH64_OPND_SVE_LIMM_MOV:
6058 case AARCH64_OPND_SVE_SHLIMM_PRED:
6059 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6060 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6061 case AARCH64_OPND_SVE_SHRIMM_PRED:
6062 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6063 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6064 case AARCH64_OPND_SVE_SIMM5:
6065 case AARCH64_OPND_SVE_SIMM5B:
6066 case AARCH64_OPND_SVE_SIMM6:
6067 case AARCH64_OPND_SVE_SIMM8:
6068 case AARCH64_OPND_SVE_UIMM3:
6069 case AARCH64_OPND_SVE_UIMM7:
6070 case AARCH64_OPND_SVE_UIMM8:
6071 case AARCH64_OPND_SVE_UIMM8_53:
6072 case AARCH64_OPND_IMM_ROT1:
6073 case AARCH64_OPND_IMM_ROT2:
6074 case AARCH64_OPND_IMM_ROT3:
6075 case AARCH64_OPND_SVE_IMM_ROT1:
6076 case AARCH64_OPND_SVE_IMM_ROT2:
6077 case AARCH64_OPND_SVE_IMM_ROT3:
6078 po_imm_nc_or_fail ();
6079 info->imm.value = val;
6080 break;
6081
6082 case AARCH64_OPND_SVE_AIMM:
6083 case AARCH64_OPND_SVE_ASIMM:
6084 po_imm_nc_or_fail ();
6085 info->imm.value = val;
6086 skip_whitespace (str);
6087 if (skip_past_comma (&str))
6088 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6089 else
6090 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6091 break;
6092
6093 case AARCH64_OPND_SVE_PATTERN:
6094 po_enum_or_fail (aarch64_sve_pattern_array);
6095 info->imm.value = val;
6096 break;
6097
6098 case AARCH64_OPND_SVE_PATTERN_SCALED:
6099 po_enum_or_fail (aarch64_sve_pattern_array);
6100 info->imm.value = val;
6101 if (skip_past_comma (&str)
6102 && !parse_shift (&str, info, SHIFTED_MUL))
6103 goto failure;
6104 if (!info->shifter.operator_present)
6105 {
6106 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6107 info->shifter.kind = AARCH64_MOD_MUL;
6108 info->shifter.amount = 1;
6109 }
6110 break;
6111
6112 case AARCH64_OPND_SVE_PRFOP:
6113 po_enum_or_fail (aarch64_sve_prfop_array);
6114 info->imm.value = val;
6115 break;
6116
6117 case AARCH64_OPND_UIMM7:
6118 po_imm_or_fail (0, 127);
6119 info->imm.value = val;
6120 break;
6121
6122 case AARCH64_OPND_IDX:
6123 case AARCH64_OPND_MASK:
6124 case AARCH64_OPND_BIT_NUM:
6125 case AARCH64_OPND_IMMR:
6126 case AARCH64_OPND_IMMS:
6127 po_imm_or_fail (0, 63);
6128 info->imm.value = val;
6129 break;
6130
6131 case AARCH64_OPND_IMM0:
6132 po_imm_nc_or_fail ();
6133 if (val != 0)
6134 {
6135 set_fatal_syntax_error (_("immediate zero expected"));
6136 goto failure;
6137 }
6138 info->imm.value = 0;
6139 break;
6140
6141 case AARCH64_OPND_FPIMM0:
6142 {
6143 int qfloat;
6144 bool res1 = false, res2 = false;
6145 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6146 it is probably not worth the effort to support it. */
6147 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6148 imm_reg_type))
6149 && (error_p ()
6150 || !(res2 = parse_constant_immediate (&str, &val,
6151 imm_reg_type))))
6152 goto failure;
6153 if ((res1 && qfloat == 0) || (res2 && val == 0))
6154 {
6155 info->imm.value = 0;
6156 info->imm.is_fp = 1;
6157 break;
6158 }
6159 set_fatal_syntax_error (_("immediate zero expected"));
6160 goto failure;
6161 }
6162
6163 case AARCH64_OPND_IMM_MOV:
6164 {
6165 char *saved = str;
6166 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6167 reg_name_p (str, REG_TYPE_VN))
6168 goto failure;
6169 str = saved;
6170 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6171 GE_OPT_PREFIX, REJECT_ABSENT,
6172 NORMAL_RESOLUTION));
6173 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6174 later. fix_mov_imm_insn will try to determine a machine
6175 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6176 message if the immediate cannot be moved by a single
6177 instruction. */
6178 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6179 inst.base.operands[i].skip = 1;
6180 }
6181 break;
6182
6183 case AARCH64_OPND_SIMD_IMM:
6184 case AARCH64_OPND_SIMD_IMM_SFT:
6185 if (! parse_big_immediate (&str, &val, imm_reg_type))
6186 goto failure;
6187 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6188 /* addr_off_p */ 0,
6189 /* need_libopcodes_p */ 1,
6190 /* skip_p */ 1);
6191 /* Parse shift.
6192 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6193 shift, we don't check it here; we leave the checking to
6194 the libopcodes (operand_general_constraint_met_p). By
6195 doing this, we achieve better diagnostics. */
6196 if (skip_past_comma (&str)
6197 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6198 goto failure;
6199 if (!info->shifter.operator_present
6200 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6201 {
6202 /* Default to LSL if not present. Libopcodes prefers shifter
6203 kind to be explicit. */
6204 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6205 info->shifter.kind = AARCH64_MOD_LSL;
6206 }
6207 break;
6208
6209 case AARCH64_OPND_FPIMM:
6210 case AARCH64_OPND_SIMD_FPIMM:
6211 case AARCH64_OPND_SVE_FPIMM8:
6212 {
6213 int qfloat;
6214 bool dp_p;
6215
6216 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6217 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6218 || !aarch64_imm_float_p (qfloat))
6219 {
6220 if (!error_p ())
6221 set_fatal_syntax_error (_("invalid floating-point"
6222 " constant"));
6223 goto failure;
6224 }
6225 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6226 inst.base.operands[i].imm.is_fp = 1;
6227 }
6228 break;
6229
6230 case AARCH64_OPND_SVE_I1_HALF_ONE:
6231 case AARCH64_OPND_SVE_I1_HALF_TWO:
6232 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6233 {
6234 int qfloat;
6235 bool dp_p;
6236
6237 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6238 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6239 {
6240 if (!error_p ())
6241 set_fatal_syntax_error (_("invalid floating-point"
6242 " constant"));
6243 goto failure;
6244 }
6245 inst.base.operands[i].imm.value = qfloat;
6246 inst.base.operands[i].imm.is_fp = 1;
6247 }
6248 break;
6249
6250 case AARCH64_OPND_LIMM:
6251 po_misc_or_fail (parse_shifter_operand (&str, info,
6252 SHIFTED_LOGIC_IMM));
6253 if (info->shifter.operator_present)
6254 {
6255 set_fatal_syntax_error
6256 (_("shift not allowed for bitmask immediate"));
6257 goto failure;
6258 }
6259 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6260 /* addr_off_p */ 0,
6261 /* need_libopcodes_p */ 1,
6262 /* skip_p */ 1);
6263 break;
6264
6265 case AARCH64_OPND_AIMM:
6266 if (opcode->op == OP_ADD)
6267 /* ADD may have relocation types. */
6268 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6269 SHIFTED_ARITH_IMM));
6270 else
6271 po_misc_or_fail (parse_shifter_operand (&str, info,
6272 SHIFTED_ARITH_IMM));
6273 switch (inst.reloc.type)
6274 {
6275 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6276 info->shifter.amount = 12;
6277 break;
6278 case BFD_RELOC_UNUSED:
6279 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6280 if (info->shifter.kind != AARCH64_MOD_NONE)
6281 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6282 inst.reloc.pc_rel = 0;
6283 break;
6284 default:
6285 break;
6286 }
6287 info->imm.value = 0;
6288 if (!info->shifter.operator_present)
6289 {
6290 /* Default to LSL if not present. Libopcodes prefers shifter
6291 kind to be explicit. */
6292 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6293 info->shifter.kind = AARCH64_MOD_LSL;
6294 }
6295 break;
6296
6297 case AARCH64_OPND_HALF:
6298 {
6299 /* #<imm16> or relocation. */
6300 int internal_fixup_p;
6301 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6302 if (internal_fixup_p)
6303 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6304 skip_whitespace (str);
6305 if (skip_past_comma (&str))
6306 {
6307 /* {, LSL #<shift>} */
6308 if (! aarch64_gas_internal_fixup_p ())
6309 {
6310 set_fatal_syntax_error (_("can't mix relocation modifier "
6311 "with explicit shift"));
6312 goto failure;
6313 }
6314 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6315 }
6316 else
6317 inst.base.operands[i].shifter.amount = 0;
6318 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6319 inst.base.operands[i].imm.value = 0;
6320 if (! process_movw_reloc_info ())
6321 goto failure;
6322 }
6323 break;
6324
6325 case AARCH64_OPND_EXCEPTION:
6326 case AARCH64_OPND_UNDEFINED:
6327 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6328 imm_reg_type));
6329 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6330 /* addr_off_p */ 0,
6331 /* need_libopcodes_p */ 0,
6332 /* skip_p */ 1);
6333 break;
6334
6335 case AARCH64_OPND_NZCV:
6336 {
6337 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6338 if (nzcv != NULL)
6339 {
6340 str += 4;
6341 info->imm.value = nzcv->value;
6342 break;
6343 }
6344 po_imm_or_fail (0, 15);
6345 info->imm.value = val;
6346 }
6347 break;
6348
6349 case AARCH64_OPND_COND:
6350 case AARCH64_OPND_COND1:
6351 {
6352 char *start = str;
6353 do
6354 str++;
6355 while (ISALPHA (*str));
6356 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6357 if (info->cond == NULL)
6358 {
6359 set_syntax_error (_("invalid condition"));
6360 goto failure;
6361 }
6362 else if (operands[i] == AARCH64_OPND_COND1
6363 && (info->cond->value & 0xe) == 0xe)
6364 {
6365 /* Do not allow AL or NV. */
6366 set_default_error ();
6367 goto failure;
6368 }
6369 }
6370 break;
6371
6372 case AARCH64_OPND_ADDR_ADRP:
6373 po_misc_or_fail (parse_adrp (&str));
6374 /* Clear the value as operand needs to be relocated. */
6375 info->imm.value = 0;
6376 break;
6377
6378 case AARCH64_OPND_ADDR_PCREL14:
6379 case AARCH64_OPND_ADDR_PCREL19:
6380 case AARCH64_OPND_ADDR_PCREL21:
6381 case AARCH64_OPND_ADDR_PCREL26:
6382 po_misc_or_fail (parse_address (&str, info));
6383 if (!info->addr.pcrel)
6384 {
6385 set_syntax_error (_("invalid pc-relative address"));
6386 goto failure;
6387 }
6388 if (inst.gen_lit_pool
6389 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6390 {
6391 /* Only permit "=value" in the literal load instructions.
6392 The literal will be generated by programmer_friendly_fixup. */
6393 set_syntax_error (_("invalid use of \"=immediate\""));
6394 goto failure;
6395 }
6396 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6397 {
6398 set_syntax_error (_("unrecognized relocation suffix"));
6399 goto failure;
6400 }
6401 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6402 {
6403 info->imm.value = inst.reloc.exp.X_add_number;
6404 inst.reloc.type = BFD_RELOC_UNUSED;
6405 }
6406 else
6407 {
6408 info->imm.value = 0;
6409 if (inst.reloc.type == BFD_RELOC_UNUSED)
6410 switch (opcode->iclass)
6411 {
6412 case compbranch:
6413 case condbranch:
6414 /* e.g. CBZ or B.COND */
6415 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6416 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6417 break;
6418 case testbranch:
6419 /* e.g. TBZ */
6420 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6421 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6422 break;
6423 case branch_imm:
6424 /* e.g. B or BL */
6425 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6426 inst.reloc.type =
6427 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6428 : BFD_RELOC_AARCH64_JUMP26;
6429 break;
6430 case loadlit:
6431 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6432 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6433 break;
6434 case pcreladdr:
6435 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6436 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6437 break;
6438 default:
6439 gas_assert (0);
6440 abort ();
6441 }
6442 inst.reloc.pc_rel = 1;
6443 }
6444 break;
6445
6446 case AARCH64_OPND_ADDR_SIMPLE:
6447 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6448 {
6449 /* [<Xn|SP>{, #<simm>}] */
6450 char *start = str;
6451 /* First use the normal address-parsing routines, to get
6452 the usual syntax errors. */
6453 po_misc_or_fail (parse_address (&str, info));
6454 if (info->addr.pcrel || info->addr.offset.is_reg
6455 || !info->addr.preind || info->addr.postind
6456 || info->addr.writeback)
6457 {
6458 set_syntax_error (_("invalid addressing mode"));
6459 goto failure;
6460 }
6461
6462 /* Then retry, matching the specific syntax of these addresses. */
6463 str = start;
6464 po_char_or_fail ('[');
6465 po_reg_or_fail (REG_TYPE_R64_SP);
6466 /* Accept optional ", #0". */
6467 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6468 && skip_past_char (&str, ','))
6469 {
6470 skip_past_char (&str, '#');
6471 if (! skip_past_char (&str, '0'))
6472 {
6473 set_fatal_syntax_error
6474 (_("the optional immediate offset can only be 0"));
6475 goto failure;
6476 }
6477 }
6478 po_char_or_fail (']');
6479 break;
6480 }
6481
6482 case AARCH64_OPND_ADDR_REGOFF:
6483 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6484 po_misc_or_fail (parse_address (&str, info));
6485 regoff_addr:
6486 if (info->addr.pcrel || !info->addr.offset.is_reg
6487 || !info->addr.preind || info->addr.postind
6488 || info->addr.writeback)
6489 {
6490 set_syntax_error (_("invalid addressing mode"));
6491 goto failure;
6492 }
6493 if (!info->shifter.operator_present)
6494 {
6495 /* Default to LSL if not present. Libopcodes prefers shifter
6496 kind to be explicit. */
6497 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6498 info->shifter.kind = AARCH64_MOD_LSL;
6499 }
6500 /* Qualifier to be deduced by libopcodes. */
6501 break;
6502
6503 case AARCH64_OPND_ADDR_SIMM7:
6504 po_misc_or_fail (parse_address (&str, info));
6505 if (info->addr.pcrel || info->addr.offset.is_reg
6506 || (!info->addr.preind && !info->addr.postind))
6507 {
6508 set_syntax_error (_("invalid addressing mode"));
6509 goto failure;
6510 }
6511 if (inst.reloc.type != BFD_RELOC_UNUSED)
6512 {
6513 set_syntax_error (_("relocation not allowed"));
6514 goto failure;
6515 }
6516 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6517 /* addr_off_p */ 1,
6518 /* need_libopcodes_p */ 1,
6519 /* skip_p */ 0);
6520 break;
6521
6522 case AARCH64_OPND_ADDR_SIMM9:
6523 case AARCH64_OPND_ADDR_SIMM9_2:
6524 case AARCH64_OPND_ADDR_SIMM11:
6525 case AARCH64_OPND_ADDR_SIMM13:
6526 po_misc_or_fail (parse_address (&str, info));
6527 if (info->addr.pcrel || info->addr.offset.is_reg
6528 || (!info->addr.preind && !info->addr.postind)
6529 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6530 && info->addr.writeback))
6531 {
6532 set_syntax_error (_("invalid addressing mode"));
6533 goto failure;
6534 }
6535 if (inst.reloc.type != BFD_RELOC_UNUSED)
6536 {
6537 set_syntax_error (_("relocation not allowed"));
6538 goto failure;
6539 }
6540 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6541 /* addr_off_p */ 1,
6542 /* need_libopcodes_p */ 1,
6543 /* skip_p */ 0);
6544 break;
6545
6546 case AARCH64_OPND_ADDR_SIMM10:
6547 case AARCH64_OPND_ADDR_OFFSET:
6548 po_misc_or_fail (parse_address (&str, info));
6549 if (info->addr.pcrel || info->addr.offset.is_reg
6550 || !info->addr.preind || info->addr.postind)
6551 {
6552 set_syntax_error (_("invalid addressing mode"));
6553 goto failure;
6554 }
6555 if (inst.reloc.type != BFD_RELOC_UNUSED)
6556 {
6557 set_syntax_error (_("relocation not allowed"));
6558 goto failure;
6559 }
6560 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6561 /* addr_off_p */ 1,
6562 /* need_libopcodes_p */ 1,
6563 /* skip_p */ 0);
6564 break;
6565
6566 case AARCH64_OPND_ADDR_UIMM12:
6567 po_misc_or_fail (parse_address (&str, info));
6568 if (info->addr.pcrel || info->addr.offset.is_reg
6569 || !info->addr.preind || info->addr.writeback)
6570 {
6571 set_syntax_error (_("invalid addressing mode"));
6572 goto failure;
6573 }
6574 if (inst.reloc.type == BFD_RELOC_UNUSED)
6575 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6576 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6577 || (inst.reloc.type
6578 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6579 || (inst.reloc.type
6580 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6581 || (inst.reloc.type
6582 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6583 || (inst.reloc.type
6584 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6585 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6586 /* Leave qualifier to be determined by libopcodes. */
6587 break;
6588
6589 case AARCH64_OPND_SIMD_ADDR_POST:
6590 /* [<Xn|SP>], <Xm|#<amount>> */
6591 po_misc_or_fail (parse_address (&str, info));
6592 if (!info->addr.postind || !info->addr.writeback)
6593 {
6594 set_syntax_error (_("invalid addressing mode"));
6595 goto failure;
6596 }
6597 if (!info->addr.offset.is_reg)
6598 {
6599 if (inst.reloc.exp.X_op == O_constant)
6600 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6601 else
6602 {
6603 set_fatal_syntax_error
6604 (_("writeback value must be an immediate constant"));
6605 goto failure;
6606 }
6607 }
6608 /* No qualifier. */
6609 break;
6610
6611 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6612 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
6613 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6614 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6615 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6616 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6617 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6618 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6619 case AARCH64_OPND_SVE_ADDR_RI_U6:
6620 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6621 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6622 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6623 /* [X<n>{, #imm, MUL VL}]
6624 [X<n>{, #imm}]
6625 but recognizing SVE registers. */
6626 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6627 &offset_qualifier));
6628 if (base_qualifier != AARCH64_OPND_QLF_X)
6629 {
6630 set_syntax_error (_("invalid addressing mode"));
6631 goto failure;
6632 }
6633 sve_regimm:
6634 if (info->addr.pcrel || info->addr.offset.is_reg
6635 || !info->addr.preind || info->addr.writeback)
6636 {
6637 set_syntax_error (_("invalid addressing mode"));
6638 goto failure;
6639 }
6640 if (inst.reloc.type != BFD_RELOC_UNUSED
6641 || inst.reloc.exp.X_op != O_constant)
6642 {
6643 /* Make sure this has priority over
6644 "invalid addressing mode". */
6645 set_fatal_syntax_error (_("constant offset required"));
6646 goto failure;
6647 }
6648 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6649 break;
6650
6651 case AARCH64_OPND_SVE_ADDR_R:
6652 /* [<Xn|SP>{, <R><m>}]
6653 but recognizing SVE registers. */
6654 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6655 &offset_qualifier));
6656 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6657 {
6658 offset_qualifier = AARCH64_OPND_QLF_X;
6659 info->addr.offset.is_reg = 1;
6660 info->addr.offset.regno = 31;
6661 }
6662 else if (base_qualifier != AARCH64_OPND_QLF_X
6663 || offset_qualifier != AARCH64_OPND_QLF_X)
6664 {
6665 set_syntax_error (_("invalid addressing mode"));
6666 goto failure;
6667 }
6668 goto regoff_addr;
6669
6670 case AARCH64_OPND_SVE_ADDR_RR:
6671 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6672 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6673 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6674 case AARCH64_OPND_SVE_ADDR_RX:
6675 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6676 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6677 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6678 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6679 but recognizing SVE registers. */
6680 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6681 &offset_qualifier));
6682 if (base_qualifier != AARCH64_OPND_QLF_X
6683 || offset_qualifier != AARCH64_OPND_QLF_X)
6684 {
6685 set_syntax_error (_("invalid addressing mode"));
6686 goto failure;
6687 }
6688 goto regoff_addr;
6689
6690 case AARCH64_OPND_SVE_ADDR_RZ:
6691 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6692 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6693 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6694 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6695 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6696 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6697 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6698 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6699 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6700 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6701 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6702 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6703 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6704 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6705 &offset_qualifier));
6706 if (base_qualifier != AARCH64_OPND_QLF_X
6707 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6708 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6709 {
6710 set_syntax_error (_("invalid addressing mode"));
6711 goto failure;
6712 }
6713 info->qualifier = offset_qualifier;
6714 goto regoff_addr;
6715
6716 case AARCH64_OPND_SVE_ADDR_ZX:
6717 /* [Zn.<T>{, <Xm>}]. */
6718 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6719 &offset_qualifier));
6720 /* Things to check:
6721 base_qualifier either S_S or S_D
6722 offset_qualifier must be X
6723 */
6724 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6725 && base_qualifier != AARCH64_OPND_QLF_S_D)
6726 || offset_qualifier != AARCH64_OPND_QLF_X)
6727 {
6728 set_syntax_error (_("invalid addressing mode"));
6729 goto failure;
6730 }
6731 info->qualifier = base_qualifier;
6732 if (!info->addr.offset.is_reg || info->addr.pcrel
6733 || !info->addr.preind || info->addr.writeback
6734 || info->shifter.operator_present != 0)
6735 {
6736 set_syntax_error (_("invalid addressing mode"));
6737 goto failure;
6738 }
6739 info->shifter.kind = AARCH64_MOD_LSL;
6740 break;
6741
6742
6743 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6744 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6745 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6746 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6747 /* [Z<n>.<T>{, #imm}] */
6748 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6749 &offset_qualifier));
6750 if (base_qualifier != AARCH64_OPND_QLF_S_S
6751 && base_qualifier != AARCH64_OPND_QLF_S_D)
6752 {
6753 set_syntax_error (_("invalid addressing mode"));
6754 goto failure;
6755 }
6756 info->qualifier = base_qualifier;
6757 goto sve_regimm;
6758
6759 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6760 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6761 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6762 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6763 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6764
6765 We don't reject:
6766
6767 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6768
6769 here since we get better error messages by leaving it to
6770 the qualifier checking routines. */
6771 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6772 &offset_qualifier));
6773 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6774 && base_qualifier != AARCH64_OPND_QLF_S_D)
6775 || offset_qualifier != base_qualifier)
6776 {
6777 set_syntax_error (_("invalid addressing mode"));
6778 goto failure;
6779 }
6780 info->qualifier = base_qualifier;
6781 goto regoff_addr;
6782
6783 case AARCH64_OPND_SYSREG:
6784 {
6785 uint32_t sysreg_flags;
6786 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6787 &sysreg_flags)) == PARSE_FAIL)
6788 {
6789 set_syntax_error (_("unknown or missing system register name"));
6790 goto failure;
6791 }
6792 inst.base.operands[i].sysreg.value = val;
6793 inst.base.operands[i].sysreg.flags = sysreg_flags;
6794 break;
6795 }
6796
6797 case AARCH64_OPND_PSTATEFIELD:
6798 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6799 == PARSE_FAIL)
6800 {
6801 set_syntax_error (_("unknown or missing PSTATE field name"));
6802 goto failure;
6803 }
6804 inst.base.operands[i].pstatefield = val;
6805 break;
6806
6807 case AARCH64_OPND_SYSREG_IC:
6808 inst.base.operands[i].sysins_op =
6809 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6810 goto sys_reg_ins;
6811
6812 case AARCH64_OPND_SYSREG_DC:
6813 inst.base.operands[i].sysins_op =
6814 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6815 goto sys_reg_ins;
6816
6817 case AARCH64_OPND_SYSREG_AT:
6818 inst.base.operands[i].sysins_op =
6819 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6820 goto sys_reg_ins;
6821
6822 case AARCH64_OPND_SYSREG_SR:
6823 inst.base.operands[i].sysins_op =
6824 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6825 goto sys_reg_ins;
6826
6827 case AARCH64_OPND_SYSREG_TLBI:
6828 inst.base.operands[i].sysins_op =
6829 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6830 sys_reg_ins:
6831 if (inst.base.operands[i].sysins_op == NULL)
6832 {
6833 set_fatal_syntax_error ( _("unknown or missing operation name"));
6834 goto failure;
6835 }
6836 break;
6837
6838 case AARCH64_OPND_BARRIER:
6839 case AARCH64_OPND_BARRIER_ISB:
6840 val = parse_barrier (&str);
6841 if (val != PARSE_FAIL
6842 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6843 {
6844 /* ISB only accepts options name 'sy'. */
6845 set_syntax_error
6846 (_("the specified option is not accepted in ISB"));
6847 /* Turn off backtrack as this optional operand is present. */
6848 backtrack_pos = 0;
6849 goto failure;
6850 }
6851 if (val != PARSE_FAIL
6852 && operands[i] == AARCH64_OPND_BARRIER)
6853 {
6854 /* Regular barriers accept options CRm (C0-C15).
6855 DSB nXS barrier variant accepts values > 15. */
6856 if (val < 0 || val > 15)
6857 {
6858 set_syntax_error (_("the specified option is not accepted in DSB"));
6859 goto failure;
6860 }
6861 }
6862 /* This is an extension to accept a 0..15 immediate. */
6863 if (val == PARSE_FAIL)
6864 po_imm_or_fail (0, 15);
6865 info->barrier = aarch64_barrier_options + val;
6866 break;
6867
6868 case AARCH64_OPND_BARRIER_DSB_NXS:
6869 val = parse_barrier (&str);
6870 if (val != PARSE_FAIL)
6871 {
6872 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
6873 if (!(val == 16 || val == 20 || val == 24 || val == 28))
6874 {
6875 set_syntax_error (_("the specified option is not accepted in DSB"));
6876 /* Turn off backtrack as this optional operand is present. */
6877 backtrack_pos = 0;
6878 goto failure;
6879 }
6880 }
6881 else
6882 {
6883 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
6884 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
6885 if (! parse_constant_immediate (&str, &val, imm_reg_type))
6886 goto failure;
6887 if (!(val == 16 || val == 20 || val == 24 || val == 28))
6888 {
6889 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
6890 goto failure;
6891 }
6892 }
6893 /* Option index is encoded as 2-bit value in val<3:2>. */
6894 val = (val >> 2) - 4;
6895 info->barrier = aarch64_barrier_dsb_nxs_options + val;
6896 break;
6897
6898 case AARCH64_OPND_PRFOP:
6899 val = parse_pldop (&str);
6900 /* This is an extension to accept a 0..31 immediate. */
6901 if (val == PARSE_FAIL)
6902 po_imm_or_fail (0, 31);
6903 inst.base.operands[i].prfop = aarch64_prfops + val;
6904 break;
6905
6906 case AARCH64_OPND_BARRIER_PSB:
6907 val = parse_barrier_psb (&str, &(info->hint_option));
6908 if (val == PARSE_FAIL)
6909 goto failure;
6910 break;
6911
6912 case AARCH64_OPND_BTI_TARGET:
6913 val = parse_bti_operand (&str, &(info->hint_option));
6914 if (val == PARSE_FAIL)
6915 goto failure;
6916 break;
6917
6918 default:
6919 as_fatal (_("unhandled operand code %d"), operands[i]);
6920 }
6921
6922 /* If we get here, this operand was successfully parsed. */
6923 inst.base.operands[i].present = 1;
6924 continue;
6925
6926 failure:
6927 /* The parse routine should already have set the error, but in case
6928 not, set a default one here. */
6929 if (! error_p ())
6930 set_default_error ();
6931
6932 if (! backtrack_pos)
6933 goto parse_operands_return;
6934
6935 {
6936 /* We reach here because this operand is marked as optional, and
6937 either no operand was supplied or the operand was supplied but it
6938 was syntactically incorrect. In the latter case we report an
6939 error. In the former case we perform a few more checks before
6940 dropping through to the code to insert the default operand. */
6941
6942 char *tmp = backtrack_pos;
6943 char endchar = END_OF_INSN;
6944
6945 if (i != (aarch64_num_of_operands (opcode) - 1))
6946 endchar = ',';
6947 skip_past_char (&tmp, ',');
6948
6949 if (*tmp != endchar)
6950 /* The user has supplied an operand in the wrong format. */
6951 goto parse_operands_return;
6952
6953 /* Make sure there is not a comma before the optional operand.
6954 For example the fifth operand of 'sys' is optional:
6955
6956 sys #0,c0,c0,#0, <--- wrong
6957 sys #0,c0,c0,#0 <--- correct. */
6958 if (comma_skipped_p && i && endchar == END_OF_INSN)
6959 {
6960 set_fatal_syntax_error
6961 (_("unexpected comma before the omitted optional operand"));
6962 goto parse_operands_return;
6963 }
6964 }
6965
6966 /* Reaching here means we are dealing with an optional operand that is
6967 omitted from the assembly line. */
6968 gas_assert (optional_operand_p (opcode, i));
6969 info->present = 0;
6970 process_omitted_operand (operands[i], opcode, i, info);
6971
6972 /* Try again, skipping the optional operand at backtrack_pos. */
6973 str = backtrack_pos;
6974 backtrack_pos = 0;
6975
6976 /* Clear any error record after the omitted optional operand has been
6977 successfully handled. */
6978 clear_error ();
6979 }
6980
6981 /* Check if we have parsed all the operands. */
6982 if (*str != '\0' && ! error_p ())
6983 {
6984 /* Set I to the index of the last present operand; this is
6985 for the purpose of diagnostics. */
6986 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6987 ;
6988 set_fatal_syntax_error
6989 (_("unexpected characters following instruction"));
6990 }
6991
6992 parse_operands_return:
6993
6994 if (error_p ())
6995 {
6996 DEBUG_TRACE ("parsing FAIL: %s - %s",
6997 operand_mismatch_kind_names[get_error_kind ()],
6998 get_error_message ());
6999 /* Record the operand error properly; this is useful when there
7000 are multiple instruction templates for a mnemonic name, so that
7001 later on, we can select the error that most closely describes
7002 the problem. */
7003 record_operand_error (opcode, i, get_error_kind (),
7004 get_error_message ());
7005 return false;
7006 }
7007 else
7008 {
7009 DEBUG_TRACE ("parsing SUCCESS");
7010 return true;
7011 }
7012 }
7013
/* Apply fix-ups that make the assembler more programmer-friendly while
   keeping libopcodes happy, i.e. libopcodes only accepts the preferred
   architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */
7018
static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  /* Which fix-up (if any) applies is determined by the opcode's
     instruction class.  */
  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register can only name bits 0..31; larger bit numbers
	     require the X form.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  /* Accept the Wn spelling but encode as the architectural Xn.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW loads a 32-bit value regardless of the (64-bit)
	     destination qualifier.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	/* The extended-register operand is the second or third operand of
	   all addsub_ext opcodes.  */
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      /* No fix-up needed for other instruction classes.  */
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
7121
7122 /* Check for loads and stores that will cause unpredictable behavior. */
7123
static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 For pairs, either transfer register may clash with the base
	 (operand 2 holds the address).  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 NOTE(review): bit 22 of the encoding appears to distinguish loads
	 from stores in these classes.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
          if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
	            /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
	            /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7217
7218 static void
7219 force_automatic_sequence_close (void)
7220 {
7221 if (now_instr_sequence.instr)
7222 {
7223 as_warn (_("previous `%s' sequence has not been closed"),
7224 now_instr_sequence.instr->opcode->name);
7225 init_insn_sequence (NULL, &now_instr_sequence);
7226 }
7227 }
7228
7229 /* A wrapper function to interface with libopcodes on encoding and
7230 record the error message if there is any.
7231
7232 Return TRUE on success; otherwise return FALSE. */
7233
7234 static bool
7235 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7236 aarch64_insn *code)
7237 {
7238 aarch64_operand_error error_info;
7239 memset (&error_info, '\0', sizeof (error_info));
7240 error_info.kind = AARCH64_OPDE_NIL;
7241 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7242 && !error_info.non_fatal)
7243 return true;
7244
7245 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7246 record_operand_error_info (opcode, &error_info);
7247 return error_info.non_fatal;
7248 }
7249
7250 #ifdef DEBUG_AARCH64
7251 static inline void
7252 dump_opcode_operands (const aarch64_opcode *opcode)
7253 {
7254 int i = 0;
7255 while (opcode->operands[i] != AARCH64_OPND_NIL)
7256 {
7257 aarch64_verbose ("\t\t opnd%d: %s", i,
7258 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7259 ? aarch64_get_operand_name (opcode->operands[i])
7260 : aarch64_get_operand_desc (opcode->operands[i]));
7261 ++i;
7262 }
7263 }
7264 #endif /* DEBUG_AARCH64 */
7265
7266 /* This is the guts of the machine-dependent assembler. STR points to a
7267 machine dependent instruction. This function is supposed to emit
7268 the frags/bytes it assembles to. */
7269
void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  const aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Look the mnemonic up; P is left pointing just past it.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* The condition code was parsed along with the mnemonic; preserve it
     across the per-instruction reset below.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; any failure falls through to try the
	 next template with the same mnemonic.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; reset state before trying the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
7405
7406 /* Various frobbings of labels and their addresses. */
7407
void
aarch64_start_line_hook (void)
{
  /* A new source line begins; forget the label seen on the previous line
     so that md_assemble only aligns labels on the current line.  */
  last_label_seen = NULL;
}
7413
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember the label so md_assemble can re-align it if the next
     instruction forces alignment.  */
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
7421
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
7428
7429 int
7430 aarch64_data_in_code (void)
7431 {
7432 if (startswith (input_line_pointer + 1, "data:"))
7433 {
7434 *input_line_pointer = '/';
7435 input_line_pointer += 5;
7436 *input_line_pointer = 0;
7437 return 1;
7438 }
7439
7440 return 0;
7441 }
7442
/* Strip a trailing "/data" marker from NAME in place, if present and if
   the remaining prefix would be non-empty.  Return NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
7453 \f
7454 /* Table of all register names defined by default. The user can
7455 define additional names with .req. Note that all register names
7456 should appear in both upper and lowercase variants. Some registers
7457 also have mixed-case names. */
7458
/* REGDEF expands to a reg_entry initialiser: string name, register
   number, register type and a final flag (true here, false in
   REGDEF_ALIAS — presumably marking canonical vs alias names; confirm
   against the reg_entry declaration).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
/* REGNUM pastes prefix and number to form names like "x0".."x30".  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* REGSET16/REGSET31/REGSET generate 16-, 31- and 32-entry runs.  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  Only 0..30 — register 31 is SP or ZR depending
     on context and is entered separately below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  /* ABI-role aliases for x16/x17/x29/x30.  */
  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  /* Zero registers share number 31 with SP but carry distinct types.  */
  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  Only p0..p15 exist, hence REGSET16.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

/* The helper macros are local to this table.  */
#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7523
/* Each letter stands for one condition flag; uppercase means the flag
   is set (1), lowercase clear (0).  B packs the four flags into the
   4-bit value N:Z:C:V (N in bit 3 ... V in bit 0).  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All 16 spellings of the nzcv flag-set operand, mapped to their packed
   4-bit values.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

/* The single-letter macros are local to the table above.  */
#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7561 \f
7562 /* MD interface: bits in the object file. */
7563
7564 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7565 for use in the a.out file, and stores them in the array pointed to by buf.
7566 This knows about the endian-ness of the target machine and does
7567 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7568 2 (short) and 4 (long) Floating numbers are put out as a series of
7569 LITTLENUMS (shorts, here at least). */
7570
7571 void
7572 md_number_to_chars (char *buf, valueT val, int n)
7573 {
7574 if (target_big_endian)
7575 number_to_chars_bigendian (buf, val, n);
7576 else
7577 number_to_chars_littleendian (buf, val, n);
7578 }
7579
7580 /* MD interface: Sections. */
7581
7582 /* Estimate the size of a frag before relaxing. Assume everything fits in
7583 4 bytes. */
7584
7585 int
7586 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
7587 {
7588 fragp->fr_var = 4;
7589 return 4;
7590 }
7591
7592 /* Round up a section size to the appropriate boundary. */
7593
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No target-specific rounding is applied to section sizes.  */
  return size;
}
7599
7600 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7601 of an rs_align_code fragment.
7602
7603 Here we fill the frag with the appropriate info for padding the
7604 output stream. The resulting frag will consist of a fixed (fr_fix)
7605 and of a repeating (fr_var) part.
7606
7607 The fixed content is always emitted before the repeating content and
7608 these two parts are used as follows in constructing the output:
7609 - the fixed part will be used to align to a valid instruction word
7610 boundary, in case that we start at a misaligned address; as no
7611 executable instruction can live at the misaligned location, we
7612 simply fill with zeros;
7613 - the variable part will be used to cover the remaining padding and
7614 we fill using the AArch64 NOP instruction.
7615
7616 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7617 enough storage space for up to 3 bytes for padding the back to a valid
7618 instruction alignment and exactly 4 bytes to store the NOP pattern. */
7619
7620 void
7621 aarch64_handle_align (fragS * fragP)
7622 {
7623 /* NOP = d503201f */
7624 /* AArch64 instructions are always little-endian. */
7625 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
7626
7627 int bytes, fix, noop_size;
7628 char *p;
7629
7630 if (fragP->fr_type != rs_align_code)
7631 return;
7632
7633 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
7634 p = fragP->fr_literal + fragP->fr_fix;
7635
7636 #ifdef OBJ_ELF
7637 gas_assert (fragP->tc_frag_data.recorded);
7638 #endif
7639
7640 noop_size = sizeof (aarch64_noop);
7641
7642 fix = bytes & (noop_size - 1);
7643 if (fix)
7644 {
7645 #ifdef OBJ_ELF
7646 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
7647 #endif
7648 memset (p, 0, fix);
7649 p += fix;
7650 fragP->fr_fix += fix;
7651 }
7652
7653 if (noop_size)
7654 memcpy (p, aarch64_noop, noop_size);
7655 fragP->fr_var = noop_size;
7656 }
7657
7658 /* Perform target specific initialisation of a frag.
7659 Note - despite the name this initialisation is not done when the frag
7660 is created, but only when its type is assigned. A frag can be created
7661 and used a long time before its type is set, so beware of assuming that
7662 this initialisation is performed first. */
7663
7664 #ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag state, so there is nothing to do.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}
7670
7671 #else /* OBJ_ELF is defined. */
7672 void
7673 aarch64_init_frag (fragS * fragP, int max_chars)
7674 {
7675 /* Record a mapping symbol for alignment frags. We will delete this
7676 later if the alignment ends up empty. */
7677 if (!fragP->tc_frag_data.recorded)
7678 fragP->tc_frag_data.recorded = 1;
7679
7680 /* PR 21809: Do not set a mapping state for debug sections
7681 - it just confuses other tools. */
7682 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
7683 return;
7684
7685 switch (fragP->fr_type)
7686 {
7687 case rs_align_test:
7688 case rs_fill:
7689 mapping_state_2 (MAP_DATA, max_chars);
7690 break;
7691 case rs_align:
7692 /* PR 20364: We can get alignment frags in code sections,
7693 so do not just assume that we should use the MAP_DATA state. */
7694 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
7695 break;
7696 case rs_align_code:
7697 mapping_state_2 (MAP_INSN, max_chars);
7698 break;
7699 default:
7700 break;
7701 }
7702 }
7703 \f
7704 /* Initialize the DWARF-2 unwind information for this procedure. */
7705
void
tc_aarch64_frame_initial_instructions (void)
{
  /* On entry to a function the CFA is SP with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
7711 #endif /* OBJ_ELF */
7712
7713 /* Convert REGNAME to a DWARF-2 register number. */
7714
7715 int
7716 tc_aarch64_regname_to_dw2regnum (char *regname)
7717 {
7718 const reg_entry *reg = parse_reg (&regname);
7719 if (reg == NULL)
7720 return -1;
7721
7722 switch (reg->type)
7723 {
7724 case REG_TYPE_SP_32:
7725 case REG_TYPE_SP_64:
7726 case REG_TYPE_R_32:
7727 case REG_TYPE_R_64:
7728 return reg->number;
7729
7730 case REG_TYPE_FP_B:
7731 case REG_TYPE_FP_H:
7732 case REG_TYPE_FP_S:
7733 case REG_TYPE_FP_D:
7734 case REG_TYPE_FP_Q:
7735 return reg->number + 64;
7736
7737 default:
7738 break;
7739 }
7740 return -1;
7741 }
7742
7743 /* Implement DWARF2_ADDR_SIZE. */
7744
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses even though the architecture is 64-bit.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7754
7755 /* MD interface: Symbol and relocation handling. */
7756
7757 /* Return the address within the segment that a PC-relative fixup is
7758 relative to. For AArch64 PC-relative fixups applied to instructions
7759 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7760
7761 long
7762 md_pcrel_from_section (fixS * fixP, segT seg)
7763 {
7764 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7765
7766 /* If this is pc-relative and we are going to emit a relocation
7767 then we just want to put out any pipeline compensation that the linker
7768 will need. Otherwise we want to use the calculated base. */
7769 if (fixP->fx_pcrel
7770 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7771 || aarch64_force_relocation (fixP)))
7772 base = 0;
7773
7774 /* AArch64 should be consistent for all pc-relative relocations. */
7775 return base + AARCH64_PCREL_OFFSET;
7776 }
7777
7778 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
7779 Otherwise we have no need to default values of symbols. */
7780
7781 symbolS *
7782 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
7783 {
7784 #ifdef OBJ_ELF
7785 if (name[0] == '_' && name[1] == 'G'
7786 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
7787 {
7788 if (!GOT_symbol)
7789 {
7790 if (symbol_find (name))
7791 as_bad (_("GOT already in the symbol table"));
7792
7793 GOT_symbol = symbol_new (name, undefined_section,
7794 &zero_address_frag, 0);
7795 }
7796
7797 return GOT_symbol;
7798 }
7799 #endif
7800
7801 return 0;
7802 }
7803
7804 /* Return non-zero if the indicated VALUE has overflowed the maximum
7805 range expressible by a unsigned number with the indicated number of
7806 BITS. */
7807
7808 static bool
7809 unsigned_overflow (valueT value, unsigned bits)
7810 {
7811 valueT lim;
7812 if (bits >= sizeof (valueT) * 8)
7813 return false;
7814 lim = (valueT) 1 << bits;
7815 return (value >= lim);
7816 }
7817
7818
7819 /* Return non-zero if the indicated VALUE has overflowed the maximum
7820 range expressible by an signed number with the indicated number of
7821 BITS. */
7822
7823 static bool
7824 signed_overflow (offsetT value, unsigned bits)
7825 {
7826 offsetT lim;
7827 if (bits >= sizeof (offsetT) * 8)
7828 return false;
7829 lim = (offsetT) 1 << (bits - 1);
7830 return (value < -lim || value >= lim);
7831 }
7832
7833 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7834 unsigned immediate offset load/store instruction, try to encode it as
7835 an unscaled, 9-bit, signed immediate offset load/store instruction.
7836 Return TRUE if it is successful; otherwise return FALSE.
7837
7838 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7839 in response to the standard LDR/STR mnemonics when the immediate offset is
7840 unambiguous, i.e. when it is negative or unaligned. */
7841
7842 static bool
7843 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
7844 {
7845 int idx;
7846 enum aarch64_op new_op;
7847 const aarch64_opcode *new_opcode;
7848
7849 gas_assert (instr->opcode->iclass == ldst_pos);
7850
7851 switch (instr->opcode->op)
7852 {
7853 case OP_LDRB_POS:new_op = OP_LDURB; break;
7854 case OP_STRB_POS: new_op = OP_STURB; break;
7855 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
7856 case OP_LDRH_POS: new_op = OP_LDURH; break;
7857 case OP_STRH_POS: new_op = OP_STURH; break;
7858 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
7859 case OP_LDR_POS: new_op = OP_LDUR; break;
7860 case OP_STR_POS: new_op = OP_STUR; break;
7861 case OP_LDRF_POS: new_op = OP_LDURV; break;
7862 case OP_STRF_POS: new_op = OP_STURV; break;
7863 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
7864 case OP_PRFM_POS: new_op = OP_PRFUM; break;
7865 default: new_op = OP_NIL; break;
7866 }
7867
7868 if (new_op == OP_NIL)
7869 return false;
7870
7871 new_opcode = aarch64_get_opcode (new_op);
7872 gas_assert (new_opcode != NULL);
7873
7874 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
7875 instr->opcode->op, new_opcode->op);
7876
7877 aarch64_replace_opcode (instr, new_opcode);
7878
7879 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
7880 qualifier matching may fail because the out-of-date qualifier will
7881 prevent the operand being updated with a new and correct qualifier. */
7882 idx = aarch64_operand_index (instr->opcode->operands,
7883 AARCH64_OPND_ADDR_SIMM9);
7884 gas_assert (idx == 1);
7885 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
7886
7887 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
7888
7889 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
7890 insn_sequence))
7891 return false;
7892
7893 return true;
7894 }
7895
7896 /* Called by fix_insn to fix a MOV immediate alias instruction.
7897
7898 Operand for a generic move immediate instruction, which is an alias
7899 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
7900 a 32-bit/64-bit immediate value into general register. An assembler error
7901 shall result if the immediate cannot be created by a single one of these
7902 instructions. If there is a choice, then to ensure reversability an
7903 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
7904
7905 static void
7906 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
7907 {
7908 const aarch64_opcode *opcode;
7909
7910 /* Need to check if the destination is SP/ZR. The check has to be done
7911 before any aarch64_replace_opcode. */
7912 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
7913 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
7914
7915 instr->operands[1].imm.value = value;
7916 instr->operands[1].skip = 0;
7917
7918 if (try_mov_wide_p)
7919 {
7920 /* Try the MOVZ alias. */
7921 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
7922 aarch64_replace_opcode (instr, opcode);
7923 if (aarch64_opcode_encode (instr->opcode, instr,
7924 &instr->value, NULL, NULL, insn_sequence))
7925 {
7926 put_aarch64_insn (buf, instr->value);
7927 return;
7928 }
7929 /* Try the MOVK alias. */
7930 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
7931 aarch64_replace_opcode (instr, opcode);
7932 if (aarch64_opcode_encode (instr->opcode, instr,
7933 &instr->value, NULL, NULL, insn_sequence))
7934 {
7935 put_aarch64_insn (buf, instr->value);
7936 return;
7937 }
7938 }
7939
7940 if (try_mov_bitmask_p)
7941 {
7942 /* Try the ORR alias. */
7943 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
7944 aarch64_replace_opcode (instr, opcode);
7945 if (aarch64_opcode_encode (instr->opcode, instr,
7946 &instr->value, NULL, NULL, insn_sequence))
7947 {
7948 put_aarch64_insn (buf, instr->value);
7949 return;
7950 }
7951 }
7952
7953 as_bad_where (fixP->fx_file, fixP->fx_line,
7954 _("immediate cannot be moved by a single instruction"));
7955 }
7956
7957 /* An instruction operand which is immediate related may have symbol used
7958 in the assembly, e.g.
7959
7960 mov w0, u32
7961 .set u32, 0x00ffff00
7962
7963 At the time when the assembly instruction is parsed, a referenced symbol,
7964 like 'u32' in the above example may not have been seen; a fixS is created
7965 in such a case and is handled here after symbols have been resolved.
7966 Instruction is fixed up with VALUE using the information in *FIXP plus
7967 extra information in FLAGS.
7968
7969 This function is called by md_apply_fix to fix up instructions that need
7970 a fix-up described above but does not involve any linker-time relocation. */
7971
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  /* Dispatch on the kind of operand the fixup resolves; each case
     patches the already-emitted instruction word in BUF in place.  */
  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate for SVC/HVC/SMC/BRK/UDF and friends.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with an add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm   imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm   imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm   imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm   imm6   Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2   221111111111
		  1  098|7654|3 2   109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* A negative immediate flips ADD <-> SUB.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit (the immediate
	     field takes imm12 optionally shifted left by 12).  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate: re-encode the whole instruction so the
	 validity of VALUE as a logical immediate is checked.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate for MOVZ/MOVN/MOVK.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up; if the scaled form cannot take the offset, fall
	 back to the equivalent unscaled (LDUR/STUR) instruction.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8135
8136 /* Apply a fixup (fixP) to segment data, once it has been determined
8137 by our caller that we have all the info we need to fix it up.
8138
8139 Parameter valP is the pointer to the value of the bits. */
8140
void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    /* Plain data relocations: write the resolved value directly when
       the fixup is done or the target uses REL (in-place) addends.  */
    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
         in fact we generate an error because we do not have a reloc
         for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* LDR (literal): 19-bit word offset, i.e. +/-1MiB byte range.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* ADR: 21-bit signed byte offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* B.cond / CBZ / CBNZ: 19-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* TBZ / TBNZ: 14-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* B / BL: 26-bit word offset, i.e. +/-128MiB byte range.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVZ/MOVN/MOVK group relocations: SCALE selects which 16-bit
       slice of the value is inserted.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Resolve the generic reloc into the ABI-specific 32/64-bit form.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Resolve the generic reloc into the ABI-specific 32/64-bit form.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* All remaining TLS relocations: mark the symbol thread-local and
       leave the relocation for the linker to resolve.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      /* Marker relocations for TLS descriptor sequences; nothing to patch.  */
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated struct aarch64_inst.
     N.B. currently only a very limited number of fix-up types actually
     use this field, so the impact on performance should be minimal.  */
  free (fixP->tc_fix_data.inst);

  return;
}
8522
8523 /* Translate internal representation of relocation info to BFD target
8524 format. */
8525
8526 arelent *
8527 tc_gen_reloc (asection * section, fixS * fixp)
8528 {
8529 arelent *reloc;
8530 bfd_reloc_code_real_type code;
8531
8532 reloc = XNEW (arelent);
8533
8534 reloc->sym_ptr_ptr = XNEW (asymbol *);
8535 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8536 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8537
8538 if (fixp->fx_pcrel)
8539 {
8540 if (section->use_rela_p)
8541 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8542 else
8543 fixp->fx_offset = reloc->address;
8544 }
8545 reloc->addend = fixp->fx_offset;
8546
8547 code = fixp->fx_r_type;
8548 switch (code)
8549 {
8550 case BFD_RELOC_16:
8551 if (fixp->fx_pcrel)
8552 code = BFD_RELOC_16_PCREL;
8553 break;
8554
8555 case BFD_RELOC_32:
8556 if (fixp->fx_pcrel)
8557 code = BFD_RELOC_32_PCREL;
8558 break;
8559
8560 case BFD_RELOC_64:
8561 if (fixp->fx_pcrel)
8562 code = BFD_RELOC_64_PCREL;
8563 break;
8564
8565 default:
8566 break;
8567 }
8568
8569 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8570 if (reloc->howto == NULL)
8571 {
8572 as_bad_where (fixp->fx_file, fixp->fx_line,
8573 _
8574 ("cannot represent %s relocation in this object file format"),
8575 bfd_get_reloc_code_name (code));
8576 return NULL;
8577 }
8578
8579 return reloc;
8580 }
8581
8582 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8583
8584 void
8585 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8586 {
8587 bfd_reloc_code_real_type type;
8588 int pcrel = 0;
8589
8590 /* Pick a reloc.
8591 FIXME: @@ Should look at CPU word size. */
8592 switch (size)
8593 {
8594 case 1:
8595 type = BFD_RELOC_8;
8596 break;
8597 case 2:
8598 type = BFD_RELOC_16;
8599 break;
8600 case 4:
8601 type = BFD_RELOC_32;
8602 break;
8603 case 8:
8604 type = BFD_RELOC_64;
8605 break;
8606 default:
8607 as_bad (_("cannot do %u-byte relocation"), size);
8608 type = BFD_RELOC_UNUSED;
8609 break;
8610 }
8611
8612 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8613 }
8614
8615 #ifdef OBJ_ELF
8616
8617 /* Implement md_after_parse_args. This is the earliest time we need to decide
8618 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8619
8620 void
8621 aarch64_after_parse_args (void)
8622 {
8623 if (aarch64_abi != AARCH64_ABI_NONE)
8624 return;
8625
8626 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8627 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8628 aarch64_abi = AARCH64_ABI_ILP32;
8629 else
8630 aarch64_abi = AARCH64_ABI_LP64;
8631 }
8632
8633 const char *
8634 elf64_aarch64_target_format (void)
8635 {
8636 #ifdef TE_CLOUDABI
8637 /* FIXME: What to do for ilp32_p ? */
8638 if (target_big_endian)
8639 return "elf64-bigaarch64-cloudabi";
8640 else
8641 return "elf64-littleaarch64-cloudabi";
8642 #else
8643 if (target_big_endian)
8644 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8645 else
8646 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8647 #endif
8648 }
8649
/* Per-symbol write-time hook: defer entirely to the generic ELF
   symbol frobbing.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8655 #endif
8656
8657 /* MD interface: Finalization. */
8658
8659 /* A good place to do this, although this was probably not intended
8660 for this kind of use. We need to dump the literal pool before
8661 references are made to a null symbol pointer. */
8662
8663 void
8664 aarch64_cleanup (void)
8665 {
8666 literal_pool *pool;
8667
8668 for (pool = list_of_pools; pool; pool = pool->next)
8669 {
8670 /* Put it at the end of the relevant section. */
8671 subseg_set (pool->section, pool->sub_section);
8672 s_ltorg (0);
8673 }
8674 }
8675
8676 #ifdef OBJ_ELF
8677 /* Remove any excess mapping symbols generated for alignment frags in
8678 SEC. We may have created a mapping symbol before a zero byte
8679 alignment; remove it if there's a mapping symbol after the
8680 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag in the section; LAST_MAP is the last mapping
     symbol created inside the frag (if any).  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the following frag(s);
	 scan forward to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8740 #endif
8741
/* Adjust the symbol table.  Called late in assembly, after all input
   has been processed.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8754
/* Insert KEY/VALUE into TABLE.  The final argument 0 is the replace
   flag of str_hash_insert — presumably an existing entry for KEY is
   kept rather than overwritten; confirm against hash.c.  The return
   value is deliberately ignored.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
8760
/* Like checked_hash_insert, but assert first that KEY is shorter than
   AARCH64_MAX_SYSREG_NAME_LEN.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
8767
8768 static void
8769 fill_instruction_hash_table (void)
8770 {
8771 const aarch64_opcode *opcode = aarch64_opcode_table;
8772
8773 while (opcode->name != NULL)
8774 {
8775 templates *templ, *new_templ;
8776 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
8777
8778 new_templ = XNEW (templates);
8779 new_templ->opcode = opcode;
8780 new_templ->next = NULL;
8781
8782 if (!templ)
8783 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8784 else
8785 {
8786 new_templ->next = templ->next;
8787 templ->next = new_templ;
8788 }
8789 ++opcode;
8790 }
8791 }
8792
8793 static inline void
8794 convert_to_upper (char *dst, const char *src, size_t num)
8795 {
8796 unsigned int i;
8797 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8798 *dst = TOUPPER (*src);
8799 *dst = '\0';
8800 }
8801
8802 /* Assume STR point to a lower-case string, allocate, convert and return
8803 the corresponding upper-case string. */
8804 static inline const char*
8805 get_upper_str (const char *str)
8806 {
8807 char *ret;
8808 size_t len = strlen (str);
8809 ret = XNEWVEC (char, len + 1);
8810 convert_to_upper (ret, str, len);
8811 return ret;
8812 }
8813
8814 /* MD interface: Initialization. */
8815
/* Build every lookup table the parser needs (mnemonics, system
   registers, condition codes, shift modifiers, barrier/prefetch/hint
   options), then resolve the CPU variant from the command-line
   options and record the machine type in BFD.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the string hash tables used below.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend modifiers, condition codes, barrier and prefetch
     operands are accepted in either case, so each name is also
     inserted upper-cased.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.
     -mcpu takes precedence over -march; with neither, fall back to
     the configured default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
8974
/* Command line processing.  */

const char *md_shortopts = "m:";

/* -EB/-EL are only offered for endiannesses the target actually
   supports; a bi-endian build gets both.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
9001
/* Simple flag-style options handled by md_parse_option: matching an
   entry stores VALUE into *VAR.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9024
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Each entry pairs the -mcpu= name with its base
   architecture plus any extra feature bits the core implements.
   Terminated by a NULL-name sentinel.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			    AARCH64_FEATURE_PROFILE
			    | AARCH64_FEATURE_CVADP
			    | AARCH64_FEATURE_SVE
			    | AARCH64_FEATURE_SSBS
			    | AARCH64_FEATURE_RNG
			    | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_BFLOAT16
			    | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9175
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  Terminated by a NULL-name sentinel.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {NULL, AARCH64_ARCH_NONE}
};
9197
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* The "+ext"/"+noext" extension names accepted after a cpu or arch
   name.  REQUIRE lists the features an extension depends on; the
   enable/disable closures below propagate these dependencies.
   Terminated by a NULL-name sentinel.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_ARCH_NONE},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_ARCH_NONE},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
9292
/* Prefix-style options (e.g. -mcpu=...) dispatched to a sub-option
   parser in md_parse_option.  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
9300
9301 /* Transitive closure of features depending on set. */
9302 static aarch64_feature_set
9303 aarch64_feature_disable_set (aarch64_feature_set set)
9304 {
9305 const struct aarch64_option_cpu_value_table *opt;
9306 aarch64_feature_set prev = 0;
9307
9308 while (prev != set) {
9309 prev = set;
9310 for (opt = aarch64_features; opt->name != NULL; opt++)
9311 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9312 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9313 }
9314 return set;
9315 }
9316
9317 /* Transitive closure of dependencies of set. */
9318 static aarch64_feature_set
9319 aarch64_feature_enable_set (aarch64_feature_set set)
9320 {
9321 const struct aarch64_option_cpu_value_table *opt;
9322 aarch64_feature_set prev = 0;
9323
9324 while (prev != set) {
9325 prev = set;
9326 for (opt = aarch64_features; opt->name != NULL; opt++)
9327 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9328 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9329 }
9330 return set;
9331 }
9332
9333 static int
9334 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9335 bool ext_only)
9336 {
9337 /* We insist on extensions being added before being removed. We achieve
9338 this by using the ADDING_VALUE variable to indicate whether we are
9339 adding an extension (1) or removing it (0) and only allowing it to
9340 change in the order -1 -> 1 -> 0. */
9341 int adding_value = -1;
9342 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9343
9344 /* Copy the feature set, so that we can modify it. */
9345 *ext_set = **opt_p;
9346 *opt_p = ext_set;
9347
9348 while (str != NULL && *str != 0)
9349 {
9350 const struct aarch64_option_cpu_value_table *opt;
9351 const char *ext = NULL;
9352 int optlen;
9353
9354 if (!ext_only)
9355 {
9356 if (*str != '+')
9357 {
9358 as_bad (_("invalid architectural extension"));
9359 return 0;
9360 }
9361
9362 ext = strchr (++str, '+');
9363 }
9364
9365 if (ext != NULL)
9366 optlen = ext - str;
9367 else
9368 optlen = strlen (str);
9369
9370 if (optlen >= 2 && startswith (str, "no"))
9371 {
9372 if (adding_value != 0)
9373 adding_value = 0;
9374 optlen -= 2;
9375 str += 2;
9376 }
9377 else if (optlen > 0)
9378 {
9379 if (adding_value == -1)
9380 adding_value = 1;
9381 else if (adding_value != 1)
9382 {
9383 as_bad (_("must specify extensions to add before specifying "
9384 "those to remove"));
9385 return false;
9386 }
9387 }
9388
9389 if (optlen == 0)
9390 {
9391 as_bad (_("missing architectural extension"));
9392 return 0;
9393 }
9394
9395 gas_assert (adding_value != -1);
9396
9397 for (opt = aarch64_features; opt->name != NULL; opt++)
9398 if (strncmp (opt->name, str, optlen) == 0)
9399 {
9400 aarch64_feature_set set;
9401
9402 /* Add or remove the extension. */
9403 if (adding_value)
9404 {
9405 set = aarch64_feature_enable_set (opt->value);
9406 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9407 }
9408 else
9409 {
9410 set = aarch64_feature_disable_set (opt->value);
9411 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9412 }
9413 break;
9414 }
9415
9416 if (opt->name == NULL)
9417 {
9418 as_bad (_("unknown architectural extension `%s'"), str);
9419 return 0;
9420 }
9421
9422 str = ext;
9423 };
9424
9425 return 1;
9426 }
9427
9428 static int
9429 aarch64_parse_cpu (const char *str)
9430 {
9431 const struct aarch64_cpu_option_table *opt;
9432 const char *ext = strchr (str, '+');
9433 size_t optlen;
9434
9435 if (ext != NULL)
9436 optlen = ext - str;
9437 else
9438 optlen = strlen (str);
9439
9440 if (optlen == 0)
9441 {
9442 as_bad (_("missing cpu name `%s'"), str);
9443 return 0;
9444 }
9445
9446 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9447 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9448 {
9449 mcpu_cpu_opt = &opt->value;
9450 if (ext != NULL)
9451 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
9452
9453 return 1;
9454 }
9455
9456 as_bad (_("unknown cpu `%s'"), str);
9457 return 0;
9458 }
9459
9460 static int
9461 aarch64_parse_arch (const char *str)
9462 {
9463 const struct aarch64_arch_option_table *opt;
9464 const char *ext = strchr (str, '+');
9465 size_t optlen;
9466
9467 if (ext != NULL)
9468 optlen = ext - str;
9469 else
9470 optlen = strlen (str);
9471
9472 if (optlen == 0)
9473 {
9474 as_bad (_("missing architecture name `%s'"), str);
9475 return 0;
9476 }
9477
9478 for (opt = aarch64_archs; opt->name != NULL; opt++)
9479 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9480 {
9481 march_cpu_opt = &opt->value;
9482 if (ext != NULL)
9483 return aarch64_parse_features (ext, &march_cpu_opt, false);
9484
9485 return 1;
9486 }
9487
9488 as_bad (_("unknown architecture `%s'\n"), str);
9489 return 0;
9490 }
9491
/* ABIs.  Mapping from -mabi= names to internal ABI enumerators.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32",	AARCH64_ABI_ILP32},
  {"lp64",	AARCH64_ABI_LP64},
};
9503
9504 static int
9505 aarch64_parse_abi (const char *str)
9506 {
9507 unsigned int i;
9508
9509 if (str[0] == '\0')
9510 {
9511 as_bad (_("missing abi name `%s'"), str);
9512 return 0;
9513 }
9514
9515 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9516 if (strcmp (str, aarch64_abis[i].name) == 0)
9517 {
9518 aarch64_abi = aarch64_abis[i].value;
9519 return 1;
9520 }
9521
9522 as_bad (_("unknown abi `%s'\n"), str);
9523 return 0;
9524 }
9525
/* The prefix options and their sub-option parsers; -mabi= only
   exists for ELF targets.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
9537
/* Handle command-line option C with argument ARG.  Returns 1 when
   the option was recognized (and handled), 0 otherwise so that the
   generic option machinery can report it.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple flag options; the first character of
	 the table entry is the short-option letter, the rest must
	 match ARG exactly.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the prefix options (e.g. -mcpu=...), matched on the
	 option prefix only; the remainder of ARG goes to the
	 sub-option parser.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sup-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
9604
9605 void
9606 md_show_usage (FILE * fp)
9607 {
9608 struct aarch64_option_table *opt;
9609 struct aarch64_long_option_table *lopt;
9610
9611 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9612
9613 for (opt = aarch64_opts; opt->option != NULL; opt++)
9614 if (opt->help != NULL)
9615 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9616
9617 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9618 if (lopt->help != NULL)
9619 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
9620
9621 #ifdef OPTION_EB
9622 fprintf (fp, _("\
9623 -EB assemble code for a big-endian cpu\n"));
9624 #endif
9625
9626 #ifdef OPTION_EL
9627 fprintf (fp, _("\
9628 -EL assemble code for a little-endian cpu\n"));
9629 #endif
9630 }
9631
9632 /* Parse a .cpu directive. */
9633
9634 static void
9635 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9636 {
9637 const struct aarch64_cpu_option_table *opt;
9638 char saved_char;
9639 char *name;
9640 char *ext;
9641 size_t optlen;
9642
9643 name = input_line_pointer;
9644 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9645 input_line_pointer++;
9646 saved_char = *input_line_pointer;
9647 *input_line_pointer = 0;
9648
9649 ext = strchr (name, '+');
9650
9651 if (ext != NULL)
9652 optlen = ext - name;
9653 else
9654 optlen = strlen (name);
9655
9656 /* Skip the first "all" entry. */
9657 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9658 if (strlen (opt->name) == optlen
9659 && strncmp (name, opt->name, optlen) == 0)
9660 {
9661 mcpu_cpu_opt = &opt->value;
9662 if (ext != NULL)
9663 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
9664 return;
9665
9666 cpu_variant = *mcpu_cpu_opt;
9667
9668 *input_line_pointer = saved_char;
9669 demand_empty_rest_of_line ();
9670 return;
9671 }
9672 as_bad (_("unknown cpu `%s'"), name);
9673 *input_line_pointer = saved_char;
9674 ignore_rest_of_line ();
9675 }
9676
9677
9678 /* Parse a .arch directive. */
9679
9680 static void
9681 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9682 {
9683 const struct aarch64_arch_option_table *opt;
9684 char saved_char;
9685 char *name;
9686 char *ext;
9687 size_t optlen;
9688
9689 name = input_line_pointer;
9690 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9691 input_line_pointer++;
9692 saved_char = *input_line_pointer;
9693 *input_line_pointer = 0;
9694
9695 ext = strchr (name, '+');
9696
9697 if (ext != NULL)
9698 optlen = ext - name;
9699 else
9700 optlen = strlen (name);
9701
9702 /* Skip the first "all" entry. */
9703 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9704 if (strlen (opt->name) == optlen
9705 && strncmp (name, opt->name, optlen) == 0)
9706 {
9707 mcpu_cpu_opt = &opt->value;
9708 if (ext != NULL)
9709 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
9710 return;
9711
9712 cpu_variant = *mcpu_cpu_opt;
9713
9714 *input_line_pointer = saved_char;
9715 demand_empty_rest_of_line ();
9716 return;
9717 }
9718
9719 as_bad (_("unknown architecture `%s'\n"), name);
9720 *input_line_pointer = saved_char;
9721 ignore_rest_of_line ();
9722 }
9723
9724 /* Parse a .arch_extension directive. */
9725
9726 static void
9727 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9728 {
9729 char saved_char;
9730 char *ext = input_line_pointer;;
9731
9732 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9733 input_line_pointer++;
9734 saved_char = *input_line_pointer;
9735 *input_line_pointer = 0;
9736
9737 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
9738 return;
9739
9740 cpu_variant = *mcpu_cpu_opt;
9741
9742 *input_line_pointer = saved_char;
9743 demand_empty_rest_of_line ();
9744 }
9745
/* Copy symbol information.  Propagates the AArch64-private symbol flag
   word (AARCH64_GET_FLAG) from SRC to DEST when a symbol is cloned or
   aliased, so target-specific annotations are not lost.  NOTE(review):
   the exact contents of the flag word are defined by AARCH64_GET_FLAG
   elsewhere — confirm there for what is being copied.  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
9753
9754 #ifdef OBJ_ELF
9755 /* Same as elf_copy_symbol_attributes, but without copying st_other.
9756 This is needed so AArch64 specific st_other values can be independently
9757 specified for an IFUNC resolver (that is called by the dynamic linker)
9758 and the symbol it resolves (aliased to the resolver). In particular,
9759 if a function symbol has special st_other value set via directives,
9760 then attaching an IFUNC resolver to that symbol should not override
9761 the st_other setting. Requiring the directive on the IFUNC resolver
9762 symbol would be unexpected and problematic in C code, where the two
9763 symbols appear as two independent function declarations. */
9764
9765 void
9766 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
9767 {
9768 struct elf_obj_sy *srcelf = symbol_get_obj (src);
9769 struct elf_obj_sy *destelf = symbol_get_obj (dest);
9770 if (srcelf->size)
9771 {
9772 if (destelf->size == NULL)
9773 destelf->size = XNEW (expressionS);
9774 *destelf->size = *srcelf->size;
9775 }
9776 else
9777 {
9778 free (destelf->size);
9779 destelf->size = NULL;
9780 }
9781 S_SET_SIZE (dest, S_GET_SIZE (src));
9782 }
9783 #endif