]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
[AArch64][gas] Add -mcpu support for Arm Ares
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2019 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
/* Element types for vector register suffixes (e.g. the "4s" in "v0.4s"),
   plus the SVE predication qualifiers "/z" and "/m".  */
enum vector_el_type
{
  NT_invtype = -1,		/* No/invalid type parsed.  */
  NT_b,				/* Byte (8-bit) elements.  */
  NT_h,				/* Halfword (16-bit) elements.  */
  NT_s,				/* Single-word (32-bit) elements.  */
  NT_d,				/* Doubleword (64-bit) elements.  */
  NT_q,				/* Quadword (128-bit) elements.  */
  NT_zero,			/* SVE "/z" zeroing predication.  */
  NT_merge			/* SVE "/m" merging predication.  */
};
101
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
106
/* Parsed qualifier of a vector register: the element type, the number
   of elements (vector shape) and/or the element index.  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type or predication kind.  */
  unsigned char defined;	/* Mask of NTA_HAS* bits saying which of
				   the fields below are meaningful.  */
  unsigned width;		/* Number of elements; 0 when no integer
				   width was given (SVE registers).  */
  int64_t index;		/* Element index (valid if NTA_HASINDEX).  */
};
114
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
116
/* Relocation data for an instruction operand, including the GAS
   internal fixup information.  */
struct reloc
{
  bfd_reloc_code_real_type type;	/* BFD relocation code.  */
  expressionS exp;			/* Expression the fixup applies to.  */
  int pc_rel;				/* Non-zero if PC-relative.  */
  enum aarch64_opnd opnd;		/* Operand the relocation is for.  */
  uint32_t flags;			/* FIXUP_F_* flags.  */
  unsigned need_libopcodes_p : 1;	/* NOTE(review): presumably whether
					   libopcodes is needed to resolve
					   the fixup; confirm at use sites.  */
};
126
/* Internal representation of the instruction currently being assembled,
   together with any parsing error and relocation information.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
  {
    enum aarch64_operand_error_kind kind;
    const char *error;
  } parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};

typedef struct aarch64_instruction aarch64_instruction;

/* The instruction currently being assembled.  */
static aarch64_instruction inst;
148
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
/* Reset any parsing error recorded against the current instruction.  */
static inline void
clear_error (void)
{
  inst.parsing_error.kind = AARCH64_OPDE_NIL;
  inst.parsing_error.error = NULL;
}

/* Return TRUE if an error has been recorded for the current
   instruction.  */
static inline bfd_boolean
error_p (void)
{
  return inst.parsing_error.kind != AARCH64_OPDE_NIL;
}

/* Return the message of the recorded error (may be NULL when only an
   error kind was set).  */
static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}

/* Return the kind of the recorded error.  */
static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}

/* Record an error of kind KIND with message ERROR, unconditionally
   overwriting any earlier error.  */
static inline void
set_error (enum aarch64_operand_error_kind kind, const char *error)
{
  inst.parsing_error.kind = kind;
  inst.parsing_error.error = error;
}

/* Record ERROR as a recoverable error.  NOTE(review): "recoverable"
   semantics inferred from the kind's name; confirm with the error
   reporting code.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
213
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message (a NULL message selects that default text).  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}

/* Record ERROR as a syntax error, overwriting any earlier error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}

/* Record ERROR as a syntax error, but only if no error has been
   recorded yet; the earliest error is usually the most meaningful.  */
static inline void
set_first_syntax_error (const char *error)
{
  if (! error_p ())
    set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}

/* Record ERROR as a fatal syntax error, overwriting any earlier
   error.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
240 \f
241 /* Number of littlenums required to hold an extended precision number. */
242 #define MAX_LITTLENUMS 6
243
244 /* Return value for certain parsers when the parsing fails; those parsers
245 return the information of the parsed result, e.g. register number, on
246 success. */
247 #define PARSE_FAIL -1
248
249 /* This is an invalid condition code that means no conditional field is
250 present. */
251 #define COND_ALWAYS 0x10
252
/* Mapping from a barrier option name to its encoding.  */
typedef struct
{
  const char *template;
  unsigned long value;
} asm_barrier_opt;

/* Mapping from an NZCV flag combination name to its encoding.  */
typedef struct
{
  const char *template;
  uint32_t value;
} asm_nzcv;

/* Mapping from a name to a BFD relocation code.  */
struct reloc_entry
{
  char *name;
  bfd_reloc_code_real_type reloc;
};
270
271 /* Macros to define the register types and masks for the purpose
272 of parsing. */
273
274 #undef AARCH64_REG_TYPES
275 #define AARCH64_REG_TYPES \
276 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
277 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
278 BASIC_REG_TYPE(SP_32) /* wsp */ \
279 BASIC_REG_TYPE(SP_64) /* sp */ \
280 BASIC_REG_TYPE(Z_32) /* wzr */ \
281 BASIC_REG_TYPE(Z_64) /* xzr */ \
282 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
283 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
284 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
285 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
286 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
287 BASIC_REG_TYPE(VN) /* v[0-31] */ \
288 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
289 BASIC_REG_TYPE(PN) /* p[0-15] */ \
290 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
291 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
292 /* Typecheck: same, plus SVE registers. */ \
293 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
294 | REG_TYPE(ZN)) \
295 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
296 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
298 /* Typecheck: same, plus SVE registers. */ \
299 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
300 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
301 | REG_TYPE(ZN)) \
302 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
303 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
305 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
306 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
307 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
308 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
309 /* Typecheck: any [BHSDQ]P FP. */ \
310 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
311 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
312 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
313 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
314 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
315 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
316 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
317 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
318 be used for SVE instructions, since Zn and Pn are valid symbols \
319 in other contexts. */ \
320 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
321 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
322 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
323 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
324 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
325 | REG_TYPE(ZN) | REG_TYPE(PN)) \
326 /* Any integer register; used for error messages only. */ \
327 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
328 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
329 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
330 /* Pseudo type to mark the end of the enumerator sequence. */ \
331 BASIC_REG_TYPE(MAX)
332
333 #undef BASIC_REG_TYPE
334 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
335 #undef MULTI_REG_TYPE
336 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
337
338 /* Register type enumerators. */
339 typedef enum aarch64_reg_type_
340 {
341 /* A list of REG_TYPE_*. */
342 AARCH64_REG_TYPES
343 } aarch64_reg_type;
344
345 #undef BASIC_REG_TYPE
346 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
347 #undef REG_TYPE
348 #define REG_TYPE(T) (1 << REG_TYPE_##T)
349 #undef MULTI_REG_TYPE
350 #define MULTI_REG_TYPE(T,V) V,
351
/* Structure for a hash table entry for a register.  */
typedef struct
{
  const char *name;		/* Name as written in the source.  */
  unsigned char number;		/* Register number.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;	/* aarch64_reg_type value.  */
  unsigned char builtin;	/* NOTE(review): presumably non-zero for
				   predefined registers; confirm where
				   entries are created.  */
} reg_entry;
360
361 /* Values indexed by aarch64_reg_type to assist the type checking. */
362 static const unsigned reg_type_masks[] =
363 {
364 AARCH64_REG_TYPES
365 };
366
367 #undef BASIC_REG_TYPE
368 #undef REG_TYPE
369 #undef MULTI_REG_TYPE
370 #undef AARCH64_REG_TYPES
371
372 /* Diagnostics used when we don't get a register of the expected type.
373 Note: this has to synchronized with aarch64_reg_type definitions
374 above. */
375 static const char *
376 get_reg_expected_msg (aarch64_reg_type reg_type)
377 {
378 const char *msg;
379
380 switch (reg_type)
381 {
382 case REG_TYPE_R_32:
383 msg = N_("integer 32-bit register expected");
384 break;
385 case REG_TYPE_R_64:
386 msg = N_("integer 64-bit register expected");
387 break;
388 case REG_TYPE_R_N:
389 msg = N_("integer register expected");
390 break;
391 case REG_TYPE_R64_SP:
392 msg = N_("64-bit integer or SP register expected");
393 break;
394 case REG_TYPE_SVE_BASE:
395 msg = N_("base register expected");
396 break;
397 case REG_TYPE_R_Z:
398 msg = N_("integer or zero register expected");
399 break;
400 case REG_TYPE_SVE_OFFSET:
401 msg = N_("offset register expected");
402 break;
403 case REG_TYPE_R_SP:
404 msg = N_("integer or SP register expected");
405 break;
406 case REG_TYPE_R_Z_SP:
407 msg = N_("integer, zero or SP register expected");
408 break;
409 case REG_TYPE_FP_B:
410 msg = N_("8-bit SIMD scalar register expected");
411 break;
412 case REG_TYPE_FP_H:
413 msg = N_("16-bit SIMD scalar or floating-point half precision "
414 "register expected");
415 break;
416 case REG_TYPE_FP_S:
417 msg = N_("32-bit SIMD scalar or floating-point single precision "
418 "register expected");
419 break;
420 case REG_TYPE_FP_D:
421 msg = N_("64-bit SIMD scalar or floating-point double precision "
422 "register expected");
423 break;
424 case REG_TYPE_FP_Q:
425 msg = N_("128-bit SIMD scalar or floating-point quad precision "
426 "register expected");
427 break;
428 case REG_TYPE_R_Z_BHSDQ_V:
429 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
430 msg = N_("register expected");
431 break;
432 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
433 msg = N_("SIMD scalar or floating-point register expected");
434 break;
435 case REG_TYPE_VN: /* any V reg */
436 msg = N_("vector register expected");
437 break;
438 case REG_TYPE_ZN:
439 msg = N_("SVE vector register expected");
440 break;
441 case REG_TYPE_PN:
442 msg = N_("SVE predicate register expected");
443 break;
444 default:
445 as_fatal (_("invalid register type %d"), reg_type);
446 }
447 return msg;
448 }
449
450 /* Some well known registers that we refer to directly elsewhere. */
451 #define REG_SP 31
452
453 /* Instructions take 4 bytes in the object file. */
454 #define INSN_SIZE 4
455
456 static struct hash_control *aarch64_ops_hsh;
457 static struct hash_control *aarch64_cond_hsh;
458 static struct hash_control *aarch64_shift_hsh;
459 static struct hash_control *aarch64_sys_regs_hsh;
460 static struct hash_control *aarch64_pstatefield_hsh;
461 static struct hash_control *aarch64_sys_regs_ic_hsh;
462 static struct hash_control *aarch64_sys_regs_dc_hsh;
463 static struct hash_control *aarch64_sys_regs_at_hsh;
464 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
465 static struct hash_control *aarch64_sys_regs_sr_hsh;
466 static struct hash_control *aarch64_reg_hsh;
467 static struct hash_control *aarch64_barrier_opt_hsh;
468 static struct hash_control *aarch64_nzcv_hsh;
469 static struct hash_control *aarch64_pldop_hsh;
470 static struct hash_control *aarch64_hint_opt_hsh;
471
472 /* Stuff needed to resolve the label ambiguity
473 As:
474 ...
475 label: <insn>
476 may differ from:
477 ...
478 label:
479 <insn> */
480
481 static symbolS *last_label_seen;
482
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;

typedef struct literal_pool
{
  literal_expression literals[MAX_LITERAL_POOL_SIZE];	/* Pool entries.  */
  unsigned int next_free_entry;	/* Index of the first unused entry.  */
  unsigned int id;		/* Identifier of this pool.  */
  symbolS *symbol;		/* NOTE(review): presumably the label at
				   the head of the pool; confirm where
				   pools are emitted.  */
  segT section;			/* Section the pool belongs to.  */
  subsegT sub_section;		/* Sub-section the pool belongs to.  */
  int size;			/* NOTE(review): presumably the entry
				   size in bytes; confirm at use sites.  */
  struct literal_pool *next;	/* Next pool in the linked list.  */
} literal_pool;

/* Pointer to a linked list of literal pools.  */
static literal_pool *list_of_pools = NULL;
508 \f
509 /* Pure syntax. */
510
511 /* This array holds the chars that always start a comment. If the
512 pre-processor is disabled, these aren't very useful. */
513 const char comment_chars[] = "";
514
515 /* This array holds the chars that only start a comment at the beginning of
516 a line. If the line seems to have the form '# 123 filename'
517 .line and .file directives will appear in the pre-processed output. */
518 /* Note that input_file.c hand checks for '#' at the beginning of the
519 first line of the input file. This is because the compiler outputs
520 #NO_APP at the beginning of its output. */
521 /* Also note that comments like this one will always work. */
522 const char line_comment_chars[] = "#";
523
524 const char line_separator_chars[] = ";";
525
526 /* Chars that can be used to separate mant
527 from exp in floating point numbers. */
528 const char EXP_CHARS[] = "eE";
529
530 /* Chars that mean this number is a floating point constant. */
531 /* As in 0f12.456 */
532 /* or 0d1.2345e12 */
533
534 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
535
536 /* Prefix character that indicates the start of an immediate value. */
537 #define is_immediate_prefix(C) ((C) == '#')
538
539 /* Separator character handling. */
540
541 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
542
543 static inline bfd_boolean
544 skip_past_char (char **str, char c)
545 {
546 if (**str == c)
547 {
548 (*str)++;
549 return TRUE;
550 }
551 else
552 return FALSE;
553 }
554
555 #define skip_past_comma(str) skip_past_char (str, ',')
556
557 /* Arithmetic expressions (possibly involving symbols). */
558
559 static bfd_boolean in_my_get_expression_p = FALSE;
560
561 /* Third argument to my_get_expression. */
562 #define GE_NO_PREFIX 0
563 #define GE_OPT_PREFIX 1
564
/* Return TRUE if the string pointed by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   PREFIX_MODE says whether an optional '#' immediate prefix is
   accepted (GE_OPT_PREFIX) or not (GE_NO_PREFIX).  If REJECT_ABSENT
   is non-zero, an O_absent result is treated as a failure.  */

static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Run the generic GAS expression parser on *STR.  While it runs,
     in_my_get_expression_p lets md_operand flag bad expressions by
     setting X_op to O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
631
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Defer to the generic IEEE encoder, honouring target endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
642
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * exp)
{
  /* Only flag the expression as illegal when the parse was initiated
     by my_get_expression, which converts it into a syntax error.  */
  if (in_my_get_expression_p)
    exp->X_op = O_illegal;
}
651
652 /* Immediate values. */
653
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful.  Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *error)
{
  if (! error_p ())
    set_syntax_error (error);
}
665
/* Similar to first_error, but this function accepts a formatted error
   message.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  /* Only format when no earlier error is pending, mirroring
     first_error.  */
  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message must fit the buffer.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
690
691 /* Register parsing. */
692
693 /* Generic register parser which is called by other specialized
694 register parsers.
695 CCP points to what should be the beginning of a register name.
696 If it is indeed a valid register name, advance CCP over it and
697 return the reg_entry structure; otherwise return NULL.
698 It does not issue diagnostics. */
699
700 static reg_entry *
701 parse_reg (char **ccp)
702 {
703 char *start = *ccp;
704 char *p;
705 reg_entry *reg;
706
707 #ifdef REGISTER_PREFIX
708 if (*start != REGISTER_PREFIX)
709 return NULL;
710 start++;
711 #endif
712
713 p = start;
714 if (!ISALPHA (*p) || !is_name_beginner (*p))
715 return NULL;
716
717 do
718 p++;
719 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
720
721 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
722
723 if (!reg)
724 return NULL;
725
726 *ccp = p;
727 return reg;
728 }
729
/* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
   return FALSE.  Multi-register types (e.g. REG_TYPE_R_Z) accept
   several basic types via the masks in reg_type_masks.  */
static bfd_boolean
aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
{
  return (reg_type_masks[type] & (1 << reg->type)) != 0;
}
737
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit general registers, WSP and WZR.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit general registers, SP and XZR.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    /* SVE vector register: only accepted when REG_TYPE allows SVE
       registers, and it must carry a ".s" or ".d" suffix.  */
    case REG_TYPE_ZN:
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
795
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.  Accepts any 32/64-bit integer register, SP or ZR.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
807
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector/predicate registers, and suffixes without a leading
     digit, carry no element count; record a variable width of 0.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  /* Map the element size letter onto its type and size in bits.  */
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are only accepted for SVE registers or in the
	 single-element form "1q".  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* An explicit width must describe a full 64-bit or 128-bit vector,
     or one of the special short forms 2h and 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
893
894 /* *STR contains an SVE zero/merge predication suffix. Parse it into
895 *PARSED_TYPE and point *STR at the end of the suffix. */
896
897 static bfd_boolean
898 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
899 {
900 char *ptr = *str;
901
902 /* Skip '/'. */
903 gas_assert (*ptr == '/');
904 ptr++;
905 switch (TOLOWER (*ptr))
906 {
907 case 'z':
908 parsed_type->type = NT_zero;
909 break;
910 case 'm':
911 parsed_type->type = NT_merge;
912 break;
913 default:
914 if (*ptr != '\0' && *ptr != ',')
915 first_error_fmt (_("unexpected character `%c' in predication type"),
916 *ptr);
917 else
918 first_error (_("missing predication type"));
919 return FALSE;
920 }
921 parsed_type->width = 0;
922 *str = ptr + 1;
923 return TRUE;
924 }
925
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start with an empty shape description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  type = reg->type;

  /* Parse an optional ".<shape>" suffix on V/Z/P registers, or a
     "/z" or "/m" predication suffix on P registers.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register if of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* The element index must be a constant expression.  */
      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1061
1062 /* Parse register.
1063
1064 Return the register number on success; return PARSE_FAIL otherwise.
1065
1066 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1067 the register (e.g. NEON double or quad reg when either has been requested).
1068
1069 If this is a NEON vector register with additional type information, fill
1070 in the struct pointed to by VECTYPE (if non-NULL).
1071
1072 This parser does not handle register list. */
1073
1074 static int
1075 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1076 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1077 {
1078 struct vector_type_el atype;
1079 char *str = *ccp;
1080 int reg = parse_typed_reg (&str, type, rtype, &atype,
1081 /*in_reg_list= */ FALSE);
1082
1083 if (reg == PARSE_FAIL)
1084 return PARSE_FAIL;
1085
1086 if (vectype)
1087 *vectype = atype;
1088
1089 *ccp = str;
1090
1091 return reg;
1092 }
1093
1094 static inline bfd_boolean
1095 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1096 {
1097 return
1098 e1.type == e2.type
1099 && e1.defined == e2.defined
1100 && e1.width == e2.width && e1.index == e2.index;
1101 }
1102
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  /* A register list is always brace-enclosed: { v0.4s - v3.4s }.  */
  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      /* in_range is set by the loop condition below when a '-' follows
	 the previous register, i.e. we are parsing the end of a range
	 such as "v0.4s - v3.4s".  */
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;	/* Previous register starts the range.  */
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      /* If any element carries an index, a trailing [index] after the
	 closing brace is required.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* Ranges must be ascending; val_range becomes the first
	     register to add below.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* All registers in the list must have the same shape/index.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      /* Pack each register number into 5-bit fields, low field first.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Note the comma operator: when no comma is found, in_range is set to 1
     and the loop continues iff the next character is '-'.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  /* Parse the optional element index shared by the whole list,
     e.g. { v0.s, v1.s }[2].  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low 2 bits encode (count - 1); register numbers sit above them.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1263
1264 /* Directives: register aliases. */
1265
1266 static reg_entry *
1267 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1268 {
1269 reg_entry *new;
1270 const char *name;
1271
1272 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1273 {
1274 if (new->builtin)
1275 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1276 str);
1277
1278 /* Only warn about a redefinition if it's not defined as the
1279 same register. */
1280 else if (new->number != number || new->type != type)
1281 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1282
1283 return NULL;
1284 }
1285
1286 name = xstrdup (str);
1287 new = XNEW (reg_entry);
1288
1289 new->name = name;
1290 new->number = number;
1291 new->type = type;
1292 new->builtin = FALSE;
1293
1294 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1295 abort ();
1296
1297 return new;
1298 }
1299
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      /* Uppercase nbuf in place for the second alias.  */
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the uppercase variant if it differs from the name
	 as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	     foo .req r0
	     Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      /* Lowercase nbuf in place for the third alias.  */
      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1379
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  Registered in
   md_pseudo_table so that a leading ".req" gets a diagnostic instead of
   an "unknown pseudo-op" error.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1387
1388 /* The .unreq directive deletes an alias which was previously defined
1389 by .req. For example:
1390
1391 my_alias .req r11
1392 .unreq my_alias */
1393
1394 static void
1395 s_unreq (int a ATTRIBUTE_UNUSED)
1396 {
1397 char *name;
1398 char saved_char;
1399
1400 name = input_line_pointer;
1401
1402 while (*input_line_pointer != 0
1403 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1404 ++input_line_pointer;
1405
1406 saved_char = *input_line_pointer;
1407 *input_line_pointer = 0;
1408
1409 if (!*name)
1410 as_bad (_("invalid syntax for .unreq directive"));
1411 else
1412 {
1413 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1414
1415 if (!reg)
1416 as_bad (_("unknown register alias '%s'"), name);
1417 else if (reg->builtin)
1418 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1419 name);
1420 else
1421 {
1422 char *p;
1423 char *nbuf;
1424
1425 hash_delete (aarch64_reg_hsh, name, FALSE);
1426 free ((char *) reg->name);
1427 free (reg);
1428
1429 /* Also locate the all upper case and all lower case versions.
1430 Do not complain if we cannot find one or the other as it
1431 was probably deleted above. */
1432
1433 nbuf = strdup (name);
1434 for (p = nbuf; *p; p++)
1435 *p = TOUPPER (*p);
1436 reg = hash_find (aarch64_reg_hsh, nbuf);
1437 if (reg)
1438 {
1439 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1440 free ((char *) reg->name);
1441 free (reg);
1442 }
1443
1444 for (p = nbuf; *p; p++)
1445 *p = TOLOWER (*p);
1446 reg = hash_find (aarch64_reg_hsh, nbuf);
1447 if (reg)
1448 {
1449 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1450 free ((char *) reg->name);
1451 free (reg);
1452 }
1453
1454 free (nbuf);
1455 }
1456 }
1457
1458 *input_line_pointer = saved_char;
1459 demand_empty_rest_of_line ();
1460 }
1461
1462 /* Directives: Instruction set selection. */
1463
1464 #ifdef OBJ_ELF
1465 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1466 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1467 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1468 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1469
/* Create a new mapping symbol for the transition to STATE at offset
   VALUE within FRAG.  "$x" marks the start of A64 code, "$d" the start
   of data, per the AAELF64 mapping-symbol convention.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Displaced mapping symbols are removed from the symbol chain
	     entirely, not just overwritten.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1525
1526 /* We must sometimes convert a region marked as code to data during
1527 code alignment, if an odd number of bytes have to be padded. The
1528 code mapping symbol is pushed to an aligned address. */
1529
1530 static void
1531 insert_data_mapping_symbol (enum mstate state,
1532 valueT value, fragS * frag, offsetT bytes)
1533 {
1534 /* If there was already a mapping symbol, remove it. */
1535 if (frag->tc_frag_data.last_map != NULL
1536 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1537 frag->fr_address + value)
1538 {
1539 symbolS *symp = frag->tc_frag_data.last_map;
1540
1541 if (value == 0)
1542 {
1543 know (frag->tc_frag_data.first_map == symp);
1544 frag->tc_frag_data.first_map = NULL;
1545 }
1546 frag->tc_frag_data.last_map = NULL;
1547 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1548 }
1549
1550 make_mapping_symbol (MAP_DATA, value, frag);
1551 make_mapping_symbol (state, value + bytes, frag);
1552 }
1553
1554 static void mapping_state_2 (enum mstate state, int max_chars);
1555
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  Handles the initial transition
   out of MAP_UNDEFINED specially; the actual symbol emission is done
   by mapping_state_2.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* A leading "$d" covers whatever was emitted before the first
	 instruction of the section.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1597
1598 /* Same as mapping_state, but MAX_CHARS bytes have already been
1599 allocated. Put the mapping symbol that far back. */
1600
1601 static void
1602 mapping_state_2 (enum mstate state, int max_chars)
1603 {
1604 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1605
1606 if (!SEG_NORMAL (now_seg))
1607 return;
1608
1609 if (mapstate == state)
1610 /* The mapping symbol has already been emitted.
1611 There is nothing else to do. */
1612 return;
1613
1614 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1615 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1616 }
1617 #else
1618 #define mapping_state(x) /* nothing */
1619 #define mapping_state_2(x, y) /* nothing */
1620 #endif
1621
1622 /* Directives: sectioning and alignment. */
1623
/* Implement the .bss directive: switch to the bss section.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1633
/* Implement the .even directive: align the output to a 2-byte
   boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1645
1646 /* Directives: Literal pools. */
1647
1648 static literal_pool *
1649 find_literal_pool (int size)
1650 {
1651 literal_pool *pool;
1652
1653 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1654 {
1655 if (pool->section == now_seg
1656 && pool->sub_section == now_subseg && pool->size == size)
1657 break;
1658 }
1659
1660 return pool;
1661 }
1662
1663 static literal_pool *
1664 find_or_make_literal_pool (int size)
1665 {
1666 /* Next literal pool ID number. */
1667 static unsigned int latest_pool_num = 1;
1668 literal_pool *pool;
1669
1670 pool = find_literal_pool (size);
1671
1672 if (pool == NULL)
1673 {
1674 /* Create a new pool. */
1675 pool = XNEW (literal_pool);
1676 if (!pool)
1677 return NULL;
1678
1679 /* Currently we always put the literal pool in the current text
1680 section. If we were generating "small" model code where we
1681 knew that all code and initialised data was within 1MB then
1682 we could output literals to mergeable, read-only data
1683 sections. */
1684
1685 pool->next_free_entry = 0;
1686 pool->section = now_seg;
1687 pool->sub_section = now_subseg;
1688 pool->size = size;
1689 pool->next = list_of_pools;
1690 pool->symbol = NULL;
1691
1692 /* Add it to the list. */
1693 list_of_pools = pool;
1694 }
1695
1696 /* New pools, and emptied pools, will have a NULL symbol. */
1697 if (pool->symbol == NULL)
1698 {
1699 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1700 (valueT) 0, &zero_address_frag);
1701 pool->id = latest_pool_num++;
1702 }
1703
1704 /* Done. */
1705 return pool;
1706 }
1707
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  On success *EXP is
   rewritten to refer to the pool label plus the entry's byte offset.  */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool, so identical
     literals share one entry.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match on symbol(s) and addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Redirect the expression at the pool entry's address: pool label
     plus the entry's offset in bytes.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1767
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Give SYMBOLP the name NAME (which is copied), place it in SEGMENT at
   value VALU within FRAG, and append it to the symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy the name onto the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1818
1819
/* Implement the .ltorg / .pool directive: dump every non-empty literal
   pool (of each supported entry size) for the current section into the
   output at the current position, then mark the pools empty.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Entry sizes 4, 8 and 16 bytes (align = 2..4).  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 byte keeps this label out of the namespace reachable
	 from user source.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  /* The saved bignum copy is no longer needed once emitted.  */
	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1878
1879 #ifdef OBJ_ELF
1880 /* Forward declarations for functions below, in the MD interface
1881 section. */
1882 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1883 static struct reloc_table_entry * find_reloc_table_entry (char **);
1884
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Implement .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8):
   emit a comma-separated list of NBYTES-wide data values.  A
   ":reloc_suffix:" on a symbol operand is recognized but currently
   rejected with a diagnostic.  */
static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:suffix:" relocation specifier.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1940
1941 #endif /* OBJ_ELF */
1942
/* Output a 32-bit word, but mark as an instruction.  Implements the
   .inst directive: each comma-separated constant is emitted as one
   4-byte instruction word.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instruction encodings are always little-endian on AArch64, so
	 pre-swap the word when assembling for a big-endian target.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1995
1996 static void
1997 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
1998 {
1999 demand_empty_rest_of_line ();
2000 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2001 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2002 }
2003
2004 #ifdef OBJ_ELF
/* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.
   Implements the .tlsdescadd directive: the fix is attached at the
   current output position, so the annotated ADD must follow
   immediately.  */

static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix and the following instruction share a
     frag.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2019
/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.
   Implements the .tlsdesccall directive.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2039
/* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.
   Implements the .tlsdescldr directive: the fix is attached at the
   current output position, so the annotated LDR must follow
   immediately.  */

static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix and the following instruction share a
     frag.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2054 #endif /* OBJ_ELF */
2055
2056 static void s_aarch64_arch (int);
2057 static void s_aarch64_cpu (int);
2058 static void s_aarch64_arch_extension (int);
2059
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  {"ltorg", s_ltorg, 0},
  /* .pool is an alias for .ltorg.  */
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The integer argument is the data size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};
2090 \f
2091
2092 /* Check whether STR points to a register name followed by a comma or the
2093 end of line; REG_TYPE indicates which register types are checked
2094 against. Return TRUE if STR is such a register name; otherwise return
2095 FALSE. The function does not intend to produce any diagnostics, but since
2096 the register parser aarch64_reg_parse, which is called by this function,
2097 does produce diagnostics, we call clear_error to clear any diagnostics
2098 that may be generated by aarch64_reg_parse.
2099 Also, the function returns FALSE directly if there is any user error
2100 present at the function entry. This prevents the existing diagnostics
2101 state from being spoiled.
2102 The function currently serves parse_constant_immediate and
2103 parse_big_immediate only. */
2104 static bfd_boolean
2105 reg_name_p (char *str, aarch64_reg_type reg_type)
2106 {
2107 int reg;
2108
2109 /* Prevent the diagnostics state from being spoiled. */
2110 if (error_p ())
2111 return FALSE;
2112
2113 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2114
2115 /* Clear the parsing error that may be set by the reg parser. */
2116 clear_error ();
2117
2118 if (reg == PARSE_FAIL)
2119 return FALSE;
2120
2121 skip_whitespace (str);
2122 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2123 return TRUE;
2124
2125 return FALSE;
2126 }
2127
2128 /* Parser functions used exclusively in instruction operands. */
2129
2130 /* Parse an immediate expression which may not be constant.
2131
2132 To prevent the expression parser from pushing a register name
2133 into the symbol table as an undefined symbol, firstly a check is
2134 done to find out whether STR is a register of type REG_TYPE followed
2135 by a comma or the end of line. Return FALSE if STR is such a string. */
2136
2137 static bfd_boolean
2138 parse_immediate_expression (char **str, expressionS *exp,
2139 aarch64_reg_type reg_type)
2140 {
2141 if (reg_name_p (*str, reg_type))
2142 {
2143 set_recoverable_error (_("immediate operand required"));
2144 return FALSE;
2145 }
2146
2147 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2148
2149 if (exp->X_op == O_absent)
2150 {
2151 set_fatal_syntax_error (_("missing immediate expression"));
2152 return FALSE;
2153 }
2154
2155 return TRUE;
2156 }
2157
2158 /* Constant immediate-value read function for use in insn parsing.
2159 STR points to the beginning of the immediate (with the optional
2160 leading #); *VAL receives the value. REG_TYPE says which register
2161 names should be treated as registers rather than as symbolic immediates.
2162
2163 Return TRUE on success; otherwise return FALSE. */
2164
2165 static bfd_boolean
2166 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2167 {
2168 expressionS exp;
2169
2170 if (! parse_immediate_expression (str, &exp, reg_type))
2171 return FALSE;
2172
2173 if (exp.X_op != O_constant)
2174 {
2175 set_syntax_error (_("constant expression required"));
2176 return FALSE;
2177 }
2178
2179 *val = exp.X_add_number;
2180 return TRUE;
2181 }
2182
/* Compress the IEEE single-precision bit pattern IMM into the AArch64
   8-bit floating-point immediate encoding: the sign bit and the top
   seven exponent/fraction bits below it.  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low_bits = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign_bit = (imm >> 24) & 0x80;	/* b[31]    -> b[7].  */

  return sign_bit | low_bits;
}
2189
2190 /* Return TRUE if the single-precision floating-point value encoded in IMM
2191 can be expressed in the AArch64 8-bit signed floating-point format with
2192 3-bit exponent and normalized 4 bits of precision; in other words, the
2193 floating-point value must be expressable as
2194 (+/-) n / 16 * power (2, r)
2195 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2196
2197 static bfd_boolean
2198 aarch64_imm_float_p (uint32_t imm)
2199 {
2200 /* If a single-precision floating-point value has the following bit
2201 pattern, it can be expressed in the AArch64 8-bit floating-point
2202 format:
2203
2204 3 32222222 2221111111111
2205 1 09876543 21098765432109876543210
2206 n Eeeeeexx xxxx0000000000000000000
2207
2208 where n, e and each x are either 0 or 1 independently, with
2209 E == ~ e. */
2210
2211 uint32_t pattern;
2212
2213 /* Prepare the pattern for 'Eeeeee'. */
2214 if (((imm >> 30) & 0x1) == 0)
2215 pattern = 0x3e000000;
2216 else
2217 pattern = 0x40000000;
2218
2219 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2220 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2221 }
2222
2223 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2224 as an IEEE float without any loss of precision. Store the value in
2225 *FPWORD if so. */
2226
2227 static bfd_boolean
2228 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2229 {
2230 /* If a double-precision floating-point value has the following bit
2231 pattern, it can be expressed in a float:
2232
2233 6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2234 3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2235 n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2236
2237 -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
2238 if Eeee_eeee != 1111_1111
2239
2240 where n, e, s and S are either 0 or 1 independently and where ~ is the
2241 inverse of E. */
2242
2243 uint32_t pattern;
2244 uint32_t high32 = imm >> 32;
2245 uint32_t low32 = imm;
2246
2247 /* Lower 29 bits need to be 0s. */
2248 if ((imm & 0x1fffffff) != 0)
2249 return FALSE;
2250
2251 /* Prepare the pattern for 'Eeeeeeeee'. */
2252 if (((high32 >> 30) & 0x1) == 0)
2253 pattern = 0x38000000;
2254 else
2255 pattern = 0x40000000;
2256
2257 /* Check E~~~. */
2258 if ((high32 & 0x78000000) != pattern)
2259 return FALSE;
2260
2261 /* Check Eeee_eeee != 1111_1111. */
2262 if ((high32 & 0x7ff00000) == 0x47f00000)
2263 return FALSE;
2264
2265 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */
2266 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */
2267 | (low32 >> 29)); /* 3 S bits. */
2268 return TRUE;
2269 }
2270
2271 /* Return true if we should treat OPERAND as a double-precision
2272 floating-point operand rather than a single-precision one. */
2273 static bfd_boolean
2274 double_precision_operand_p (const aarch64_opnd_info *operand)
2275 {
2276 /* Check for unsuffixed SVE registers, which are allowed
2277 for LDR and STR but not in instructions that require an
2278 immediate. We get better error messages if we arbitrarily
2279 pick one size, parse the immediate normally, and then
2280 report the match failure in the normal way. */
2281 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2282 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2283 }
2284
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;	/* Set once a hex pattern has been consumed.  */

  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit pattern to a 32-bit one; fails if any
	     precision would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision pattern must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A register name is recoverable: the caller may retry the operand
	 as a register.  */
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal notation: let the generic IEEE converter produce the
	 single-precision littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2360
2361 /* Less-generic immediate-value read function with the possibility of loading
2362 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2363 instructions.
2364
2365 To prevent the expression parser from pushing a register name into the
2366 symbol table as an undefined symbol, a check is firstly done to find
2367 out whether STR is a register of type REG_TYPE followed by a comma or
2368 the end of line. Return FALSE if STR is such a register. */
2369
2370 static bfd_boolean
2371 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2372 {
2373 char *ptr = *str;
2374
2375 if (reg_name_p (ptr, reg_type))
2376 {
2377 set_syntax_error (_("immediate operand required"));
2378 return FALSE;
2379 }
2380
2381 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2382
2383 if (inst.reloc.exp.X_op == O_constant)
2384 *imm = inst.reloc.exp.X_add_number;
2385
2386 *str = ptr;
2387
2388 return TRUE;
2389 }
2390
2391 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2392 if NEED_LIBOPCODES is non-zero, the fixup will need
2393 assistance from the libopcodes. */
2394
2395 static inline void
2396 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2397 const aarch64_opnd_info *operand,
2398 int need_libopcodes_p)
2399 {
2400 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2401 reloc->opnd = operand->type;
2402 if (need_libopcodes_p)
2403 reloc->need_libopcodes_p = 1;
2404 };
2405
2406 /* Return TRUE if the instruction needs to be fixed up later internally by
2407 the GAS; otherwise return FALSE. */
2408
2409 static inline bfd_boolean
2410 aarch64_gas_internal_fixup_p (void)
2411 {
2412 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2413 }
2414
2415 /* Assign the immediate value to the relevant field in *OPERAND if
2416 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2417 needs an internal fixup in a later stage.
2418 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2419 IMM.VALUE that may get assigned with the constant. */
2420 static inline void
2421 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2422 aarch64_opnd_info *operand,
2423 int addr_off_p,
2424 int need_libopcodes_p,
2425 int skip_p)
2426 {
2427 if (reloc->exp.X_op == O_constant)
2428 {
2429 if (addr_off_p)
2430 operand->addr.offset.imm = reloc->exp.X_add_number;
2431 else
2432 operand->imm.value = reloc->exp.X_add_number;
2433 reloc->type = BFD_RELOC_UNUSED;
2434 }
2435 else
2436 {
2437 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2438 /* Tell libopcodes to ignore this operand or not. This is helpful
2439 when one of the operands needs to be fixed up later but we need
2440 libopcodes to check the other operands. */
2441 operand->skip = skip_p;
2442 }
2443 }
2444
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;		/* Modifier name, without the colons.  */
  int pc_rel;			/* Non-zero if the relocation is PC-relative.  */
  bfd_reloc_code_real_type adr_type;	/* Reloc to use for an ADR insn.  */
  bfd_reloc_code_real_type adrp_type;	/* Reloc to use for an ADRP insn.  */
  bfd_reloc_code_real_type movw_type;	/* Reloc to use for a MOV[ZNK] insn.  */
  bfd_reloc_code_real_type add_type;	/* Reloc to use for an ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;	/* Reloc to use for a load/store
					   offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* Reloc to use for a literal
						   (PC-relative) load.  */
};
2464
/* Table of the known relocation modifiers.  Within each entry the
   fields follow the declaration order of struct reloc_table_entry:
   name, pc_rel, adr_type, adrp_type, movw_type, add_type, ldst_type,
   ld_literal_type.  A 0 in a reloc field means the modifier is not
   permitted in that instruction context.  */
static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
2992
2993 /* Given the address of a pointer pointing to the textual name of a
2994 relocation as may appear in assembler source, attempt to find its
2995 details in reloc_table. The pointer will be updated to the character
2996 after the trailing colon. On failure, NULL will be returned;
2997 otherwise return the reloc_table_entry. */
2998
2999 static struct reloc_table_entry *
3000 find_reloc_table_entry (char **str)
3001 {
3002 unsigned int i;
3003 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3004 {
3005 int length = strlen (reloc_table[i].name);
3006
3007 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3008 && (*str)[length] == ':')
3009 {
3010 *str += (length + 1);
3011 return &reloc_table[i];
3012 }
3013 }
3014
3015 return NULL;
3016 }
3017
/* Mode argument to parse_shift and parse_shifter_operand: which shift or
   extension operators (and which bare forms) are acceptable.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3032
/* Parse a <shift> operator on an AArch64 data processing instruction.
   MODE restricts which operators are acceptable (see enum
   parse_shift_mode).  On success the operator kind and amount are
   stored in OPERAND->shifter and *STR is advanced past the parsed
   text.  Return TRUE on success; otherwise return FALSE.  */
static bfd_boolean
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan the leading alphabetic token: the shift operator name.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return FALSE;
    }

  /* Look the name up in the shift-operator hash table.  */
  shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return FALSE;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only ever valid in the LSL|MSL context.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return FALSE;
    }

  /* MUL is only ever valid in the MUL or MUL VL contexts.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return FALSE;
    }

  /* Mode-specific validation of the operator that was found.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return FALSE;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return FALSE;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* A missing amount is only acceptable for extension operators
	 written without an immediate prefix.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return FALSE;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return FALSE;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return FALSE;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return TRUE;
}
3205
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only arithmetic and logical immediates can take this form.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return FALSE;

  p = *str;

  /* Accept an immediate expression.  */
  if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
    return FALSE;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return FALSE;

  /* Not accept any shifter for logical immediate values.  Note that
     this only rejects the operand when a shift actually parses after
     the comma; if parse_shift fails, control falls through to the
     success path below.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return FALSE;
    }

  *str = p;
  return TRUE;
}
3246
3247 /* Parse a <shifter_operand> for a data processing instruction:
3248
3249 <Rm>
3250 <Rm>, <shift>
3251 #<immediate>
3252 #<immediate>, LSL #imm
3253
3254 where <shift> is handled by parse_shift above, and the last two
3255 cases are handled by the function above.
3256
3257 Validation of immediate operands is deferred to md_apply_fix.
3258
3259 Return TRUE on success; otherwise return FALSE. */
3260
3261 static bfd_boolean
3262 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3263 enum parse_shift_mode mode)
3264 {
3265 const reg_entry *reg;
3266 aarch64_opnd_qualifier_t qualifier;
3267 enum aarch64_operand_class opd_class
3268 = aarch64_get_operand_class (operand->type);
3269
3270 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3271 if (reg)
3272 {
3273 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3274 {
3275 set_syntax_error (_("unexpected register in the immediate operand"));
3276 return FALSE;
3277 }
3278
3279 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3280 {
3281 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3282 return FALSE;
3283 }
3284
3285 operand->reg.regno = reg->number;
3286 operand->qualifier = qualifier;
3287
3288 /* Accept optional shift operation on register. */
3289 if (! skip_past_comma (str))
3290 return TRUE;
3291
3292 if (! parse_shift (str, operand, mode))
3293 return FALSE;
3294
3295 return TRUE;
3296 }
3297 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3298 {
3299 set_syntax_error
3300 (_("integer register expected in the extended/shifted operand "
3301 "register"));
3302 return FALSE;
3303 }
3304
3305 /* We have a shifted immediate variable. */
3306 return parse_shifter_operand_imm (str, operand, mode);
3307 }
3308
/* Parse a <shifter_operand> that may carry a leading relocation
   modifier such as ":lo12:".  If no modifier is present, fall through
   to parse_shifter_operand.  Return TRUE on success; return FALSE
   otherwise.  */

static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step over '#:' or ':' to the modifier name.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3369
3370 /* Parse all forms of an address expression. Information is written
3371 to *OPERAND and/or inst.reloc.
3372
3373 The A64 instruction set has the following addressing modes:
3374
3375 Offset
3376 [base] // in SIMD ld/st structure
3377 [base{,#0}] // in ld/st exclusive
3378 [base{,#imm}]
3379 [base,Xm{,LSL #imm}]
3380 [base,Xm,SXTX {#imm}]
3381 [base,Wm,(S|U)XTW {#imm}]
3382 Pre-indexed
3383 [base,#imm]!
3384 [base]! // in ld/stgv
3385 Post-indexed
3386 [base],#imm
3387 [base],Xm // in SIMD ld/st structure
3388 PC-relative (literal)
3389 label
3390 SVE:
3391 [base,#imm,MUL VL]
3392 [base,Zm.D{,LSL #imm}]
3393 [base,Zm.S,(S|U)XTW {#imm}]
3394 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3395 [Zn.S,#imm]
3396 [Zn.D,#imm]
3397 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3398 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3399 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3400
3401 (As a convenience, the notation "=immediate" is permitted in conjunction
3402 with the pc-relative literal load instructions to automatically place an
3403 immediate value or symbolic address in a nearby literal pool and generate
3404 a hidden label which references it.)
3405
3406 Upon a successful parsing, the address structure in *OPERAND will be
3407 filled in the following way:
3408
3409 .base_regno = <base>
3410 .offset.is_reg // 1 if the offset is a register
3411 .offset.imm = <imm>
3412 .offset.regno = <Rm>
3413
3414 For different addressing modes defined in the A64 ISA:
3415
3416 Offset
3417 .pcrel=0; .preind=1; .postind=0; .writeback=0
3418 Pre-indexed
3419 .pcrel=0; .preind=1; .postind=0; .writeback=1
3420 Post-indexed
3421 .pcrel=0; .preind=0; .postind=1; .writeback=1
3422 PC-relative (literal)
3423 .pcrel=1; .preind=1; .postind=0; .writeback=0
3424
3425 The shift/extension information, if any, will be stored in .shifter.
3426 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3427 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3428 corresponding register.
3429
3430 BASE_TYPE says which types of base register should be accepted and
3431 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3432 is the type of shifter that is allowed for immediate offsets,
3433 or SHIFTED_NONE if none.
3434
3435 In all other respects, it is the caller's responsibility to check
3436 for addressing modes not supported by the instruction, and to set
3437 inst.reloc.type. */
3438
static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand,
                    aarch64_opnd_qualifier_t *base_qualifier,
                    aarch64_opnd_qualifier_t *offset_qualifier,
                    aarch64_reg_type base_type, aarch64_reg_type offset_type,
                    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  /* All immediate/symbolic parts of the address are accumulated in the
     instruction-wide relocation expression.  */
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is the PC-relative form.  */
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
        {
          bfd_reloc_code_real_type ty;
          struct reloc_table_entry *entry;

          /* Try to parse a relocation modifier.  Anything else is
             an error.  */
          entry = find_reloc_table_entry (&p);
          if (! entry)
            {
              set_syntax_error (_("unknown relocation modifier"));
              return FALSE;
            }

          /* The same modifier name maps to different relocations
             depending on whether this is an ADR or a literal load.  */
          switch (operand->type)
            {
            case AARCH64_OPND_ADDR_PCREL21:
              /* adr */
              ty = entry->adr_type;
              break;

            default:
              ty = entry->ld_literal_type;
              break;
            }

          if (ty == 0)
            {
              set_syntax_error
                (_("this relocation modifier is not allowed on this "
                   "instruction"));
              return FALSE;
            }

          /* #:<reloc_op>:  */
          if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
            {
              set_syntax_error (_("invalid relocation expression"));
              return FALSE;
            }

          /* #:<reloc_op>:<expr>  */
          /* Record the relocation type.  */
          inst.reloc.type = ty;
          inst.reloc.pc_rel = entry->pc_rel;
        }
      else
        {

          if (skip_past_char (&p, '='))
            /* =immediate; need to generate the literal in the literal pool. */
            inst.gen_lit_pool = 1;

          if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
            {
              set_syntax_error (_("invalid address"));
              return FALSE;
            }
        }

      *str = p;
      return TRUE;
    }

  /* [ */

  /* The base register; its type/qualifier are constrained by BASE_TYPE.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      /* A register offset (possibly shifted/extended), or an immediate
         or relocated expression.  */
      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
        {
          if (!aarch64_check_reg_type (reg, offset_type))
            {
              set_syntax_error (_(get_reg_expected_msg (offset_type)));
              return FALSE;
            }

          /* [Xn,Rm */
          operand->addr.offset.regno = reg->number;
          operand->addr.offset.is_reg = 1;
          /* Shifted index.  */
          if (skip_past_comma (&p))
            {
              /* [Xn,Rm, */
              if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
                /* Use the diagnostics set in parse_shift, so not set new
                   error message here.  */
                return FALSE;
            }
          /* We only accept:
             [base,Xm{,LSL #imm}]
             [base,Xm,SXTX {#imm}]
             [base,Wm,(S|U)XTW {#imm}]  */
          if (operand->shifter.kind == AARCH64_MOD_NONE
              || operand->shifter.kind == AARCH64_MOD_LSL
              || operand->shifter.kind == AARCH64_MOD_SXTX)
            {
              /* LSL/SXTX (or no shifter) require the offset to be the
                 same size as the base; W offsets need (S|U)XTW.  */
              if (*offset_qualifier == AARCH64_OPND_QLF_W)
                {
                  set_syntax_error (_("invalid use of 32-bit register offset"));
                  return FALSE;
                }
              if (aarch64_get_qualifier_esize (*base_qualifier)
                  != aarch64_get_qualifier_esize (*offset_qualifier))
                {
                  set_syntax_error (_("offset has different size from base"));
                  return FALSE;
                }
            }
          else if (*offset_qualifier == AARCH64_OPND_QLF_X)
            {
              set_syntax_error (_("invalid use of 64-bit register offset"));
              return FALSE;
            }
        }
      else
        {
          /* [Xn,#:<reloc_op>:<symbol> */
          skip_past_char (&p, '#');
          if (skip_past_char (&p, ':'))
            {
              struct reloc_table_entry *entry;

              /* Try to parse a relocation modifier.  Anything else is
                 an error.  */
              if (!(entry = find_reloc_table_entry (&p)))
                {
                  set_syntax_error (_("unknown relocation modifier"));
                  return FALSE;
                }

              if (entry->ldst_type == 0)
                {
                  set_syntax_error
                    (_("this relocation modifier is not allowed on this "
                       "instruction"));
                  return FALSE;
                }

              /* [Xn,#:<reloc_op>: */
              /* We now have the group relocation table entry corresponding to
                 the name in the assembler source.  Next, we parse the
                 expression.  */
              if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
                {
                  set_syntax_error (_("invalid relocation expression"));
                  return FALSE;
                }

              /* [Xn,#:<reloc_op>:<expr> */
              /* Record the load/store relocation type.  */
              inst.reloc.type = entry->ldst_type;
              inst.reloc.pc_rel = entry->pc_rel;
            }
          else
            {
              if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
                {
                  set_syntax_error (_("invalid expression in the address"));
                  return FALSE;
                }
              /* [Xn,<expr> */
              if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
                /* [Xn,<expr>,<shifter> */
                if (! parse_shift (&p, operand, imm_shift_mode))
                  return FALSE;
            }
        }
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  if (skip_past_char (&p, '!'))
    {
      /* Writeback with a register offset is not an A64 addressing mode.  */
      if (operand->addr.preind && operand->addr.offset.is_reg)
        {
          set_syntax_error (_("register offset not allowed in pre-indexed "
                              "addressing mode"));
          return FALSE;
        }
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
        {
          set_syntax_error (_("cannot combine pre- and post-indexing"));
          return FALSE;
        }

      /* The post-index amount: either a 64-bit register or an
         immediate expression.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
        {
          /* [Xn],Xm */
          if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
            {
              set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
              return FALSE;
            }

          operand->addr.offset.regno = reg->number;
          operand->addr.offset.is_reg = 1;
        }
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
        {
          /* [Xn],#expr */
          set_syntax_error (_("invalid expression in the address"));
          return FALSE;
        }
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; reject [Rn]! except for ld/stgv but accept [Rn]
     as a shorthand for [Rn,#0].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->type != AARCH64_OPND_ADDR_SIMPLE_2 && operand->addr.writeback)
        {
          /* Reject [Rn]!  */
          set_syntax_error (_("missing offset in the pre-indexed address"));
          return FALSE;
        }

      operand->addr.preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }

  *str = p;
  return TRUE;
}
3712
3713 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3714 on success. */
3715 static bfd_boolean
3716 parse_address (char **str, aarch64_opnd_info *operand)
3717 {
3718 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3719 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3720 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3721 }
3722
3723 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3724 The arguments have the same meaning as for parse_address_main.
3725 Return TRUE on success. */
3726 static bfd_boolean
3727 parse_sve_address (char **str, aarch64_opnd_info *operand,
3728 aarch64_opnd_qualifier_t *base_qualifier,
3729 aarch64_opnd_qualifier_t *offset_qualifier)
3730 {
3731 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3732 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3733 SHIFTED_MUL_VL);
3734 }
3735
3736 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3737 Return TRUE on success; otherwise return FALSE. */
3738 static bfd_boolean
3739 parse_half (char **str, int *internal_fixup_p)
3740 {
3741 char *p = *str;
3742
3743 skip_past_char (&p, '#');
3744
3745 gas_assert (internal_fixup_p);
3746 *internal_fixup_p = 0;
3747
3748 if (*p == ':')
3749 {
3750 struct reloc_table_entry *entry;
3751
3752 /* Try to parse a relocation. Anything else is an error. */
3753 ++p;
3754 if (!(entry = find_reloc_table_entry (&p)))
3755 {
3756 set_syntax_error (_("unknown relocation modifier"));
3757 return FALSE;
3758 }
3759
3760 if (entry->movw_type == 0)
3761 {
3762 set_syntax_error
3763 (_("this relocation modifier is not allowed on this instruction"));
3764 return FALSE;
3765 }
3766
3767 inst.reloc.type = entry->movw_type;
3768 }
3769 else
3770 *internal_fixup_p = 1;
3771
3772 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3773 return FALSE;
3774
3775 *str = p;
3776 return TRUE;
3777 }
3778
3779 /* Parse an operand for an ADRP instruction:
3780 ADRP <Xd>, <label>
3781 Return TRUE on success; otherwise return FALSE. */
3782
3783 static bfd_boolean
3784 parse_adrp (char **str)
3785 {
3786 char *p;
3787
3788 p = *str;
3789 if (*p == ':')
3790 {
3791 struct reloc_table_entry *entry;
3792
3793 /* Try to parse a relocation. Anything else is an error. */
3794 ++p;
3795 if (!(entry = find_reloc_table_entry (&p)))
3796 {
3797 set_syntax_error (_("unknown relocation modifier"));
3798 return FALSE;
3799 }
3800
3801 if (entry->adrp_type == 0)
3802 {
3803 set_syntax_error
3804 (_("this relocation modifier is not allowed on this instruction"));
3805 return FALSE;
3806 }
3807
3808 inst.reloc.type = entry->adrp_type;
3809 }
3810 else
3811 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3812
3813 inst.reloc.pc_rel = 1;
3814
3815 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3816 return FALSE;
3817
3818 *str = p;
3819 return TRUE;
3820 }
3821
3822 /* Miscellaneous. */
3823
3824 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3825 of SIZE tokens in which index I gives the token for field value I,
3826 or is null if field value I is invalid. REG_TYPE says which register
3827 names should be treated as registers rather than as symbolic immediates.
3828
3829 Return true on success, moving *STR past the operand and storing the
3830 field value in *VAL. */
3831
3832 static int
3833 parse_enum_string (char **str, int64_t *val, const char *const *array,
3834 size_t size, aarch64_reg_type reg_type)
3835 {
3836 expressionS exp;
3837 char *p, *q;
3838 size_t i;
3839
3840 /* Match C-like tokens. */
3841 p = q = *str;
3842 while (ISALNUM (*q))
3843 q++;
3844
3845 for (i = 0; i < size; ++i)
3846 if (array[i]
3847 && strncasecmp (array[i], p, q - p) == 0
3848 && array[i][q - p] == 0)
3849 {
3850 *val = i;
3851 *str = q;
3852 return TRUE;
3853 }
3854
3855 if (!parse_immediate_expression (&p, &exp, reg_type))
3856 return FALSE;
3857
3858 if (exp.X_op == O_constant
3859 && (uint64_t) exp.X_add_number < size)
3860 {
3861 *val = exp.X_add_number;
3862 *str = p;
3863 return TRUE;
3864 }
3865
3866 /* Use the default error for this operand. */
3867 return FALSE;
3868 }
3869
3870 /* Parse an option for a preload instruction. Returns the encoding for the
3871 option, or PARSE_FAIL. */
3872
3873 static int
3874 parse_pldop (char **str)
3875 {
3876 char *p, *q;
3877 const struct aarch64_name_value_pair *o;
3878
3879 p = q = *str;
3880 while (ISALNUM (*q))
3881 q++;
3882
3883 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3884 if (!o)
3885 return PARSE_FAIL;
3886
3887 *str = q;
3888 return o->value;
3889 }
3890
3891 /* Parse an option for a barrier instruction. Returns the encoding for the
3892 option, or PARSE_FAIL. */
3893
3894 static int
3895 parse_barrier (char **str)
3896 {
3897 char *p, *q;
3898 const asm_barrier_opt *o;
3899
3900 p = q = *str;
3901 while (ISALPHA (*q))
3902 q++;
3903
3904 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3905 if (!o)
3906 return PARSE_FAIL;
3907
3908 *str = q;
3909 return o->value;
3910 }
3911
3912 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3913 return 0 if successful. Otherwise return PARSE_FAIL. */
3914
3915 static int
3916 parse_barrier_psb (char **str,
3917 const struct aarch64_name_value_pair ** hint_opt)
3918 {
3919 char *p, *q;
3920 const struct aarch64_name_value_pair *o;
3921
3922 p = q = *str;
3923 while (ISALPHA (*q))
3924 q++;
3925
3926 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3927 if (!o)
3928 {
3929 set_fatal_syntax_error
3930 ( _("unknown or missing option to PSB"));
3931 return PARSE_FAIL;
3932 }
3933
3934 if (o->value != 0x11)
3935 {
3936 /* PSB only accepts option name 'CSYNC'. */
3937 set_syntax_error
3938 (_("the specified option is not accepted for PSB"));
3939 return PARSE_FAIL;
3940 }
3941
3942 *str = q;
3943 *hint_opt = o;
3944 return 0;
3945 }
3946
3947 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
3948 return 0 if successful. Otherwise return PARSE_FAIL. */
3949
3950 static int
3951 parse_bti_operand (char **str,
3952 const struct aarch64_name_value_pair ** hint_opt)
3953 {
3954 char *p, *q;
3955 const struct aarch64_name_value_pair *o;
3956
3957 p = q = *str;
3958 while (ISALPHA (*q))
3959 q++;
3960
3961 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3962 if (!o)
3963 {
3964 set_fatal_syntax_error
3965 ( _("unknown option to BTI"));
3966 return PARSE_FAIL;
3967 }
3968
3969 switch (o->value)
3970 {
3971 /* Valid BTI operands. */
3972 case HINT_OPD_C:
3973 case HINT_OPD_J:
3974 case HINT_OPD_JC:
3975 break;
3976
3977 default:
3978 set_syntax_error
3979 (_("unknown option to BTI"));
3980 return PARSE_FAIL;
3981 }
3982
3983 *str = q;
3984 *hint_opt = o;
3985 return 0;
3986 }
3987
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-null, *FLAGS receives the register's flag bits (zero
   for the implementation-defined form).  Unsupported or deprecated names
   produce a diagnostic but still return their encoding.
*/

static int
parse_sys_reg (char **str, struct hash_control *sys_regs,
               int imple_defined_p, int pstatefield_p,
               uint32_t* flags)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF so the hash lookup
     is case-insensitive; truncate at 31 characters.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';
  /* Assert that BUF be large enough.  */
  gas_assert (p - buf == q - *str);

  o = hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
        return PARSE_FAIL;
      else
        {
          /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
          unsigned int op0, op1, cn, cm, op2;

          if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
              != 5)
            return PARSE_FAIL;
          /* Range-check each field against its encoding width.  */
          if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
            return PARSE_FAIL;
          /* Pack the fields into the op0:op1:CRn:CRm:op2 encoding.  */
          value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
          if (flags)
            *flags = 0;
        }
    }
  else
    {
      /* Known name: diagnose (but do not reject) names the selected
         processor does not support or that are deprecated.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
        as_bad (_("selected processor does not support PSTATE field "
                  "name '%s'"), buf);
      if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
        as_bad (_("selected processor does not support system register "
                  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o))
        as_warn (_("system register name '%s' is deprecated and may be "
                   "removed in a future release"), buf);
      value = o->value;
      if (flags)
        *flags = o->flags;
    }

  *str = q;
  return value;
}
4055
4056 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4057 for the option, or NULL. */
4058
4059 static const aarch64_sys_ins_reg *
4060 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
4061 {
4062 char *p, *q;
4063 char buf[32];
4064 const aarch64_sys_ins_reg *o;
4065
4066 p = buf;
4067 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4068 if (p < buf + 31)
4069 *p++ = TOLOWER (*q);
4070 *p = '\0';
4071
4072 o = hash_find (sys_ins_regs, buf);
4073 if (!o)
4074 return NULL;
4075
4076 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
4077 as_bad (_("selected processor does not support system register "
4078 "name '%s'"), buf);
4079
4080 *str = q;
4081 return o;
4082 }
4083 \f
/* Operand-parsing helper macros.  Each expands in the context of
   md_assemble's operand loop, capturing the surrounding locals (STR,
   VAL, REG, RTYPE, QUALIFIER, INFO, IMM_REG_TYPE) and branching to the
   `failure' label on a parse error.  */

/* Consume the single character CHR, or fail.  */
#define po_char_or_fail(chr) do {                               \
    if (! skip_past_char (&str, chr))                           \
      goto failure;                                             \
} while (0)

/* Parse a register of REGTYPE into VAL (its number), or fail with the
   default error.  */
#define po_reg_or_fail(regtype) do {                            \
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);      \
    if (val == PARSE_FAIL)                                      \
      {                                                         \
        set_default_error ();                                   \
        goto failure;                                           \
      }                                                         \
  } while (0)

/* Parse a 32/64-bit integer register of REG_TYPE, storing its number
   and qualifier into INFO, or fail with the default error.  */
#define po_int_reg_or_fail(reg_type) do {                       \
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);           \
    if (!reg || !aarch64_check_reg_type (reg, reg_type))        \
      {                                                         \
        set_default_error ();                                   \
        goto failure;                                           \
      }                                                         \
    info->reg.regno = reg->number;                              \
    info->qualifier = qualifier;                                \
  } while (0)

/* Parse a constant immediate into VAL with no range check, or fail.  */
#define po_imm_nc_or_fail() do {                                \
    if (! parse_constant_immediate (&str, &val, imm_reg_type))  \
      goto failure;                                             \
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or fail (out-of-range is a fatal syntax error).  */
#define po_imm_or_fail(min, max) do {                           \
    if (! parse_constant_immediate (&str, &val, imm_reg_type))  \
      goto failure;                                             \
    if (val < min || val > max)                                 \
      {                                                         \
        set_fatal_syntax_error (_("immediate value out of range "\
                                  #min " to "#max));            \
        goto failure;                                           \
      }                                                         \
  } while (0)

/* Parse a symbolic token from ARRAY (or an equivalent immediate) into
   VAL, or fail.  */
#define po_enum_or_fail(array) do {                             \
    if (!parse_enum_string (&str, &val, array,                  \
                            ARRAY_SIZE (array), imm_reg_type))  \
      goto failure;                                             \
  } while (0)

/* Evaluate EXPR; fail if it yields false.  */
#define po_misc_or_fail(expr) do {                              \
    if (!expr)                                                  \
      goto failure;                                             \
  } while (0)
4135 \f
/* Encode the 12-bit immediate of an add/sub (immediate) instruction;
   the field occupies bits [21:10].  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  const unsigned int imm12_lsb = 10;
  return imm << imm12_lsb;
}
4142
/* Encode the shift-amount field of an add/sub (immediate) instruction;
   the field occupies bits [23:22].  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  const unsigned int shift_lsb = 22;
  return cnt << shift_lsb;
}
4149
4150
/* Encode the 21-bit immediate of an ADR instruction: imm[1:0] goes to
   instruction bits [30:29] (immlo) and imm[20:2] to bits [23:5]
   (immhi).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;
  uint32_t immhi = (imm & 0x1ffffcu) << 3;
  return immlo | immhi;
}
4158
/* Encode the 16-bit immediate field of a move-wide instruction; the
   field occupies bits [20:5].  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  const unsigned int imm16_lsb = 5;
  return imm << imm16_lsb;
}
4165
/* Encode the 26-bit offset of an unconditional branch; the offset is
   masked to 26 bits and placed at bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t imm26_mask = 0x03ffffffu;
  return ofs & imm26_mask;
}
4172
/* Encode the 19-bit offset of a conditional branch or compare-and-
   branch; the offset is masked to 19 bits and placed at bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t imm19_mask = 0x7ffffu;
  const unsigned int imm19_lsb = 5;
  return (ofs & imm19_mask) << imm19_lsb;
}
4179
/* Encode the 19-bit offset of a load-literal instruction; the offset
   is masked to 19 bits and placed at bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t imm19_mask = 0x7ffffu;
  const unsigned int imm19_lsb = 5;
  return (ofs & imm19_mask) << imm19_lsb;
}
4186
/* Encode the 14-bit offset of a test-and-branch instruction; the
   offset is masked to 14 bits and placed at bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t imm14_mask = 0x3fffu;
  const unsigned int imm14_lsb = 5;
  return (ofs & imm14_mask) << imm14_lsb;
}
4193
/* Encode the 16-bit immediate field of SVC/HVC/SMC; the field occupies
   bits [20:5].  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  const unsigned int imm16_lsb = 5;
  return imm << imm16_lsb;
}
4200
/* Re-encode an ADD(S) opcode as SUB(S), or vice versa, by flipping the
   op bit (bit 30).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
4207
/* Re-encode a MOVN/MOVZ-family opcode as MOVZ by setting the opc bit
   (bit 30).  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;
  return opcode | opc_bit;
}
4213
/* Re-encode a MOVN/MOVZ-family opcode as MOVN by clearing the opc bit
   (bit 30).  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;
  return opcode & ~opc_bit;
}
4219
4220 /* Overall per-instruction processing. */
4221
4222 /* We need to be able to fix up arbitrary expressions in some statements.
4223 This is so that we can handle symbols that are an arbitrary distance from
4224 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4225 which returns part of an address in a form which will be valid for
4226 a data instruction. We do this by pushing the expression into a symbol
4227 in the expr_section, and creating a fix for that. */
4228
4229 static fixS *
4230 fix_new_aarch64 (fragS * frag,
4231 int where,
4232 short int size, expressionS * exp, int pc_rel, int reloc)
4233 {
4234 fixS *new_fix;
4235
4236 switch (exp->X_op)
4237 {
4238 case O_constant:
4239 case O_symbol:
4240 case O_add:
4241 case O_subtract:
4242 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4243 break;
4244
4245 default:
4246 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4247 pc_rel, reloc);
4248 break;
4249 }
4250 return new_fix;
4251 }
4252 \f
4253 /* Diagnostics on operands errors. */
4254
4255 /* By default, output verbose error message.
4256 Disable the verbose error message by -mno-verbose-error. */
4257 static int verbose_error_p = 1;
4258
4259 #ifdef DEBUG_AARCH64
4260 /* N.B. this is only for the purpose of debugging. */
4261 const char* operand_mismatch_kind_names[] =
4262 {
4263 "AARCH64_OPDE_NIL",
4264 "AARCH64_OPDE_RECOVERABLE",
4265 "AARCH64_OPDE_SYNTAX_ERROR",
4266 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
4267 "AARCH64_OPDE_INVALID_VARIANT",
4268 "AARCH64_OPDE_OUT_OF_RANGE",
4269 "AARCH64_OPDE_UNALIGNED",
4270 "AARCH64_OPDE_REG_LIST",
4271 "AARCH64_OPDE_OTHER_ERROR",
4272 };
4273 #endif /* DEBUG_AARCH64 */
4274
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
                                 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enumerators being declared in
     ascending order of severity; these assertions document (and check)
     that ordering.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4295
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  /* Static buffer: the returned pointer is only valid until the next
     call.  */
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4324
4325 static void
4326 reset_aarch64_instruction (aarch64_instruction *instruction)
4327 {
4328 memset (instruction, '\0', sizeof (aarch64_instruction));
4329 instruction->reloc.type = BFD_RELOC_UNUSED;
4330 }
4331
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode template the error is
					   recorded against.  */
  aarch64_operand_error detail;		/* The error itself.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of error records, with O(1) append via TAIL.  */
struct operand_errors
{
  operand_error_record *head;		/* First (most recent) record.  */
  operand_error_record *tail;		/* Last record.  */
};

typedef struct operand_errors operand_errors;
4351
4352 /* Top-level data structure reporting user errors for the current line of
4353 the assembly code.
4354 The way md_assemble works is that all opcodes sharing the same mnemonic
4355 name are iterated to find a match to the assembly line. In this data
4356 structure, each of the such opcodes will have one operand_error_record
4357 allocated and inserted. In other words, excessive errors related with
4358 a single opcode are disregarded. */
4359 operand_errors operand_error_report;
4360
4361 /* Free record nodes. */
4362 static operand_error_record *free_opnd_error_record_nodes = NULL;
4363
4364 /* Initialize the data structure that stores the operand mismatch
4365 information on assembling one line of the assembly code. */
4366 static void
4367 init_operand_error_report (void)
4368 {
4369 if (operand_error_report.head != NULL)
4370 {
4371 gas_assert (operand_error_report.tail != NULL);
4372 operand_error_report.tail->next = free_opnd_error_record_nodes;
4373 free_opnd_error_record_nodes = operand_error_report.head;
4374 operand_error_report.head = NULL;
4375 operand_error_report.tail = NULL;
4376 return;
4377 }
4378 gas_assert (operand_error_report.tail == NULL);
4379 }
4380
4381 /* Return TRUE if some operand error has been recorded during the
4382 parsing of the current assembly line using the opcode *OPCODE;
4383 otherwise return FALSE. */
4384 static inline bfd_boolean
4385 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4386 {
4387 operand_error_record *record = operand_error_report.head;
4388 return record && record->opcode == opcode;
4389 }
4390
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
        {
          record = XNEW (operand_error_record);
        }
      else
        {
          /* Pop a recycled node off the free list.  */
          record = free_opnd_error_record_nodes;
          free_opnd_error_record_nodes = record->next;
        }
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
        operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
           && record->detail.index <= new_record->detail.index
           && operand_error_higher_severity_p (record->detail.kind,
                                               new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
         single opcode, only record the error of the leftmost operand and
         only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
                   " the existing error %s on operand %d",
                   operand_mismatch_kind_names[new_record->detail.kind],
                   new_record->detail.index,
                   operand_mismatch_kind_names[record->detail.kind],
                   record->detail.index);
      return;
    }

  /* Either a freshly inserted record, or the existing record is to be
     overwritten with the new (more relevant) error.  */
  record->detail = new_record->detail;
}
4442
4443 static inline void
4444 record_operand_error_info (const aarch64_opcode *opcode,
4445 aarch64_operand_error *error_info)
4446 {
4447 operand_error_record record;
4448 record.opcode = opcode;
4449 record.detail = *error_info;
4450 add_operand_error_record (&record);
4451 }
4452
4453 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4454 error message *ERROR, for operand IDX (count from 0). */
4455
4456 static void
4457 record_operand_error (const aarch64_opcode *opcode, int idx,
4458 enum aarch64_operand_error_kind kind,
4459 const char* error)
4460 {
4461 aarch64_operand_error info;
4462 memset(&info, 0, sizeof (info));
4463 info.index = idx;
4464 info.kind = kind;
4465 info.error = error;
4466 info.non_fatal = FALSE;
4467 record_operand_error_info (opcode, &info);
4468 }
4469
4470 static void
4471 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4472 enum aarch64_operand_error_kind kind,
4473 const char* error, const int *extra_data)
4474 {
4475 aarch64_operand_error info;
4476 info.index = idx;
4477 info.kind = kind;
4478 info.error = error;
4479 info.data[0] = extra_data[0];
4480 info.data[1] = extra_data[1];
4481 info.data[2] = extra_data[2];
4482 info.non_fatal = FALSE;
4483 record_operand_error_info (opcode, &info);
4484 }
4485
4486 static void
4487 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4488 const char* error, int lower_bound,
4489 int upper_bound)
4490 {
4491 int data[3] = {lower_bound, upper_bound, 0};
4492 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4493 error, data);
4494 }
4495
4496 /* Remove the operand error record for *OPCODE. */
4497 static void ATTRIBUTE_UNUSED
4498 remove_operand_error_record (const aarch64_opcode *opcode)
4499 {
4500 if (opcode_has_operand_error_p (opcode))
4501 {
4502 operand_error_record* record = operand_error_report.head;
4503 gas_assert (record != NULL && operand_error_report.tail != NULL);
4504 operand_error_report.head = record->next;
4505 record->next = free_opnd_error_record_nodes;
4506 free_opnd_error_record_nodes = record;
4507 if (operand_error_report.head == NULL)
4508 {
4509 gas_assert (operand_error_report.tail == record);
4510 operand_error_report.tail = NULL;
4511 }
4512 }
4513 }
4514
4515 /* Given the instruction in *INSTR, return the index of the best matched
4516 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4517
4518 Return -1 if there is no qualifier sequence; return the first match
4519 if there is multiple matches found. */
4520
4521 static int
4522 find_best_match (const aarch64_inst *instr,
4523 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4524 {
4525 int i, num_opnds, max_num_matched, idx;
4526
4527 num_opnds = aarch64_num_of_operands (instr->opcode);
4528 if (num_opnds == 0)
4529 {
4530 DEBUG_TRACE ("no operand");
4531 return -1;
4532 }
4533
4534 max_num_matched = 0;
4535 idx = 0;
4536
4537 /* For each pattern. */
4538 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4539 {
4540 int j, num_matched;
4541 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4542
4543 /* Most opcodes has much fewer patterns in the list. */
4544 if (empty_qualifier_sequence_p (qualifiers))
4545 {
4546 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4547 break;
4548 }
4549
4550 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4551 if (*qualifiers == instr->operands[j].qualifier)
4552 ++num_matched;
4553
4554 if (num_matched > max_num_matched)
4555 {
4556 max_num_matched = num_matched;
4557 idx = i;
4558 }
4559 }
4560
4561 DEBUG_TRACE ("return with %d", idx);
4562 return idx;
4563 }
4564
4565 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4566 corresponding operands in *INSTR. */
4567
4568 static inline void
4569 assign_qualifier_sequence (aarch64_inst *instr,
4570 const aarch64_opnd_qualifier_t *qualifiers)
4571 {
4572 int i = 0;
4573 int num_opnds = aarch64_num_of_operands (instr->opcode);
4574 gas_assert (num_opnds);
4575 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4576 instr->operands[i].qualifier = *qualifiers;
4577 }
4578
4579 /* Print operands for the diagnosis purpose. */
4580
4581 static void
4582 print_operands (char *buf, const aarch64_opcode *opcode,
4583 const aarch64_opnd_info *opnds)
4584 {
4585 int i;
4586
4587 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4588 {
4589 char str[128];
4590
4591 /* We regard the opcode operand info more, however we also look into
4592 the inst->operands to support the disassembling of the optional
4593 operand.
4594 The two operand code should be the same in all cases, apart from
4595 when the operand can be optional. */
4596 if (opcode->operands[i] == AARCH64_OPND_NIL
4597 || opnds[i].type == AARCH64_OPND_NIL)
4598 break;
4599
4600 /* Generate the operand string in STR. */
4601 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4602 NULL);
4603
4604 /* Delimiter. */
4605 if (str[0] != '\0')
4606 strcat (buf, i == 0 ? " " : ", ");
4607
4608 /* Append the operand string. */
4609 strcat (buf, str);
4610 }
4611 }
4612
/* Emit an informational (neither error nor warning) message to stderr,
   prefixed with the current input file name and line number when known.  */

static void
output_info (const char *format, ...)
{
  unsigned int lineno;
  const char *fname;
  va_list args;

  fname = as_where (&lineno);
  if (fname != NULL)
    {
      if (lineno != 0)
	fprintf (stderr, "%s:%u: ", fname, lineno);
      else
	fprintf (stderr, "%s: ", fname);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4636
/* Output one operand error record.

   RECORD holds the error detail and the opcode template it was collected
   against; STR is the offending assembly line, quoted in the message.
   Non-fatal records are reported via as_warn, fatal ones via as_bad.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  /* Index of the offending operand; -1 means "unknown operand".  */
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Dispatch to the warning or error reporter depending on severity.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      /* A NIL record should never have been queued.  */
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error. */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate. Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report. Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	     'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	     'ldnp s0,s1,[x0,#6]!'
	     which is still not right. */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst. */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing. */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parsing must succeed: the line already parsed
	     once; only encoding failed.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail again (that is why we're here).  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence. */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers. */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint. */
	  output_info (_(" did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_(" %s"), buf);

	  /* Print out other variant(s) if there is any. */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_(" other valid variant(s):"));

	  /* For each pattern. */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list. */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name. */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers. */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction. */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_(" %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the permitted range; equal bounds mean a
	 single allowed value.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected number of registers.  */
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] holds the required alignment.  */
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4813
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors. This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.

   STR is the assembly line being diagnosed.  NON_FATAL_ONLY, when TRUE,
   restricts output to warning (non-fatal) records.  */

static void
output_operand_error_report (char *str, bfd_boolean non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report. */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error. */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit. */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity. */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      /* Fatal records are skipped entirely in non-fatal-only mode so
	 they cannot win the severity comparison.  */
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report. */
  largest_error_pos = -2; /* Index can be -1 which means unknown index. */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all. */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index. In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message. */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive. But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND. These are
     usually skipped if there is one successful match. However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones. This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print. */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output. */
  output_operand_error_record (record, str);
}
4911 \f
/* Write the 32-bit instruction word INSN to BUF in little-endian byte
   order, independent of host endianness.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
4922
/* Read a 32-bit little-endian instruction word from BUF and return it
   in host order.

   Each byte is widened to uint32_t before shifting: with plain int
   operands (the previous form), `where[3] << 24' left-shifts into the
   sign bit whenever the top byte is >= 0x80 -- which holds for most
   A64 encodings -- and that is undefined behaviour in C.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *where = (unsigned char *) buf;
  uint32_t result;
  result = ((uint32_t) where[0]
	    | ((uint32_t) where[1] << 8)
	    | ((uint32_t) where[2] << 16)
	    | ((uint32_t) where[3] << 24));
  return result;
}
4931
/* Emit the encoded instruction held in inst.base.value into the current
   fragment, recording any pending relocation from inst.reloc as a fix-up.
   NEW_INST, if non-NULL, is attached to the fix-up so the instruction can
   be re-encoded later (in md_apply_fix).  */
static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve 4 bytes in the current frag for the instruction.  */
  to = frag_more (INSN_SIZE);

  /* Mark this frag as containing instructions (used for mapping
     symbol bookkeeping).  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix(). */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* GAS-internal fix-up: stash the operand kind and flags so the
	     fix-up machinery can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
4965
/* Link together opcodes of the same name. */

struct templates
{
  /* One opcode entry carrying this mnemonic.  */
  aarch64_opcode *opcode;
  /* Next entry sharing the same mnemonic, or NULL at the end.  */
  struct templates *next;
};

typedef struct templates templates;
4975
4976 static templates *
4977 lookup_mnemonic (const char *start, int len)
4978 {
4979 templates *templ = NULL;
4980
4981 templ = hash_find_n (aarch64_ops_hsh, start, len);
4982 return templ;
4983 }
4984
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote. STR points to the
   beginning of the mnemonic.

   On success *STR is advanced past the mnemonic (and any condition
   suffix); on failure 0/NULL is returned and *STR is left at the
   offending position.  Sets inst.cond as a side effect.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string. */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Reject an empty mnemonic or one that begins with '.'.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition. */
  if (dot)
    {
      /* The text between the '.' and the end should name a condition.  */
      cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown suffix: leave *STR at the '.' and fail.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic. */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* 13 keeps BASE + ".c" within the 16-byte CONDNAME buffer; the
	 lookup is length-bounded, so no NUL terminator is required.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5048
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.

   Returns AARCH64_OPND_QLF_NIL (after recording a syntax error) when the
   arrangement is invalid.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type. */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; NOTE(review): this relies on
     the V_* qualifiers being laid out contiguously per element type in
     the aarch64_opnd_qualifier enum.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate suffixes /Z and /M map directly.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B. */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Vector element register.  Relies on the S_* qualifiers being in
	 the same order as vector_el_type.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register. */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit total vector sizes exist.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type. The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount. */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5119
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* The opcode table supplies one default value per opcode; which field
     of *OPERAND it fills depends on the operand kind below.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element operands: the default is the register number of
       the element register.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      /* An omitted "MUL #n" multiplier defaults to MUL #1.  */
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      /* No immediate was parsed, so make sure no relocation is pending.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* DEFAULT_VALUE indexes the barrier option table.  */
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      /* DEFAULT_VALUE indexes the hint option table.  */
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5216
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   On success, sets the implicit LSL shift amount on operand 1
   (0/16/32/48 according to which 16-bit group the relocation
   targets).  */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not be combined with signed or group-relative
     relocations, which assume the other halves are also written.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  switch (inst.reloc.type)
    {
    /* G0 relocations select bits [15:0]: no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1 relocations select bits [31:16].  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2 relocations select bits [47:32]: 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    /* G3 relocations select bits [63:48]: 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS. */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser. */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5318
/* A primitive log calculator.  Return log2 of SIZE for power-of-two
   SIZE in [1, 16]; assert and return (unsigned int) -1 otherwise.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* (unsigned char) -1 marks sizes that are not a power of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as SIZE > 16: the previous guard only
     caught the latter, so SIZE == 0 made ls[size - 1] read
     ls[UINT_MAX] -- an out-of-bounds access via unsigned wraparound.  */
  if (size < 1 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5334
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The access size (taken from the qualifier of operand 1, or inferred
   from operand 0 when operand 1 is unqualified) selects the LDST8/16/
   32/64/128 variant of the pseudo relocation.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: pseudo reloc kind (plain, DTPREL, DTPREL_NC, TPREL, TPREL_NC);
     columns: log2 of the access size (8/16/32/64/128 bits).  The TLS
     rows have no 128-bit variant.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* Operand 1 may be unqualified at this point; infer its qualifier
     from operand 0's.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* The TLS rows stop at 64-bit accesses.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this. */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5413
5414 /* Check whether a register list REGINFO is valid. The registers must be
5415 numbered in increasing order (modulo 32), in increments of one or two.
5416
5417 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5418 increments of two.
5419
5420 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5421
5422 static bfd_boolean
5423 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5424 {
5425 uint32_t i, nb_regs, prev_regno, incr;
5426
5427 nb_regs = 1 + (reginfo & 0x3);
5428 reginfo >>= 2;
5429 prev_regno = reginfo & 0x1f;
5430 incr = accept_alternate ? 2 : 1;
5431
5432 for (i = 1; i < nb_regs; ++i)
5433 {
5434 uint32_t curr_regno;
5435 reginfo >>= 5;
5436 curr_regno = reginfo & 0x1f;
5437 if (curr_regno != ((prev_regno + incr) & 0x1f))
5438 return FALSE;
5439 prev_regno = curr_regno;
5440 }
5441
5442 return TRUE;
5443 }
5444
5445 /* Generic instruction operand parser. This does no encoding and no
5446 semantic validation; it merely squirrels values away in the inst
5447 structure. Returns TRUE or FALSE depending on whether the
5448 specified grammar matched. */
5449
5450 static bfd_boolean
5451 parse_operands (char *str, const aarch64_opcode *opcode)
5452 {
5453 int i;
5454 char *backtrack_pos = 0;
5455 const enum aarch64_opnd *operands = opcode->operands;
5456 aarch64_reg_type imm_reg_type;
5457
5458 clear_error ();
5459 skip_whitespace (str);
5460
5461 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5462 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5463 else
5464 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5465
5466 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5467 {
5468 int64_t val;
5469 const reg_entry *reg;
5470 int comma_skipped_p = 0;
5471 aarch64_reg_type rtype;
5472 struct vector_type_el vectype;
5473 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5474 aarch64_opnd_info *info = &inst.base.operands[i];
5475 aarch64_reg_type reg_type;
5476
5477 DEBUG_TRACE ("parse operand %d", i);
5478
5479 /* Assign the operand code. */
5480 info->type = operands[i];
5481
5482 if (optional_operand_p (opcode, i))
5483 {
5484 /* Remember where we are in case we need to backtrack. */
5485 gas_assert (!backtrack_pos);
5486 backtrack_pos = str;
5487 }
5488
5489 /* Expect comma between operands; the backtrack mechanism will take
5490 care of cases of omitted optional operand. */
5491 if (i > 0 && ! skip_past_char (&str, ','))
5492 {
5493 set_syntax_error (_("comma expected between operands"));
5494 goto failure;
5495 }
5496 else
5497 comma_skipped_p = 1;
5498
5499 switch (operands[i])
5500 {
5501 case AARCH64_OPND_Rd:
5502 case AARCH64_OPND_Rn:
5503 case AARCH64_OPND_Rm:
5504 case AARCH64_OPND_Rt:
5505 case AARCH64_OPND_Rt2:
5506 case AARCH64_OPND_Rs:
5507 case AARCH64_OPND_Ra:
5508 case AARCH64_OPND_Rt_SYS:
5509 case AARCH64_OPND_PAIRREG:
5510 case AARCH64_OPND_SVE_Rm:
5511 po_int_reg_or_fail (REG_TYPE_R_Z);
5512 break;
5513
5514 case AARCH64_OPND_Rd_SP:
5515 case AARCH64_OPND_Rn_SP:
5516 case AARCH64_OPND_SVE_Rn_SP:
5517 case AARCH64_OPND_Rm_SP:
5518 po_int_reg_or_fail (REG_TYPE_R_SP);
5519 break;
5520
5521 case AARCH64_OPND_Rm_EXT:
5522 case AARCH64_OPND_Rm_SFT:
5523 po_misc_or_fail (parse_shifter_operand
5524 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5525 ? SHIFTED_ARITH_IMM
5526 : SHIFTED_LOGIC_IMM)));
5527 if (!info->shifter.operator_present)
5528 {
5529 /* Default to LSL if not present. Libopcodes prefers shifter
5530 kind to be explicit. */
5531 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5532 info->shifter.kind = AARCH64_MOD_LSL;
5533 /* For Rm_EXT, libopcodes will carry out further check on whether
5534 or not stack pointer is used in the instruction (Recall that
5535 "the extend operator is not optional unless at least one of
5536 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5537 }
5538 break;
5539
5540 case AARCH64_OPND_Fd:
5541 case AARCH64_OPND_Fn:
5542 case AARCH64_OPND_Fm:
5543 case AARCH64_OPND_Fa:
5544 case AARCH64_OPND_Ft:
5545 case AARCH64_OPND_Ft2:
5546 case AARCH64_OPND_Sd:
5547 case AARCH64_OPND_Sn:
5548 case AARCH64_OPND_Sm:
5549 case AARCH64_OPND_SVE_VZn:
5550 case AARCH64_OPND_SVE_Vd:
5551 case AARCH64_OPND_SVE_Vm:
5552 case AARCH64_OPND_SVE_Vn:
5553 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5554 if (val == PARSE_FAIL)
5555 {
5556 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5557 goto failure;
5558 }
5559 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5560
5561 info->reg.regno = val;
5562 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5563 break;
5564
5565 case AARCH64_OPND_SVE_Pd:
5566 case AARCH64_OPND_SVE_Pg3:
5567 case AARCH64_OPND_SVE_Pg4_5:
5568 case AARCH64_OPND_SVE_Pg4_10:
5569 case AARCH64_OPND_SVE_Pg4_16:
5570 case AARCH64_OPND_SVE_Pm:
5571 case AARCH64_OPND_SVE_Pn:
5572 case AARCH64_OPND_SVE_Pt:
5573 reg_type = REG_TYPE_PN;
5574 goto vector_reg;
5575
5576 case AARCH64_OPND_SVE_Za_5:
5577 case AARCH64_OPND_SVE_Za_16:
5578 case AARCH64_OPND_SVE_Zd:
5579 case AARCH64_OPND_SVE_Zm_5:
5580 case AARCH64_OPND_SVE_Zm_16:
5581 case AARCH64_OPND_SVE_Zn:
5582 case AARCH64_OPND_SVE_Zt:
5583 reg_type = REG_TYPE_ZN;
5584 goto vector_reg;
5585
5586 case AARCH64_OPND_Va:
5587 case AARCH64_OPND_Vd:
5588 case AARCH64_OPND_Vn:
5589 case AARCH64_OPND_Vm:
5590 reg_type = REG_TYPE_VN;
5591 vector_reg:
5592 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5593 if (val == PARSE_FAIL)
5594 {
5595 first_error (_(get_reg_expected_msg (reg_type)));
5596 goto failure;
5597 }
5598 if (vectype.defined & NTA_HASINDEX)
5599 goto failure;
5600
5601 info->reg.regno = val;
5602 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5603 && vectype.type == NT_invtype)
5604 /* Unqualified Pn and Zn registers are allowed in certain
5605 contexts. Rely on F_STRICT qualifier checking to catch
5606 invalid uses. */
5607 info->qualifier = AARCH64_OPND_QLF_NIL;
5608 else
5609 {
5610 info->qualifier = vectype_to_qualifier (&vectype);
5611 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5612 goto failure;
5613 }
5614 break;
5615
5616 case AARCH64_OPND_VdD1:
5617 case AARCH64_OPND_VnD1:
5618 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5619 if (val == PARSE_FAIL)
5620 {
5621 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5622 goto failure;
5623 }
5624 if (vectype.type != NT_d || vectype.index != 1)
5625 {
5626 set_fatal_syntax_error
5627 (_("the top half of a 128-bit FP/SIMD register is expected"));
5628 goto failure;
5629 }
5630 info->reg.regno = val;
5631 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5632 here; it is correct for the purpose of encoding/decoding since
5633 only the register number is explicitly encoded in the related
5634 instructions, although this appears a bit hacky. */
5635 info->qualifier = AARCH64_OPND_QLF_S_D;
5636 break;
5637
5638 case AARCH64_OPND_SVE_Zm3_INDEX:
5639 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5640 case AARCH64_OPND_SVE_Zm4_INDEX:
5641 case AARCH64_OPND_SVE_Zn_INDEX:
5642 reg_type = REG_TYPE_ZN;
5643 goto vector_reg_index;
5644
5645 case AARCH64_OPND_Ed:
5646 case AARCH64_OPND_En:
5647 case AARCH64_OPND_Em:
5648 case AARCH64_OPND_Em16:
5649 case AARCH64_OPND_SM3_IMM2:
5650 reg_type = REG_TYPE_VN;
5651 vector_reg_index:
5652 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5653 if (val == PARSE_FAIL)
5654 {
5655 first_error (_(get_reg_expected_msg (reg_type)));
5656 goto failure;
5657 }
5658 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5659 goto failure;
5660
5661 info->reglane.regno = val;
5662 info->reglane.index = vectype.index;
5663 info->qualifier = vectype_to_qualifier (&vectype);
5664 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5665 goto failure;
5666 break;
5667
5668 case AARCH64_OPND_SVE_ZnxN:
5669 case AARCH64_OPND_SVE_ZtxN:
5670 reg_type = REG_TYPE_ZN;
5671 goto vector_reg_list;
5672
5673 case AARCH64_OPND_LVn:
5674 case AARCH64_OPND_LVt:
5675 case AARCH64_OPND_LVt_AL:
5676 case AARCH64_OPND_LEt:
5677 reg_type = REG_TYPE_VN;
5678 vector_reg_list:
5679 if (reg_type == REG_TYPE_ZN
5680 && get_opcode_dependent_value (opcode) == 1
5681 && *str != '{')
5682 {
5683 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5684 if (val == PARSE_FAIL)
5685 {
5686 first_error (_(get_reg_expected_msg (reg_type)));
5687 goto failure;
5688 }
5689 info->reglist.first_regno = val;
5690 info->reglist.num_regs = 1;
5691 }
5692 else
5693 {
5694 val = parse_vector_reg_list (&str, reg_type, &vectype);
5695 if (val == PARSE_FAIL)
5696 goto failure;
5697 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5698 {
5699 set_fatal_syntax_error (_("invalid register list"));
5700 goto failure;
5701 }
5702 info->reglist.first_regno = (val >> 2) & 0x1f;
5703 info->reglist.num_regs = (val & 0x3) + 1;
5704 }
5705 if (operands[i] == AARCH64_OPND_LEt)
5706 {
5707 if (!(vectype.defined & NTA_HASINDEX))
5708 goto failure;
5709 info->reglist.has_index = 1;
5710 info->reglist.index = vectype.index;
5711 }
5712 else
5713 {
5714 if (vectype.defined & NTA_HASINDEX)
5715 goto failure;
5716 if (!(vectype.defined & NTA_HASTYPE))
5717 {
5718 if (reg_type == REG_TYPE_ZN)
5719 set_fatal_syntax_error (_("missing type suffix"));
5720 goto failure;
5721 }
5722 }
5723 info->qualifier = vectype_to_qualifier (&vectype);
5724 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5725 goto failure;
5726 break;
5727
5728 case AARCH64_OPND_CRn:
5729 case AARCH64_OPND_CRm:
5730 {
5731 char prefix = *(str++);
5732 if (prefix != 'c' && prefix != 'C')
5733 goto failure;
5734
5735 po_imm_nc_or_fail ();
5736 if (val > 15)
5737 {
5738 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5739 goto failure;
5740 }
5741 info->qualifier = AARCH64_OPND_QLF_CR;
5742 info->imm.value = val;
5743 break;
5744 }
5745
5746 case AARCH64_OPND_SHLL_IMM:
5747 case AARCH64_OPND_IMM_VLSR:
5748 po_imm_or_fail (1, 64);
5749 info->imm.value = val;
5750 break;
5751
5752 case AARCH64_OPND_CCMP_IMM:
5753 case AARCH64_OPND_SIMM5:
5754 case AARCH64_OPND_FBITS:
5755 case AARCH64_OPND_UIMM4:
5756 case AARCH64_OPND_UIMM4_ADDG:
5757 case AARCH64_OPND_UIMM10:
5758 case AARCH64_OPND_UIMM3_OP1:
5759 case AARCH64_OPND_UIMM3_OP2:
5760 case AARCH64_OPND_IMM_VLSL:
5761 case AARCH64_OPND_IMM:
5762 case AARCH64_OPND_IMM_2:
5763 case AARCH64_OPND_WIDTH:
5764 case AARCH64_OPND_SVE_INV_LIMM:
5765 case AARCH64_OPND_SVE_LIMM:
5766 case AARCH64_OPND_SVE_LIMM_MOV:
5767 case AARCH64_OPND_SVE_SHLIMM_PRED:
5768 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5769 case AARCH64_OPND_SVE_SHRIMM_PRED:
5770 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5771 case AARCH64_OPND_SVE_SIMM5:
5772 case AARCH64_OPND_SVE_SIMM5B:
5773 case AARCH64_OPND_SVE_SIMM6:
5774 case AARCH64_OPND_SVE_SIMM8:
5775 case AARCH64_OPND_SVE_UIMM3:
5776 case AARCH64_OPND_SVE_UIMM7:
5777 case AARCH64_OPND_SVE_UIMM8:
5778 case AARCH64_OPND_SVE_UIMM8_53:
5779 case AARCH64_OPND_IMM_ROT1:
5780 case AARCH64_OPND_IMM_ROT2:
5781 case AARCH64_OPND_IMM_ROT3:
5782 case AARCH64_OPND_SVE_IMM_ROT1:
5783 case AARCH64_OPND_SVE_IMM_ROT2:
5784 po_imm_nc_or_fail ();
5785 info->imm.value = val;
5786 break;
5787
5788 case AARCH64_OPND_SVE_AIMM:
5789 case AARCH64_OPND_SVE_ASIMM:
5790 po_imm_nc_or_fail ();
5791 info->imm.value = val;
5792 skip_whitespace (str);
5793 if (skip_past_comma (&str))
5794 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5795 else
5796 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5797 break;
5798
5799 case AARCH64_OPND_SVE_PATTERN:
5800 po_enum_or_fail (aarch64_sve_pattern_array);
5801 info->imm.value = val;
5802 break;
5803
5804 case AARCH64_OPND_SVE_PATTERN_SCALED:
5805 po_enum_or_fail (aarch64_sve_pattern_array);
5806 info->imm.value = val;
5807 if (skip_past_comma (&str)
5808 && !parse_shift (&str, info, SHIFTED_MUL))
5809 goto failure;
5810 if (!info->shifter.operator_present)
5811 {
5812 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5813 info->shifter.kind = AARCH64_MOD_MUL;
5814 info->shifter.amount = 1;
5815 }
5816 break;
5817
5818 case AARCH64_OPND_SVE_PRFOP:
5819 po_enum_or_fail (aarch64_sve_prfop_array);
5820 info->imm.value = val;
5821 break;
5822
5823 case AARCH64_OPND_UIMM7:
5824 po_imm_or_fail (0, 127);
5825 info->imm.value = val;
5826 break;
5827
5828 case AARCH64_OPND_IDX:
5829 case AARCH64_OPND_MASK:
5830 case AARCH64_OPND_BIT_NUM:
5831 case AARCH64_OPND_IMMR:
5832 case AARCH64_OPND_IMMS:
5833 po_imm_or_fail (0, 63);
5834 info->imm.value = val;
5835 break;
5836
5837 case AARCH64_OPND_IMM0:
5838 po_imm_nc_or_fail ();
5839 if (val != 0)
5840 {
5841 set_fatal_syntax_error (_("immediate zero expected"));
5842 goto failure;
5843 }
5844 info->imm.value = 0;
5845 break;
5846
5847 case AARCH64_OPND_FPIMM0:
5848 {
5849 int qfloat;
5850 bfd_boolean res1 = FALSE, res2 = FALSE;
5851 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5852 it is probably not worth the effort to support it. */
5853 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5854 imm_reg_type))
5855 && (error_p ()
5856 || !(res2 = parse_constant_immediate (&str, &val,
5857 imm_reg_type))))
5858 goto failure;
5859 if ((res1 && qfloat == 0) || (res2 && val == 0))
5860 {
5861 info->imm.value = 0;
5862 info->imm.is_fp = 1;
5863 break;
5864 }
5865 set_fatal_syntax_error (_("immediate zero expected"));
5866 goto failure;
5867 }
5868
5869 case AARCH64_OPND_IMM_MOV:
5870 {
5871 char *saved = str;
5872 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5873 reg_name_p (str, REG_TYPE_VN))
5874 goto failure;
5875 str = saved;
5876 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5877 GE_OPT_PREFIX, 1));
5878 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5879 later. fix_mov_imm_insn will try to determine a machine
5880 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5881 message if the immediate cannot be moved by a single
5882 instruction. */
5883 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5884 inst.base.operands[i].skip = 1;
5885 }
5886 break;
5887
5888 case AARCH64_OPND_SIMD_IMM:
5889 case AARCH64_OPND_SIMD_IMM_SFT:
5890 if (! parse_big_immediate (&str, &val, imm_reg_type))
5891 goto failure;
5892 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5893 /* addr_off_p */ 0,
5894 /* need_libopcodes_p */ 1,
5895 /* skip_p */ 1);
5896 /* Parse shift.
5897 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5898 shift, we don't check it here; we leave the checking to
5899 the libopcodes (operand_general_constraint_met_p). By
5900 doing this, we achieve better diagnostics. */
5901 if (skip_past_comma (&str)
5902 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5903 goto failure;
5904 if (!info->shifter.operator_present
5905 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5906 {
5907 /* Default to LSL if not present. Libopcodes prefers shifter
5908 kind to be explicit. */
5909 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5910 info->shifter.kind = AARCH64_MOD_LSL;
5911 }
5912 break;
5913
5914 case AARCH64_OPND_FPIMM:
5915 case AARCH64_OPND_SIMD_FPIMM:
5916 case AARCH64_OPND_SVE_FPIMM8:
5917 {
5918 int qfloat;
5919 bfd_boolean dp_p;
5920
5921 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5922 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5923 || !aarch64_imm_float_p (qfloat))
5924 {
5925 if (!error_p ())
5926 set_fatal_syntax_error (_("invalid floating-point"
5927 " constant"));
5928 goto failure;
5929 }
5930 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5931 inst.base.operands[i].imm.is_fp = 1;
5932 }
5933 break;
5934
5935 case AARCH64_OPND_SVE_I1_HALF_ONE:
5936 case AARCH64_OPND_SVE_I1_HALF_TWO:
5937 case AARCH64_OPND_SVE_I1_ZERO_ONE:
5938 {
5939 int qfloat;
5940 bfd_boolean dp_p;
5941
5942 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5943 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5944 {
5945 if (!error_p ())
5946 set_fatal_syntax_error (_("invalid floating-point"
5947 " constant"));
5948 goto failure;
5949 }
5950 inst.base.operands[i].imm.value = qfloat;
5951 inst.base.operands[i].imm.is_fp = 1;
5952 }
5953 break;
5954
5955 case AARCH64_OPND_LIMM:
5956 po_misc_or_fail (parse_shifter_operand (&str, info,
5957 SHIFTED_LOGIC_IMM));
5958 if (info->shifter.operator_present)
5959 {
5960 set_fatal_syntax_error
5961 (_("shift not allowed for bitmask immediate"));
5962 goto failure;
5963 }
5964 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5965 /* addr_off_p */ 0,
5966 /* need_libopcodes_p */ 1,
5967 /* skip_p */ 1);
5968 break;
5969
5970 case AARCH64_OPND_AIMM:
5971 if (opcode->op == OP_ADD)
5972 /* ADD may have relocation types. */
5973 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5974 SHIFTED_ARITH_IMM));
5975 else
5976 po_misc_or_fail (parse_shifter_operand (&str, info,
5977 SHIFTED_ARITH_IMM));
5978 switch (inst.reloc.type)
5979 {
5980 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5981 info->shifter.amount = 12;
5982 break;
5983 case BFD_RELOC_UNUSED:
5984 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5985 if (info->shifter.kind != AARCH64_MOD_NONE)
5986 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5987 inst.reloc.pc_rel = 0;
5988 break;
5989 default:
5990 break;
5991 }
5992 info->imm.value = 0;
5993 if (!info->shifter.operator_present)
5994 {
5995 /* Default to LSL if not present. Libopcodes prefers shifter
5996 kind to be explicit. */
5997 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5998 info->shifter.kind = AARCH64_MOD_LSL;
5999 }
6000 break;
6001
6002 case AARCH64_OPND_HALF:
6003 {
6004 /* #<imm16> or relocation. */
6005 int internal_fixup_p;
6006 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6007 if (internal_fixup_p)
6008 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6009 skip_whitespace (str);
6010 if (skip_past_comma (&str))
6011 {
6012 /* {, LSL #<shift>} */
6013 if (! aarch64_gas_internal_fixup_p ())
6014 {
6015 set_fatal_syntax_error (_("can't mix relocation modifier "
6016 "with explicit shift"));
6017 goto failure;
6018 }
6019 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6020 }
6021 else
6022 inst.base.operands[i].shifter.amount = 0;
6023 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6024 inst.base.operands[i].imm.value = 0;
6025 if (! process_movw_reloc_info ())
6026 goto failure;
6027 }
6028 break;
6029
6030 case AARCH64_OPND_EXCEPTION:
6031 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6032 imm_reg_type));
6033 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6034 /* addr_off_p */ 0,
6035 /* need_libopcodes_p */ 0,
6036 /* skip_p */ 1);
6037 break;
6038
6039 case AARCH64_OPND_NZCV:
6040 {
6041 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
6042 if (nzcv != NULL)
6043 {
6044 str += 4;
6045 info->imm.value = nzcv->value;
6046 break;
6047 }
6048 po_imm_or_fail (0, 15);
6049 info->imm.value = val;
6050 }
6051 break;
6052
6053 case AARCH64_OPND_COND:
6054 case AARCH64_OPND_COND1:
6055 {
6056 char *start = str;
6057 do
6058 str++;
6059 while (ISALPHA (*str));
6060 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
6061 if (info->cond == NULL)
6062 {
6063 set_syntax_error (_("invalid condition"));
6064 goto failure;
6065 }
6066 else if (operands[i] == AARCH64_OPND_COND1
6067 && (info->cond->value & 0xe) == 0xe)
6068 {
6069 /* Do not allow AL or NV. */
6070 set_default_error ();
6071 goto failure;
6072 }
6073 }
6074 break;
6075
6076 case AARCH64_OPND_ADDR_ADRP:
6077 po_misc_or_fail (parse_adrp (&str));
6078 /* Clear the value as operand needs to be relocated. */
6079 info->imm.value = 0;
6080 break;
6081
6082 case AARCH64_OPND_ADDR_PCREL14:
6083 case AARCH64_OPND_ADDR_PCREL19:
6084 case AARCH64_OPND_ADDR_PCREL21:
6085 case AARCH64_OPND_ADDR_PCREL26:
6086 po_misc_or_fail (parse_address (&str, info));
6087 if (!info->addr.pcrel)
6088 {
6089 set_syntax_error (_("invalid pc-relative address"));
6090 goto failure;
6091 }
6092 if (inst.gen_lit_pool
6093 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6094 {
6095 /* Only permit "=value" in the literal load instructions.
6096 The literal will be generated by programmer_friendly_fixup. */
6097 set_syntax_error (_("invalid use of \"=immediate\""));
6098 goto failure;
6099 }
6100 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6101 {
6102 set_syntax_error (_("unrecognized relocation suffix"));
6103 goto failure;
6104 }
6105 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6106 {
6107 info->imm.value = inst.reloc.exp.X_add_number;
6108 inst.reloc.type = BFD_RELOC_UNUSED;
6109 }
6110 else
6111 {
6112 info->imm.value = 0;
6113 if (inst.reloc.type == BFD_RELOC_UNUSED)
6114 switch (opcode->iclass)
6115 {
6116 case compbranch:
6117 case condbranch:
6118 /* e.g. CBZ or B.COND */
6119 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6120 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6121 break;
6122 case testbranch:
6123 /* e.g. TBZ */
6124 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6125 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6126 break;
6127 case branch_imm:
6128 /* e.g. B or BL */
6129 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6130 inst.reloc.type =
6131 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6132 : BFD_RELOC_AARCH64_JUMP26;
6133 break;
6134 case loadlit:
6135 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6136 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6137 break;
6138 case pcreladdr:
6139 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6140 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6141 break;
6142 default:
6143 gas_assert (0);
6144 abort ();
6145 }
6146 inst.reloc.pc_rel = 1;
6147 }
6148 break;
6149
6150 case AARCH64_OPND_ADDR_SIMPLE:
6151 case AARCH64_OPND_ADDR_SIMPLE_2:
6152 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6153 {
6154 /* [<Xn|SP>{, #<simm>}] */
6155 char *start = str;
6156 /* First use the normal address-parsing routines, to get
6157 the usual syntax errors. */
6158 po_misc_or_fail (parse_address (&str, info));
6159 if (info->addr.pcrel || info->addr.offset.is_reg
6160 || !info->addr.preind || info->addr.postind
6161 || (info->addr.writeback
6162 && operands[i] != AARCH64_OPND_ADDR_SIMPLE_2))
6163 {
6164 set_syntax_error (_("invalid addressing mode"));
6165 goto failure;
6166 }
6167
6168 /* Then retry, matching the specific syntax of these addresses. */
6169 str = start;
6170 po_char_or_fail ('[');
6171 po_reg_or_fail (REG_TYPE_R64_SP);
6172 /* Accept optional ", #0". */
6173 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6174 && skip_past_char (&str, ','))
6175 {
6176 skip_past_char (&str, '#');
6177 if (! skip_past_char (&str, '0'))
6178 {
6179 set_fatal_syntax_error
6180 (_("the optional immediate offset can only be 0"));
6181 goto failure;
6182 }
6183 }
6184 po_char_or_fail (']');
6185 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE_2)
6186 po_char_or_fail ('!');
6187 break;
6188 }
6189
6190 case AARCH64_OPND_ADDR_REGOFF:
6191 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6192 po_misc_or_fail (parse_address (&str, info));
6193 regoff_addr:
6194 if (info->addr.pcrel || !info->addr.offset.is_reg
6195 || !info->addr.preind || info->addr.postind
6196 || info->addr.writeback)
6197 {
6198 set_syntax_error (_("invalid addressing mode"));
6199 goto failure;
6200 }
6201 if (!info->shifter.operator_present)
6202 {
6203 /* Default to LSL if not present. Libopcodes prefers shifter
6204 kind to be explicit. */
6205 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6206 info->shifter.kind = AARCH64_MOD_LSL;
6207 }
6208 /* Qualifier to be deduced by libopcodes. */
6209 break;
6210
6211 case AARCH64_OPND_ADDR_SIMM7:
6212 po_misc_or_fail (parse_address (&str, info));
6213 if (info->addr.pcrel || info->addr.offset.is_reg
6214 || (!info->addr.preind && !info->addr.postind))
6215 {
6216 set_syntax_error (_("invalid addressing mode"));
6217 goto failure;
6218 }
6219 if (inst.reloc.type != BFD_RELOC_UNUSED)
6220 {
6221 set_syntax_error (_("relocation not allowed"));
6222 goto failure;
6223 }
6224 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6225 /* addr_off_p */ 1,
6226 /* need_libopcodes_p */ 1,
6227 /* skip_p */ 0);
6228 break;
6229
6230 case AARCH64_OPND_ADDR_SIMM9:
6231 case AARCH64_OPND_ADDR_SIMM9_2:
6232 case AARCH64_OPND_ADDR_SIMM11:
6233 case AARCH64_OPND_ADDR_SIMM13:
6234 po_misc_or_fail (parse_address (&str, info));
6235 if (info->addr.pcrel || info->addr.offset.is_reg
6236 || (!info->addr.preind && !info->addr.postind)
6237 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6238 && info->addr.writeback))
6239 {
6240 set_syntax_error (_("invalid addressing mode"));
6241 goto failure;
6242 }
6243 if (inst.reloc.type != BFD_RELOC_UNUSED)
6244 {
6245 set_syntax_error (_("relocation not allowed"));
6246 goto failure;
6247 }
6248 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6249 /* addr_off_p */ 1,
6250 /* need_libopcodes_p */ 1,
6251 /* skip_p */ 0);
6252 break;
6253
6254 case AARCH64_OPND_ADDR_SIMM10:
6255 case AARCH64_OPND_ADDR_OFFSET:
6256 po_misc_or_fail (parse_address (&str, info));
6257 if (info->addr.pcrel || info->addr.offset.is_reg
6258 || !info->addr.preind || info->addr.postind)
6259 {
6260 set_syntax_error (_("invalid addressing mode"));
6261 goto failure;
6262 }
6263 if (inst.reloc.type != BFD_RELOC_UNUSED)
6264 {
6265 set_syntax_error (_("relocation not allowed"));
6266 goto failure;
6267 }
6268 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6269 /* addr_off_p */ 1,
6270 /* need_libopcodes_p */ 1,
6271 /* skip_p */ 0);
6272 break;
6273
6274 case AARCH64_OPND_ADDR_UIMM12:
6275 po_misc_or_fail (parse_address (&str, info));
6276 if (info->addr.pcrel || info->addr.offset.is_reg
6277 || !info->addr.preind || info->addr.writeback)
6278 {
6279 set_syntax_error (_("invalid addressing mode"));
6280 goto failure;
6281 }
6282 if (inst.reloc.type == BFD_RELOC_UNUSED)
6283 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6284 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6285 || (inst.reloc.type
6286 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6287 || (inst.reloc.type
6288 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6289 || (inst.reloc.type
6290 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6291 || (inst.reloc.type
6292 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6293 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6294 /* Leave qualifier to be determined by libopcodes. */
6295 break;
6296
6297 case AARCH64_OPND_SIMD_ADDR_POST:
6298 /* [<Xn|SP>], <Xm|#<amount>> */
6299 po_misc_or_fail (parse_address (&str, info));
6300 if (!info->addr.postind || !info->addr.writeback)
6301 {
6302 set_syntax_error (_("invalid addressing mode"));
6303 goto failure;
6304 }
6305 if (!info->addr.offset.is_reg)
6306 {
6307 if (inst.reloc.exp.X_op == O_constant)
6308 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6309 else
6310 {
6311 set_fatal_syntax_error
6312 (_("writeback value must be an immediate constant"));
6313 goto failure;
6314 }
6315 }
6316 /* No qualifier. */
6317 break;
6318
6319 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6320 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6321 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6322 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6323 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6324 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6325 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6326 case AARCH64_OPND_SVE_ADDR_RI_U6:
6327 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6328 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6329 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6330 /* [X<n>{, #imm, MUL VL}]
6331 [X<n>{, #imm}]
6332 but recognizing SVE registers. */
6333 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6334 &offset_qualifier));
6335 if (base_qualifier != AARCH64_OPND_QLF_X)
6336 {
6337 set_syntax_error (_("invalid addressing mode"));
6338 goto failure;
6339 }
6340 sve_regimm:
6341 if (info->addr.pcrel || info->addr.offset.is_reg
6342 || !info->addr.preind || info->addr.writeback)
6343 {
6344 set_syntax_error (_("invalid addressing mode"));
6345 goto failure;
6346 }
6347 if (inst.reloc.type != BFD_RELOC_UNUSED
6348 || inst.reloc.exp.X_op != O_constant)
6349 {
6350 /* Make sure this has priority over
6351 "invalid addressing mode". */
6352 set_fatal_syntax_error (_("constant offset required"));
6353 goto failure;
6354 }
6355 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6356 break;
6357
6358 case AARCH64_OPND_SVE_ADDR_R:
6359 /* [<Xn|SP>{, <R><m>}]
6360 but recognizing SVE registers. */
6361 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6362 &offset_qualifier));
6363 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6364 {
6365 offset_qualifier = AARCH64_OPND_QLF_X;
6366 info->addr.offset.is_reg = 1;
6367 info->addr.offset.regno = 31;
6368 }
6369 else if (base_qualifier != AARCH64_OPND_QLF_X
6370 || offset_qualifier != AARCH64_OPND_QLF_X)
6371 {
6372 set_syntax_error (_("invalid addressing mode"));
6373 goto failure;
6374 }
6375 goto regoff_addr;
6376
6377 case AARCH64_OPND_SVE_ADDR_RR:
6378 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6379 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6380 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6381 case AARCH64_OPND_SVE_ADDR_RX:
6382 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6383 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6384 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6385 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6386 but recognizing SVE registers. */
6387 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6388 &offset_qualifier));
6389 if (base_qualifier != AARCH64_OPND_QLF_X
6390 || offset_qualifier != AARCH64_OPND_QLF_X)
6391 {
6392 set_syntax_error (_("invalid addressing mode"));
6393 goto failure;
6394 }
6395 goto regoff_addr;
6396
6397 case AARCH64_OPND_SVE_ADDR_RZ:
6398 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6399 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6400 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6401 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6402 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6403 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6404 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6405 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6406 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6407 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6408 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6409 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6410 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6411 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6412 &offset_qualifier));
6413 if (base_qualifier != AARCH64_OPND_QLF_X
6414 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6415 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6416 {
6417 set_syntax_error (_("invalid addressing mode"));
6418 goto failure;
6419 }
6420 info->qualifier = offset_qualifier;
6421 goto regoff_addr;
6422
6423 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6424 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6425 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6426 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6427 /* [Z<n>.<T>{, #imm}] */
6428 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6429 &offset_qualifier));
6430 if (base_qualifier != AARCH64_OPND_QLF_S_S
6431 && base_qualifier != AARCH64_OPND_QLF_S_D)
6432 {
6433 set_syntax_error (_("invalid addressing mode"));
6434 goto failure;
6435 }
6436 info->qualifier = base_qualifier;
6437 goto sve_regimm;
6438
6439 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6440 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6441 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6442 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6443 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6444
6445 We don't reject:
6446
6447 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6448
6449 here since we get better error messages by leaving it to
6450 the qualifier checking routines. */
6451 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6452 &offset_qualifier));
6453 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6454 && base_qualifier != AARCH64_OPND_QLF_S_D)
6455 || offset_qualifier != base_qualifier)
6456 {
6457 set_syntax_error (_("invalid addressing mode"));
6458 goto failure;
6459 }
6460 info->qualifier = base_qualifier;
6461 goto regoff_addr;
6462
6463 case AARCH64_OPND_SYSREG:
6464 {
6465 uint32_t sysreg_flags;
6466 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6467 &sysreg_flags)) == PARSE_FAIL)
6468 {
6469 set_syntax_error (_("unknown or missing system register name"));
6470 goto failure;
6471 }
6472 inst.base.operands[i].sysreg.value = val;
6473 inst.base.operands[i].sysreg.flags = sysreg_flags;
6474 break;
6475 }
6476
6477 case AARCH64_OPND_PSTATEFIELD:
6478 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6479 == PARSE_FAIL)
6480 {
6481 set_syntax_error (_("unknown or missing PSTATE field name"));
6482 goto failure;
6483 }
6484 inst.base.operands[i].pstatefield = val;
6485 break;
6486
6487 case AARCH64_OPND_SYSREG_IC:
6488 inst.base.operands[i].sysins_op =
6489 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6490 goto sys_reg_ins;
6491
6492 case AARCH64_OPND_SYSREG_DC:
6493 inst.base.operands[i].sysins_op =
6494 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6495 goto sys_reg_ins;
6496
6497 case AARCH64_OPND_SYSREG_AT:
6498 inst.base.operands[i].sysins_op =
6499 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6500 goto sys_reg_ins;
6501
6502 case AARCH64_OPND_SYSREG_SR:
6503 inst.base.operands[i].sysins_op =
6504 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6505 goto sys_reg_ins;
6506
6507 case AARCH64_OPND_SYSREG_TLBI:
6508 inst.base.operands[i].sysins_op =
6509 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6510 sys_reg_ins:
6511 if (inst.base.operands[i].sysins_op == NULL)
6512 {
6513 set_fatal_syntax_error ( _("unknown or missing operation name"));
6514 goto failure;
6515 }
6516 break;
6517
6518 case AARCH64_OPND_BARRIER:
6519 case AARCH64_OPND_BARRIER_ISB:
6520 val = parse_barrier (&str);
6521 if (val != PARSE_FAIL
6522 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6523 {
6524 /* ISB only accepts options name 'sy'. */
6525 set_syntax_error
6526 (_("the specified option is not accepted in ISB"));
6527 /* Turn off backtrack as this optional operand is present. */
6528 backtrack_pos = 0;
6529 goto failure;
6530 }
6531 /* This is an extension to accept a 0..15 immediate. */
6532 if (val == PARSE_FAIL)
6533 po_imm_or_fail (0, 15);
6534 info->barrier = aarch64_barrier_options + val;
6535 break;
6536
6537 case AARCH64_OPND_PRFOP:
6538 val = parse_pldop (&str);
6539 /* This is an extension to accept a 0..31 immediate. */
6540 if (val == PARSE_FAIL)
6541 po_imm_or_fail (0, 31);
6542 inst.base.operands[i].prfop = aarch64_prfops + val;
6543 break;
6544
6545 case AARCH64_OPND_BARRIER_PSB:
6546 val = parse_barrier_psb (&str, &(info->hint_option));
6547 if (val == PARSE_FAIL)
6548 goto failure;
6549 break;
6550
6551 case AARCH64_OPND_BTI_TARGET:
6552 val = parse_bti_operand (&str, &(info->hint_option));
6553 if (val == PARSE_FAIL)
6554 goto failure;
6555 break;
6556
6557 default:
6558 as_fatal (_("unhandled operand code %d"), operands[i]);
6559 }
6560
6561 /* If we get here, this operand was successfully parsed. */
6562 inst.base.operands[i].present = 1;
6563 continue;
6564
6565 failure:
6566 /* The parse routine should already have set the error, but in case
6567 not, set a default one here. */
6568 if (! error_p ())
6569 set_default_error ();
6570
6571 if (! backtrack_pos)
6572 goto parse_operands_return;
6573
6574 {
6575 /* We reach here because this operand is marked as optional, and
6576 either no operand was supplied or the operand was supplied but it
6577 was syntactically incorrect. In the latter case we report an
6578 error. In the former case we perform a few more checks before
6579 dropping through to the code to insert the default operand. */
6580
6581 char *tmp = backtrack_pos;
6582 char endchar = END_OF_INSN;
6583
6584 if (i != (aarch64_num_of_operands (opcode) - 1))
6585 endchar = ',';
6586 skip_past_char (&tmp, ',');
6587
6588 if (*tmp != endchar)
6589 /* The user has supplied an operand in the wrong format. */
6590 goto parse_operands_return;
6591
6592 /* Make sure there is not a comma before the optional operand.
6593 For example the fifth operand of 'sys' is optional:
6594
6595 sys #0,c0,c0,#0, <--- wrong
6596 sys #0,c0,c0,#0 <--- correct. */
6597 if (comma_skipped_p && i && endchar == END_OF_INSN)
6598 {
6599 set_fatal_syntax_error
6600 (_("unexpected comma before the omitted optional operand"));
6601 goto parse_operands_return;
6602 }
6603 }
6604
6605 /* Reaching here means we are dealing with an optional operand that is
6606 omitted from the assembly line. */
6607 gas_assert (optional_operand_p (opcode, i));
6608 info->present = 0;
6609 process_omitted_operand (operands[i], opcode, i, info);
6610
6611 /* Try again, skipping the optional operand at backtrack_pos. */
6612 str = backtrack_pos;
6613 backtrack_pos = 0;
6614
6615 /* Clear any error record after the omitted optional operand has been
6616 successfully handled. */
6617 clear_error ();
6618 }
6619
6620 /* Check if we have parsed all the operands. */
6621 if (*str != '\0' && ! error_p ())
6622 {
6623 /* Set I to the index of the last present operand; this is
6624 for the purpose of diagnostics. */
6625 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6626 ;
6627 set_fatal_syntax_error
6628 (_("unexpected characters following instruction"));
6629 }
6630
6631 parse_operands_return:
6632
6633 if (error_p ())
6634 {
6635 DEBUG_TRACE ("parsing FAIL: %s - %s",
6636 operand_mismatch_kind_names[get_error_kind ()],
6637 get_error_message ());
6638 /* Record the operand error properly; this is useful when there
6639 are multiple instruction templates for a mnemonic name, so that
6640 later on, we can select the error that most closely describes
6641 the problem. */
6642 record_operand_error (opcode, i, get_error_kind (),
6643 get_error_message ());
6644 return FALSE;
6645 }
6646 else
6647 {
6648 DEBUG_TRACE ("parsing SUCCESS");
6649 return TRUE;
6650 }
6651 }
6652
/* Fix up *INSTR to provide some programmer-friendly conveniences while
   keeping libopcodes happy, i.e. libopcodes only accepts the preferred
   architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register restricts the testable bit to 0..31.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* The instruction encoding only has X-form; widen the qualifier.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW loads a 32-bit literal regardless of the X destination.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      /* Most instruction classes need no fixing up.  */
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6760
/* Check for loads and stores that will cause unpredictable behavior.
   INSTR is the assembled instruction; STR is the source text, used only
   in the warning messages.  Only warnings are issued -- the instruction
   is still assembled.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.
	 Operand 0 is the transfer register, operand 1 the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstgv_indexed:
      /* Load operations must load different registers.
	 Bit 22 of the encoding distinguishes the load form here.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].addr.base_regno)
	as_warn (_("unpredictable load of register -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 Operands 0 and 1 are the register pair, operand 2 the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      /* It is unpredictable if the destination and status registers are the
	 same.  Operand 0 is the status register, operands 1/2 the transfer
	 registers.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[1].reg.regno
	      || opnds[0].reg.regno == opnds[2].reg.regno))
	as_warn (_("unpredictable: identical transfer and status registers"
		   " --`%s'"),
		 str);

      break;

    default:
      break;
    }
}
6830
/* If an instruction sequence (opened by a previous instruction that
   requires specific follow-on instructions) is still open, warn that it
   was never properly closed and reset the sequence tracking state.  */

static void
force_automatic_sequence_close (void)
{
  if (now_instr_sequence.instr)
    {
      as_warn (_("previous `%s' sequence has not been closed"),
	       now_instr_sequence.instr->opcode->name);
      /* Reset the tracked sequence to the empty state.  */
      init_insn_sequence (NULL, &now_instr_sequence);
    }
}
6841
6842 /* A wrapper function to interface with libopcodes on encoding and
6843 record the error message if there is any.
6844
6845 Return TRUE on success; otherwise return FALSE. */
6846
6847 static bfd_boolean
6848 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6849 aarch64_insn *code)
6850 {
6851 aarch64_operand_error error_info;
6852 memset (&error_info, '\0', sizeof (error_info));
6853 error_info.kind = AARCH64_OPDE_NIL;
6854 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
6855 && !error_info.non_fatal)
6856 return TRUE;
6857
6858 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6859 record_operand_error_info (opcode, &error_info);
6860 return error_info.non_fatal;
6861 }
6862
#ifdef DEBUG_AARCH64
/* Debug helper: print one line per operand of OPCODE, using the operand's
   name when it has one and its description otherwise.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  int idx;

  for (idx = 0; opcode->operands[idx] != AARCH64_OPND_NIL; idx++)
    {
      const char *text = aarch64_get_operand_name (opcode->operands[idx]);

      /* Fall back on the description when the operand has no name.  */
      if (text[0] == '\0')
	text = aarch64_get_operand_desc (opcode->operands[idx]);
      aarch64_verbose ("\t\t opnd%d: %s", idx, text);
    }
}
#endif /* DEBUG_AARCH64 */
6878
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  Each opcode entry sharing the
   mnemonic is tried in turn until one parses and encodes successfully;
   otherwise the best-matching recorded diagnostic is reported.  */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* opcode_lookup advances P past the mnemonic on success.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed out of a b.cond-style mnemonic across
     the per-attempt reset of INST.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, apply friendly fix-ups, then ask libopcodes to encode.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, TRUE);
	  return;
	}

      /* This template failed; reset INST before trying the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, FALSE);
}
7018
/* Various frobbings of labels and their addresses.  */

/* Start-of-line hook: forget the last label seen so that a label is only
   associated with the line it appears on.  */
void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
7026
/* Record SYM as the most recently seen label (md_assemble re-anchors it
   to the current frag/offset before emitting code) and emit DWARF line
   information for it.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
7034
/* Called when leaving a section; SEC itself is unused.  */
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
7041
7042 int
7043 aarch64_data_in_code (void)
7044 {
7045 if (!strncmp (input_line_pointer + 1, "data:", 5))
7046 {
7047 *input_line_pointer = '/';
7048 input_line_pointer += 5;
7049 *input_line_pointer = 0;
7050 return 1;
7051 }
7052
7053 return 0;
7054 }
7055
/* Canonicalize NAME by stripping a trailing "/data" suffix (appended by
   aarch64_data_in_code) in place.  Returns NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  /* Use size_t for the strlen result rather than truncating to int.  */
  size_t len = strlen (name);

  /* Only strip a proper suffix; a name that is exactly "/data" is kept.  */
  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
7066 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* The fourth initializer field distinguishes canonical names (TRUE, via
   REGDEF) from built-in aliases such as fp/lr (FALSE, via REGDEF_ALIAS)
   -- presumably so aliases can be redefined; confirm against reg_entry.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  x31/w31 are deliberately excluded: register 31
     is only reachable through the explicit sp/wsp/xzr/wzr names below.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  There are only sixteen (p0-p15).  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7136
/* Names for the NZCV condition-flag immediate (e.g. the last operand of
   CCMP/CCMN).  An uppercase letter in the name means the corresponding
   flag bit is set; B packs them into a 4-bit value with N in bit 3, Z in
   bit 2, C in bit 1 and V in bit 0.  All sixteen combinations follow.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7174 \f
7175 /* MD interface: bits in the object file. */
7176
7177 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7178 for use in the a.out file, and stores them in the array pointed to by buf.
7179 This knows about the endian-ness of the target machine and does
7180 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7181 2 (short) and 4 (long) Floating numbers are put out as a series of
7182 LITTLENUMS (shorts, here at least). */
7183
7184 void
7185 md_number_to_chars (char *buf, valueT val, int n)
7186 {
7187 if (target_big_endian)
7188 number_to_chars_bigendian (buf, val, n);
7189 else
7190 number_to_chars_littleendian (buf, val, n);
7191 }
7192
7193 /* MD interface: Sections. */
7194
/* Estimate the size of a frag before relaxing.  Assume everything fits in
   4 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Every AArch64 instruction is exactly 4 bytes.  */
  fragp->fr_var = 4;
  return 4;
}
7204
/* Round up a section size to the appropriate boundary.  No rounding is
   needed for this target, so return SIZE unchanged.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  return size;
}
7212
7213 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7214 of an rs_align_code fragment.
7215
7216 Here we fill the frag with the appropriate info for padding the
7217 output stream. The resulting frag will consist of a fixed (fr_fix)
7218 and of a repeating (fr_var) part.
7219
7220 The fixed content is always emitted before the repeating content and
7221 these two parts are used as follows in constructing the output:
7222 - the fixed part will be used to align to a valid instruction word
7223 boundary, in case that we start at a misaligned address; as no
7224 executable instruction can live at the misaligned location, we
7225 simply fill with zeros;
7226 - the variable part will be used to cover the remaining padding and
7227 we fill using the AArch64 NOP instruction.
7228
7229 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7230 enough storage space for up to 3 bytes for padding the back to a valid
7231 instruction alignment and exactly 4 bytes to store the NOP pattern. */
7232
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed to reach the next frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* First get back to a 4-byte instruction boundary by emitting zero
     bytes into the fixed part of the frag (see the comment above about
     the fixed/variable split).  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The variable part repeats the NOP pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
7270
7271 /* Perform target specific initialisation of a frag.
7272 Note - despite the name this initialisation is not done when the frag
7273 is created, but only when its type is assigned. A frag can be created
7274 and used a long time before its type is set, so beware of assuming that
7275 this initialisation is performed first. */
7276
#ifndef OBJ_ELF
/* Non-ELF targets need no per-frag initialisation.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}

#else /* OBJ_ELF is defined.  */
/* ELF: mark the frag as recorded and update the mapping-symbol state
   (MAP_DATA/MAP_INSN) according to the frag type.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
    return;

  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
7316 \f
/* Initialize the DWARF-2 unwind information for this procedure:
   the CFA starts at SP with offset 0.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
#endif /* OBJ_ELF */
7325
/* Convert REGNAME to a DWARF-2 register number.  Returns -1 if REGNAME
   is not a register or has no DWARF-2 mapping.  */

int
tc_aarch64_regname_to_dw2regnum (char *regname)
{
  const reg_entry *reg = parse_reg (&regname);
  if (reg == NULL)
    return -1;

  switch (reg->type)
    {
    /* General-purpose registers and SP map directly to 0-31.  */
    case REG_TYPE_SP_32:
    case REG_TYPE_SP_64:
    case REG_TYPE_R_32:
    case REG_TYPE_R_64:
      return reg->number;

    /* FP/SIMD registers occupy DWARF numbers 64 upward.  */
    case REG_TYPE_FP_B:
    case REG_TYPE_FP_H:
    case REG_TYPE_FP_S:
    case REG_TYPE_FP_D:
    case REG_TYPE_FP_Q:
      return reg->number + 64;

    default:
      break;
    }
  return -1;
}
7355
/* Implement DWARF2_ADDR_SIZE: the byte size of an address in DWARF
   debug information.  ILP32 uses 4-byte addresses even though the
   architecture is 64-bit.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7367
/* MD interface: Symbol and relocation handling.  */

/* Return the address within the segment that a PC-relative fixup is
   relative to.  For AArch64 PC-relative fixups applied to instructions
   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
7390
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap prefix test before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  /* Create the GOT symbol lazily, on first reference.  */
	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
7416
7417 /* Return non-zero if the indicated VALUE has overflowed the maximum
7418 range expressible by a unsigned number with the indicated number of
7419 BITS. */
7420
7421 static bfd_boolean
7422 unsigned_overflow (valueT value, unsigned bits)
7423 {
7424 valueT lim;
7425 if (bits >= sizeof (valueT) * 8)
7426 return FALSE;
7427 lim = (valueT) 1 << bits;
7428 return (value >= lim);
7429 }
7430
7431
7432 /* Return non-zero if the indicated VALUE has overflowed the maximum
7433 range expressible by an signed number with the indicated number of
7434 BITS. */
7435
7436 static bfd_boolean
7437 signed_overflow (offsetT value, unsigned bits)
7438 {
7439 offsetT lim;
7440 if (bits >= sizeof (offsetT) * 8)
7441 return FALSE;
7442 lim = (offsetT) 1 << (bits - 1);
7443 return (value < -lim || value >= lim);
7444 }
7445
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled (POS) opcode to its unscaled (UR) counterpart.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  /* No unscaled counterpart exists for this opcode.  */
  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; the immediate must fit the
     signed 9-bit form for this to succeed.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return FALSE;

  return TRUE;
}
7508
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN -- the inverted-immediate
	 wide move; the previous comment said MOVK, which is not an alias
	 of MOV).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias (bitmask immediate).  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
7569
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Address of the instruction bytes inside the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* Exception-generation immediate (e.g. SVC #imm16); must fit in
	 16 unsigned bits.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2  2 21111 111111
		  1  098|7654|3 2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm   imm6  Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm   imm6  Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm   imm6  Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm   imm6  Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2  221111111111
		  1  098|7654|3 2  109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  A negative immediate is encoded by flipping the
	     instruction to the opposite operation with the magnitude.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only possible when
	     the low 12 bits are clear and the value fits in 24 bits.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-run the full encoder since validity of a
	 logical immediate cannot be checked with simple range tests.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* A scaled-offset load/store whose offset does not fit may still
	     be representable as the unscaled (LDUR/STUR) form.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7747
7748 /* Apply a fixup (fixP) to segment data, once it has been determined
7749 by our caller that we have all the info we need to fix it up.
7750
7751 Parameter valP is the pointer to the value of the bits. */
7752
7753 void
7754 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7755 {
7756 offsetT value = *valP;
7757 uint32_t insn;
7758 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7759 int scale;
7760 unsigned flags = fixP->fx_addnumber;
7761
7762 DEBUG_TRACE ("\n\n");
7763 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7764 DEBUG_TRACE ("Enter md_apply_fix");
7765
7766 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7767
7768 /* Note whether this will delete the relocation. */
7769
7770 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7771 fixP->fx_done = 1;
7772
7773 /* Process the relocations. */
7774 switch (fixP->fx_r_type)
7775 {
7776 case BFD_RELOC_NONE:
7777 /* This will need to go in the object file. */
7778 fixP->fx_done = 0;
7779 break;
7780
7781 case BFD_RELOC_8:
7782 case BFD_RELOC_8_PCREL:
7783 if (fixP->fx_done || !seg->use_rela_p)
7784 md_number_to_chars (buf, value, 1);
7785 break;
7786
7787 case BFD_RELOC_16:
7788 case BFD_RELOC_16_PCREL:
7789 if (fixP->fx_done || !seg->use_rela_p)
7790 md_number_to_chars (buf, value, 2);
7791 break;
7792
7793 case BFD_RELOC_32:
7794 case BFD_RELOC_32_PCREL:
7795 if (fixP->fx_done || !seg->use_rela_p)
7796 md_number_to_chars (buf, value, 4);
7797 break;
7798
7799 case BFD_RELOC_64:
7800 case BFD_RELOC_64_PCREL:
7801 if (fixP->fx_done || !seg->use_rela_p)
7802 md_number_to_chars (buf, value, 8);
7803 break;
7804
7805 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7806 /* We claim that these fixups have been processed here, even if
7807 in fact we generate an error because we do not have a reloc
7808 for them, so tc_gen_reloc() will reject them. */
7809 fixP->fx_done = 1;
7810 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7811 {
7812 as_bad_where (fixP->fx_file, fixP->fx_line,
7813 _("undefined symbol %s used as an immediate value"),
7814 S_GET_NAME (fixP->fx_addsy));
7815 goto apply_fix_return;
7816 }
7817 fix_insn (fixP, flags, value);
7818 break;
7819
7820 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7821 if (fixP->fx_done || !seg->use_rela_p)
7822 {
7823 if (value & 3)
7824 as_bad_where (fixP->fx_file, fixP->fx_line,
7825 _("pc-relative load offset not word aligned"));
7826 if (signed_overflow (value, 21))
7827 as_bad_where (fixP->fx_file, fixP->fx_line,
7828 _("pc-relative load offset out of range"));
7829 insn = get_aarch64_insn (buf);
7830 insn |= encode_ld_lit_ofs_19 (value >> 2);
7831 put_aarch64_insn (buf, insn);
7832 }
7833 break;
7834
7835 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7836 if (fixP->fx_done || !seg->use_rela_p)
7837 {
7838 if (signed_overflow (value, 21))
7839 as_bad_where (fixP->fx_file, fixP->fx_line,
7840 _("pc-relative address offset out of range"));
7841 insn = get_aarch64_insn (buf);
7842 insn |= encode_adr_imm (value);
7843 put_aarch64_insn (buf, insn);
7844 }
7845 break;
7846
7847 case BFD_RELOC_AARCH64_BRANCH19:
7848 if (fixP->fx_done || !seg->use_rela_p)
7849 {
7850 if (value & 3)
7851 as_bad_where (fixP->fx_file, fixP->fx_line,
7852 _("conditional branch target not word aligned"));
7853 if (signed_overflow (value, 21))
7854 as_bad_where (fixP->fx_file, fixP->fx_line,
7855 _("conditional branch out of range"));
7856 insn = get_aarch64_insn (buf);
7857 insn |= encode_cond_branch_ofs_19 (value >> 2);
7858 put_aarch64_insn (buf, insn);
7859 }
7860 break;
7861
7862 case BFD_RELOC_AARCH64_TSTBR14:
7863 if (fixP->fx_done || !seg->use_rela_p)
7864 {
7865 if (value & 3)
7866 as_bad_where (fixP->fx_file, fixP->fx_line,
7867 _("conditional branch target not word aligned"));
7868 if (signed_overflow (value, 16))
7869 as_bad_where (fixP->fx_file, fixP->fx_line,
7870 _("conditional branch out of range"));
7871 insn = get_aarch64_insn (buf);
7872 insn |= encode_tst_branch_ofs_14 (value >> 2);
7873 put_aarch64_insn (buf, insn);
7874 }
7875 break;
7876
7877 case BFD_RELOC_AARCH64_CALL26:
7878 case BFD_RELOC_AARCH64_JUMP26:
7879 if (fixP->fx_done || !seg->use_rela_p)
7880 {
7881 if (value & 3)
7882 as_bad_where (fixP->fx_file, fixP->fx_line,
7883 _("branch target not word aligned"));
7884 if (signed_overflow (value, 28))
7885 as_bad_where (fixP->fx_file, fixP->fx_line,
7886 _("branch out of range"));
7887 insn = get_aarch64_insn (buf);
7888 insn |= encode_branch_ofs_26 (value >> 2);
7889 put_aarch64_insn (buf, insn);
7890 }
7891 break;
7892
7893 case BFD_RELOC_AARCH64_MOVW_G0:
7894 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7895 case BFD_RELOC_AARCH64_MOVW_G0_S:
7896 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7897 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
7898 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
7899 scale = 0;
7900 goto movw_common;
7901 case BFD_RELOC_AARCH64_MOVW_G1:
7902 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7903 case BFD_RELOC_AARCH64_MOVW_G1_S:
7904 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7905 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
7906 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
7907 scale = 16;
7908 goto movw_common;
7909 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7910 scale = 0;
7911 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7912 /* Should always be exported to object file, see
7913 aarch64_force_relocation(). */
7914 gas_assert (!fixP->fx_done);
7915 gas_assert (seg->use_rela_p);
7916 goto movw_common;
7917 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7918 scale = 16;
7919 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7920 /* Should always be exported to object file, see
7921 aarch64_force_relocation(). */
7922 gas_assert (!fixP->fx_done);
7923 gas_assert (seg->use_rela_p);
7924 goto movw_common;
7925 case BFD_RELOC_AARCH64_MOVW_G2:
7926 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7927 case BFD_RELOC_AARCH64_MOVW_G2_S:
7928 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
7929 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
7930 scale = 32;
7931 goto movw_common;
7932 case BFD_RELOC_AARCH64_MOVW_G3:
7933 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
7934 scale = 48;
7935 movw_common:
7936 if (fixP->fx_done || !seg->use_rela_p)
7937 {
7938 insn = get_aarch64_insn (buf);
7939
7940 if (!fixP->fx_done)
7941 {
7942 /* REL signed addend must fit in 16 bits */
7943 if (signed_overflow (value, 16))
7944 as_bad_where (fixP->fx_file, fixP->fx_line,
7945 _("offset out of range"));
7946 }
7947 else
7948 {
7949 /* Check for overflow and scale. */
7950 switch (fixP->fx_r_type)
7951 {
7952 case BFD_RELOC_AARCH64_MOVW_G0:
7953 case BFD_RELOC_AARCH64_MOVW_G1:
7954 case BFD_RELOC_AARCH64_MOVW_G2:
7955 case BFD_RELOC_AARCH64_MOVW_G3:
7956 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7957 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7958 if (unsigned_overflow (value, scale + 16))
7959 as_bad_where (fixP->fx_file, fixP->fx_line,
7960 _("unsigned value out of range"));
7961 break;
7962 case BFD_RELOC_AARCH64_MOVW_G0_S:
7963 case BFD_RELOC_AARCH64_MOVW_G1_S:
7964 case BFD_RELOC_AARCH64_MOVW_G2_S:
7965 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
7966 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
7967 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
7968 /* NOTE: We can only come here with movz or movn. */
7969 if (signed_overflow (value, scale + 16))
7970 as_bad_where (fixP->fx_file, fixP->fx_line,
7971 _("signed value out of range"));
7972 if (value < 0)
7973 {
7974 /* Force use of MOVN. */
7975 value = ~value;
7976 insn = reencode_movzn_to_movn (insn);
7977 }
7978 else
7979 {
7980 /* Force use of MOVZ. */
7981 insn = reencode_movzn_to_movz (insn);
7982 }
7983 break;
7984 default:
7985 /* Unchecked relocations. */
7986 break;
7987 }
7988 value >>= scale;
7989 }
7990
7991 /* Insert value into MOVN/MOVZ/MOVK instruction. */
7992 insn |= encode_movw_imm (value & 0xffff);
7993
7994 put_aarch64_insn (buf, insn);
7995 }
7996 break;
7997
7998 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7999 fixP->fx_r_type = (ilp32_p
8000 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
8001 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
8002 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8003 /* Should always be exported to object file, see
8004 aarch64_force_relocation(). */
8005 gas_assert (!fixP->fx_done);
8006 gas_assert (seg->use_rela_p);
8007 break;
8008
8009 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8010 fixP->fx_r_type = (ilp32_p
8011 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
8012 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
8013 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8014 /* Should always be exported to object file, see
8015 aarch64_force_relocation(). */
8016 gas_assert (!fixP->fx_done);
8017 gas_assert (seg->use_rela_p);
8018 break;
8019
8020 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8021 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8022 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8023 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8024 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8025 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8026 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8027 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8028 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8029 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8030 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8031 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8032 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8033 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8034 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8035 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8036 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8037 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8038 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8039 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8040 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8041 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8042 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8043 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8044 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8045 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8046 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8047 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8048 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8049 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8050 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8051 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8052 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8053 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8054 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8055 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8056 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8057 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8058 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8059 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8060 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8061 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8062 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8063 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8064 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8065 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8066 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8067 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8068 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8069 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8070 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8071 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8072 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8073 /* Should always be exported to object file, see
8074 aarch64_force_relocation(). */
8075 gas_assert (!fixP->fx_done);
8076 gas_assert (seg->use_rela_p);
8077 break;
8078
8079 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8080 /* Should always be exported to object file, see
8081 aarch64_force_relocation(). */
8082 fixP->fx_r_type = (ilp32_p
8083 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
8084 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
8085 gas_assert (!fixP->fx_done);
8086 gas_assert (seg->use_rela_p);
8087 break;
8088
8089 case BFD_RELOC_AARCH64_ADD_LO12:
8090 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8091 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8092 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8093 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8094 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8095 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8096 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8097 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8098 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8099 case BFD_RELOC_AARCH64_LDST128_LO12:
8100 case BFD_RELOC_AARCH64_LDST16_LO12:
8101 case BFD_RELOC_AARCH64_LDST32_LO12:
8102 case BFD_RELOC_AARCH64_LDST64_LO12:
8103 case BFD_RELOC_AARCH64_LDST8_LO12:
8104 /* Should always be exported to object file, see
8105 aarch64_force_relocation(). */
8106 gas_assert (!fixP->fx_done);
8107 gas_assert (seg->use_rela_p);
8108 break;
8109
8110 case BFD_RELOC_AARCH64_TLSDESC_ADD:
8111 case BFD_RELOC_AARCH64_TLSDESC_CALL:
8112 case BFD_RELOC_AARCH64_TLSDESC_LDR:
8113 break;
8114
8115 case BFD_RELOC_UNUSED:
8116 /* An error will already have been reported. */
8117 break;
8118
8119 default:
8120 as_bad_where (fixP->fx_file, fixP->fx_line,
8121 _("unexpected %s fixup"),
8122 bfd_get_reloc_code_name (fixP->fx_r_type));
8123 break;
8124 }
8125
8126 apply_fix_return:
8127 /* Free the allocated the struct aarch64_inst.
8128 N.B. currently there are very limited number of fix-up types actually use
8129 this field, so the impact on the performance should be minimal . */
8130 if (fixP->tc_fix_data.inst != NULL)
8131 free (fixP->tc_fix_data.inst);
8132
8133 return;
8134 }
8135
8136 /* Translate internal representation of relocation info to BFD target
8137 format. */
8138
8139 arelent *
8140 tc_gen_reloc (asection * section, fixS * fixp)
8141 {
8142 arelent *reloc;
8143 bfd_reloc_code_real_type code;
8144
8145 reloc = XNEW (arelent);
8146
8147 reloc->sym_ptr_ptr = XNEW (asymbol *);
8148 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8149 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8150
8151 if (fixp->fx_pcrel)
8152 {
8153 if (section->use_rela_p)
8154 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8155 else
8156 fixp->fx_offset = reloc->address;
8157 }
8158 reloc->addend = fixp->fx_offset;
8159
8160 code = fixp->fx_r_type;
8161 switch (code)
8162 {
8163 case BFD_RELOC_16:
8164 if (fixp->fx_pcrel)
8165 code = BFD_RELOC_16_PCREL;
8166 break;
8167
8168 case BFD_RELOC_32:
8169 if (fixp->fx_pcrel)
8170 code = BFD_RELOC_32_PCREL;
8171 break;
8172
8173 case BFD_RELOC_64:
8174 if (fixp->fx_pcrel)
8175 code = BFD_RELOC_64_PCREL;
8176 break;
8177
8178 default:
8179 break;
8180 }
8181
8182 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8183 if (reloc->howto == NULL)
8184 {
8185 as_bad_where (fixp->fx_file, fixp->fx_line,
8186 _
8187 ("cannot represent %s relocation in this object file format"),
8188 bfd_get_reloc_code_name (code));
8189 return NULL;
8190 }
8191
8192 return reloc;
8193 }
8194
8195 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8196
8197 void
8198 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8199 {
8200 bfd_reloc_code_real_type type;
8201 int pcrel = 0;
8202
8203 /* Pick a reloc.
8204 FIXME: @@ Should look at CPU word size. */
8205 switch (size)
8206 {
8207 case 1:
8208 type = BFD_RELOC_8;
8209 break;
8210 case 2:
8211 type = BFD_RELOC_16;
8212 break;
8213 case 4:
8214 type = BFD_RELOC_32;
8215 break;
8216 case 8:
8217 type = BFD_RELOC_64;
8218 break;
8219 default:
8220 as_bad (_("cannot do %u-byte relocation"), size);
8221 type = BFD_RELOC_UNUSED;
8222 break;
8223 }
8224
8225 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8226 }
8227
/* Decide whether the fixup FIXP must be kept as a relocation for the
   linker rather than resolved at assembly time.  Returns non-zero to
   force the relocation to be emitted.  */

int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  (They are rewritten to concrete 32/64-bit reloc
	 types in md_apply_fix.)  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  return generic_force_reloc (fixp);
}
8323
8324 #ifdef OBJ_ELF
8325
8326 /* Implement md_after_parse_args. This is the earliest time we need to decide
8327 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8328
8329 void
8330 aarch64_after_parse_args (void)
8331 {
8332 if (aarch64_abi != AARCH64_ABI_NONE)
8333 return;
8334
8335 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8336 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8337 aarch64_abi = AARCH64_ABI_ILP32;
8338 else
8339 aarch64_abi = AARCH64_ABI_LP64;
8340 }
8341
8342 const char *
8343 elf64_aarch64_target_format (void)
8344 {
8345 if (strcmp (TARGET_OS, "cloudabi") == 0)
8346 {
8347 /* FIXME: What to do for ilp32_p ? */
8348 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
8349 }
8350 if (target_big_endian)
8351 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8352 else
8353 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8354 }
8355
/* Target hook for symbol frobbing; simply defers to the generic ELF
   handling.  PUNTP is set by elf_frob_symbol if the symbol should be
   punted from the output.  */

void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8361 #endif
8362
8363 /* MD interface: Finalization. */
8364
8365 /* A good place to do this, although this was probably not intended
8366 for this kind of use. We need to dump the literal pool before
8367 references are made to a null symbol pointer. */
8368
8369 void
8370 aarch64_cleanup (void)
8371 {
8372 literal_pool *pool;
8373
8374 for (pool = list_of_pools; pool; pool = pool->next)
8375 {
8376 /* Put it at the end of the relevant section. */
8377 subseg_set (pool->section, pool->sub_section);
8378 s_ltorg (0);
8379 }
8380 }
8381
8382 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections without frag chains.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded inside this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the next frag; scan
	 forward to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8446 #endif
8447
/* Adjust the symbol table.  Called late in assembly, before writing
   the object file.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8460
/* Insert VALUE under KEY into TABLE, treating any failure (e.g. a
   duplicate key or out-of-memory) as an internal error.  Previously the
   failure was merely printf'd to stdout and assembly continued with an
   incomplete table; report it through as_fatal so it goes to stderr and
   aborts the assembly.  */

static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *hash_err;

  hash_err = hash_insert (table, key, value);
  if (hash_err)
    as_fatal (_("Internal Error: Can't hash %s: %s"), key, hash_err);
}
8470
8471 static void
8472 fill_instruction_hash_table (void)
8473 {
8474 aarch64_opcode *opcode = aarch64_opcode_table;
8475
8476 while (opcode->name != NULL)
8477 {
8478 templates *templ, *new_templ;
8479 templ = hash_find (aarch64_ops_hsh, opcode->name);
8480
8481 new_templ = XNEW (templates);
8482 new_templ->opcode = opcode;
8483 new_templ->next = NULL;
8484
8485 if (!templ)
8486 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8487 else
8488 {
8489 new_templ->next = templ->next;
8490 templ->next = new_templ;
8491 }
8492 ++opcode;
8493 }
8494 }
8495
8496 static inline void
8497 convert_to_upper (char *dst, const char *src, size_t num)
8498 {
8499 unsigned int i;
8500 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8501 *dst = TOUPPER (*src);
8502 *dst = '\0';
8503 }
8504
8505 /* Assume STR point to a lower-case string, allocate, convert and return
8506 the corresponding upper-case string. */
8507 static inline const char*
8508 get_upper_str (const char *str)
8509 {
8510 char *ret;
8511 size_t len = strlen (str);
8512 ret = XNEWVEC (char, len + 1);
8513 convert_to_upper (ret, str, len);
8514 return ret;
8515 }
8516
8517 /* MD interface: Initialization. */
8518
8519 void
8520 md_begin (void)
8521 {
8522 unsigned mach;
8523 unsigned int i;
8524
8525 if ((aarch64_ops_hsh = hash_new ()) == NULL
8526 || (aarch64_cond_hsh = hash_new ()) == NULL
8527 || (aarch64_shift_hsh = hash_new ()) == NULL
8528 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8529 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8530 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8531 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8532 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8533 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8534 || (aarch64_sys_regs_sr_hsh = hash_new ()) == NULL
8535 || (aarch64_reg_hsh = hash_new ()) == NULL
8536 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8537 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8538 || (aarch64_pldop_hsh = hash_new ()) == NULL
8539 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8540 as_fatal (_("virtual memory exhausted"));
8541
8542 fill_instruction_hash_table ();
8543
8544 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8545 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8546 (void *) (aarch64_sys_regs + i));
8547
8548 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8549 checked_hash_insert (aarch64_pstatefield_hsh,
8550 aarch64_pstatefields[i].name,
8551 (void *) (aarch64_pstatefields + i));
8552
8553 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8554 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8555 aarch64_sys_regs_ic[i].name,
8556 (void *) (aarch64_sys_regs_ic + i));
8557
8558 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8559 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8560 aarch64_sys_regs_dc[i].name,
8561 (void *) (aarch64_sys_regs_dc + i));
8562
8563 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8564 checked_hash_insert (aarch64_sys_regs_at_hsh,
8565 aarch64_sys_regs_at[i].name,
8566 (void *) (aarch64_sys_regs_at + i));
8567
8568 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8569 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8570 aarch64_sys_regs_tlbi[i].name,
8571 (void *) (aarch64_sys_regs_tlbi + i));
8572
8573 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
8574 checked_hash_insert (aarch64_sys_regs_sr_hsh,
8575 aarch64_sys_regs_sr[i].name,
8576 (void *) (aarch64_sys_regs_sr + i));
8577
8578 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8579 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8580 (void *) (reg_names + i));
8581
8582 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8583 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8584 (void *) (nzcv_names + i));
8585
8586 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8587 {
8588 const char *name = aarch64_operand_modifiers[i].name;
8589 checked_hash_insert (aarch64_shift_hsh, name,
8590 (void *) (aarch64_operand_modifiers + i));
8591 /* Also hash the name in the upper case. */
8592 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8593 (void *) (aarch64_operand_modifiers + i));
8594 }
8595
8596 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8597 {
8598 unsigned int j;
8599 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8600 the same condition code. */
8601 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8602 {
8603 const char *name = aarch64_conds[i].names[j];
8604 if (name == NULL)
8605 break;
8606 checked_hash_insert (aarch64_cond_hsh, name,
8607 (void *) (aarch64_conds + i));
8608 /* Also hash the name in the upper case. */
8609 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8610 (void *) (aarch64_conds + i));
8611 }
8612 }
8613
8614 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8615 {
8616 const char *name = aarch64_barrier_options[i].name;
8617 /* Skip xx00 - the unallocated values of option. */
8618 if ((i & 0x3) == 0)
8619 continue;
8620 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8621 (void *) (aarch64_barrier_options + i));
8622 /* Also hash the name in the upper case. */
8623 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8624 (void *) (aarch64_barrier_options + i));
8625 }
8626
8627 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8628 {
8629 const char* name = aarch64_prfops[i].name;
8630 /* Skip the unallocated hint encodings. */
8631 if (name == NULL)
8632 continue;
8633 checked_hash_insert (aarch64_pldop_hsh, name,
8634 (void *) (aarch64_prfops + i));
8635 /* Also hash the name in the upper case. */
8636 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8637 (void *) (aarch64_prfops + i));
8638 }
8639
8640 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8641 {
8642 const char* name = aarch64_hint_options[i].name;
8643
8644 checked_hash_insert (aarch64_hint_opt_hsh, name,
8645 (void *) (aarch64_hint_options + i));
8646 /* Also hash the name in the upper case. */
8647 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8648 (void *) (aarch64_hint_options + i));
8649 }
8650
8651 /* Set the cpu variant based on the command-line options. */
8652 if (!mcpu_cpu_opt)
8653 mcpu_cpu_opt = march_cpu_opt;
8654
8655 if (!mcpu_cpu_opt)
8656 mcpu_cpu_opt = &cpu_default;
8657
8658 cpu_variant = *mcpu_cpu_opt;
8659
8660 /* Record the CPU type. */
8661 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8662
8663 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8664 }
8665
/* Command line processing.  */

/* Short options: only "-m<...>"; the text after 'm' is decoded by
   md_parse_option via the aarch64_opts / aarch64_long_opts tables.  */
const char *md_shortopts = "m:";

/* Long-option codes.  -EB/-EL are only offered when the target supports
   the corresponding byte order (both for bi-endian configurations).  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8692
/* Entry in the simple flag-option table: when -<OPTION> is seen on the
   command line, *VAR is set to VALUE (see md_parse_option).  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8715
/* Entry in the -mcpu table: a CPU name and the feature set it enables
   (base architecture merged with CPU-specific extensions).  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  The first "all" entry is skipped by the .cpu
   directive handler (see s_aarch64_cpu).  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
			    "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				 "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
			      "Qualcomm Falkor"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
			       "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
			       "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				"Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
			      "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
8784
/* Entry in the -march table: an architecture name and its feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  The first "all" entry is skipped by the .arch
   directive handler (see s_aarch64_arch).  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {NULL, AARCH64_ARCH_NONE}
};
8803
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  /* Feature dependencies: other features this one requires.  Used by
     aarch64_feature_enable_set / aarch64_feature_disable_set to compute
     the transitive closures when adding or removing an extension.  */
  const aarch64_feature_set require;
};

static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO
					 | AARCH64_FEATURE_AES
					 | AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2
					 | AARCH64_FEATURE_SHA3, 0),
			AARCH64_ARCH_NONE},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
8872
/* Entry in the argument-taking option table: OPTION is matched as a
   prefix of the command-line text and FUNC decodes what follows it
   (see md_parse_option).  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
8880
8881 /* Transitive closure of features depending on set. */
8882 static aarch64_feature_set
8883 aarch64_feature_disable_set (aarch64_feature_set set)
8884 {
8885 const struct aarch64_option_cpu_value_table *opt;
8886 aarch64_feature_set prev = 0;
8887
8888 while (prev != set) {
8889 prev = set;
8890 for (opt = aarch64_features; opt->name != NULL; opt++)
8891 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8892 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8893 }
8894 return set;
8895 }
8896
8897 /* Transitive closure of dependencies of set. */
8898 static aarch64_feature_set
8899 aarch64_feature_enable_set (aarch64_feature_set set)
8900 {
8901 const struct aarch64_option_cpu_value_table *opt;
8902 aarch64_feature_set prev = 0;
8903
8904 while (prev != set) {
8905 prev = set;
8906 for (opt = aarch64_features; opt->name != NULL; opt++)
8907 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8908 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8909 }
8910 return set;
8911 }
8912
8913 static int
8914 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8915 bfd_boolean ext_only)
8916 {
8917 /* We insist on extensions being added before being removed. We achieve
8918 this by using the ADDING_VALUE variable to indicate whether we are
8919 adding an extension (1) or removing it (0) and only allowing it to
8920 change in the order -1 -> 1 -> 0. */
8921 int adding_value = -1;
8922 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8923
8924 /* Copy the feature set, so that we can modify it. */
8925 *ext_set = **opt_p;
8926 *opt_p = ext_set;
8927
8928 while (str != NULL && *str != 0)
8929 {
8930 const struct aarch64_option_cpu_value_table *opt;
8931 const char *ext = NULL;
8932 int optlen;
8933
8934 if (!ext_only)
8935 {
8936 if (*str != '+')
8937 {
8938 as_bad (_("invalid architectural extension"));
8939 return 0;
8940 }
8941
8942 ext = strchr (++str, '+');
8943 }
8944
8945 if (ext != NULL)
8946 optlen = ext - str;
8947 else
8948 optlen = strlen (str);
8949
8950 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
8951 {
8952 if (adding_value != 0)
8953 adding_value = 0;
8954 optlen -= 2;
8955 str += 2;
8956 }
8957 else if (optlen > 0)
8958 {
8959 if (adding_value == -1)
8960 adding_value = 1;
8961 else if (adding_value != 1)
8962 {
8963 as_bad (_("must specify extensions to add before specifying "
8964 "those to remove"));
8965 return FALSE;
8966 }
8967 }
8968
8969 if (optlen == 0)
8970 {
8971 as_bad (_("missing architectural extension"));
8972 return 0;
8973 }
8974
8975 gas_assert (adding_value != -1);
8976
8977 for (opt = aarch64_features; opt->name != NULL; opt++)
8978 if (strncmp (opt->name, str, optlen) == 0)
8979 {
8980 aarch64_feature_set set;
8981
8982 /* Add or remove the extension. */
8983 if (adding_value)
8984 {
8985 set = aarch64_feature_enable_set (opt->value);
8986 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
8987 }
8988 else
8989 {
8990 set = aarch64_feature_disable_set (opt->value);
8991 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
8992 }
8993 break;
8994 }
8995
8996 if (opt->name == NULL)
8997 {
8998 as_bad (_("unknown architectural extension `%s'"), str);
8999 return 0;
9000 }
9001
9002 str = ext;
9003 };
9004
9005 return 1;
9006 }
9007
9008 static int
9009 aarch64_parse_cpu (const char *str)
9010 {
9011 const struct aarch64_cpu_option_table *opt;
9012 const char *ext = strchr (str, '+');
9013 size_t optlen;
9014
9015 if (ext != NULL)
9016 optlen = ext - str;
9017 else
9018 optlen = strlen (str);
9019
9020 if (optlen == 0)
9021 {
9022 as_bad (_("missing cpu name `%s'"), str);
9023 return 0;
9024 }
9025
9026 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9027 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9028 {
9029 mcpu_cpu_opt = &opt->value;
9030 if (ext != NULL)
9031 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
9032
9033 return 1;
9034 }
9035
9036 as_bad (_("unknown cpu `%s'"), str);
9037 return 0;
9038 }
9039
9040 static int
9041 aarch64_parse_arch (const char *str)
9042 {
9043 const struct aarch64_arch_option_table *opt;
9044 const char *ext = strchr (str, '+');
9045 size_t optlen;
9046
9047 if (ext != NULL)
9048 optlen = ext - str;
9049 else
9050 optlen = strlen (str);
9051
9052 if (optlen == 0)
9053 {
9054 as_bad (_("missing architecture name `%s'"), str);
9055 return 0;
9056 }
9057
9058 for (opt = aarch64_archs; opt->name != NULL; opt++)
9059 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9060 {
9061 march_cpu_opt = &opt->value;
9062 if (ext != NULL)
9063 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
9064
9065 return 1;
9066 }
9067
9068 as_bad (_("unknown architecture `%s'\n"), str);
9069 return 0;
9070 }
9071
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* No NULL sentinel: this table is walked with ARRAY_SIZE
   (see aarch64_parse_abi).  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32",	AARCH64_ABI_ILP32},
  {"lp64",	AARCH64_ABI_LP64},
};
9083
9084 static int
9085 aarch64_parse_abi (const char *str)
9086 {
9087 unsigned int i;
9088
9089 if (str[0] == '\0')
9090 {
9091 as_bad (_("missing abi name `%s'"), str);
9092 return 0;
9093 }
9094
9095 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9096 if (strcmp (str, aarch64_abis[i].name) == 0)
9097 {
9098 aarch64_abi = aarch64_abis[i].value;
9099 return 1;
9100 }
9101
9102 as_bad (_("unknown abi `%s'\n"), str);
9103 return 0;
9104 }
9105
/* Options that take an argument; matched by prefix in md_parse_option,
   which then hands the argument text to the decode function.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
9117
/* gas hook: process machine-dependent command-line option C with
   argument ARG (ARG may be NULL).  Returns 1 if the option was handled,
   0 if it is unknown.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple flag options, which must match exactly
	 (the option's first character is C, the rest is in ARG).  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the long options, which are matched by prefix so the text
	 after the prefix can be passed to the decode function.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  The "- 1" accounts for C,
		 which is part of the option name but not of ARG.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
9185
9186 void
9187 md_show_usage (FILE * fp)
9188 {
9189 struct aarch64_option_table *opt;
9190 struct aarch64_long_option_table *lopt;
9191
9192 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9193
9194 for (opt = aarch64_opts; opt->option != NULL; opt++)
9195 if (opt->help != NULL)
9196 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9197
9198 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9199 if (lopt->help != NULL)
9200 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
9201
9202 #ifdef OPTION_EB
9203 fprintf (fp, _("\
9204 -EB assemble code for a big-endian cpu\n"));
9205 #endif
9206
9207 #ifdef OPTION_EL
9208 fprintf (fp, _("\
9209 -EL assemble code for a little-endian cpu\n"));
9210 #endif
9211 }
9212
/* Parse a .cpu directive: select the target cpu (and optional "+ext"
   extensions) mid-assembly, updating mcpu_cpu_opt and cpu_variant.  */

static void
s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_cpu_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  /* Isolate the operand by NUL-terminating it in place; the overwritten
     character is restored before returning.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* A '+' introduces an extension list after the cpu name.  */
  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry.  */
  for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
	&& strncmp (name, opt->name, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	if (ext != NULL)
	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
	    /* NOTE(review): this early return skips restoring SAVED_CHAR
	       and demand_empty_rest_of_line, unlike the error path below
	       — confirm this is the intended error recovery.  */
	    return;

	cpu_variant = *mcpu_cpu_opt;

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
9257
9258
9259 /* Parse a .arch directive. */
9260
9261 static void
9262 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9263 {
9264 const struct aarch64_arch_option_table *opt;
9265 char saved_char;
9266 char *name;
9267 char *ext;
9268 size_t optlen;
9269
9270 name = input_line_pointer;
9271 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9272 input_line_pointer++;
9273 saved_char = *input_line_pointer;
9274 *input_line_pointer = 0;
9275
9276 ext = strchr (name, '+');
9277
9278 if (ext != NULL)
9279 optlen = ext - name;
9280 else
9281 optlen = strlen (name);
9282
9283 /* Skip the first "all" entry. */
9284 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9285 if (strlen (opt->name) == optlen
9286 && strncmp (name, opt->name, optlen) == 0)
9287 {
9288 mcpu_cpu_opt = &opt->value;
9289 if (ext != NULL)
9290 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9291 return;
9292
9293 cpu_variant = *mcpu_cpu_opt;
9294
9295 *input_line_pointer = saved_char;
9296 demand_empty_rest_of_line ();
9297 return;
9298 }
9299
9300 as_bad (_("unknown architecture `%s'\n"), name);
9301 *input_line_pointer = saved_char;
9302 ignore_rest_of_line ();
9303 }
9304
9305 /* Parse a .arch_extension directive. */
9306
9307 static void
9308 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9309 {
9310 char saved_char;
9311 char *ext = input_line_pointer;;
9312
9313 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9314 input_line_pointer++;
9315 saved_char = *input_line_pointer;
9316 *input_line_pointer = 0;
9317
9318 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9319 return;
9320
9321 cpu_variant = *mcpu_cpu_opt;
9322
9323 *input_line_pointer = saved_char;
9324 demand_empty_rest_of_line ();
9325 }
9326
/* Copy symbol information.  Propagates the AArch64-specific symbol flag
   word from SRC to DEST (used by gas when one symbol is defined in terms
   of another).  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}