]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
Correct disassembly of dot product instructions.
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2017 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
/* Types of processor to assemble for.  */
#ifndef CPU_DEFAULT
/* Fall back to the base ARMv8 architecture when the target does not
   provide its own default.  */
#define CPU_DEFAULT AARCH64_ARCH_V8
#endif

/* Convenience string-equality test.  */
#define streq(a, b)	      (strcmp (a, b) == 0)

/* Character marking the end of an instruction string.  */
#define END_OF_INSN '\0'

/* Feature bits of the CPU/architecture being assembled for; derived
   from the command-line selections below (or CPU_DEFAULT).  */
static aarch64_feature_set cpu_variant;

/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const aarch64_feature_set *mcpu_cpu_opt = NULL;
static const aarch64_feature_set *march_cpu_opt = NULL;

/* Constants for known architecture features.  */
static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
#ifdef OBJ_ELF
/* Pre-defined "_GLOBAL_OFFSET_TABLE_".  */
static symbolS *GOT_symbol;

/* Which ABI to use.  */
enum aarch64_abi_type
{
  AARCH64_ABI_NONE = 0,
  AARCH64_ABI_LP64 = 1,
  AARCH64_ABI_ILP32 = 2
};

#ifndef DEFAULT_ARCH
#define DEFAULT_ARCH "aarch64"
#endif

/* DEFAULT_ARCH is initialized in gas/configure.tgt.  */
static const char *default_arch = DEFAULT_ARCH;

/* AArch64 ABI for the output file; AARCH64_ABI_NONE until an ABI is
   selected.  */
static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;

/* When non-zero, program to a 32-bit model, in which the C data types
   int, long and all pointer types are 32-bit objects (ILP32); or to a
   64-bit model, in which the C int type is 32-bits but the C long type
   and all pointer types are 64-bit objects (LP64).  */
#define ilp32_p		(aarch64_abi == AARCH64_ABI_ILP32)
#endif
86
/* Element types that can qualify a vector or SVE register, plus the
   SVE predication suffix kinds (see parse_predication_for_operand).  */
enum vector_el_type
{
  NT_invtype = -1,
  /* Element sizes: byte, half, single, double, quad
     (8/16/32/64/128 bits — see parse_vector_type_for_operand).  */
  NT_b,
  NT_h,
  NT_s,
  NT_d,
  NT_q,
  /* SVE predication qualifiers: zeroing (/z) and merging (/m).  */
  NT_zero,
  NT_merge
};

/* Bits for DEFINED field in vector_type_el.  */
#define NTA_HASTYPE     1	/* An element type (.<T>) was given.  */
#define NTA_HASINDEX    2	/* An element index was given/expected.  */
#define NTA_HASVARWIDTH 4	/* SVE register: lane count is variable.  */

/* Parsed shape/index information of a vector register operand.  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type or predication kind.  */
  unsigned char defined;	/* Mask of the NTA_* bits that are valid.  */
  unsigned width;		/* Number of lanes; 0 when unspecified.  */
  int64_t index;		/* Element index, valid if NTA_HASINDEX.  */
};
111
/* Flag recorded in struct reloc FLAGS when the operand carried an
   explicit shift operator.  */
#define FIXUP_F_HAS_EXPLICIT_SHIFT	0x00000001

/* Relocation/fixup information gathered while parsing an instruction.  */
struct reloc
{
  bfd_reloc_code_real_type type;
  expressionS exp;
  int pc_rel;
  /* The operand the relocation applies to.  */
  enum aarch64_opnd opnd;
  /* FIXUP_F_* flags.  */
  uint32_t flags;
  unsigned need_libopcodes_p : 1;
};

/* All the state accumulated for the assembly line currently being
   processed.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
  {
    enum aarch64_operand_error_kind kind;
    const char *error;
  } parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};

typedef struct aarch64_instruction aarch64_instruction;

/* The single instruction being assembled; shared by all the parsing
   helpers below.  */
static aarch64_instruction inst;

static bfd_boolean parse_operands (char *, const aarch64_opcode *);
static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
148
149 /* Diagnostics inline function utilities.
150
151 These are lightweight utilities which should only be called by parse_operands
152 and other parsers. GAS processes each assembly line by parsing it against
153 instruction template(s), in the case of multiple templates (for the same
154 mnemonic name), those templates are tried one by one until one succeeds or
155 all fail. An assembly line may fail a few templates before being
156 successfully parsed; an error saved here in most cases is not a user error
157 but an error indicating the current template is not the right template.
158 Therefore it is very important that errors can be saved at a low cost during
159 the parsing; we don't want to slow down the whole parsing by recording
160 non-user errors in detail.
161
162 Remember that the objective is to help GAS pick up the most appropriate
163 error message in the case of multiple templates, e.g. FMOV which has 8
164 templates. */
165
166 static inline void
167 clear_error (void)
168 {
169 inst.parsing_error.kind = AARCH64_OPDE_NIL;
170 inst.parsing_error.error = NULL;
171 }
172
173 static inline bfd_boolean
174 error_p (void)
175 {
176 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
177 }
178
/* Return the message of the recorded diagnostic; may be NULL even when
   an error is recorded (the operand's default description is then used
   — see set_default_error).  */
static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}
184
/* Return the kind of the recorded diagnostic; AARCH64_OPDE_NIL when no
   error has been recorded.  */
static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}
190
191 static inline void
192 set_error (enum aarch64_operand_error_kind kind, const char *error)
193 {
194 inst.parsing_error.kind = kind;
195 inst.parsing_error.error = error;
196 }
197
/* Record a diagnostic that later parsing against the same template may
   still recover from.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
203
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message (a NULL message selects that default).  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
211
/* Record a syntax error with message ERROR, replacing any previously
   recorded diagnostic.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
217
218 static inline void
219 set_first_syntax_error (const char *error)
220 {
221 if (! error_p ())
222 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
223 }
224
/* Record a fatal syntax error: one that is definitely in this operand
   form, so trying further templates is pointless (e.g. a bad expression
   after an explicit '#' immediate prefix — see my_get_expression).  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
230 \f
/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

/* Return value for certain parsers when the parsing fails; those parsers
   return the information of the parsed result, e.g. register number, on
   success.  */
#define PARSE_FAIL -1

/* This is an invalid condition code that means no conditional field is
   present.  */
#define COND_ALWAYS 0x10

/* A barrier-option keyword and its encoding.  */
typedef struct
{
  const char *template;
  unsigned long value;
} asm_barrier_opt;

/* An NZCV flags keyword and its encoding.  */
typedef struct
{
  const char *template;
  uint32_t value;
} asm_nzcv;

/* Maps a relocation-specifier name to a BFD relocation code.  */
struct reloc_entry
{
  char *name;
  bfd_reloc_code_real_type reloc;
};
260
/* Macros to define the register types and masks for the purpose
   of parsing.

   AARCH64_REG_TYPES is an X-macro: it is expanded twice below, once with
   BASIC_REG_TYPE/MULTI_REG_TYPE producing the REG_TYPE_* enumerators, and
   once producing the per-type bit masks in reg_type_masks.  A BASIC type
   matches only itself; a MULTI type matches the OR of several basic-type
   bits.  */

#undef AARCH64_REG_TYPES
#define AARCH64_REG_TYPES	\
  BASIC_REG_TYPE(R_32)	/* w[0-30] */	\
  BASIC_REG_TYPE(R_64)	/* x[0-30] */	\
  BASIC_REG_TYPE(SP_32)	/* wsp     */	\
  BASIC_REG_TYPE(SP_64)	/* sp      */	\
  BASIC_REG_TYPE(Z_32)	/* wzr     */	\
  BASIC_REG_TYPE(Z_64)	/* xzr     */	\
  BASIC_REG_TYPE(FP_B)	/* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */ \
  BASIC_REG_TYPE(FP_H)	/* h[0-31] */	\
  BASIC_REG_TYPE(FP_S)	/* s[0-31] */	\
  BASIC_REG_TYPE(FP_D)	/* d[0-31] */	\
  BASIC_REG_TYPE(FP_Q)	/* q[0-31] */	\
  BASIC_REG_TYPE(VN)	/* v[0-31] */	\
  BASIC_REG_TYPE(ZN)	/* z[0-31] */	\
  BASIC_REG_TYPE(PN)	/* p[0-15] */	\
  /* Typecheck: any 64-bit int reg         (inc SP exc XZR).  */	\
  MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64))		\
  /* Typecheck: same, plus SVE registers.  */				\
  MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64)		\
		 | REG_TYPE(ZN))					\
  /* Typecheck: x[0-30], w[0-30] or [xw]zr.  */				\
  MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* Typecheck: same, plus SVE registers.  */				\
  MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64)		\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64)			\
		 | REG_TYPE(ZN))					\
  /* Typecheck: x[0-30], w[0-30] or {w}sp.  */				\
  MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64)			\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64))			\
  /* Typecheck: any int                    (inc {W}SP inc [WX]ZR).  */	\
  MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64)		\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* Typecheck: any [BHSDQ]P FP.  */					\
  MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
  /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR).  */ \
  MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64)		\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
  /* Typecheck: as above, but also Zn, Pn, and {W}SP.  This should only	\
     be used for SVE instructions, since Zn and Pn are valid symbols	\
     in other contexts.  */						\
  MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64)	\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)	\
		 | REG_TYPE(ZN) | REG_TYPE(PN))				\
  /* Any integer register; used for error messages only.  */		\
  MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64)			\
		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
  /* Pseudo type to mark the end of the enumerator sequence.  */	\
  BASIC_REG_TYPE(MAX)

/* First expansion: produce the REG_TYPE_* enumerators.  */
#undef BASIC_REG_TYPE
#define BASIC_REG_TYPE(T)	REG_TYPE_##T,
#undef MULTI_REG_TYPE
#define MULTI_REG_TYPE(T,V)	BASIC_REG_TYPE(T)

/* Register type enumerators.  */
typedef enum aarch64_reg_type_
{
  /* A list of REG_TYPE_*.  */
  AARCH64_REG_TYPES
} aarch64_reg_type;

/* Second expansion: produce one bit mask per type, used for the
   membership test in aarch64_check_reg_type.  */
#undef BASIC_REG_TYPE
#define BASIC_REG_TYPE(T)	1 << REG_TYPE_##T,
#undef REG_TYPE
#define REG_TYPE(T)	(1 << REG_TYPE_##T)
#undef MULTI_REG_TYPE
#define MULTI_REG_TYPE(T,V)	V,

/* Structure for a hash table entry for a register.  */
typedef struct
{
  const char *name;
  unsigned char number;
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;
  unsigned char builtin;
} reg_entry;

/* Values indexed by aarch64_reg_type to assist the type checking.  */
static const unsigned reg_type_masks[] =
{
  AARCH64_REG_TYPES
};

#undef BASIC_REG_TYPE
#undef REG_TYPE
#undef MULTI_REG_TYPE
#undef AARCH64_REG_TYPES
361
362 /* Diagnostics used when we don't get a register of the expected type.
363 Note: this has to synchronized with aarch64_reg_type definitions
364 above. */
365 static const char *
366 get_reg_expected_msg (aarch64_reg_type reg_type)
367 {
368 const char *msg;
369
370 switch (reg_type)
371 {
372 case REG_TYPE_R_32:
373 msg = N_("integer 32-bit register expected");
374 break;
375 case REG_TYPE_R_64:
376 msg = N_("integer 64-bit register expected");
377 break;
378 case REG_TYPE_R_N:
379 msg = N_("integer register expected");
380 break;
381 case REG_TYPE_R64_SP:
382 msg = N_("64-bit integer or SP register expected");
383 break;
384 case REG_TYPE_SVE_BASE:
385 msg = N_("base register expected");
386 break;
387 case REG_TYPE_R_Z:
388 msg = N_("integer or zero register expected");
389 break;
390 case REG_TYPE_SVE_OFFSET:
391 msg = N_("offset register expected");
392 break;
393 case REG_TYPE_R_SP:
394 msg = N_("integer or SP register expected");
395 break;
396 case REG_TYPE_R_Z_SP:
397 msg = N_("integer, zero or SP register expected");
398 break;
399 case REG_TYPE_FP_B:
400 msg = N_("8-bit SIMD scalar register expected");
401 break;
402 case REG_TYPE_FP_H:
403 msg = N_("16-bit SIMD scalar or floating-point half precision "
404 "register expected");
405 break;
406 case REG_TYPE_FP_S:
407 msg = N_("32-bit SIMD scalar or floating-point single precision "
408 "register expected");
409 break;
410 case REG_TYPE_FP_D:
411 msg = N_("64-bit SIMD scalar or floating-point double precision "
412 "register expected");
413 break;
414 case REG_TYPE_FP_Q:
415 msg = N_("128-bit SIMD scalar or floating-point quad precision "
416 "register expected");
417 break;
418 case REG_TYPE_R_Z_BHSDQ_V:
419 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
420 msg = N_("register expected");
421 break;
422 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
423 msg = N_("SIMD scalar or floating-point register expected");
424 break;
425 case REG_TYPE_VN: /* any V reg */
426 msg = N_("vector register expected");
427 break;
428 case REG_TYPE_ZN:
429 msg = N_("SVE vector register expected");
430 break;
431 case REG_TYPE_PN:
432 msg = N_("SVE predicate register expected");
433 break;
434 default:
435 as_fatal (_("invalid register type %d"), reg_type);
436 }
437 return msg;
438 }
439
/* Some well known registers that we refer to directly elsewhere.  */
#define REG_SP 31

/* Instructions take 4 bytes in the object file.  */
#define INSN_SIZE	4

/* Keyword hash tables consulted by the parsers.  They are populated
   elsewhere in this file (not visible in this chunk).  */
static struct hash_control *aarch64_ops_hsh;		/* Mnemonics.  */
static struct hash_control *aarch64_cond_hsh;		/* Condition codes.  */
static struct hash_control *aarch64_shift_hsh;		/* Shift operators.  */
static struct hash_control *aarch64_sys_regs_hsh;	/* System registers.  */
static struct hash_control *aarch64_pstatefield_hsh;	/* PSTATE fields.  */
static struct hash_control *aarch64_sys_regs_ic_hsh;	/* IC operands.  */
static struct hash_control *aarch64_sys_regs_dc_hsh;	/* DC operands.  */
static struct hash_control *aarch64_sys_regs_at_hsh;	/* AT operands.  */
static struct hash_control *aarch64_sys_regs_tlbi_hsh;	/* TLBI operands.  */
static struct hash_control *aarch64_reg_hsh;		/* Register names.  */
static struct hash_control *aarch64_barrier_opt_hsh;	/* Barrier options.  */
static struct hash_control *aarch64_nzcv_hsh;		/* NZCV flag sets.  */
static struct hash_control *aarch64_pldop_hsh;		/* Prefetch ops.  */
static struct hash_control *aarch64_hint_opt_hsh;	/* Hint options.  */
460
/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
	      <insn>  */

static symbolS *last_label_seen;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE             1024
typedef struct literal_expression
{
  expressionS exp;
  /* If exp.op == O_big then this bignum holds a copy of the global bignum value.  */
  LITTLENUM_TYPE * bignum;
} literal_expression;

typedef struct literal_pool
{
  literal_expression literals[MAX_LITERAL_POOL_SIZE];
  /* Number of LITERALS slots in use (i.e. index of the next free one).  */
  unsigned int next_free_entry;
  /* Identifier of this pool; assignment not visible in this chunk.  */
  unsigned int id;
  /* Symbol associated with the pool (presumably its label — confirm
     against the pool-emission code elsewhere in this file).  */
  symbolS *symbol;
  /* Section/sub-section this pool belongs to.  */
  segT section;
  subsegT sub_section;
  int size;
  /* Next pool in LIST_OF_POOLS.  */
  struct literal_pool *next;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
static literal_pool *list_of_pools = NULL;
497 \f
/* Pure syntax.  */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or    0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix character that indicates the start of an immediate value.  */
#define is_immediate_prefix(C) ((C) == '#')

/* Separator character handling.  */

/* Note: skips at most a single space character.  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
531
532 static inline bfd_boolean
533 skip_past_char (char **str, char c)
534 {
535 if (**str == c)
536 {
537 (*str)++;
538 return TRUE;
539 }
540 else
541 return FALSE;
542 }
543
#define skip_past_comma(str) skip_past_char (str, ',')

/* Arithmetic expressions (possibly involving symbols).  */

/* TRUE while my_get_expression is running; md_operand uses it to mark
   bad expressions as O_illegal on our behalf.  */
static bfd_boolean in_my_get_expression_p = FALSE;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0		/* No '#' immediate prefix accepted.  */
#define GE_OPT_PREFIX 1		/* An optional '#' prefix is accepted.  */
553
554 /* Return TRUE if the string pointed by *STR is successfully parsed
555 as an valid expression; *EP will be filled with the information of
556 such an expression. Otherwise return FALSE. */
557
static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  /* Consume an optional '#' immediate prefix when the mode allows it.  */
  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Temporarily point the global input_line_pointer at *STR so the
     generic expression() code parses from it; in_my_get_expression_p
     lets md_operand flag failures back to us via O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* After an explicit '#' we are definitely parsing an immediate,
	 so a bad expression is fatal for all templates; otherwise only
	 record it if no earlier error exists.  */
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  /* Restore the global pointer and report how far we advanced.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
620
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Delegate to the generic IEEE encoder, honouring target endianness.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
631
632 /* We handle all bad expressions here, so that we can report the faulty
633 instruction in the error message. */
634 void
635 md_operand (expressionS * exp)
636 {
637 if (in_my_get_expression_p)
638 exp->X_op = O_illegal;
639 }
640
641 /* Immediate values. */
642
643 /* Errors may be set multiple times during parsing or bit encoding
644 (particularly in the Neon bits), but usually the earliest error which is set
645 will be the most meaningful. Avoid overwriting it with later (cascading)
646 errors by calling this function. */
647
static void
first_error (const char *error)
{
  /* Keep the earliest diagnostic; later ones are usually cascades.  */
  if (error_p ())
    return;
  set_syntax_error (error);
}
654
655 /* Similar to first_error, but this function accepts formatted error
656 message. */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  /* Only do the formatting work when this will actually become the
     recorded error.  */
  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message is required to fit in BUFFER.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
679
680 /* Register parsing. */
681
682 /* Generic register parser which is called by other specialized
683 register parsers.
684 CCP points to what should be the beginning of a register name.
685 If it is indeed a valid register name, advance CCP over it and
686 return the reg_entry structure; otherwise return NULL.
687 It does not issue diagnostics. */
688
689 static reg_entry *
690 parse_reg (char **ccp)
691 {
692 char *start = *ccp;
693 char *p;
694 reg_entry *reg;
695
696 #ifdef REGISTER_PREFIX
697 if (*start != REGISTER_PREFIX)
698 return NULL;
699 start++;
700 #endif
701
702 p = start;
703 if (!ISALPHA (*p) || !is_name_beginner (*p))
704 return NULL;
705
706 do
707 p++;
708 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
709
710 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
711
712 if (!reg)
713 return NULL;
714
715 *ccp = p;
716 return reg;
717 }
718
719 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
720 return FALSE. */
721 static bfd_boolean
722 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
723 {
724 return (reg_type_masks[type] & (1 << reg->type)) != 0;
725 }
726
727 /* Try to parse a base or offset register. Allow SVE base and offset
728 registers if REG_TYPE includes SVE registers. Return the register
729 entry on success, setting *QUALIFIER to the register qualifier.
730 Return null otherwise.
731
732 Note that this function does not issue any diagnostics. */
733
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit GPR, WSP and WZR all take the W qualifier.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit GPR, SP and XZR all take the X qualifier.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* SVE registers are accepted only when REG_TYPE allows them, and
	 must carry an explicit .s or .d element-size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the ".s" / ".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
784
785 /* Try to parse a base or offset register. Return the register entry
786 on success, setting *QUALIFIER to the register qualifier. Return null
787 otherwise.
788
789 Note that this function does not issue any diagnostics. */
790
static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept W/X GPRs, WSP/SP and WZR/XZR; REG_TYPE_R_Z_SP excludes the
     SVE registers.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
796
797 /* Parse the qualifier of a vector register or vector element of type
798 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
799 succeeds; otherwise return FALSE.
800
801 Accept only one occurrence of:
802 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
803 b h s d q */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE registers never take a lane count, and a bare [bhsdq] suffix
     has none either; width 0 marks "no lane count given".  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is valid only for SVE vectors or as the scalar form "1q".  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* A lane count, when given, must make a 64- or 128-bit vector, apart
     from the short forms 2h and 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      /* NOTE(review): the message labels WIDTH (the lane count) as
	 "element size" and the size letter as "vector size" — the
	 wording and the arguments look swapped relative to each other;
	 confirm intent before changing the translatable string.  */
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
882
883 /* *STR contains an SVE zero/merge predication suffix. Parse it into
884 *PARSED_TYPE and point *STR at the end of the suffix. */
885
886 static bfd_boolean
887 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
888 {
889 char *ptr = *str;
890
891 /* Skip '/'. */
892 gas_assert (*ptr == '/');
893 ptr++;
894 switch (TOLOWER (*ptr))
895 {
896 case 'z':
897 parsed_type->type = NT_zero;
898 break;
899 case 'm':
900 parsed_type->type = NT_merge;
901 break;
902 default:
903 if (*ptr != '\0' && *ptr != ',')
904 first_error_fmt (_("unexpected character `%c' in predication type"),
905 *ptr);
906 else
907 first_error (_("missing predication type"));
908 return FALSE;
909 }
910 parsed_type->width = 0;
911 *str = ptr + 1;
912 return TRUE;
913 }
914
915 /* Parse a register of the type TYPE.
916
917 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
918 name or the parsed register is not of TYPE.
919
920 Otherwise return the register number, and optionally fill in the actual
921 type of the register in *RTYPE when multiple alternatives were given, and
922 return the register shape and element index information in *TYPEINFO.
923
924 IN_REG_LIST should be set with TRUE if the caller is parsing a register
925 list. */
926
static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start from "no shape information".  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the actual (basic) type of the parsed register.  */
  type = reg->type;

  /* Parse an optional ".<T>" shape suffix, or a "/z" "/m" predication
     suffix on SVE predicate registers.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* Parse an optional "[<index>]" element index.  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1050
1051 /* Parse register.
1052
1053 Return the register number on success; return PARSE_FAIL otherwise.
1054
1055 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1056 the register (e.g. NEON double or quad reg when either has been requested).
1057
1058 If this is a NEON vector register with additional type information, fill
1059 in the struct pointed to by VECTYPE (if non-NULL).
1060
1061 This parser does not handle register list. */
1062
1063 static int
1064 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1065 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1066 {
1067 struct vector_type_el atype;
1068 char *str = *ccp;
1069 int reg = parse_typed_reg (&str, type, rtype, &atype,
1070 /*in_reg_list= */ FALSE);
1071
1072 if (reg == PARSE_FAIL)
1073 return PARSE_FAIL;
1074
1075 if (vectype)
1076 *vectype = atype;
1077
1078 *ccp = str;
1079
1080 return reg;
1081 }
1082
1083 static inline bfd_boolean
1084 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1085 {
1086 return
1087 e1.type == e2.type
1088 && e1.defined == e2.defined
1089 && e1.width == e2.width && e1.index == e2.index;
1090 }
1091
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
   <Vt>.<T>[<index>]
   <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      /* IN_RANGE is set (by the loop condition below) when the previous
	 iteration left us looking at the '-' of a "Vn - Vm" range; the
	 previous register number then becomes the start of the range.  */
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* A range must be non-decreasing, e.g. {v0.4s - v3.4s}.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* All registers in the list must share the first one's shape.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      /* Record every register of the (possibly one-element) range,
	 packing 5 bits of register number per list position.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Note the comma operator: seeing '-' sets IN_RANGE for the next
     iteration; a comma simply continues the list.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  /* If any element carried an index, a list-wide [<index>] suffix is
     required, e.g. {v0.s, v1.s}[2].  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low 2 bits: number of registers minus one; the register numbers
     are packed above bit 2 (see the header comment).  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1252
1253 /* Directives: register aliases. */
1254
1255 static reg_entry *
1256 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1257 {
1258 reg_entry *new;
1259 const char *name;
1260
1261 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1262 {
1263 if (new->builtin)
1264 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1265 str);
1266
1267 /* Only warn about a redefinition if it's not defined as the
1268 same register. */
1269 else if (new->number != number || new->type != type)
1270 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1271
1272 return NULL;
1273 }
1274
1275 name = xstrdup (str);
1276 new = XNEW (reg_entry);
1277
1278 new->name = name;
1279 new->number = number;
1280 new->type = type;
1281 new->builtin = FALSE;
1282
1283 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1284 abort ();
1285
1286 return new;
1287 }
1288
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.

   NEWNAME points to the start of the line; P points just past the end
   of the would-be alias name.  */

static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Still TRUE: this was a .req, just a bad one.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1368
/* Handler for a bare ".req" pseudo-op.  Should never be called, as
   .req goes between the alias and the register name, not at the
   beginning of the line; reaching here means malformed input.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1376
1377 /* The .unreq directive deletes an alias which was previously defined
1378 by .req. For example:
1379
1380 my_alias .req r11
1381 .unreq my_alias */
1382
1383 static void
1384 s_unreq (int a ATTRIBUTE_UNUSED)
1385 {
1386 char *name;
1387 char saved_char;
1388
1389 name = input_line_pointer;
1390
1391 while (*input_line_pointer != 0
1392 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1393 ++input_line_pointer;
1394
1395 saved_char = *input_line_pointer;
1396 *input_line_pointer = 0;
1397
1398 if (!*name)
1399 as_bad (_("invalid syntax for .unreq directive"));
1400 else
1401 {
1402 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1403
1404 if (!reg)
1405 as_bad (_("unknown register alias '%s'"), name);
1406 else if (reg->builtin)
1407 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1408 name);
1409 else
1410 {
1411 char *p;
1412 char *nbuf;
1413
1414 hash_delete (aarch64_reg_hsh, name, FALSE);
1415 free ((char *) reg->name);
1416 free (reg);
1417
1418 /* Also locate the all upper case and all lower case versions.
1419 Do not complain if we cannot find one or the other as it
1420 was probably deleted above. */
1421
1422 nbuf = strdup (name);
1423 for (p = nbuf; *p; p++)
1424 *p = TOUPPER (*p);
1425 reg = hash_find (aarch64_reg_hsh, nbuf);
1426 if (reg)
1427 {
1428 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1429 free ((char *) reg->name);
1430 free (reg);
1431 }
1432
1433 for (p = nbuf; *p; p++)
1434 *p = TOLOWER (*p);
1435 reg = hash_find (aarch64_reg_hsh, nbuf);
1436 if (reg)
1437 {
1438 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1439 free ((char *) reg->name);
1440 free (reg);
1441 }
1442
1443 free (nbuf);
1444 }
1445 }
1446
1447 *input_line_pointer = saved_char;
1448 demand_empty_rest_of_line ();
1449 }
1450
1451 /* Directives: Instruction set selection. */
1452
1453 #ifdef OBJ_ELF
1454 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1455 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1456 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1457 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1458
/* Create a new mapping symbol ($x or $d) for the transition to STATE,
   placed at offset VALUE within FRAG.  Also maintains the frag's
   first_map/last_map bookkeeping so that at most one mapping symbol
   exists at any given offset in a frag.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Drop the superseded symbol from the global symbol chain.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag must be emitted in address order.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1514
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Emit a $d mapping symbol at offset VALUE in FRAG, and a mapping
   symbol for STATE at VALUE + BYTES (i.e. after the padding).  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* If the removed symbol was at offset 0 it must also have been
	 the frag's first mapping symbol; clear that too.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1542
1543 static void mapping_state_2 (enum mstate state, int max_chars);
1544
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* Mark everything emitted so far in this section as data.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1586
1587 /* Same as mapping_state, but MAX_CHARS bytes have already been
1588 allocated. Put the mapping symbol that far back. */
1589
1590 static void
1591 mapping_state_2 (enum mstate state, int max_chars)
1592 {
1593 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1594
1595 if (!SEG_NORMAL (now_seg))
1596 return;
1597
1598 if (mapstate == state)
1599 /* The mapping symbol has already been emitted.
1600 There is nothing else to do. */
1601 return;
1602
1603 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1604 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1605 }
1606 #else
1607 #define mapping_state(x) /* nothing */
1608 #define mapping_state_2(x, y) /* nothing */
1609 #endif
1610
1611 /* Directives: sectioning and alignment. */
1612
/* Implement the .bss directive: switch to the .bss section and record
   the transition to data for mapping-symbol purposes.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1622
/* Implement the .even directive: align the output to a 2-byte
   boundary.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1634
1635 /* Directives: Literal pools. */
1636
1637 static literal_pool *
1638 find_literal_pool (int size)
1639 {
1640 literal_pool *pool;
1641
1642 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1643 {
1644 if (pool->section == now_seg
1645 && pool->sub_section == now_subseg && pool->size == size)
1646 break;
1647 }
1648
1649 return pool;
1650 }
1651
1652 static literal_pool *
1653 find_or_make_literal_pool (int size)
1654 {
1655 /* Next literal pool ID number. */
1656 static unsigned int latest_pool_num = 1;
1657 literal_pool *pool;
1658
1659 pool = find_literal_pool (size);
1660
1661 if (pool == NULL)
1662 {
1663 /* Create a new pool. */
1664 pool = XNEW (literal_pool);
1665 if (!pool)
1666 return NULL;
1667
1668 /* Currently we always put the literal pool in the current text
1669 section. If we were generating "small" model code where we
1670 knew that all code and initialised data was within 1MB then
1671 we could output literals to mergeable, read-only data
1672 sections. */
1673
1674 pool->next_free_entry = 0;
1675 pool->section = now_seg;
1676 pool->sub_section = now_subseg;
1677 pool->size = size;
1678 pool->next = list_of_pools;
1679 pool->symbol = NULL;
1680
1681 /* Add it to the list. */
1682 list_of_pools = pool;
1683 }
1684
1685 /* New pools, and emptied pools, will have a NULL symbol. */
1686 if (pool->symbol == NULL)
1687 {
1688 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1689 (valueT) 0, &zero_address_frag);
1690 pool->id = latest_pool_num++;
1691 }
1692
1693 /* Done. */
1694 return pool;
1695 }
1696
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   On success, rewrite *EXP to be a reference to the pool's label
   symbol plus the byte offset of the entry.
   Return TRUE on success, otherwise return FALSE.  */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool, so identical
     constants/symbols share a single pool slot.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Turn *EXP into a pool-relative symbolic reference.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1756
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Give the previously-created SYMBOLP its NAME, SEGMENT, value VALU
   and owning FRAG, then append it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1807
1808
/* Implement the .ltorg/.pool directives: dump every pending literal
   pool (4-byte, 8-byte and 16-byte entry sizes) into the current
   section at this point, then mark the pools empty.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Pools exist for entry sizes 4 (align 2), 8 (align 3) and 16
     (align 4) bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 byte makes the generated label impossible to collide
	 with user symbols.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Bind the pool's placeholder symbol to the current location.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1867
1868 #ifdef OBJ_ELF
1869 /* Forward declarations for functions below, in the MD interface
1870 section. */
1871 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1872 static struct reloc_table_entry * find_reloc_table_entry (char **);
1873
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Implement .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8):
   emit a comma-separated list of NBYTES-wide data values.  A
   ":reloc_suffix:" on a symbolic value is recognised but currently
   rejected as unimplemented.  */
static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:suffix:" relocation specifier.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1929
1930 #endif /* OBJ_ELF */
1931
/* Output a 32-bit word, but mark as an instruction.  Implements the
   .inst directive; accepts a comma-separated list of constant words.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instruction encodings are always little-endian; byte-swap the
	 value when assembling for a big-endian target.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1984
1985 #ifdef OBJ_ELF
1986 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
1987
1988 static void
1989 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
1990 {
1991 expressionS exp;
1992
1993 expression (&exp);
1994 frag_grow (4);
1995 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1996 BFD_RELOC_AARCH64_TLSDESC_ADD);
1997
1998 demand_empty_rest_of_line ();
1999 }
2000
2001 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2002
2003 static void
2004 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2005 {
2006 expressionS exp;
2007
2008 /* Since we're just labelling the code, there's no need to define a
2009 mapping symbol. */
2010 expression (&exp);
2011 /* Make sure there is enough room in this frag for the following
2012 blr. This trick only works if the blr follows immediately after
2013 the .tlsdesc directive. */
2014 frag_grow (4);
2015 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2016 BFD_RELOC_AARCH64_TLSDESC_CALL);
2017
2018 demand_empty_rest_of_line ();
2019 }
2020
2021 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2022
2023 static void
2024 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2025 {
2026 expressionS exp;
2027
2028 expression (&exp);
2029 frag_grow (4);
2030 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2031 BFD_RELOC_AARCH64_TLSDESC_LDR);
2032
2033 demand_empty_rest_of_line ();
2034 }
2035 #endif /* OBJ_ELF */
2036
2037 static void s_aarch64_arch (int);
2038 static void s_aarch64_cpu (int);
2039 static void s_aarch64_arch_extension (int);
2040
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},		/* Alias for .ltorg.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The argument is the data size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};
2070 \f
2071
2072 /* Check whether STR points to a register name followed by a comma or the
2073 end of line; REG_TYPE indicates which register types are checked
2074 against. Return TRUE if STR is such a register name; otherwise return
2075 FALSE. The function does not intend to produce any diagnostics, but since
2076 the register parser aarch64_reg_parse, which is called by this function,
2077 does produce diagnostics, we call clear_error to clear any diagnostics
2078 that may be generated by aarch64_reg_parse.
2079 Also, the function returns FALSE directly if there is any user error
2080 present at the function entry. This prevents the existing diagnostics
2081 state from being spoiled.
2082 The function currently serves parse_constant_immediate and
2083 parse_big_immediate only. */
2084 static bfd_boolean
2085 reg_name_p (char *str, aarch64_reg_type reg_type)
2086 {
2087 int reg;
2088
2089 /* Prevent the diagnostics state from being spoiled. */
2090 if (error_p ())
2091 return FALSE;
2092
2093 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2094
2095 /* Clear the parsing error that may be set by the reg parser. */
2096 clear_error ();
2097
2098 if (reg == PARSE_FAIL)
2099 return FALSE;
2100
2101 skip_whitespace (str);
2102 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2103 return TRUE;
2104
2105 return FALSE;
2106 }
2107
2108 /* Parser functions used exclusively in instruction operands. */
2109
2110 /* Parse an immediate expression which may not be constant.
2111
2112 To prevent the expression parser from pushing a register name
2113 into the symbol table as an undefined symbol, firstly a check is
2114 done to find out whether STR is a register of type REG_TYPE followed
2115 by a comma or the end of line. Return FALSE if STR is such a string. */
2116
2117 static bfd_boolean
2118 parse_immediate_expression (char **str, expressionS *exp,
2119 aarch64_reg_type reg_type)
2120 {
2121 if (reg_name_p (*str, reg_type))
2122 {
2123 set_recoverable_error (_("immediate operand required"));
2124 return FALSE;
2125 }
2126
2127 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2128
2129 if (exp->X_op == O_absent)
2130 {
2131 set_fatal_syntax_error (_("missing immediate expression"));
2132 return FALSE;
2133 }
2134
2135 return TRUE;
2136 }
2137
2138 /* Constant immediate-value read function for use in insn parsing.
2139 STR points to the beginning of the immediate (with the optional
2140 leading #); *VAL receives the value. REG_TYPE says which register
2141 names should be treated as registers rather than as symbolic immediates.
2142
2143 Return TRUE on success; otherwise return FALSE. */
2144
2145 static bfd_boolean
2146 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2147 {
2148 expressionS exp;
2149
2150 if (! parse_immediate_expression (str, &exp, reg_type))
2151 return FALSE;
2152
2153 if (exp.X_op != O_constant)
2154 {
2155 set_syntax_error (_("constant expression required"));
2156 return FALSE;
2157 }
2158
2159 *val = exp.X_add_number;
2160 return TRUE;
2161 }
2162
/* Extract the AArch64 8-bit floating-point immediate encoding (imm8)
   from the IEEE754 single-precision bit pattern IMM: the sign bit and
   the low 3 exponent + 4 mantissa bits.  */
static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t exp_mant = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> (31 - 7)) & 0x80;	/* b[31] -> b[7].  */

  return sign | exp_mant;
}
2169
2170 /* Return TRUE if the single-precision floating-point value encoded in IMM
2171 can be expressed in the AArch64 8-bit signed floating-point format with
2172 3-bit exponent and normalized 4 bits of precision; in other words, the
2173 floating-point value must be expressable as
2174 (+/-) n / 16 * power (2, r)
2175 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2176
2177 static bfd_boolean
2178 aarch64_imm_float_p (uint32_t imm)
2179 {
2180 /* If a single-precision floating-point value has the following bit
2181 pattern, it can be expressed in the AArch64 8-bit floating-point
2182 format:
2183
2184 3 32222222 2221111111111
2185 1 09876543 21098765432109876543210
2186 n Eeeeeexx xxxx0000000000000000000
2187
2188 where n, e and each x are either 0 or 1 independently, with
2189 E == ~ e. */
2190
2191 uint32_t pattern;
2192
2193 /* Prepare the pattern for 'Eeeeee'. */
2194 if (((imm >> 30) & 0x1) == 0)
2195 pattern = 0x3e000000;
2196 else
2197 pattern = 0x40000000;
2198
2199 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2200 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2201 }
2202
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.  *FPWORD is unmodified on failure.  */

static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s: these mantissa bits have no
     counterpart in single precision.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee', based on the sign of the
     exponent (bit 62 of the double).  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~: the three bits after E must be its complement, i.e. the
     double's exponent must fit in the float's 8-bit exponent range.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111: exclude values that would map to a
     single-precision infinity/NaN exponent.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  /* Repack sign, narrowed exponent and mantissa into a float word.  */
  *fpword = ((high32 & 0xc0000000)	/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));	/* 3 S bits.  */
  return TRUE;
}
2250
2251 /* Return true if we should treat OPERAND as a double-precision
2252 floating-point operand rather than a single-precision one. */
2253 static bfd_boolean
2254 double_precision_operand_p (const aarch64_opnd_info *operand)
2255 {
2256 /* Check for unsuffixed SVE registers, which are allowed
2257 for LDR and STR but not in instructions that require an
2258 immediate. We get better error messages if we arbitrarily
2259 pick one size, parse the immediate normally, and then
2260 report the match failure in the normal way. */
2261 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2262 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2263 }
2264
2265 /* Parse a floating-point immediate. Return TRUE on success and return the
2266 value in *IMMED in the format of IEEE754 single-precision encoding.
2267 *CCP points to the start of the string; DP_P is TRUE when the immediate
2268 is expected to be in double-precision (N.B. this only matters when
2269 hexadecimal representation is involved). REG_TYPE says which register
2270 names should be treated as registers rather than as symbolic immediates.
2271
2272 This routine accepts any IEEE float; it is up to the callers to reject
2273 invalid ones. */
2274
2275 static bfd_boolean
2276 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
2277 aarch64_reg_type reg_type)
2278 {
2279 char *str = *ccp;
2280 char *fpnum;
2281 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2282 int found_fpchar = 0;
2283 int64_t val = 0;
2284 unsigned fpword = 0;
2285 bfd_boolean hex_p = FALSE;
2286
2287 skip_past_char (&str, '#');
2288
2289 fpnum = str;
2290 skip_whitespace (fpnum);
2291
2292 if (strncmp (fpnum, "0x", 2) == 0)
2293 {
2294 /* Support the hexadecimal representation of the IEEE754 encoding.
2295 Double-precision is expected when DP_P is TRUE, otherwise the
2296 representation should be in single-precision. */
2297 if (! parse_constant_immediate (&str, &val, reg_type))
2298 goto invalid_fp;
2299
2300 if (dp_p)
2301 {
2302 if (!can_convert_double_to_float (val, &fpword))
2303 goto invalid_fp;
2304 }
2305 else if ((uint64_t) val > 0xffffffff)
2306 goto invalid_fp;
2307 else
2308 fpword = val;
2309
2310 hex_p = TRUE;
2311 }
2312 else
2313 {
2314 if (reg_name_p (str, reg_type))
2315 {
2316 set_recoverable_error (_("immediate operand required"));
2317 return FALSE;
2318 }
2319
2320 /* We must not accidentally parse an integer as a floating-point number.
2321 Make sure that the value we parse is not an integer by checking for
2322 special characters '.' or 'e'. */
2323 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2324 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2325 {
2326 found_fpchar = 1;
2327 break;
2328 }
2329
2330 if (!found_fpchar)
2331 return FALSE;
2332 }
2333
2334 if (! hex_p)
2335 {
2336 int i;
2337
2338 if ((str = atof_ieee (str, 's', words)) == NULL)
2339 goto invalid_fp;
2340
2341 /* Our FP word must be 32 bits (single-precision FP). */
2342 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2343 {
2344 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2345 fpword |= words[i];
2346 }
2347 }
2348
2349 *immed = fpword;
2350 *ccp = str;
2351 return TRUE;
2352
2353 invalid_fp:
2354 set_fatal_syntax_error (_("invalid floating-point constant"));
2355 return FALSE;
2356 }
2357
2358 /* Less-generic immediate-value read function with the possibility of loading
2359 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2360 instructions.
2361
2362 To prevent the expression parser from pushing a register name into the
2363 symbol table as an undefined symbol, a check is firstly done to find
2364 out whether STR is a register of type REG_TYPE followed by a comma or
2365 the end of line. Return FALSE if STR is such a register. */
2366
2367 static bfd_boolean
2368 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2369 {
2370 char *ptr = *str;
2371
2372 if (reg_name_p (ptr, reg_type))
2373 {
2374 set_syntax_error (_("immediate operand required"));
2375 return FALSE;
2376 }
2377
2378 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2379
2380 if (inst.reloc.exp.X_op == O_constant)
2381 *imm = inst.reloc.exp.X_add_number;
2382
2383 *str = ptr;
2384
2385 return TRUE;
2386 }
2387
2388 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2389 if NEED_LIBOPCODES is non-zero, the fixup will need
2390 assistance from the libopcodes. */
2391
2392 static inline void
2393 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2394 const aarch64_opnd_info *operand,
2395 int need_libopcodes_p)
2396 {
2397 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2398 reloc->opnd = operand->type;
2399 if (need_libopcodes_p)
2400 reloc->need_libopcodes_p = 1;
2401 };
2402
2403 /* Return TRUE if the instruction needs to be fixed up later internally by
2404 the GAS; otherwise return FALSE. */
2405
2406 static inline bfd_boolean
2407 aarch64_gas_internal_fixup_p (void)
2408 {
2409 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2410 }
2411
2412 /* Assign the immediate value to the relevant field in *OPERAND if
2413 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2414 needs an internal fixup in a later stage.
2415 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2416 IMM.VALUE that may get assigned with the constant. */
2417 static inline void
2418 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2419 aarch64_opnd_info *operand,
2420 int addr_off_p,
2421 int need_libopcodes_p,
2422 int skip_p)
2423 {
2424 if (reloc->exp.X_op == O_constant)
2425 {
2426 if (addr_off_p)
2427 operand->addr.offset.imm = reloc->exp.X_add_number;
2428 else
2429 operand->imm.value = reloc->exp.X_add_number;
2430 reloc->type = BFD_RELOC_UNUSED;
2431 }
2432 else
2433 {
2434 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2435 /* Tell libopcodes to ignore this operand or not. This is helpful
2436 when one of the operands needs to be fixed up later but we need
2437 libopcodes to check the other operands. */
2438 operand->skip = skip_p;
2439 }
2440 }
2441
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;		/* Modifier name, without the colons.  */
  int pc_rel;			/* Non-zero if the relocation is pc-relative;
				   copied to inst.reloc.pc_rel.  */
  /* Relocation code to emit for each kind of using instruction, or 0 if
     the modifier is not allowed in that context.  */
  bfd_reloc_code_real_type adr_type;		/* ADR.  */
  bfd_reloc_code_real_type adrp_type;		/* ADRP.  */
  bfd_reloc_code_real_type movw_type;		/* MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;		/* ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;		/* Load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type;	/* Load literal.  */
};
2461
static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVK, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits of address/value: MOVK.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   0,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
2926
2927 /* Given the address of a pointer pointing to the textual name of a
2928 relocation as may appear in assembler source, attempt to find its
2929 details in reloc_table. The pointer will be updated to the character
2930 after the trailing colon. On failure, NULL will be returned;
2931 otherwise return the reloc_table_entry. */
2932
2933 static struct reloc_table_entry *
2934 find_reloc_table_entry (char **str)
2935 {
2936 unsigned int i;
2937 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2938 {
2939 int length = strlen (reloc_table[i].name);
2940
2941 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2942 && (*str)[length] == ':')
2943 {
2944 *str += (length + 1);
2945 return &reloc_table[i];
2946 }
2947 }
2948
2949 return NULL;
2950 }
2951
/* Mode argument to parse_shift and parse_shifter_operand: selects which
   shift/extend operators (and which bare forms) are acceptable.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
2966
/* Parse a <shift> operator on an AArch64 data processing instruction.
   Return TRUE on success; otherwise return FALSE.  */
static bfd_boolean
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan past the alphabetic operator name (e.g. "lsl", "uxtw").  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return FALSE;
    }

  /* Look the name up in the table of shift/extend modifiers.  */
  shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return FALSE;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only ever valid in the SHIFTED_LSL_MSL context.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return FALSE;
    }

  /* MUL is only ever valid in the SHIFTED_MUL / SHIFTED_MUL_VL contexts.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return FALSE;
    }

  /* Check the parsed operator against what MODE permits.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return FALSE;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return FALSE;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  A register-offset closing ']' or the MUL VL
     form means there is no amount to parse.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* Only extend operators may omit the amount (which defaults to 0),
	 and then only when no '#' prefix was seen.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return FALSE;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return FALSE;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return FALSE;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return TRUE;
}
3139
3140 /* Parse a <shifter_operand> for a data processing instruction:
3141
3142 #<immediate>
3143 #<immediate>, LSL #imm
3144
3145 Validation of immediate operands is deferred to md_apply_fix.
3146
3147 Return TRUE on success; otherwise return FALSE. */
3148
3149 static bfd_boolean
3150 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3151 enum parse_shift_mode mode)
3152 {
3153 char *p;
3154
3155 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3156 return FALSE;
3157
3158 p = *str;
3159
3160 /* Accept an immediate expression. */
3161 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3162 return FALSE;
3163
3164 /* Accept optional LSL for arithmetic immediate values. */
3165 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3166 if (! parse_shift (&p, operand, SHIFTED_LSL))
3167 return FALSE;
3168
3169 /* Not accept any shifter for logical immediate values. */
3170 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3171 && parse_shift (&p, operand, mode))
3172 {
3173 set_syntax_error (_("unexpected shift operator"));
3174 return FALSE;
3175 }
3176
3177 *str = p;
3178 return TRUE;
3179 }
3180
3181 /* Parse a <shifter_operand> for a data processing instruction:
3182
3183 <Rm>
3184 <Rm>, <shift>
3185 #<immediate>
3186 #<immediate>, LSL #imm
3187
3188 where <shift> is handled by parse_shift above, and the last two
3189 cases are handled by the function above.
3190
3191 Validation of immediate operands is deferred to md_apply_fix.
3192
3193 Return TRUE on success; otherwise return FALSE. */
3194
3195 static bfd_boolean
3196 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3197 enum parse_shift_mode mode)
3198 {
3199 const reg_entry *reg;
3200 aarch64_opnd_qualifier_t qualifier;
3201 enum aarch64_operand_class opd_class
3202 = aarch64_get_operand_class (operand->type);
3203
3204 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3205 if (reg)
3206 {
3207 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3208 {
3209 set_syntax_error (_("unexpected register in the immediate operand"));
3210 return FALSE;
3211 }
3212
3213 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3214 {
3215 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3216 return FALSE;
3217 }
3218
3219 operand->reg.regno = reg->number;
3220 operand->qualifier = qualifier;
3221
3222 /* Accept optional shift operation on register. */
3223 if (! skip_past_comma (str))
3224 return TRUE;
3225
3226 if (! parse_shift (str, operand, mode))
3227 return FALSE;
3228
3229 return TRUE;
3230 }
3231 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3232 {
3233 set_syntax_error
3234 (_("integer register expected in the extended/shifted operand "
3235 "register"));
3236 return FALSE;
3237 }
3238
3239 /* We have a shifted immediate variable. */
3240 return parse_shifter_operand_imm (str, operand, mode);
3241 }
3242
3243 /* Return TRUE on success; return FALSE otherwise. */
3244
3245 static bfd_boolean
3246 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3247 enum parse_shift_mode mode)
3248 {
3249 char *p = *str;
3250
3251 /* Determine if we have the sequence of characters #: or just :
3252 coming next. If we do, then we check for a :rello: relocation
3253 modifier. If we don't, punt the whole lot to
3254 parse_shifter_operand. */
3255
3256 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3257 {
3258 struct reloc_table_entry *entry;
3259
3260 if (p[0] == '#')
3261 p += 2;
3262 else
3263 p++;
3264 *str = p;
3265
3266 /* Try to parse a relocation. Anything else is an error. */
3267 if (!(entry = find_reloc_table_entry (str)))
3268 {
3269 set_syntax_error (_("unknown relocation modifier"));
3270 return FALSE;
3271 }
3272
3273 if (entry->add_type == 0)
3274 {
3275 set_syntax_error
3276 (_("this relocation modifier is not allowed on this instruction"));
3277 return FALSE;
3278 }
3279
3280 /* Save str before we decompose it. */
3281 p = *str;
3282
3283 /* Next, we parse the expression. */
3284 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3285 return FALSE;
3286
3287 /* Record the relocation type (use the ADD variant here). */
3288 inst.reloc.type = entry->add_type;
3289 inst.reloc.pc_rel = entry->pc_rel;
3290
3291 /* If str is empty, we've reached the end, stop here. */
3292 if (**str == '\0')
3293 return TRUE;
3294
3295 /* Otherwise, we have a shifted reloc modifier, so rewind to
3296 recover the variable name and continue parsing for the shifter. */
3297 *str = p;
3298 return parse_shifter_operand_imm (str, operand, mode);
3299 }
3300
3301 return parse_shifter_operand (str, operand, mode);
3302 }
3303
3304 /* Parse all forms of an address expression. Information is written
3305 to *OPERAND and/or inst.reloc.
3306
3307 The A64 instruction set has the following addressing modes:
3308
3309 Offset
3310 [base] // in SIMD ld/st structure
3311 [base{,#0}] // in ld/st exclusive
3312 [base{,#imm}]
3313 [base,Xm{,LSL #imm}]
3314 [base,Xm,SXTX {#imm}]
3315 [base,Wm,(S|U)XTW {#imm}]
3316 Pre-indexed
3317 [base,#imm]!
3318 Post-indexed
3319 [base],#imm
3320 [base],Xm // in SIMD ld/st structure
3321 PC-relative (literal)
3322 label
3323 SVE:
3324 [base,#imm,MUL VL]
3325 [base,Zm.D{,LSL #imm}]
3326 [base,Zm.S,(S|U)XTW {#imm}]
3327 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3328 [Zn.S,#imm]
3329 [Zn.D,#imm]
3330 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3331 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3332 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3333
3334 (As a convenience, the notation "=immediate" is permitted in conjunction
3335 with the pc-relative literal load instructions to automatically place an
3336 immediate value or symbolic address in a nearby literal pool and generate
3337 a hidden label which references it.)
3338
3339 Upon a successful parsing, the address structure in *OPERAND will be
3340 filled in the following way:
3341
3342 .base_regno = <base>
3343 .offset.is_reg // 1 if the offset is a register
3344 .offset.imm = <imm>
3345 .offset.regno = <Rm>
3346
3347 For different addressing modes defined in the A64 ISA:
3348
3349 Offset
3350 .pcrel=0; .preind=1; .postind=0; .writeback=0
3351 Pre-indexed
3352 .pcrel=0; .preind=1; .postind=0; .writeback=1
3353 Post-indexed
3354 .pcrel=0; .preind=0; .postind=1; .writeback=1
3355 PC-relative (literal)
3356 .pcrel=1; .preind=1; .postind=0; .writeback=0
3357
3358 The shift/extension information, if any, will be stored in .shifter.
3359 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3360 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3361 corresponding register.
3362
3363 BASE_TYPE says which types of base register should be accepted and
3364 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3365 is the type of shifter that is allowed for immediate offsets,
3366 or SHIFTED_NONE if none.
3367
3368 In all other respects, it is the caller's responsibility to check
3369 for addressing modes not supported by the instruction, and to set
3370 inst.reloc.type. */
3371
static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  /* Immediate offsets and relocation expressions are accumulated in the
     per-instruction relocation slot.  */
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  /* No leading '[' means the operand is a PC-relative label, an
     =immediate literal-pool request, or a #:reloc: expression.  */
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  /* The same modifier name maps to different relocations
	     depending on whether this is ADR or a load literal.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  /* Parse the base register; BASE_TYPE restricts which register file
     is acceptable (e.g. X registers with SP, or SVE Z registers).  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return FALSE;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}] */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifters require the offset register to be the
		 same width as the base register.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return FALSE;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return FALSE;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return FALSE;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      /* Post-index offset: either a 64-bit register or an expression.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return FALSE;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  /* Reject [Rn]!   */
	  set_syntax_error (_("missing offset in the pre-indexed address"));
	  return FALSE;
	}
      operand->addr.preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }

  *str = p;
  return TRUE;
}
3643
3644 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3645 on success. */
3646 static bfd_boolean
3647 parse_address (char **str, aarch64_opnd_info *operand)
3648 {
3649 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3650 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3651 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3652 }
3653
3654 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3655 The arguments have the same meaning as for parse_address_main.
3656 Return TRUE on success. */
static bfd_boolean
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  /* SVE addressing: allow Z registers as base/offset and the MUL VL
     immediate shifter.  */
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
3666
3667 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3668 Return TRUE on success; otherwise return FALSE. */
3669 static bfd_boolean
3670 parse_half (char **str, int *internal_fixup_p)
3671 {
3672 char *p = *str;
3673
3674 skip_past_char (&p, '#');
3675
3676 gas_assert (internal_fixup_p);
3677 *internal_fixup_p = 0;
3678
3679 if (*p == ':')
3680 {
3681 struct reloc_table_entry *entry;
3682
3683 /* Try to parse a relocation. Anything else is an error. */
3684 ++p;
3685 if (!(entry = find_reloc_table_entry (&p)))
3686 {
3687 set_syntax_error (_("unknown relocation modifier"));
3688 return FALSE;
3689 }
3690
3691 if (entry->movw_type == 0)
3692 {
3693 set_syntax_error
3694 (_("this relocation modifier is not allowed on this instruction"));
3695 return FALSE;
3696 }
3697
3698 inst.reloc.type = entry->movw_type;
3699 }
3700 else
3701 *internal_fixup_p = 1;
3702
3703 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3704 return FALSE;
3705
3706 *str = p;
3707 return TRUE;
3708 }
3709
3710 /* Parse an operand for an ADRP instruction:
3711 ADRP <Xd>, <label>
3712 Return TRUE on success; otherwise return FALSE. */
3713
3714 static bfd_boolean
3715 parse_adrp (char **str)
3716 {
3717 char *p;
3718
3719 p = *str;
3720 if (*p == ':')
3721 {
3722 struct reloc_table_entry *entry;
3723
3724 /* Try to parse a relocation. Anything else is an error. */
3725 ++p;
3726 if (!(entry = find_reloc_table_entry (&p)))
3727 {
3728 set_syntax_error (_("unknown relocation modifier"));
3729 return FALSE;
3730 }
3731
3732 if (entry->adrp_type == 0)
3733 {
3734 set_syntax_error
3735 (_("this relocation modifier is not allowed on this instruction"));
3736 return FALSE;
3737 }
3738
3739 inst.reloc.type = entry->adrp_type;
3740 }
3741 else
3742 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3743
3744 inst.reloc.pc_rel = 1;
3745
3746 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3747 return FALSE;
3748
3749 *str = p;
3750 return TRUE;
3751 }
3752
3753 /* Miscellaneous. */
3754
3755 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3756 of SIZE tokens in which index I gives the token for field value I,
3757 or is null if field value I is invalid. REG_TYPE says which register
3758 names should be treated as registers rather than as symbolic immediates.
3759
3760 Return true on success, moving *STR past the operand and storing the
3761 field value in *VAL. */
3762
3763 static int
3764 parse_enum_string (char **str, int64_t *val, const char *const *array,
3765 size_t size, aarch64_reg_type reg_type)
3766 {
3767 expressionS exp;
3768 char *p, *q;
3769 size_t i;
3770
3771 /* Match C-like tokens. */
3772 p = q = *str;
3773 while (ISALNUM (*q))
3774 q++;
3775
3776 for (i = 0; i < size; ++i)
3777 if (array[i]
3778 && strncasecmp (array[i], p, q - p) == 0
3779 && array[i][q - p] == 0)
3780 {
3781 *val = i;
3782 *str = q;
3783 return TRUE;
3784 }
3785
3786 if (!parse_immediate_expression (&p, &exp, reg_type))
3787 return FALSE;
3788
3789 if (exp.X_op == O_constant
3790 && (uint64_t) exp.X_add_number < size)
3791 {
3792 *val = exp.X_add_number;
3793 *str = p;
3794 return TRUE;
3795 }
3796
3797 /* Use the default error for this operand. */
3798 return FALSE;
3799 }
3800
3801 /* Parse an option for a preload instruction. Returns the encoding for the
3802 option, or PARSE_FAIL. */
3803
3804 static int
3805 parse_pldop (char **str)
3806 {
3807 char *p, *q;
3808 const struct aarch64_name_value_pair *o;
3809
3810 p = q = *str;
3811 while (ISALNUM (*q))
3812 q++;
3813
3814 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3815 if (!o)
3816 return PARSE_FAIL;
3817
3818 *str = q;
3819 return o->value;
3820 }
3821
3822 /* Parse an option for a barrier instruction. Returns the encoding for the
3823 option, or PARSE_FAIL. */
3824
3825 static int
3826 parse_barrier (char **str)
3827 {
3828 char *p, *q;
3829 const asm_barrier_opt *o;
3830
3831 p = q = *str;
3832 while (ISALPHA (*q))
3833 q++;
3834
3835 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3836 if (!o)
3837 return PARSE_FAIL;
3838
3839 *str = q;
3840 return o->value;
3841 }
3842
3843 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3844 return 0 if successful. Otherwise return PARSE_FAIL. */
3845
3846 static int
3847 parse_barrier_psb (char **str,
3848 const struct aarch64_name_value_pair ** hint_opt)
3849 {
3850 char *p, *q;
3851 const struct aarch64_name_value_pair *o;
3852
3853 p = q = *str;
3854 while (ISALPHA (*q))
3855 q++;
3856
3857 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3858 if (!o)
3859 {
3860 set_fatal_syntax_error
3861 ( _("unknown or missing option to PSB"));
3862 return PARSE_FAIL;
3863 }
3864
3865 if (o->value != 0x11)
3866 {
3867 /* PSB only accepts option name 'CSYNC'. */
3868 set_syntax_error
3869 (_("the specified option is not accepted for PSB"));
3870 return PARSE_FAIL;
3871 }
3872
3873 *str = q;
3874 *hint_opt = o;
3875 return 0;
3876 }
3877
3878 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3879 Returns the encoding for the option, or PARSE_FAIL.
3880
3881 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3882 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3883
3884 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3885 field, otherwise as a system register.
3886 */
3887
static int
parse_sys_reg (char **str, struct hash_control *sys_regs,
	       int imple_defined_p, int pstatefield_p)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_reg *o;
  int value;

  /* Copy the register name into BUF, lower-cased.  Copying stops at 31
     characters; a longer name trips the length assert below.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';
  /* Assert that BUF be large enough.  */
  gas_assert (p - buf == q - *str);

  o = hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  /* Range-check each field of the generic name.  */
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the MSR/MRS system-register encoding.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	}
    }
  else
    {
      /* Known name: diagnose uses not supported by the selected
	 processor, and warn for deprecated names, but still return the
	 encoding so assembly can continue.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
    }

  *str = q;
  return value;
}
3940
3941 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3942 for the option, or NULL. */
3943
3944 static const aarch64_sys_ins_reg *
3945 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3946 {
3947 char *p, *q;
3948 char buf[32];
3949 const aarch64_sys_ins_reg *o;
3950
3951 p = buf;
3952 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3953 if (p < buf + 31)
3954 *p++ = TOLOWER (*q);
3955 *p = '\0';
3956
3957 o = hash_find (sys_ins_regs, buf);
3958 if (!o)
3959 return NULL;
3960
3961 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
3962 as_bad (_("selected processor does not support system register "
3963 "name '%s'"), buf);
3964
3965 *str = q;
3966 return o;
3967 }
3968 \f
/* Helper macros for parse_operands.  Each consumes one syntactic element
   from STR into the local variables of parse_operands, branching to the
   local `failure' label when the element is absent or malformed.  */

/* Consume the single character CHR, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into VAL/RTYPE, or fail with the
   default diagnostic.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE, recording its
   number and qualifier in INFO, or fail.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and check it lies in [MIN, MAX],
   or fail.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic constant from ARRAY into VAL, or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; fail if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4020 \f
/* Position a 12-bit immediate IMM in the imm12 field (starting at bit 10)
   of an add/sub immediate instruction word.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  const unsigned int imm12_lsb = 10;

  return imm << imm12_lsb;
}
4027
/* Position the shift-amount field CNT (starting at bit 22) of an add/sub
   immediate instruction word.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  const unsigned int shift_lsb = 22;

  return cnt << shift_lsb;
}
4034
4035
/* Split the 21-bit ADR immediate IMM into its two instruction fields:
   bits [1:0] go to immlo at [30:29] and bits [20:2] go to immhi at
   [23:5].  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;			/* [1:0]  -> [30:29] */
  uint32_t immhi = imm & (0x7ffff << 2);	/* [20:2] -> [23:5]  */

  return (immlo << 29) | (immhi << 3);
}
4043
/* Position the 16-bit immediate IMM (starting at bit 5) of a move-wide
   immediate instruction word.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  const unsigned int imm16_lsb = 5;

  return imm << imm16_lsb;
}
4050
/* Mask OFS down to the 26-bit offset field (bits [25:0]) of an
   unconditional branch instruction word.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x03ffffff;
}
4057
/* Place the 19-bit offset OFS in the imm19 field (bits [23:5]) of a
   conditional-branch or compare-and-branch instruction word.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4064
/* Place the 19-bit offset OFS in the imm19 field (bits [23:5]) of a
   load-literal instruction word.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4071
/* Place the 14-bit offset OFS in the imm14 field (bits [18:5]) of a
   test-and-branch instruction word.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fff) << 5;
}
4078
/* Position the 16-bit immediate IMM (starting at bit 5) of an
   SVC/HVC/SMC instruction word.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  const unsigned int imm16_lsb = 5;

  return imm << imm16_lsb;
}
4085
/* Flip bit 30 of OPCODE, turning add(s) into sub(s) and vice versa.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;

  return opcode ^ op_bit;
}
4092
/* Set bit 30 of OPCODE, producing the MOVZ form.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;

  return opcode | opc_bit;
}
4098
/* Clear bit 30 of OPCODE, producing the MOVN form.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;

  return opcode & ~opc_bit;
}
4104
4105 /* Overall per-instruction processing. */
4106
4107 /* We need to be able to fix up arbitrary expressions in some statements.
4108 This is so that we can handle symbols that are an arbitrary distance from
4109 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4110 which returns part of an address in a form which will be valid for
4111 a data instruction. We do this by pushing the expression into a symbol
4112 in the expr_section, and creating a fix for that. */
4113
4114 static fixS *
4115 fix_new_aarch64 (fragS * frag,
4116 int where,
4117 short int size, expressionS * exp, int pc_rel, int reloc)
4118 {
4119 fixS *new_fix;
4120
4121 switch (exp->X_op)
4122 {
4123 case O_constant:
4124 case O_symbol:
4125 case O_add:
4126 case O_subtract:
4127 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4128 break;
4129
4130 default:
4131 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4132 pc_rel, reloc);
4133 break;
4134 }
4135 return new_fix;
4136 }
4137 \f
4138 /* Diagnostics on operands errors. */
4139
/* Nonzero (the default) to output verbose diagnostics for operand
   errors; cleared by the -mno-verbose-error command-line option.  */
static int verbose_error_p = 1;
4143
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Indexed by
   enum aarch64_operand_error_kind, so must be kept in sync with it.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4159
4160 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4161
4162 When multiple errors of different kinds are found in the same assembly
4163 line, only the error of the highest severity will be picked up for
4164 issuing the diagnostics. */
4165
static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enumerators being declared in
     ascending order of severity; assert that ordering here.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4180
4181 /* Helper routine to get the mnemonic name from the assembly instruction
4182 line; should only be called for the diagnosis purpose, as there is
4183 string copy operation involved, which may affect the runtime
4184 performance if used in elsewhere. */
4185
static const char*
get_mnemonic_name (const char *str)
{
  /* N.B. the result points at a static buffer that is overwritten by
     the next call.  */
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4209
4210 static void
4211 reset_aarch64_instruction (aarch64_instruction *instruction)
4212 {
4213 memset (instruction, '\0', sizeof (aarch64_instruction));
4214 instruction->reloc.type = BFD_RELOC_UNUSED;
4215 }
4216
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One error found while matching the assembly line against a particular
   opcode template.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Template the error relates to.  */
  aarch64_operand_error detail;		/* Kind, operand index, message.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of error records, with head and tail pointers.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
4236
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled across assembly lines to avoid repeated
   allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4248
4249 /* Initialize the data structure that stores the operand mismatch
4250 information on assembling one line of the assembly code. */
4251 static void
4252 init_operand_error_report (void)
4253 {
4254 if (operand_error_report.head != NULL)
4255 {
4256 gas_assert (operand_error_report.tail != NULL);
4257 operand_error_report.tail->next = free_opnd_error_record_nodes;
4258 free_opnd_error_record_nodes = operand_error_report.head;
4259 operand_error_report.head = NULL;
4260 operand_error_report.tail = NULL;
4261 return;
4262 }
4263 gas_assert (operand_error_report.tail == NULL);
4264 }
4265
4266 /* Return TRUE if some operand error has been recorded during the
4267 parsing of the current assembly line using the opcode *OPCODE;
4268 otherwise return FALSE. */
4269 static inline bfd_boolean
4270 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4271 {
4272 operand_error_record *record = operand_error_report.head;
4273 return record && record->opcode == opcode;
4274 }
4275
4276 /* Add the error record *NEW_RECORD to operand_error_report. The record's
4277 OPCODE field is initialized with OPCODE.
4278 N.B. only one record for each opcode, i.e. the maximum of one error is
4279 recorded for each instruction template. */
4280
static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* RECORD is the list head; after the if/else below it points at the
     record for OPCODE (either the existing head or a freshly inserted
     one), whose detail is overwritten at the end.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  record->detail = new_record->detail;
}
4327
4328 static inline void
4329 record_operand_error_info (const aarch64_opcode *opcode,
4330 aarch64_operand_error *error_info)
4331 {
4332 operand_error_record record;
4333 record.opcode = opcode;
4334 record.detail = *error_info;
4335 add_operand_error_record (&record);
4336 }
4337
4338 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4339 error message *ERROR, for operand IDX (count from 0). */
4340
4341 static void
4342 record_operand_error (const aarch64_opcode *opcode, int idx,
4343 enum aarch64_operand_error_kind kind,
4344 const char* error)
4345 {
4346 aarch64_operand_error info;
4347 memset(&info, 0, sizeof (info));
4348 info.index = idx;
4349 info.kind = kind;
4350 info.error = error;
4351 record_operand_error_info (opcode, &info);
4352 }
4353
4354 static void
4355 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4356 enum aarch64_operand_error_kind kind,
4357 const char* error, const int *extra_data)
4358 {
4359 aarch64_operand_error info;
4360 info.index = idx;
4361 info.kind = kind;
4362 info.error = error;
4363 info.data[0] = extra_data[0];
4364 info.data[1] = extra_data[1];
4365 info.data[2] = extra_data[2];
4366 record_operand_error_info (opcode, &info);
4367 }
4368
4369 static void
4370 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4371 const char* error, int lower_bound,
4372 int upper_bound)
4373 {
4374 int data[3] = {lower_bound, upper_bound, 0};
4375 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4376 error, data);
4377 }
4378
4379 /* Remove the operand error record for *OPCODE. */
4380 static void ATTRIBUTE_UNUSED
4381 remove_operand_error_record (const aarch64_opcode *opcode)
4382 {
4383 if (opcode_has_operand_error_p (opcode))
4384 {
4385 operand_error_record* record = operand_error_report.head;
4386 gas_assert (record != NULL && operand_error_report.tail != NULL);
4387 operand_error_report.head = record->next;
4388 record->next = free_opnd_error_record_nodes;
4389 free_opnd_error_record_nodes = record;
4390 if (operand_error_report.head == NULL)
4391 {
4392 gas_assert (operand_error_report.tail == record);
4393 operand_error_report.tail = NULL;
4394 }
4395 }
4396 }
4397
4398 /* Given the instruction in *INSTR, return the index of the best matched
4399 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4400
4401 Return -1 if there is no qualifier sequence; return the first match
4402 if there is multiple matches found. */
4403
static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.
	 An all-NIL sequence marks the end of the populated entries.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers of INSTR agree with this
	 pattern.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Keep the first pattern with the highest match count.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
4447
4448 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4449 corresponding operands in *INSTR. */
4450
4451 static inline void
4452 assign_qualifier_sequence (aarch64_inst *instr,
4453 const aarch64_opnd_qualifier_t *qualifiers)
4454 {
4455 int i = 0;
4456 int num_opnds = aarch64_num_of_operands (instr->opcode);
4457 gas_assert (num_opnds);
4458 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4459 instr->operands[i].qualifier = *qualifiers;
4460 }
4461
4462 /* Print operands for the diagnosis purpose. */
4463
static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  /* NOTE(review): operand text is appended to BUF with strcat, so BUF
     is presumably NUL-terminated on entry -- confirm against callers.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);
    }
}
4494
4495 /* Send to stderr a string as information. */
4496
/* printf-style informational output to stderr, prefixed with the current
   source file and line (when available) and "Info: ".  */
static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  file = as_where (&line);
  if (file)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4518
4519 /* Output one operand error record. */
4520
/* RECORD describes one collected operand error; STR is the assembly line
   being diagnosed and is echoed verbatim in the messages.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  /* Operand index the error applies to; -1 means "unknown operand".  */
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      /* NIL records should never be queued for reporting.  */
      gas_assert (0);
      break;

    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    as_bad (_("%s -- `%s'"), detail->error, str);
	  else
	    as_bad (_("%s at operand %d -- `%s'"),
		    detail->error, idx + 1, str);
	}
      else
	{
	  /* Without a prepared message we need a valid operand index to
	     name what the operand should have been.  */
	  gas_assert (idx >= 0);
	  as_bad (_("operand %d must be %s -- `%s'"), idx + 1,
		  aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      as_bad (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there are other errors co-existing with this error,
	     the 'corrected' instruction may still be incorrect, e.g. given
	     'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	     'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parse the operand text after the mnemonic.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* The instruction is known erroneous, so encoding is expected
	     to fail here.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint: the corrected instruction text.  */
	  output_info (_(" did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_(" %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_(" other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes have much fewer patterns in the list.
		 First NIL qualifier indicates the end of the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_(" %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      as_bad (_("operand %d must be the same register as operand 1 -- `%s'"),
	      detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the permitted minimum/maximum.  */
      if (detail->data[0] != detail->data[1])
	as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], detail->data[1], idx + 1, str);
      else
	as_bad (_("%s must be %d at operand %d -- `%s'"),
		detail->error ? detail->error : _("immediate value"),
		detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected register count.  */
      if (detail->data[0] == 1)
	as_bad (_("invalid number of registers in the list; "
		  "only 1 register is expected at operand %d -- `%s'"),
		idx + 1, str);
      else
	as_bad (_("invalid number of registers in the list; "
		  "%d registers are expected at operand %d -- `%s'"),
		detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] holds the required alignment.  */
      as_bad (_("immediate value must be a multiple of "
		"%d at operand %d -- `%s'"),
	      detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4693
4694 /* Process and output the error message about the operand mismatching.
4695
4696 When this function is called, the operand error information had
4697 been collected for an assembly line and there will be multiple
4698 errors in the case of multiple instruction templates; output the
4699 error message that most closely describes the problem. */
4700
4701 static void
4702 output_operand_error_report (char *str)
4703 {
4704 int largest_error_pos;
4705 const char *msg = NULL;
4706 enum aarch64_operand_error_kind kind;
4707 operand_error_record *curr;
4708 operand_error_record *head = operand_error_report.head;
4709 operand_error_record *record = NULL;
4710
4711 /* No error to report. */
4712 if (head == NULL)
4713 return;
4714
4715 gas_assert (head != NULL && operand_error_report.tail != NULL);
4716
4717 /* Only one error. */
4718 if (head == operand_error_report.tail)
4719 {
4720 DEBUG_TRACE ("single opcode entry with error kind: %s",
4721 operand_mismatch_kind_names[head->detail.kind]);
4722 output_operand_error_record (head, str);
4723 return;
4724 }
4725
4726 /* Find the error kind of the highest severity. */
4727 DEBUG_TRACE ("multiple opcode entries with error kind");
4728 kind = AARCH64_OPDE_NIL;
4729 for (curr = head; curr != NULL; curr = curr->next)
4730 {
4731 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4732 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4733 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4734 kind = curr->detail.kind;
4735 }
4736 gas_assert (kind != AARCH64_OPDE_NIL);
4737
4738 /* Pick up one of errors of KIND to report. */
4739 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4740 for (curr = head; curr != NULL; curr = curr->next)
4741 {
4742 if (curr->detail.kind != kind)
4743 continue;
4744 /* If there are multiple errors, pick up the one with the highest
4745 mismatching operand index. In the case of multiple errors with
4746 the equally highest operand index, pick up the first one or the
4747 first one with non-NULL error message. */
4748 if (curr->detail.index > largest_error_pos
4749 || (curr->detail.index == largest_error_pos && msg == NULL
4750 && curr->detail.error != NULL))
4751 {
4752 largest_error_pos = curr->detail.index;
4753 record = curr;
4754 msg = record->detail.error;
4755 }
4756 }
4757
4758 gas_assert (largest_error_pos != -2 && record != NULL);
4759 DEBUG_TRACE ("Pick up error kind %s to report",
4760 operand_mismatch_kind_names[record->detail.kind]);
4761
4762 /* Output. */
4763 output_operand_error_record (record, str);
4764 }
4765 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Emit the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (i * 8)) & 0xff;
}
4776
/* Read a little-endian AARCH64 instruction from buf.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  const unsigned char *p = (const unsigned char *) buf;
  uint32_t insn = 0;
  int i;

  /* Fold the bytes in most-significant first, so p[0] lands in the
     low-order byte.  */
  for (i = 3; i >= 0; i--)
    insn = (insn << 8) | p[i];
  return insn;
}
4785
/* Emit the encoded instruction in the global INST into the current frag
   and, when a relocation is pending, create the corresponding fix-up.
   NEW_INST, when non-NULL, is attached to the fix-up so md_apply_fix can
   re-encode the instruction later.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve 4 bytes in the output frag for this instruction.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  /* Write the encoded instruction word, always little-endian.  */
  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fix-ups carry the operand code and flags so the
	     back end can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Record the instruction for DWARF line-number information.  */
  dwarf2_emit_insn (INSN_SIZE);
}
4819
/* Link together opcodes of the same name.  Each mnemonic maps to a singly
   linked chain of candidate opcode templates.  */

struct templates
{
  /* One opcode entry carrying this mnemonic.  */
  aarch64_opcode *opcode;
  /* Next template with the same mnemonic, or NULL at the end of the
     chain.  */
  struct templates *next;
};

typedef struct templates templates;
4829
4830 static templates *
4831 lookup_mnemonic (const char *start, int len)
4832 {
4833 templates *templ = NULL;
4834
4835 templ = hash_find_n (aarch64_ops_hsh, start, len);
4836 return templ;
4837 }
4838
4839 /* Subroutine of md_assemble, responsible for looking up the primary
4840 opcode from the mnemonic the user wrote. STR points to the
4841 beginning of the mnemonic. */
4842
/* On success *STR is advanced past the mnemonic (and any condition
   suffix); on failure *STR points at the offending character and 0/NULL
   is returned.  A side effect is that inst.cond is set from any ".cond"
   suffix, or COND_ALWAYS when there is none.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  Remember the first '.' as the potential
     start of a condition suffix.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Empty mnemonic, or one that starts with '.', is invalid.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      /* Look up the text between the '.' and the end as a condition
	 name.  */
      cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown condition: leave *STR at the '.' for diagnostics.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional.  13 leaves room for the
	 two-character suffix in condname[16]; no NUL terminator is
	 needed since the lookup is length-bounded.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  /* Conditional mnemonic too long to fit in the buffer.  */
  return NULL;
}
4902
4903 /* Internal helper routine converting a vector_type_el structure *VECTYPE
4904 to a corresponding operand qualifier. */
4905
static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Smallest vector-register qualifier for each element type; the final
     qualifier is computed as an offset from these bases.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate suffixes /z and /m map directly.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Vector element register.  Relies on the S_B..S_Q qualifiers
	 being laid out in the same order as NT_b..NT_q.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit vector arrangements are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      /* Sanity-check that the computed qualifier is a vector-register
	 qualifier.  */
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
4973
4974 /* Process an optional operand that is found omitted from the assembly line.
4975 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4976 instruction's opcode entry while IDX is the index of this omitted operand.
4977 */
4978
4979 static void
4980 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4981 int idx, aarch64_opnd_info *operand)
4982 {
4983 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4984 gas_assert (optional_operand_p (opcode, idx));
4985 gas_assert (!operand->present);
4986
4987 switch (type)
4988 {
4989 case AARCH64_OPND_Rd:
4990 case AARCH64_OPND_Rn:
4991 case AARCH64_OPND_Rm:
4992 case AARCH64_OPND_Rt:
4993 case AARCH64_OPND_Rt2:
4994 case AARCH64_OPND_Rs:
4995 case AARCH64_OPND_Ra:
4996 case AARCH64_OPND_Rt_SYS:
4997 case AARCH64_OPND_Rd_SP:
4998 case AARCH64_OPND_Rn_SP:
4999 case AARCH64_OPND_Rm_SP:
5000 case AARCH64_OPND_Fd:
5001 case AARCH64_OPND_Fn:
5002 case AARCH64_OPND_Fm:
5003 case AARCH64_OPND_Fa:
5004 case AARCH64_OPND_Ft:
5005 case AARCH64_OPND_Ft2:
5006 case AARCH64_OPND_Sd:
5007 case AARCH64_OPND_Sn:
5008 case AARCH64_OPND_Sm:
5009 case AARCH64_OPND_Va:
5010 case AARCH64_OPND_Vd:
5011 case AARCH64_OPND_Vn:
5012 case AARCH64_OPND_Vm:
5013 case AARCH64_OPND_VdD1:
5014 case AARCH64_OPND_VnD1:
5015 operand->reg.regno = default_value;
5016 break;
5017
5018 case AARCH64_OPND_Ed:
5019 case AARCH64_OPND_En:
5020 case AARCH64_OPND_Em:
5021 case AARCH64_OPND_SM3_IMM2:
5022 operand->reglane.regno = default_value;
5023 break;
5024
5025 case AARCH64_OPND_IDX:
5026 case AARCH64_OPND_BIT_NUM:
5027 case AARCH64_OPND_IMMR:
5028 case AARCH64_OPND_IMMS:
5029 case AARCH64_OPND_SHLL_IMM:
5030 case AARCH64_OPND_IMM_VLSL:
5031 case AARCH64_OPND_IMM_VLSR:
5032 case AARCH64_OPND_CCMP_IMM:
5033 case AARCH64_OPND_FBITS:
5034 case AARCH64_OPND_UIMM4:
5035 case AARCH64_OPND_UIMM3_OP1:
5036 case AARCH64_OPND_UIMM3_OP2:
5037 case AARCH64_OPND_IMM:
5038 case AARCH64_OPND_IMM_2:
5039 case AARCH64_OPND_WIDTH:
5040 case AARCH64_OPND_UIMM7:
5041 case AARCH64_OPND_NZCV:
5042 case AARCH64_OPND_SVE_PATTERN:
5043 case AARCH64_OPND_SVE_PRFOP:
5044 operand->imm.value = default_value;
5045 break;
5046
5047 case AARCH64_OPND_SVE_PATTERN_SCALED:
5048 operand->imm.value = default_value;
5049 operand->shifter.kind = AARCH64_MOD_MUL;
5050 operand->shifter.amount = 1;
5051 break;
5052
5053 case AARCH64_OPND_EXCEPTION:
5054 inst.reloc.type = BFD_RELOC_UNUSED;
5055 break;
5056
5057 case AARCH64_OPND_BARRIER_ISB:
5058 operand->barrier = aarch64_barrier_options + default_value;
5059
5060 default:
5061 break;
5062 }
5063 }
5064
5065 /* Process the relocation type for move wide instructions.
5066 Return TRUE on success; otherwise return FALSE. */
5067
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   As a side effect on success, sets operand 1's shift amount according to
   the relocation's 16-bit group (G0/G1/G2/G3).  */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* W-register destination means a 32-bit move wide.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not be used with signed or TLS set-group relocations.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  /* Map the relocation's group to the implied LSL shift amount.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* G2/G3 groups address bits above 32 and make no sense for a
	 32-bit register.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5155
/* A primitive log calculator.  Return log2 (SIZE) for SIZE in
   {1, 2, 4, 8, 16}; assert and return -1 (as unsigned) for any other
   value.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* log2 of each size 1..16; (unsigned char) -1 flags values that are
     not a power of two.  */
  static const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well: the original guard only caught SIZE > 16,
     so "size - 1" would wrap to UINT_MAX and read far out of bounds.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5171
5172 /* Determine and return the real reloc type code for an instruction
5173 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
5174
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12 (or its
   TLS DTPREL variants), based on the transfer size implied by the
   qualifiers of the load/store.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows are indexed by pseudo reloc type (LDST_LO12, DTPREL_LO12,
     DTPREL_LO12_NC); columns by log2 of the transfer size in bytes.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand carries no qualifier, derive the expected one
     from operand 0's qualifier.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* The TLS rows have no 128-bit entry.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5230
5231 /* Check whether a register list REGINFO is valid. The registers must be
5232 numbered in increasing order (modulo 32), in increments of one or two.
5233
5234 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5235 increments of two.
5236
5237 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5238
5239 static bfd_boolean
5240 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5241 {
5242 uint32_t i, nb_regs, prev_regno, incr;
5243
5244 nb_regs = 1 + (reginfo & 0x3);
5245 reginfo >>= 2;
5246 prev_regno = reginfo & 0x1f;
5247 incr = accept_alternate ? 2 : 1;
5248
5249 for (i = 1; i < nb_regs; ++i)
5250 {
5251 uint32_t curr_regno;
5252 reginfo >>= 5;
5253 curr_regno = reginfo & 0x1f;
5254 if (curr_regno != ((prev_regno + incr) & 0x1f))
5255 return FALSE;
5256 prev_regno = curr_regno;
5257 }
5258
5259 return TRUE;
5260 }
5261
5262 /* Generic instruction operand parser. This does no encoding and no
5263 semantic validation; it merely squirrels values away in the inst
5264 structure. Returns TRUE or FALSE depending on whether the
5265 specified grammar matched. */
5266
5267 static bfd_boolean
5268 parse_operands (char *str, const aarch64_opcode *opcode)
5269 {
5270 int i;
5271 char *backtrack_pos = 0;
5272 const enum aarch64_opnd *operands = opcode->operands;
5273 aarch64_reg_type imm_reg_type;
5274
5275 clear_error ();
5276 skip_whitespace (str);
5277
5278 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5279 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5280 else
5281 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5282
5283 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5284 {
5285 int64_t val;
5286 const reg_entry *reg;
5287 int comma_skipped_p = 0;
5288 aarch64_reg_type rtype;
5289 struct vector_type_el vectype;
5290 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5291 aarch64_opnd_info *info = &inst.base.operands[i];
5292 aarch64_reg_type reg_type;
5293
5294 DEBUG_TRACE ("parse operand %d", i);
5295
5296 /* Assign the operand code. */
5297 info->type = operands[i];
5298
5299 if (optional_operand_p (opcode, i))
5300 {
5301 /* Remember where we are in case we need to backtrack. */
5302 gas_assert (!backtrack_pos);
5303 backtrack_pos = str;
5304 }
5305
5306 /* Expect comma between operands; the backtrack mechanism will take
5307 care of cases of omitted optional operand. */
5308 if (i > 0 && ! skip_past_char (&str, ','))
5309 {
5310 set_syntax_error (_("comma expected between operands"));
5311 goto failure;
5312 }
5313 else
5314 comma_skipped_p = 1;
5315
5316 switch (operands[i])
5317 {
5318 case AARCH64_OPND_Rd:
5319 case AARCH64_OPND_Rn:
5320 case AARCH64_OPND_Rm:
5321 case AARCH64_OPND_Rt:
5322 case AARCH64_OPND_Rt2:
5323 case AARCH64_OPND_Rs:
5324 case AARCH64_OPND_Ra:
5325 case AARCH64_OPND_Rt_SYS:
5326 case AARCH64_OPND_PAIRREG:
5327 case AARCH64_OPND_SVE_Rm:
5328 po_int_reg_or_fail (REG_TYPE_R_Z);
5329 break;
5330
5331 case AARCH64_OPND_Rd_SP:
5332 case AARCH64_OPND_Rn_SP:
5333 case AARCH64_OPND_SVE_Rn_SP:
5334 case AARCH64_OPND_Rm_SP:
5335 po_int_reg_or_fail (REG_TYPE_R_SP);
5336 break;
5337
5338 case AARCH64_OPND_Rm_EXT:
5339 case AARCH64_OPND_Rm_SFT:
5340 po_misc_or_fail (parse_shifter_operand
5341 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5342 ? SHIFTED_ARITH_IMM
5343 : SHIFTED_LOGIC_IMM)));
5344 if (!info->shifter.operator_present)
5345 {
5346 /* Default to LSL if not present. Libopcodes prefers shifter
5347 kind to be explicit. */
5348 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5349 info->shifter.kind = AARCH64_MOD_LSL;
5350 /* For Rm_EXT, libopcodes will carry out further check on whether
5351 or not stack pointer is used in the instruction (Recall that
5352 "the extend operator is not optional unless at least one of
5353 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5354 }
5355 break;
5356
5357 case AARCH64_OPND_Fd:
5358 case AARCH64_OPND_Fn:
5359 case AARCH64_OPND_Fm:
5360 case AARCH64_OPND_Fa:
5361 case AARCH64_OPND_Ft:
5362 case AARCH64_OPND_Ft2:
5363 case AARCH64_OPND_Sd:
5364 case AARCH64_OPND_Sn:
5365 case AARCH64_OPND_Sm:
5366 case AARCH64_OPND_SVE_VZn:
5367 case AARCH64_OPND_SVE_Vd:
5368 case AARCH64_OPND_SVE_Vm:
5369 case AARCH64_OPND_SVE_Vn:
5370 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5371 if (val == PARSE_FAIL)
5372 {
5373 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5374 goto failure;
5375 }
5376 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5377
5378 info->reg.regno = val;
5379 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5380 break;
5381
5382 case AARCH64_OPND_SVE_Pd:
5383 case AARCH64_OPND_SVE_Pg3:
5384 case AARCH64_OPND_SVE_Pg4_5:
5385 case AARCH64_OPND_SVE_Pg4_10:
5386 case AARCH64_OPND_SVE_Pg4_16:
5387 case AARCH64_OPND_SVE_Pm:
5388 case AARCH64_OPND_SVE_Pn:
5389 case AARCH64_OPND_SVE_Pt:
5390 reg_type = REG_TYPE_PN;
5391 goto vector_reg;
5392
5393 case AARCH64_OPND_SVE_Za_5:
5394 case AARCH64_OPND_SVE_Za_16:
5395 case AARCH64_OPND_SVE_Zd:
5396 case AARCH64_OPND_SVE_Zm_5:
5397 case AARCH64_OPND_SVE_Zm_16:
5398 case AARCH64_OPND_SVE_Zn:
5399 case AARCH64_OPND_SVE_Zt:
5400 reg_type = REG_TYPE_ZN;
5401 goto vector_reg;
5402
5403 case AARCH64_OPND_Va:
5404 case AARCH64_OPND_Vd:
5405 case AARCH64_OPND_Vn:
5406 case AARCH64_OPND_Vm:
5407 reg_type = REG_TYPE_VN;
5408 vector_reg:
5409 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5410 if (val == PARSE_FAIL)
5411 {
5412 first_error (_(get_reg_expected_msg (reg_type)));
5413 goto failure;
5414 }
5415 if (vectype.defined & NTA_HASINDEX)
5416 goto failure;
5417
5418 info->reg.regno = val;
5419 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5420 && vectype.type == NT_invtype)
5421 /* Unqualified Pn and Zn registers are allowed in certain
5422 contexts. Rely on F_STRICT qualifier checking to catch
5423 invalid uses. */
5424 info->qualifier = AARCH64_OPND_QLF_NIL;
5425 else
5426 {
5427 info->qualifier = vectype_to_qualifier (&vectype);
5428 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5429 goto failure;
5430 }
5431 break;
5432
5433 case AARCH64_OPND_VdD1:
5434 case AARCH64_OPND_VnD1:
5435 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5436 if (val == PARSE_FAIL)
5437 {
5438 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5439 goto failure;
5440 }
5441 if (vectype.type != NT_d || vectype.index != 1)
5442 {
5443 set_fatal_syntax_error
5444 (_("the top half of a 128-bit FP/SIMD register is expected"));
5445 goto failure;
5446 }
5447 info->reg.regno = val;
5448 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5449 here; it is correct for the purpose of encoding/decoding since
5450 only the register number is explicitly encoded in the related
5451 instructions, although this appears a bit hacky. */
5452 info->qualifier = AARCH64_OPND_QLF_S_D;
5453 break;
5454
5455 case AARCH64_OPND_SVE_Zm3_INDEX:
5456 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5457 case AARCH64_OPND_SVE_Zm4_INDEX:
5458 case AARCH64_OPND_SVE_Zn_INDEX:
5459 reg_type = REG_TYPE_ZN;
5460 goto vector_reg_index;
5461
5462 case AARCH64_OPND_Ed:
5463 case AARCH64_OPND_En:
5464 case AARCH64_OPND_Em:
5465 case AARCH64_OPND_SM3_IMM2:
5466 reg_type = REG_TYPE_VN;
5467 vector_reg_index:
5468 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5469 if (val == PARSE_FAIL)
5470 {
5471 first_error (_(get_reg_expected_msg (reg_type)));
5472 goto failure;
5473 }
5474 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5475 goto failure;
5476
5477 info->reglane.regno = val;
5478 info->reglane.index = vectype.index;
5479 info->qualifier = vectype_to_qualifier (&vectype);
5480 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5481 goto failure;
5482 break;
5483
5484 case AARCH64_OPND_SVE_ZnxN:
5485 case AARCH64_OPND_SVE_ZtxN:
5486 reg_type = REG_TYPE_ZN;
5487 goto vector_reg_list;
5488
5489 case AARCH64_OPND_LVn:
5490 case AARCH64_OPND_LVt:
5491 case AARCH64_OPND_LVt_AL:
5492 case AARCH64_OPND_LEt:
5493 reg_type = REG_TYPE_VN;
5494 vector_reg_list:
5495 if (reg_type == REG_TYPE_ZN
5496 && get_opcode_dependent_value (opcode) == 1
5497 && *str != '{')
5498 {
5499 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5500 if (val == PARSE_FAIL)
5501 {
5502 first_error (_(get_reg_expected_msg (reg_type)));
5503 goto failure;
5504 }
5505 info->reglist.first_regno = val;
5506 info->reglist.num_regs = 1;
5507 }
5508 else
5509 {
5510 val = parse_vector_reg_list (&str, reg_type, &vectype);
5511 if (val == PARSE_FAIL)
5512 goto failure;
5513 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5514 {
5515 set_fatal_syntax_error (_("invalid register list"));
5516 goto failure;
5517 }
5518 info->reglist.first_regno = (val >> 2) & 0x1f;
5519 info->reglist.num_regs = (val & 0x3) + 1;
5520 }
5521 if (operands[i] == AARCH64_OPND_LEt)
5522 {
5523 if (!(vectype.defined & NTA_HASINDEX))
5524 goto failure;
5525 info->reglist.has_index = 1;
5526 info->reglist.index = vectype.index;
5527 }
5528 else
5529 {
5530 if (vectype.defined & NTA_HASINDEX)
5531 goto failure;
5532 if (!(vectype.defined & NTA_HASTYPE))
5533 {
5534 if (reg_type == REG_TYPE_ZN)
5535 set_fatal_syntax_error (_("missing type suffix"));
5536 goto failure;
5537 }
5538 }
5539 info->qualifier = vectype_to_qualifier (&vectype);
5540 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5541 goto failure;
5542 break;
5543
5544 case AARCH64_OPND_CRn:
5545 case AARCH64_OPND_CRm:
5546 {
5547 char prefix = *(str++);
5548 if (prefix != 'c' && prefix != 'C')
5549 goto failure;
5550
5551 po_imm_nc_or_fail ();
5552 if (val > 15)
5553 {
5554 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5555 goto failure;
5556 }
5557 info->qualifier = AARCH64_OPND_QLF_CR;
5558 info->imm.value = val;
5559 break;
5560 }
5561
5562 case AARCH64_OPND_SHLL_IMM:
5563 case AARCH64_OPND_IMM_VLSR:
5564 po_imm_or_fail (1, 64);
5565 info->imm.value = val;
5566 break;
5567
5568 case AARCH64_OPND_CCMP_IMM:
5569 case AARCH64_OPND_SIMM5:
5570 case AARCH64_OPND_FBITS:
5571 case AARCH64_OPND_UIMM4:
5572 case AARCH64_OPND_UIMM3_OP1:
5573 case AARCH64_OPND_UIMM3_OP2:
5574 case AARCH64_OPND_IMM_VLSL:
5575 case AARCH64_OPND_IMM:
5576 case AARCH64_OPND_IMM_2:
5577 case AARCH64_OPND_WIDTH:
5578 case AARCH64_OPND_SVE_INV_LIMM:
5579 case AARCH64_OPND_SVE_LIMM:
5580 case AARCH64_OPND_SVE_LIMM_MOV:
5581 case AARCH64_OPND_SVE_SHLIMM_PRED:
5582 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5583 case AARCH64_OPND_SVE_SHRIMM_PRED:
5584 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5585 case AARCH64_OPND_SVE_SIMM5:
5586 case AARCH64_OPND_SVE_SIMM5B:
5587 case AARCH64_OPND_SVE_SIMM6:
5588 case AARCH64_OPND_SVE_SIMM8:
5589 case AARCH64_OPND_SVE_UIMM3:
5590 case AARCH64_OPND_SVE_UIMM7:
5591 case AARCH64_OPND_SVE_UIMM8:
5592 case AARCH64_OPND_SVE_UIMM8_53:
5593 case AARCH64_OPND_IMM_ROT1:
5594 case AARCH64_OPND_IMM_ROT2:
5595 case AARCH64_OPND_IMM_ROT3:
5596 case AARCH64_OPND_SVE_IMM_ROT1:
5597 case AARCH64_OPND_SVE_IMM_ROT2:
5598 po_imm_nc_or_fail ();
5599 info->imm.value = val;
5600 break;
5601
5602 case AARCH64_OPND_SVE_AIMM:
5603 case AARCH64_OPND_SVE_ASIMM:
5604 po_imm_nc_or_fail ();
5605 info->imm.value = val;
5606 skip_whitespace (str);
5607 if (skip_past_comma (&str))
5608 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5609 else
5610 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5611 break;
5612
5613 case AARCH64_OPND_SVE_PATTERN:
5614 po_enum_or_fail (aarch64_sve_pattern_array);
5615 info->imm.value = val;
5616 break;
5617
5618 case AARCH64_OPND_SVE_PATTERN_SCALED:
5619 po_enum_or_fail (aarch64_sve_pattern_array);
5620 info->imm.value = val;
5621 if (skip_past_comma (&str)
5622 && !parse_shift (&str, info, SHIFTED_MUL))
5623 goto failure;
5624 if (!info->shifter.operator_present)
5625 {
5626 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5627 info->shifter.kind = AARCH64_MOD_MUL;
5628 info->shifter.amount = 1;
5629 }
5630 break;
5631
5632 case AARCH64_OPND_SVE_PRFOP:
5633 po_enum_or_fail (aarch64_sve_prfop_array);
5634 info->imm.value = val;
5635 break;
5636
5637 case AARCH64_OPND_UIMM7:
5638 po_imm_or_fail (0, 127);
5639 info->imm.value = val;
5640 break;
5641
5642 case AARCH64_OPND_IDX:
5643 case AARCH64_OPND_MASK:
5644 case AARCH64_OPND_BIT_NUM:
5645 case AARCH64_OPND_IMMR:
5646 case AARCH64_OPND_IMMS:
5647 po_imm_or_fail (0, 63);
5648 info->imm.value = val;
5649 break;
5650
5651 case AARCH64_OPND_IMM0:
5652 po_imm_nc_or_fail ();
5653 if (val != 0)
5654 {
5655 set_fatal_syntax_error (_("immediate zero expected"));
5656 goto failure;
5657 }
5658 info->imm.value = 0;
5659 break;
5660
5661 case AARCH64_OPND_FPIMM0:
5662 {
5663 int qfloat;
5664 bfd_boolean res1 = FALSE, res2 = FALSE;
5665 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5666 it is probably not worth the effort to support it. */
5667 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5668 imm_reg_type))
5669 && (error_p ()
5670 || !(res2 = parse_constant_immediate (&str, &val,
5671 imm_reg_type))))
5672 goto failure;
5673 if ((res1 && qfloat == 0) || (res2 && val == 0))
5674 {
5675 info->imm.value = 0;
5676 info->imm.is_fp = 1;
5677 break;
5678 }
5679 set_fatal_syntax_error (_("immediate zero expected"));
5680 goto failure;
5681 }
5682
5683 case AARCH64_OPND_IMM_MOV:
5684 {
5685 char *saved = str;
5686 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5687 reg_name_p (str, REG_TYPE_VN))
5688 goto failure;
5689 str = saved;
5690 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5691 GE_OPT_PREFIX, 1));
5692 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5693 later. fix_mov_imm_insn will try to determine a machine
5694 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5695 message if the immediate cannot be moved by a single
5696 instruction. */
5697 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5698 inst.base.operands[i].skip = 1;
5699 }
5700 break;
5701
5702 case AARCH64_OPND_SIMD_IMM:
5703 case AARCH64_OPND_SIMD_IMM_SFT:
5704 if (! parse_big_immediate (&str, &val, imm_reg_type))
5705 goto failure;
5706 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5707 /* addr_off_p */ 0,
5708 /* need_libopcodes_p */ 1,
5709 /* skip_p */ 1);
5710 /* Parse shift.
5711 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5712 shift, we don't check it here; we leave the checking to
5713 the libopcodes (operand_general_constraint_met_p). By
5714 doing this, we achieve better diagnostics. */
5715 if (skip_past_comma (&str)
5716 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5717 goto failure;
5718 if (!info->shifter.operator_present
5719 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5720 {
5721 /* Default to LSL if not present. Libopcodes prefers shifter
5722 kind to be explicit. */
5723 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5724 info->shifter.kind = AARCH64_MOD_LSL;
5725 }
5726 break;
5727
5728 case AARCH64_OPND_FPIMM:
5729 case AARCH64_OPND_SIMD_FPIMM:
5730 case AARCH64_OPND_SVE_FPIMM8:
5731 {
5732 int qfloat;
5733 bfd_boolean dp_p;
5734
5735 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5736 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5737 || !aarch64_imm_float_p (qfloat))
5738 {
5739 if (!error_p ())
5740 set_fatal_syntax_error (_("invalid floating-point"
5741 " constant"));
5742 goto failure;
5743 }
5744 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5745 inst.base.operands[i].imm.is_fp = 1;
5746 }
5747 break;
5748
5749 case AARCH64_OPND_SVE_I1_HALF_ONE:
5750 case AARCH64_OPND_SVE_I1_HALF_TWO:
5751 case AARCH64_OPND_SVE_I1_ZERO_ONE:
5752 {
5753 int qfloat;
5754 bfd_boolean dp_p;
5755
5756 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5757 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5758 {
5759 if (!error_p ())
5760 set_fatal_syntax_error (_("invalid floating-point"
5761 " constant"));
5762 goto failure;
5763 }
5764 inst.base.operands[i].imm.value = qfloat;
5765 inst.base.operands[i].imm.is_fp = 1;
5766 }
5767 break;
5768
5769 case AARCH64_OPND_LIMM:
5770 po_misc_or_fail (parse_shifter_operand (&str, info,
5771 SHIFTED_LOGIC_IMM));
5772 if (info->shifter.operator_present)
5773 {
5774 set_fatal_syntax_error
5775 (_("shift not allowed for bitmask immediate"));
5776 goto failure;
5777 }
5778 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5779 /* addr_off_p */ 0,
5780 /* need_libopcodes_p */ 1,
5781 /* skip_p */ 1);
5782 break;
5783
5784 case AARCH64_OPND_AIMM:
5785 if (opcode->op == OP_ADD)
5786 /* ADD may have relocation types. */
5787 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5788 SHIFTED_ARITH_IMM));
5789 else
5790 po_misc_or_fail (parse_shifter_operand (&str, info,
5791 SHIFTED_ARITH_IMM));
5792 switch (inst.reloc.type)
5793 {
5794 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5795 info->shifter.amount = 12;
5796 break;
5797 case BFD_RELOC_UNUSED:
5798 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5799 if (info->shifter.kind != AARCH64_MOD_NONE)
5800 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5801 inst.reloc.pc_rel = 0;
5802 break;
5803 default:
5804 break;
5805 }
5806 info->imm.value = 0;
5807 if (!info->shifter.operator_present)
5808 {
5809 /* Default to LSL if not present. Libopcodes prefers shifter
5810 kind to be explicit. */
5811 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5812 info->shifter.kind = AARCH64_MOD_LSL;
5813 }
5814 break;
5815
5816 case AARCH64_OPND_HALF:
5817 {
5818 /* #<imm16> or relocation. */
5819 int internal_fixup_p;
5820 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5821 if (internal_fixup_p)
5822 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5823 skip_whitespace (str);
5824 if (skip_past_comma (&str))
5825 {
5826 /* {, LSL #<shift>} */
5827 if (! aarch64_gas_internal_fixup_p ())
5828 {
5829 set_fatal_syntax_error (_("can't mix relocation modifier "
5830 "with explicit shift"));
5831 goto failure;
5832 }
5833 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5834 }
5835 else
5836 inst.base.operands[i].shifter.amount = 0;
5837 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5838 inst.base.operands[i].imm.value = 0;
5839 if (! process_movw_reloc_info ())
5840 goto failure;
5841 }
5842 break;
5843
5844 case AARCH64_OPND_EXCEPTION:
5845 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
5846 imm_reg_type));
5847 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5848 /* addr_off_p */ 0,
5849 /* need_libopcodes_p */ 0,
5850 /* skip_p */ 1);
5851 break;
5852
5853 case AARCH64_OPND_NZCV:
5854 {
5855 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5856 if (nzcv != NULL)
5857 {
5858 str += 4;
5859 info->imm.value = nzcv->value;
5860 break;
5861 }
5862 po_imm_or_fail (0, 15);
5863 info->imm.value = val;
5864 }
5865 break;
5866
5867 case AARCH64_OPND_COND:
5868 case AARCH64_OPND_COND1:
5869 {
5870 char *start = str;
5871 do
5872 str++;
5873 while (ISALPHA (*str));
5874 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
5875 if (info->cond == NULL)
5876 {
5877 set_syntax_error (_("invalid condition"));
5878 goto failure;
5879 }
5880 else if (operands[i] == AARCH64_OPND_COND1
5881 && (info->cond->value & 0xe) == 0xe)
5882 {
5883 /* Do not allow AL or NV. */
5884 set_default_error ();
5885 goto failure;
5886 }
5887 }
5888 break;
5889
5890 case AARCH64_OPND_ADDR_ADRP:
5891 po_misc_or_fail (parse_adrp (&str));
5892 /* Clear the value as operand needs to be relocated. */
5893 info->imm.value = 0;
5894 break;
5895
5896 case AARCH64_OPND_ADDR_PCREL14:
5897 case AARCH64_OPND_ADDR_PCREL19:
5898 case AARCH64_OPND_ADDR_PCREL21:
5899 case AARCH64_OPND_ADDR_PCREL26:
5900 po_misc_or_fail (parse_address (&str, info));
5901 if (!info->addr.pcrel)
5902 {
5903 set_syntax_error (_("invalid pc-relative address"));
5904 goto failure;
5905 }
5906 if (inst.gen_lit_pool
5907 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5908 {
5909 /* Only permit "=value" in the literal load instructions.
5910 The literal will be generated by programmer_friendly_fixup. */
5911 set_syntax_error (_("invalid use of \"=immediate\""));
5912 goto failure;
5913 }
5914 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5915 {
5916 set_syntax_error (_("unrecognized relocation suffix"));
5917 goto failure;
5918 }
5919 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5920 {
5921 info->imm.value = inst.reloc.exp.X_add_number;
5922 inst.reloc.type = BFD_RELOC_UNUSED;
5923 }
5924 else
5925 {
5926 info->imm.value = 0;
5927 if (inst.reloc.type == BFD_RELOC_UNUSED)
5928 switch (opcode->iclass)
5929 {
5930 case compbranch:
5931 case condbranch:
5932 /* e.g. CBZ or B.COND */
5933 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5934 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5935 break;
5936 case testbranch:
5937 /* e.g. TBZ */
5938 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5939 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5940 break;
5941 case branch_imm:
5942 /* e.g. B or BL */
5943 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5944 inst.reloc.type =
5945 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5946 : BFD_RELOC_AARCH64_JUMP26;
5947 break;
5948 case loadlit:
5949 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5950 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5951 break;
5952 case pcreladdr:
5953 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5954 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5955 break;
5956 default:
5957 gas_assert (0);
5958 abort ();
5959 }
5960 inst.reloc.pc_rel = 1;
5961 }
5962 break;
5963
5964 case AARCH64_OPND_ADDR_SIMPLE:
5965 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5966 {
5967 /* [<Xn|SP>{, #<simm>}] */
5968 char *start = str;
5969 /* First use the normal address-parsing routines, to get
5970 the usual syntax errors. */
5971 po_misc_or_fail (parse_address (&str, info));
5972 if (info->addr.pcrel || info->addr.offset.is_reg
5973 || !info->addr.preind || info->addr.postind
5974 || info->addr.writeback)
5975 {
5976 set_syntax_error (_("invalid addressing mode"));
5977 goto failure;
5978 }
5979
5980 /* Then retry, matching the specific syntax of these addresses. */
5981 str = start;
5982 po_char_or_fail ('[');
5983 po_reg_or_fail (REG_TYPE_R64_SP);
5984 /* Accept optional ", #0". */
5985 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5986 && skip_past_char (&str, ','))
5987 {
5988 skip_past_char (&str, '#');
5989 if (! skip_past_char (&str, '0'))
5990 {
5991 set_fatal_syntax_error
5992 (_("the optional immediate offset can only be 0"));
5993 goto failure;
5994 }
5995 }
5996 po_char_or_fail (']');
5997 break;
5998 }
5999
6000 case AARCH64_OPND_ADDR_REGOFF:
6001 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6002 po_misc_or_fail (parse_address (&str, info));
6003 regoff_addr:
6004 if (info->addr.pcrel || !info->addr.offset.is_reg
6005 || !info->addr.preind || info->addr.postind
6006 || info->addr.writeback)
6007 {
6008 set_syntax_error (_("invalid addressing mode"));
6009 goto failure;
6010 }
6011 if (!info->shifter.operator_present)
6012 {
6013 /* Default to LSL if not present. Libopcodes prefers shifter
6014 kind to be explicit. */
6015 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6016 info->shifter.kind = AARCH64_MOD_LSL;
6017 }
6018 /* Qualifier to be deduced by libopcodes. */
6019 break;
6020
6021 case AARCH64_OPND_ADDR_SIMM7:
6022 po_misc_or_fail (parse_address (&str, info));
6023 if (info->addr.pcrel || info->addr.offset.is_reg
6024 || (!info->addr.preind && !info->addr.postind))
6025 {
6026 set_syntax_error (_("invalid addressing mode"));
6027 goto failure;
6028 }
6029 if (inst.reloc.type != BFD_RELOC_UNUSED)
6030 {
6031 set_syntax_error (_("relocation not allowed"));
6032 goto failure;
6033 }
6034 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6035 /* addr_off_p */ 1,
6036 /* need_libopcodes_p */ 1,
6037 /* skip_p */ 0);
6038 break;
6039
6040 case AARCH64_OPND_ADDR_SIMM9:
6041 case AARCH64_OPND_ADDR_SIMM9_2:
6042 po_misc_or_fail (parse_address (&str, info));
6043 if (info->addr.pcrel || info->addr.offset.is_reg
6044 || (!info->addr.preind && !info->addr.postind)
6045 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6046 && info->addr.writeback))
6047 {
6048 set_syntax_error (_("invalid addressing mode"));
6049 goto failure;
6050 }
6051 if (inst.reloc.type != BFD_RELOC_UNUSED)
6052 {
6053 set_syntax_error (_("relocation not allowed"));
6054 goto failure;
6055 }
6056 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6057 /* addr_off_p */ 1,
6058 /* need_libopcodes_p */ 1,
6059 /* skip_p */ 0);
6060 break;
6061
6062 case AARCH64_OPND_ADDR_SIMM10:
6063 case AARCH64_OPND_ADDR_OFFSET:
6064 po_misc_or_fail (parse_address (&str, info));
6065 if (info->addr.pcrel || info->addr.offset.is_reg
6066 || !info->addr.preind || info->addr.postind)
6067 {
6068 set_syntax_error (_("invalid addressing mode"));
6069 goto failure;
6070 }
6071 if (inst.reloc.type != BFD_RELOC_UNUSED)
6072 {
6073 set_syntax_error (_("relocation not allowed"));
6074 goto failure;
6075 }
6076 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6077 /* addr_off_p */ 1,
6078 /* need_libopcodes_p */ 1,
6079 /* skip_p */ 0);
6080 break;
6081
6082 case AARCH64_OPND_ADDR_UIMM12:
6083 po_misc_or_fail (parse_address (&str, info));
6084 if (info->addr.pcrel || info->addr.offset.is_reg
6085 || !info->addr.preind || info->addr.writeback)
6086 {
6087 set_syntax_error (_("invalid addressing mode"));
6088 goto failure;
6089 }
6090 if (inst.reloc.type == BFD_RELOC_UNUSED)
6091 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6092 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6093 || (inst.reloc.type
6094 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6095 || (inst.reloc.type
6096 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
6097 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6098 /* Leave qualifier to be determined by libopcodes. */
6099 break;
6100
6101 case AARCH64_OPND_SIMD_ADDR_POST:
6102 /* [<Xn|SP>], <Xm|#<amount>> */
6103 po_misc_or_fail (parse_address (&str, info));
6104 if (!info->addr.postind || !info->addr.writeback)
6105 {
6106 set_syntax_error (_("invalid addressing mode"));
6107 goto failure;
6108 }
6109 if (!info->addr.offset.is_reg)
6110 {
6111 if (inst.reloc.exp.X_op == O_constant)
6112 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6113 else
6114 {
6115 set_fatal_syntax_error
6116 (_("writeback value must be an immediate constant"));
6117 goto failure;
6118 }
6119 }
6120 /* No qualifier. */
6121 break;
6122
6123 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6124 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6125 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6126 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6127 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6128 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6129 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6130 case AARCH64_OPND_SVE_ADDR_RI_U6:
6131 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6132 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6133 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6134 /* [X<n>{, #imm, MUL VL}]
6135 [X<n>{, #imm}]
6136 but recognizing SVE registers. */
6137 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6138 &offset_qualifier));
6139 if (base_qualifier != AARCH64_OPND_QLF_X)
6140 {
6141 set_syntax_error (_("invalid addressing mode"));
6142 goto failure;
6143 }
6144 sve_regimm:
6145 if (info->addr.pcrel || info->addr.offset.is_reg
6146 || !info->addr.preind || info->addr.writeback)
6147 {
6148 set_syntax_error (_("invalid addressing mode"));
6149 goto failure;
6150 }
6151 if (inst.reloc.type != BFD_RELOC_UNUSED
6152 || inst.reloc.exp.X_op != O_constant)
6153 {
6154 /* Make sure this has priority over
6155 "invalid addressing mode". */
6156 set_fatal_syntax_error (_("constant offset required"));
6157 goto failure;
6158 }
6159 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6160 break;
6161
6162 case AARCH64_OPND_SVE_ADDR_RR:
6163 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6164 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6165 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6166 case AARCH64_OPND_SVE_ADDR_RX:
6167 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6168 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6169 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6170 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6171 but recognizing SVE registers. */
6172 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6173 &offset_qualifier));
6174 if (base_qualifier != AARCH64_OPND_QLF_X
6175 || offset_qualifier != AARCH64_OPND_QLF_X)
6176 {
6177 set_syntax_error (_("invalid addressing mode"));
6178 goto failure;
6179 }
6180 goto regoff_addr;
6181
6182 case AARCH64_OPND_SVE_ADDR_RZ:
6183 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6184 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6185 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6186 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6187 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6188 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6189 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6190 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6191 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6192 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6193 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6194 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6195 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6196 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6197 &offset_qualifier));
6198 if (base_qualifier != AARCH64_OPND_QLF_X
6199 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6200 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6201 {
6202 set_syntax_error (_("invalid addressing mode"));
6203 goto failure;
6204 }
6205 info->qualifier = offset_qualifier;
6206 goto regoff_addr;
6207
6208 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6209 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6210 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6211 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6212 /* [Z<n>.<T>{, #imm}] */
6213 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6214 &offset_qualifier));
6215 if (base_qualifier != AARCH64_OPND_QLF_S_S
6216 && base_qualifier != AARCH64_OPND_QLF_S_D)
6217 {
6218 set_syntax_error (_("invalid addressing mode"));
6219 goto failure;
6220 }
6221 info->qualifier = base_qualifier;
6222 goto sve_regimm;
6223
6224 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6225 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6226 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6227 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6228 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6229
6230 We don't reject:
6231
6232 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6233
6234 here since we get better error messages by leaving it to
6235 the qualifier checking routines. */
6236 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6237 &offset_qualifier));
6238 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6239 && base_qualifier != AARCH64_OPND_QLF_S_D)
6240 || offset_qualifier != base_qualifier)
6241 {
6242 set_syntax_error (_("invalid addressing mode"));
6243 goto failure;
6244 }
6245 info->qualifier = base_qualifier;
6246 goto regoff_addr;
6247
6248 case AARCH64_OPND_SYSREG:
6249 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
6250 == PARSE_FAIL)
6251 {
6252 set_syntax_error (_("unknown or missing system register name"));
6253 goto failure;
6254 }
6255 inst.base.operands[i].sysreg = val;
6256 break;
6257
6258 case AARCH64_OPND_PSTATEFIELD:
6259 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
6260 == PARSE_FAIL)
6261 {
6262 set_syntax_error (_("unknown or missing PSTATE field name"));
6263 goto failure;
6264 }
6265 inst.base.operands[i].pstatefield = val;
6266 break;
6267
6268 case AARCH64_OPND_SYSREG_IC:
6269 inst.base.operands[i].sysins_op =
6270 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6271 goto sys_reg_ins;
6272 case AARCH64_OPND_SYSREG_DC:
6273 inst.base.operands[i].sysins_op =
6274 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6275 goto sys_reg_ins;
6276 case AARCH64_OPND_SYSREG_AT:
6277 inst.base.operands[i].sysins_op =
6278 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6279 goto sys_reg_ins;
6280 case AARCH64_OPND_SYSREG_TLBI:
6281 inst.base.operands[i].sysins_op =
6282 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6283 sys_reg_ins:
6284 if (inst.base.operands[i].sysins_op == NULL)
6285 {
6286 set_fatal_syntax_error ( _("unknown or missing operation name"));
6287 goto failure;
6288 }
6289 break;
6290
6291 case AARCH64_OPND_BARRIER:
6292 case AARCH64_OPND_BARRIER_ISB:
6293 val = parse_barrier (&str);
6294 if (val != PARSE_FAIL
6295 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6296 {
6297 /* ISB only accepts options name 'sy'. */
6298 set_syntax_error
6299 (_("the specified option is not accepted in ISB"));
6300 /* Turn off backtrack as this optional operand is present. */
6301 backtrack_pos = 0;
6302 goto failure;
6303 }
6304 /* This is an extension to accept a 0..15 immediate. */
6305 if (val == PARSE_FAIL)
6306 po_imm_or_fail (0, 15);
6307 info->barrier = aarch64_barrier_options + val;
6308 break;
6309
6310 case AARCH64_OPND_PRFOP:
6311 val = parse_pldop (&str);
6312 /* This is an extension to accept a 0..31 immediate. */
6313 if (val == PARSE_FAIL)
6314 po_imm_or_fail (0, 31);
6315 inst.base.operands[i].prfop = aarch64_prfops + val;
6316 break;
6317
6318 case AARCH64_OPND_BARRIER_PSB:
6319 val = parse_barrier_psb (&str, &(info->hint_option));
6320 if (val == PARSE_FAIL)
6321 goto failure;
6322 break;
6323
6324 default:
6325 as_fatal (_("unhandled operand code %d"), operands[i]);
6326 }
6327
6328 /* If we get here, this operand was successfully parsed. */
6329 inst.base.operands[i].present = 1;
6330 continue;
6331
6332 failure:
6333 /* The parse routine should already have set the error, but in case
6334 not, set a default one here. */
6335 if (! error_p ())
6336 set_default_error ();
6337
6338 if (! backtrack_pos)
6339 goto parse_operands_return;
6340
6341 {
6342 /* We reach here because this operand is marked as optional, and
6343 either no operand was supplied or the operand was supplied but it
6344 was syntactically incorrect. In the latter case we report an
6345 error. In the former case we perform a few more checks before
6346 dropping through to the code to insert the default operand. */
6347
6348 char *tmp = backtrack_pos;
6349 char endchar = END_OF_INSN;
6350
6351 if (i != (aarch64_num_of_operands (opcode) - 1))
6352 endchar = ',';
6353 skip_past_char (&tmp, ',');
6354
6355 if (*tmp != endchar)
6356 /* The user has supplied an operand in the wrong format. */
6357 goto parse_operands_return;
6358
6359 /* Make sure there is not a comma before the optional operand.
6360 For example the fifth operand of 'sys' is optional:
6361
6362 sys #0,c0,c0,#0, <--- wrong
6363 sys #0,c0,c0,#0 <--- correct. */
6364 if (comma_skipped_p && i && endchar == END_OF_INSN)
6365 {
6366 set_fatal_syntax_error
6367 (_("unexpected comma before the omitted optional operand"));
6368 goto parse_operands_return;
6369 }
6370 }
6371
6372 /* Reaching here means we are dealing with an optional operand that is
6373 omitted from the assembly line. */
6374 gas_assert (optional_operand_p (opcode, i));
6375 info->present = 0;
6376 process_omitted_operand (operands[i], opcode, i, info);
6377
6378 /* Try again, skipping the optional operand at backtrack_pos. */
6379 str = backtrack_pos;
6380 backtrack_pos = 0;
6381
6382 /* Clear any error record after the omitted optional operand has been
6383 successfully handled. */
6384 clear_error ();
6385 }
6386
6387 /* Check if we have parsed all the operands. */
6388 if (*str != '\0' && ! error_p ())
6389 {
6390 /* Set I to the index of the last present operand; this is
6391 for the purpose of diagnostics. */
6392 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6393 ;
6394 set_fatal_syntax_error
6395 (_("unexpected characters following instruction"));
6396 }
6397
6398 parse_operands_return:
6399
6400 if (error_p ())
6401 {
6402 DEBUG_TRACE ("parsing FAIL: %s - %s",
6403 operand_mismatch_kind_names[get_error_kind ()],
6404 get_error_message ());
6405 /* Record the operand error properly; this is useful when there
6406 are multiple instruction templates for a mnemonic name, so that
6407 later on, we can select the error that most closely describes
6408 the problem. */
6409 record_operand_error (opcode, i, get_error_kind (),
6410 get_error_message ());
6411 return FALSE;
6412 }
6413 else
6414 {
6415 DEBUG_TRACE ("parsing SUCCESS");
6416 return TRUE;
6417 }
6418 }
6419
6420 /* It does some fix-up to provide some programmer friendly feature while
6421 keeping the libopcodes happy, i.e. libopcodes only accepts
6422 the preferred architectural syntax.
6423 Return FALSE if there is any failure; otherwise return TRUE. */
6424
static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  /* Only a few instruction classes need any programmer-friendly
     adjustment; everything else falls through unchanged.  */
  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A bit number >= 32 cannot refer to a W register.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* The W spelling is a convenience only; hand libopcodes the X
	     qualifier it expects.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW reads a 32-bit value from the pool regardless of the
	     destination register width.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  /* Only constants, bignums and symbols can go in the pool.  */
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	/* The extended-register operand is always the second or third
	   operand in this class.  */
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6527
6528 /* Check for loads and stores that will cause unpredictable behavior. */
6529
6530 static void
6531 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6532 {
6533 aarch64_inst *base = &instr->base;
6534 const aarch64_opcode *opcode = base->opcode;
6535 const aarch64_opnd_info *opnds = base->operands;
6536 switch (opcode->iclass)
6537 {
6538 case ldst_pos:
6539 case ldst_imm9:
6540 case ldst_imm10:
6541 case ldst_unscaled:
6542 case ldst_unpriv:
6543 /* Loading/storing the base register is unpredictable if writeback. */
6544 if ((aarch64_get_operand_class (opnds[0].type)
6545 == AARCH64_OPND_CLASS_INT_REG)
6546 && opnds[0].reg.regno == opnds[1].addr.base_regno
6547 && opnds[1].addr.base_regno != REG_SP
6548 && opnds[1].addr.writeback)
6549 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6550 break;
6551 case ldstpair_off:
6552 case ldstnapair_offs:
6553 case ldstpair_indexed:
6554 /* Loading/storing the base register is unpredictable if writeback. */
6555 if ((aarch64_get_operand_class (opnds[0].type)
6556 == AARCH64_OPND_CLASS_INT_REG)
6557 && (opnds[0].reg.regno == opnds[2].addr.base_regno
6558 || opnds[1].reg.regno == opnds[2].addr.base_regno)
6559 && opnds[2].addr.base_regno != REG_SP
6560 && opnds[2].addr.writeback)
6561 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6562 /* Load operations must load different registers. */
6563 if ((opcode->opcode & (1 << 22))
6564 && opnds[0].reg.regno == opnds[1].reg.regno)
6565 as_warn (_("unpredictable load of register pair -- `%s'"), str);
6566 break;
6567 default:
6568 break;
6569 }
6570 }
6571
6572 /* A wrapper function to interface with libopcodes on encoding and
6573 record the error message if there is any.
6574
6575 Return TRUE on success; otherwise return FALSE. */
6576
6577 static bfd_boolean
6578 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6579 aarch64_insn *code)
6580 {
6581 aarch64_operand_error error_info;
6582 error_info.kind = AARCH64_OPDE_NIL;
6583 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
6584 return TRUE;
6585 else
6586 {
6587 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6588 record_operand_error_info (opcode, &error_info);
6589 return FALSE;
6590 }
6591 }
6592
6593 #ifdef DEBUG_AARCH64
6594 static inline void
6595 dump_opcode_operands (const aarch64_opcode *opcode)
6596 {
6597 int i = 0;
6598 while (opcode->operands[i] != AARCH64_OPND_NIL)
6599 {
6600 aarch64_verbose ("\t\t opnd%d: %s", i,
6601 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
6602 ? aarch64_get_operand_name (opcode->operands[i])
6603 : aarch64_get_operand_desc (opcode->operands[i]));
6604 ++i;
6605 }
6606 }
6607 #endif /* DEBUG_AARCH64 */
6608
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Look up the mnemonic; on return P points past the mnemonic, at the
     start of the operand list.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* opcode_lookup may have recorded a condition code (e.g. for b.eq);
     preserve it across the instruction reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; any of the three failing sends us to
	 the next candidate template.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }
	  /* Successfully assembled; done with this mnemonic.  */
	  return;
	}

      /* This template did not match; reset and try the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str);
}
6742
6743 /* Various frobbings of labels and their addresses. */
6744
void
aarch64_start_line_hook (void)
{
  /* Forget the label seen on the previous line; md_assemble only
     re-anchors a label recorded for the current line.  */
  last_label_seen = NULL;
}
6750
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember SYM so md_assemble can move it onto the (possibly aligned)
     address of the next instruction.  */
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
6758
6759 int
6760 aarch64_data_in_code (void)
6761 {
6762 if (!strncmp (input_line_pointer + 1, "data:", 5))
6763 {
6764 *input_line_pointer = '/';
6765 input_line_pointer += 5;
6766 *input_line_pointer = 0;
6767 return 1;
6768 }
6769
6770 return 0;
6771 }
6772
/* Canonicalize a symbol name: strip a trailing "/data" marker, in
   place, from NAME.  The marker is removed only when something precedes
   it (length strictly greater than 5).  Returns NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  int len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
6783 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* Entry builders.  REGDEF creates one entry; REGDEF_ALIAS differs only
   in its final flag field (FALSE instead of TRUE — presumably marking
   the name as an alias; verify against the reg_entry definition).
   REGNUM pastes the register number onto the prefix, and REGSET16 /
   REGSET31 / REGSET expand to runs of 16, 31 and 32 numbered
   registers respectively.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  Only x0-x30/w0-w30 here; number 31 is covered
     separately below by sp/wsp and xzr/wzr, which share the encoding.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  Only 16 of these exist (p0-p15).  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

/* The builders are private to this table.  */
#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
6853
/* Flag-letter helpers: an upper-case letter stands for a set flag (1),
   a lower-case letter for a clear flag (0).  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the four condition flags into a 4-bit value, N in the most
   significant bit and V in the least.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All 16 spellings of the NZCV operand, one per flag combination.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

/* The single-letter macros would otherwise pollute the namespace.  */
#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
6891 \f
6892 /* MD interface: bits in the object file. */
6893
6894 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6895 for use in the a.out file, and stores them in the array pointed to by buf.
6896 This knows about the endian-ness of the target machine and does
6897 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
6898 2 (short) and 4 (long) Floating numbers are put out as a series of
6899 LITTLENUMS (shorts, here at least). */
6900
6901 void
6902 md_number_to_chars (char *buf, valueT val, int n)
6903 {
6904 if (target_big_endian)
6905 number_to_chars_bigendian (buf, val, n);
6906 else
6907 number_to_chars_littleendian (buf, val, n);
6908 }
6909
6910 /* MD interface: Sections. */
6911
6912 /* Estimate the size of a frag before relaxing. Assume everything fits in
6913 4 bytes. */
6914
6915 int
6916 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6917 {
6918 fragp->fr_var = 4;
6919 return 4;
6920 }
6921
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No extra padding is added here: SIZE is returned unchanged.  */
  return size;
}
6929
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total number of padding bytes this alignment must cover.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach a 4-byte instruction boundary; these become
     the zero-filled fixed part of the frag.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating part is a single NOP.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
6987
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag mapping data; nothing to do.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state appropriate to the frag type.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}

/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* On entry the CFA is SP with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
#endif /* OBJ_ELF */
7042
/* Convert REGNAME to a DWARF-2 register number.  Returns -1 if REGNAME
   does not parse as a register, or names a register class with no
   mapping here.  */

int
tc_aarch64_regname_to_dw2regnum (char *regname)
{
  const reg_entry *reg = parse_reg (&regname);
  if (reg == NULL)
    return -1;

  switch (reg->type)
    {
    /* Core registers (and SP) use their architectural number directly.  */
    case REG_TYPE_SP_32:
    case REG_TYPE_SP_64:
    case REG_TYPE_R_32:
    case REG_TYPE_R_64:
      return reg->number;

    /* FP/SIMD registers are numbered from 64 upwards.  */
    case REG_TYPE_FP_B:
    case REG_TYPE_FP_H:
    case REG_TYPE_FP_S:
    case REG_TYPE_FP_D:
    case REG_TYPE_FP_Q:
      return reg->number + 64;

    default:
      break;
    }
  return -1;
}
7072
/* Implement DWARF2_ADDR_SIZE: the size in bytes of an address in the
   DWARF debug info.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses despite the 64-bit architecture.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7084
7085 /* MD interface: Symbol and relocation handling. */
7086
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For AArch64 PC-relative fixups applied to instructions
   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     A relocation will be emitted when the symbol lives in a different
     segment or when the target forces one (aarch64_force_relocation),
     in which case the linker performs the subtraction and BASE is 0.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
7107
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap prefix test before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  /* A user-defined symbol of this name would conflict with the
	     one we are about to create.  */
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
7133
7134 /* Return non-zero if the indicated VALUE has overflowed the maximum
7135 range expressible by a unsigned number with the indicated number of
7136 BITS. */
7137
7138 static bfd_boolean
7139 unsigned_overflow (valueT value, unsigned bits)
7140 {
7141 valueT lim;
7142 if (bits >= sizeof (valueT) * 8)
7143 return FALSE;
7144 lim = (valueT) 1 << bits;
7145 return (value >= lim);
7146 }
7147
7148
/* Return non-zero if the indicated VALUE has overflowed the maximum
   range expressible by a signed number with the indicated number of
   BITS.  */

static bfd_boolean
signed_overflow (offsetT value, unsigned bits)
{
  offsetT lim;
  if (bits >= sizeof (offsetT) * 8)
    return FALSE;
  /* Representable range is [-2^(BITS-1), 2^(BITS-1) - 1].  */
  lim = (offsetT) 1 << (bits - 1);
  return (value < -lim || value >= lim);
}
7162
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset op to its unscaled counterpart; anything not
     listed cannot be converted.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode under the replacement opcode; failure means the offset
     does not fit the 9-bit signed form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
    return FALSE;

  return TRUE;
}
7224
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  MOVZ/MOVN cannot target SP and
     ORR cannot target ZR, hence the two separate flags.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
7285
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  /* Dispatch on the operand kind recorded when the fixS was created.  */
  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* 16-bit immediate of an exception-generation instruction.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6 Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6 Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6 Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6 Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode through libopcodes since the
	 immediate field layout is non-trivial.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  If the scaled form rejects the offset, fall back
	 to the equivalent unscaled (LDUR/STUR-style) encoding.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7461
7462 /* Apply a fixup (fixP) to segment data, once it has been determined
7463 by our caller that we have all the info we need to fix it up.
7464
7465 Parameter valP is the pointer to the value of the bits. */
7466
7467 void
7468 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7469 {
7470 offsetT value = *valP;
7471 uint32_t insn;
7472 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7473 int scale;
7474 unsigned flags = fixP->fx_addnumber;
7475
7476 DEBUG_TRACE ("\n\n");
7477 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7478 DEBUG_TRACE ("Enter md_apply_fix");
7479
7480 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7481
7482 /* Note whether this will delete the relocation. */
7483
7484 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7485 fixP->fx_done = 1;
7486
7487 /* Process the relocations. */
7488 switch (fixP->fx_r_type)
7489 {
7490 case BFD_RELOC_NONE:
7491 /* This will need to go in the object file. */
7492 fixP->fx_done = 0;
7493 break;
7494
7495 case BFD_RELOC_8:
7496 case BFD_RELOC_8_PCREL:
7497 if (fixP->fx_done || !seg->use_rela_p)
7498 md_number_to_chars (buf, value, 1);
7499 break;
7500
7501 case BFD_RELOC_16:
7502 case BFD_RELOC_16_PCREL:
7503 if (fixP->fx_done || !seg->use_rela_p)
7504 md_number_to_chars (buf, value, 2);
7505 break;
7506
7507 case BFD_RELOC_32:
7508 case BFD_RELOC_32_PCREL:
7509 if (fixP->fx_done || !seg->use_rela_p)
7510 md_number_to_chars (buf, value, 4);
7511 break;
7512
7513 case BFD_RELOC_64:
7514 case BFD_RELOC_64_PCREL:
7515 if (fixP->fx_done || !seg->use_rela_p)
7516 md_number_to_chars (buf, value, 8);
7517 break;
7518
7519 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7520 /* We claim that these fixups have been processed here, even if
7521 in fact we generate an error because we do not have a reloc
7522 for them, so tc_gen_reloc() will reject them. */
7523 fixP->fx_done = 1;
7524 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7525 {
7526 as_bad_where (fixP->fx_file, fixP->fx_line,
7527 _("undefined symbol %s used as an immediate value"),
7528 S_GET_NAME (fixP->fx_addsy));
7529 goto apply_fix_return;
7530 }
7531 fix_insn (fixP, flags, value);
7532 break;
7533
7534 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7535 if (fixP->fx_done || !seg->use_rela_p)
7536 {
7537 if (value & 3)
7538 as_bad_where (fixP->fx_file, fixP->fx_line,
7539 _("pc-relative load offset not word aligned"));
7540 if (signed_overflow (value, 21))
7541 as_bad_where (fixP->fx_file, fixP->fx_line,
7542 _("pc-relative load offset out of range"));
7543 insn = get_aarch64_insn (buf);
7544 insn |= encode_ld_lit_ofs_19 (value >> 2);
7545 put_aarch64_insn (buf, insn);
7546 }
7547 break;
7548
7549 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7550 if (fixP->fx_done || !seg->use_rela_p)
7551 {
7552 if (signed_overflow (value, 21))
7553 as_bad_where (fixP->fx_file, fixP->fx_line,
7554 _("pc-relative address offset out of range"));
7555 insn = get_aarch64_insn (buf);
7556 insn |= encode_adr_imm (value);
7557 put_aarch64_insn (buf, insn);
7558 }
7559 break;
7560
7561 case BFD_RELOC_AARCH64_BRANCH19:
7562 if (fixP->fx_done || !seg->use_rela_p)
7563 {
7564 if (value & 3)
7565 as_bad_where (fixP->fx_file, fixP->fx_line,
7566 _("conditional branch target not word aligned"));
7567 if (signed_overflow (value, 21))
7568 as_bad_where (fixP->fx_file, fixP->fx_line,
7569 _("conditional branch out of range"));
7570 insn = get_aarch64_insn (buf);
7571 insn |= encode_cond_branch_ofs_19 (value >> 2);
7572 put_aarch64_insn (buf, insn);
7573 }
7574 break;
7575
7576 case BFD_RELOC_AARCH64_TSTBR14:
7577 if (fixP->fx_done || !seg->use_rela_p)
7578 {
7579 if (value & 3)
7580 as_bad_where (fixP->fx_file, fixP->fx_line,
7581 _("conditional branch target not word aligned"));
7582 if (signed_overflow (value, 16))
7583 as_bad_where (fixP->fx_file, fixP->fx_line,
7584 _("conditional branch out of range"));
7585 insn = get_aarch64_insn (buf);
7586 insn |= encode_tst_branch_ofs_14 (value >> 2);
7587 put_aarch64_insn (buf, insn);
7588 }
7589 break;
7590
7591 case BFD_RELOC_AARCH64_CALL26:
7592 case BFD_RELOC_AARCH64_JUMP26:
7593 if (fixP->fx_done || !seg->use_rela_p)
7594 {
7595 if (value & 3)
7596 as_bad_where (fixP->fx_file, fixP->fx_line,
7597 _("branch target not word aligned"));
7598 if (signed_overflow (value, 28))
7599 as_bad_where (fixP->fx_file, fixP->fx_line,
7600 _("branch out of range"));
7601 insn = get_aarch64_insn (buf);
7602 insn |= encode_branch_ofs_26 (value >> 2);
7603 put_aarch64_insn (buf, insn);
7604 }
7605 break;
7606
7607 case BFD_RELOC_AARCH64_MOVW_G0:
7608 case BFD_RELOC_AARCH64_MOVW_G0_NC:
7609 case BFD_RELOC_AARCH64_MOVW_G0_S:
7610 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7611 scale = 0;
7612 goto movw_common;
7613 case BFD_RELOC_AARCH64_MOVW_G1:
7614 case BFD_RELOC_AARCH64_MOVW_G1_NC:
7615 case BFD_RELOC_AARCH64_MOVW_G1_S:
7616 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7617 scale = 16;
7618 goto movw_common;
7619 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7620 scale = 0;
7621 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7622 /* Should always be exported to object file, see
7623 aarch64_force_relocation(). */
7624 gas_assert (!fixP->fx_done);
7625 gas_assert (seg->use_rela_p);
7626 goto movw_common;
7627 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7628 scale = 16;
7629 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7630 /* Should always be exported to object file, see
7631 aarch64_force_relocation(). */
7632 gas_assert (!fixP->fx_done);
7633 gas_assert (seg->use_rela_p);
7634 goto movw_common;
7635 case BFD_RELOC_AARCH64_MOVW_G2:
7636 case BFD_RELOC_AARCH64_MOVW_G2_NC:
7637 case BFD_RELOC_AARCH64_MOVW_G2_S:
7638 scale = 32;
7639 goto movw_common;
7640 case BFD_RELOC_AARCH64_MOVW_G3:
7641 scale = 48;
7642 movw_common:
7643 if (fixP->fx_done || !seg->use_rela_p)
7644 {
7645 insn = get_aarch64_insn (buf);
7646
7647 if (!fixP->fx_done)
7648 {
7649 /* REL signed addend must fit in 16 bits */
7650 if (signed_overflow (value, 16))
7651 as_bad_where (fixP->fx_file, fixP->fx_line,
7652 _("offset out of range"));
7653 }
7654 else
7655 {
7656 /* Check for overflow and scale. */
7657 switch (fixP->fx_r_type)
7658 {
7659 case BFD_RELOC_AARCH64_MOVW_G0:
7660 case BFD_RELOC_AARCH64_MOVW_G1:
7661 case BFD_RELOC_AARCH64_MOVW_G2:
7662 case BFD_RELOC_AARCH64_MOVW_G3:
7663 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7664 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7665 if (unsigned_overflow (value, scale + 16))
7666 as_bad_where (fixP->fx_file, fixP->fx_line,
7667 _("unsigned value out of range"));
7668 break;
7669 case BFD_RELOC_AARCH64_MOVW_G0_S:
7670 case BFD_RELOC_AARCH64_MOVW_G1_S:
7671 case BFD_RELOC_AARCH64_MOVW_G2_S:
7672 /* NOTE: We can only come here with movz or movn. */
7673 if (signed_overflow (value, scale + 16))
7674 as_bad_where (fixP->fx_file, fixP->fx_line,
7675 _("signed value out of range"));
7676 if (value < 0)
7677 {
7678 /* Force use of MOVN. */
7679 value = ~value;
7680 insn = reencode_movzn_to_movn (insn);
7681 }
7682 else
7683 {
7684 /* Force use of MOVZ. */
7685 insn = reencode_movzn_to_movz (insn);
7686 }
7687 break;
7688 default:
7689 /* Unchecked relocations. */
7690 break;
7691 }
7692 value >>= scale;
7693 }
7694
7695 /* Insert value into MOVN/MOVZ/MOVK instruction. */
7696 insn |= encode_movw_imm (value & 0xffff);
7697
7698 put_aarch64_insn (buf, insn);
7699 }
7700 break;
7701
7702 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7703 fixP->fx_r_type = (ilp32_p
7704 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7705 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7706 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7707 /* Should always be exported to object file, see
7708 aarch64_force_relocation(). */
7709 gas_assert (!fixP->fx_done);
7710 gas_assert (seg->use_rela_p);
7711 break;
7712
7713 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7714 fixP->fx_r_type = (ilp32_p
7715 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
7716 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
7717 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7718 /* Should always be exported to object file, see
7719 aarch64_force_relocation(). */
7720 gas_assert (!fixP->fx_done);
7721 gas_assert (seg->use_rela_p);
7722 break;
7723
7724 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
7725 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7726 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7727 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7728 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
7729 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7730 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7731 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7732 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7733 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7734 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7735 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7736 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7737 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7738 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7739 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7740 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7741 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7742 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7743 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7744 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7745 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7746 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7747 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7748 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7749 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7750 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7751 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7752 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7753 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7754 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7755 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7756 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7757 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7758 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7759 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7760 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7761 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7762 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7763 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7764 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7765 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7766 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7767 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7768 S_SET_THREAD_LOCAL (fixP->fx_addsy);
7769 /* Should always be exported to object file, see
7770 aarch64_force_relocation(). */
7771 gas_assert (!fixP->fx_done);
7772 gas_assert (seg->use_rela_p);
7773 break;
7774
7775 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7776 /* Should always be exported to object file, see
7777 aarch64_force_relocation(). */
7778 fixP->fx_r_type = (ilp32_p
7779 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
7780 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
7781 gas_assert (!fixP->fx_done);
7782 gas_assert (seg->use_rela_p);
7783 break;
7784
7785 case BFD_RELOC_AARCH64_ADD_LO12:
7786 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7787 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7788 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7789 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7790 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7791 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7792 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7793 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7794 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7795 case BFD_RELOC_AARCH64_LDST128_LO12:
7796 case BFD_RELOC_AARCH64_LDST16_LO12:
7797 case BFD_RELOC_AARCH64_LDST32_LO12:
7798 case BFD_RELOC_AARCH64_LDST64_LO12:
7799 case BFD_RELOC_AARCH64_LDST8_LO12:
7800 /* Should always be exported to object file, see
7801 aarch64_force_relocation(). */
7802 gas_assert (!fixP->fx_done);
7803 gas_assert (seg->use_rela_p);
7804 break;
7805
7806 case BFD_RELOC_AARCH64_TLSDESC_ADD:
7807 case BFD_RELOC_AARCH64_TLSDESC_CALL:
7808 case BFD_RELOC_AARCH64_TLSDESC_LDR:
7809 break;
7810
7811 case BFD_RELOC_UNUSED:
7812 /* An error will already have been reported. */
7813 break;
7814
7815 default:
7816 as_bad_where (fixP->fx_file, fixP->fx_line,
7817 _("unexpected %s fixup"),
7818 bfd_get_reloc_code_name (fixP->fx_r_type));
7819 break;
7820 }
7821
7822 apply_fix_return:
7823 /* Free the allocated the struct aarch64_inst.
7824 N.B. currently there are very limited number of fix-up types actually use
7825 this field, so the impact on the performance should be minimal . */
7826 if (fixP->tc_fix_data.inst != NULL)
7827 free (fixP->tc_fix_data.inst);
7828
7829 return;
7830 }
7831
7832 /* Translate internal representation of relocation info to BFD target
7833 format. */
7834
7835 arelent *
7836 tc_gen_reloc (asection * section, fixS * fixp)
7837 {
7838 arelent *reloc;
7839 bfd_reloc_code_real_type code;
7840
7841 reloc = XNEW (arelent);
7842
7843 reloc->sym_ptr_ptr = XNEW (asymbol *);
7844 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7845 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7846
7847 if (fixp->fx_pcrel)
7848 {
7849 if (section->use_rela_p)
7850 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7851 else
7852 fixp->fx_offset = reloc->address;
7853 }
7854 reloc->addend = fixp->fx_offset;
7855
7856 code = fixp->fx_r_type;
7857 switch (code)
7858 {
7859 case BFD_RELOC_16:
7860 if (fixp->fx_pcrel)
7861 code = BFD_RELOC_16_PCREL;
7862 break;
7863
7864 case BFD_RELOC_32:
7865 if (fixp->fx_pcrel)
7866 code = BFD_RELOC_32_PCREL;
7867 break;
7868
7869 case BFD_RELOC_64:
7870 if (fixp->fx_pcrel)
7871 code = BFD_RELOC_64_PCREL;
7872 break;
7873
7874 default:
7875 break;
7876 }
7877
7878 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7879 if (reloc->howto == NULL)
7880 {
7881 as_bad_where (fixp->fx_file, fixp->fx_line,
7882 _
7883 ("cannot represent %s relocation in this object file format"),
7884 bfd_get_reloc_code_name (code));
7885 return NULL;
7886 }
7887
7888 return reloc;
7889 }
7890
7891 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
7892
7893 void
7894 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7895 {
7896 bfd_reloc_code_real_type type;
7897 int pcrel = 0;
7898
7899 /* Pick a reloc.
7900 FIXME: @@ Should look at CPU word size. */
7901 switch (size)
7902 {
7903 case 1:
7904 type = BFD_RELOC_8;
7905 break;
7906 case 2:
7907 type = BFD_RELOC_16;
7908 break;
7909 case 4:
7910 type = BFD_RELOC_32;
7911 break;
7912 case 8:
7913 type = BFD_RELOC_64;
7914 break;
7915 default:
7916 as_bad (_("cannot do %u-byte relocation"), size);
7917 type = BFD_RELOC_UNUSED;
7918 break;
7919 }
7920
7921 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7922 }
7923
/* Implement TC_FORCE_RELOCATION.  Return non-zero if the fix-up FIXP
   must be emitted as a relocation in the object file instead of being
   resolved by the assembler.  */
int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

      /* GOT-, TLS- and PC-page-relative relocations can only be
	 resolved with the final symbol placement, so they must always
	 reach the linker.  This list must be kept in sync with the
	 corresponding cases in md_apply_fix.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  return generic_force_reloc (fixp);
}
8011
8012 #ifdef OBJ_ELF
8013
8014 /* Implement md_after_parse_args. This is the earliest time we need to decide
8015 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8016
8017 void
8018 aarch64_after_parse_args (void)
8019 {
8020 if (aarch64_abi != AARCH64_ABI_NONE)
8021 return;
8022
8023 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8024 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8025 aarch64_abi = AARCH64_ABI_ILP32;
8026 else
8027 aarch64_abi = AARCH64_ABI_LP64;
8028 }
8029
8030 const char *
8031 elf64_aarch64_target_format (void)
8032 {
8033 if (strcmp (TARGET_OS, "cloudabi") == 0)
8034 {
8035 /* FIXME: What to do for ilp32_p ? */
8036 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
8037 }
8038 if (target_big_endian)
8039 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8040 else
8041 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8042 }
8043
/* Per-symbol hook run during symbol table finalization; simply defers
   to the generic ELF symbol frobbing.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8049 #endif
8050
8051 /* MD interface: Finalization. */
8052
8053 /* A good place to do this, although this was probably not intended
8054 for this kind of use. We need to dump the literal pool before
8055 references are made to a null symbol pointer. */
8056
8057 void
8058 aarch64_cleanup (void)
8059 {
8060 literal_pool *pool;
8061
8062 for (pool = list_of_pools; pool; pool = pool->next)
8063 {
8064 /* Put it at the end of the relevant section. */
8065 subseg_set (pool->section, pool->sub_section);
8066 s_ltorg (0);
8067 }
8068 }
8069
8070 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on this frag's boundary: scan forward over
	 empty frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8134 #endif
8135
/* Adjust the symbol table before it is written to the object file.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8148
/* Insert the KEY/VALUE pair into TABLE, diagnosing (but not aborting
   on) a failed insertion.  */
static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *hash_err;

  hash_err = hash_insert (table, key, value);
  if (hash_err)
    /* NOTE(review): this reports to stdout and carries on; an internal
       error here arguably warrants as_fatal — confirm intent before
       changing.  */
    printf ("Internal Error: Can't hash %s\n", key);
}
8158
8159 static void
8160 fill_instruction_hash_table (void)
8161 {
8162 aarch64_opcode *opcode = aarch64_opcode_table;
8163
8164 while (opcode->name != NULL)
8165 {
8166 templates *templ, *new_templ;
8167 templ = hash_find (aarch64_ops_hsh, opcode->name);
8168
8169 new_templ = XNEW (templates);
8170 new_templ->opcode = opcode;
8171 new_templ->next = NULL;
8172
8173 if (!templ)
8174 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8175 else
8176 {
8177 new_templ->next = templ->next;
8178 templ->next = new_templ;
8179 }
8180 ++opcode;
8181 }
8182 }
8183
/* Copy at most NUM characters from SRC to DST, converting each to
   upper case, then NUL-terminate DST.  DST must provide room for
   NUM + 1 bytes.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t i;
  /* Use size_t for the counter so it is compared against NUM at full
     width; the previous "unsigned int" counter could wrap before
     reaching a very large NUM on LP64 hosts.  */
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
8192
8193 /* Assume STR point to a lower-case string, allocate, convert and return
8194 the corresponding upper-case string. */
8195 static inline const char*
8196 get_upper_str (const char *str)
8197 {
8198 char *ret;
8199 size_t len = strlen (str);
8200 ret = XNEWVEC (char, len + 1);
8201 convert_to_upper (ret, str, len);
8202 return ret;
8203 }
8204
8205 /* MD interface: Initialization. */
8206
8207 void
8208 md_begin (void)
8209 {
8210 unsigned mach;
8211 unsigned int i;
8212
8213 if ((aarch64_ops_hsh = hash_new ()) == NULL
8214 || (aarch64_cond_hsh = hash_new ()) == NULL
8215 || (aarch64_shift_hsh = hash_new ()) == NULL
8216 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8217 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8218 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8219 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8220 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8221 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8222 || (aarch64_reg_hsh = hash_new ()) == NULL
8223 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8224 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8225 || (aarch64_pldop_hsh = hash_new ()) == NULL
8226 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8227 as_fatal (_("virtual memory exhausted"));
8228
8229 fill_instruction_hash_table ();
8230
8231 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8232 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8233 (void *) (aarch64_sys_regs + i));
8234
8235 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8236 checked_hash_insert (aarch64_pstatefield_hsh,
8237 aarch64_pstatefields[i].name,
8238 (void *) (aarch64_pstatefields + i));
8239
8240 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8241 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8242 aarch64_sys_regs_ic[i].name,
8243 (void *) (aarch64_sys_regs_ic + i));
8244
8245 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8246 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8247 aarch64_sys_regs_dc[i].name,
8248 (void *) (aarch64_sys_regs_dc + i));
8249
8250 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8251 checked_hash_insert (aarch64_sys_regs_at_hsh,
8252 aarch64_sys_regs_at[i].name,
8253 (void *) (aarch64_sys_regs_at + i));
8254
8255 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8256 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8257 aarch64_sys_regs_tlbi[i].name,
8258 (void *) (aarch64_sys_regs_tlbi + i));
8259
8260 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8261 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8262 (void *) (reg_names + i));
8263
8264 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8265 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8266 (void *) (nzcv_names + i));
8267
8268 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8269 {
8270 const char *name = aarch64_operand_modifiers[i].name;
8271 checked_hash_insert (aarch64_shift_hsh, name,
8272 (void *) (aarch64_operand_modifiers + i));
8273 /* Also hash the name in the upper case. */
8274 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8275 (void *) (aarch64_operand_modifiers + i));
8276 }
8277
8278 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8279 {
8280 unsigned int j;
8281 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8282 the same condition code. */
8283 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8284 {
8285 const char *name = aarch64_conds[i].names[j];
8286 if (name == NULL)
8287 break;
8288 checked_hash_insert (aarch64_cond_hsh, name,
8289 (void *) (aarch64_conds + i));
8290 /* Also hash the name in the upper case. */
8291 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8292 (void *) (aarch64_conds + i));
8293 }
8294 }
8295
8296 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8297 {
8298 const char *name = aarch64_barrier_options[i].name;
8299 /* Skip xx00 - the unallocated values of option. */
8300 if ((i & 0x3) == 0)
8301 continue;
8302 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8303 (void *) (aarch64_barrier_options + i));
8304 /* Also hash the name in the upper case. */
8305 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8306 (void *) (aarch64_barrier_options + i));
8307 }
8308
8309 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8310 {
8311 const char* name = aarch64_prfops[i].name;
8312 /* Skip the unallocated hint encodings. */
8313 if (name == NULL)
8314 continue;
8315 checked_hash_insert (aarch64_pldop_hsh, name,
8316 (void *) (aarch64_prfops + i));
8317 /* Also hash the name in the upper case. */
8318 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8319 (void *) (aarch64_prfops + i));
8320 }
8321
8322 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8323 {
8324 const char* name = aarch64_hint_options[i].name;
8325
8326 checked_hash_insert (aarch64_hint_opt_hsh, name,
8327 (void *) (aarch64_hint_options + i));
8328 /* Also hash the name in the upper case. */
8329 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8330 (void *) (aarch64_hint_options + i));
8331 }
8332
8333 /* Set the cpu variant based on the command-line options. */
8334 if (!mcpu_cpu_opt)
8335 mcpu_cpu_opt = march_cpu_opt;
8336
8337 if (!mcpu_cpu_opt)
8338 mcpu_cpu_opt = &cpu_default;
8339
8340 cpu_variant = *mcpu_cpu_opt;
8341
8342 /* Record the CPU type. */
8343 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8344
8345 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8346 }
8347
/* Command line processing.  */

/* Short options recognised by this back-end; "m" takes an argument
   (e.g. -mcpu=..., -mabi=...).  */
const char *md_shortopts = "m:";

/* Define -EB/-EL only for the endiannesses this build can produce:
   both for a bi-endian assembler, otherwise only the configured one.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);

/* Describes a boolean command-line option that simply sets an int
   variable to a fixed value.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8397
/* Maps a -mcpu= name to the feature set it implies.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				 "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_3,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};

/* Maps a -march= name to the feature set it implies.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {NULL, AARCH64_ARCH_NONE}
};
8476
/* ISA extensions.  */
/* Maps a +ext / +noext extension name to the features it enables and
   the features it depends on.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require;	/* Feature dependencies.  */
};

/* Entries are resolved transitively: enabling an extension pulls in its
   REQUIRE set, and disabling one also disables everything whose REQUIRE
   set overlaps it (see aarch64_feature_enable_set/disable_set).  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO
					 | AARCH64_FEATURE_AES
					 | AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2
					 | AARCH64_FEATURE_SHA3, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};

/* Describes a long command-line option handled by a callback that
   parses the sub-option text itself (e.g. -mcpu=, -march=, -mabi=).  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
8543
8544 /* Transitive closure of features depending on set. */
8545 static aarch64_feature_set
8546 aarch64_feature_disable_set (aarch64_feature_set set)
8547 {
8548 const struct aarch64_option_cpu_value_table *opt;
8549 aarch64_feature_set prev = 0;
8550
8551 while (prev != set) {
8552 prev = set;
8553 for (opt = aarch64_features; opt->name != NULL; opt++)
8554 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8555 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8556 }
8557 return set;
8558 }
8559
8560 /* Transitive closure of dependencies of set. */
8561 static aarch64_feature_set
8562 aarch64_feature_enable_set (aarch64_feature_set set)
8563 {
8564 const struct aarch64_option_cpu_value_table *opt;
8565 aarch64_feature_set prev = 0;
8566
8567 while (prev != set) {
8568 prev = set;
8569 for (opt = aarch64_features; opt->name != NULL; opt++)
8570 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8571 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8572 }
8573 return set;
8574 }
8575
8576 static int
8577 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8578 bfd_boolean ext_only)
8579 {
8580 /* We insist on extensions being added before being removed. We achieve
8581 this by using the ADDING_VALUE variable to indicate whether we are
8582 adding an extension (1) or removing it (0) and only allowing it to
8583 change in the order -1 -> 1 -> 0. */
8584 int adding_value = -1;
8585 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8586
8587 /* Copy the feature set, so that we can modify it. */
8588 *ext_set = **opt_p;
8589 *opt_p = ext_set;
8590
8591 while (str != NULL && *str != 0)
8592 {
8593 const struct aarch64_option_cpu_value_table *opt;
8594 const char *ext = NULL;
8595 int optlen;
8596
8597 if (!ext_only)
8598 {
8599 if (*str != '+')
8600 {
8601 as_bad (_("invalid architectural extension"));
8602 return 0;
8603 }
8604
8605 ext = strchr (++str, '+');
8606 }
8607
8608 if (ext != NULL)
8609 optlen = ext - str;
8610 else
8611 optlen = strlen (str);
8612
8613 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
8614 {
8615 if (adding_value != 0)
8616 adding_value = 0;
8617 optlen -= 2;
8618 str += 2;
8619 }
8620 else if (optlen > 0)
8621 {
8622 if (adding_value == -1)
8623 adding_value = 1;
8624 else if (adding_value != 1)
8625 {
8626 as_bad (_("must specify extensions to add before specifying "
8627 "those to remove"));
8628 return FALSE;
8629 }
8630 }
8631
8632 if (optlen == 0)
8633 {
8634 as_bad (_("missing architectural extension"));
8635 return 0;
8636 }
8637
8638 gas_assert (adding_value != -1);
8639
8640 for (opt = aarch64_features; opt->name != NULL; opt++)
8641 if (strncmp (opt->name, str, optlen) == 0)
8642 {
8643 aarch64_feature_set set;
8644
8645 /* Add or remove the extension. */
8646 if (adding_value)
8647 {
8648 set = aarch64_feature_enable_set (opt->value);
8649 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
8650 }
8651 else
8652 {
8653 set = aarch64_feature_disable_set (opt->value);
8654 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
8655 }
8656 break;
8657 }
8658
8659 if (opt->name == NULL)
8660 {
8661 as_bad (_("unknown architectural extension `%s'"), str);
8662 return 0;
8663 }
8664
8665 str = ext;
8666 };
8667
8668 return 1;
8669 }
8670
8671 static int
8672 aarch64_parse_cpu (const char *str)
8673 {
8674 const struct aarch64_cpu_option_table *opt;
8675 const char *ext = strchr (str, '+');
8676 size_t optlen;
8677
8678 if (ext != NULL)
8679 optlen = ext - str;
8680 else
8681 optlen = strlen (str);
8682
8683 if (optlen == 0)
8684 {
8685 as_bad (_("missing cpu name `%s'"), str);
8686 return 0;
8687 }
8688
8689 for (opt = aarch64_cpus; opt->name != NULL; opt++)
8690 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8691 {
8692 mcpu_cpu_opt = &opt->value;
8693 if (ext != NULL)
8694 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
8695
8696 return 1;
8697 }
8698
8699 as_bad (_("unknown cpu `%s'"), str);
8700 return 0;
8701 }
8702
8703 static int
8704 aarch64_parse_arch (const char *str)
8705 {
8706 const struct aarch64_arch_option_table *opt;
8707 const char *ext = strchr (str, '+');
8708 size_t optlen;
8709
8710 if (ext != NULL)
8711 optlen = ext - str;
8712 else
8713 optlen = strlen (str);
8714
8715 if (optlen == 0)
8716 {
8717 as_bad (_("missing architecture name `%s'"), str);
8718 return 0;
8719 }
8720
8721 for (opt = aarch64_archs; opt->name != NULL; opt++)
8722 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8723 {
8724 march_cpu_opt = &opt->value;
8725 if (ext != NULL)
8726 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
8727
8728 return 1;
8729 }
8730
8731 as_bad (_("unknown architecture `%s'\n"), str);
8732 return 0;
8733 }
8734
/* ABIs.  Maps an ABI name (as given to -mabi=) to its internal code.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name, e.g. "lp64".  */
  enum aarch64_abi_type value;	/* Corresponding internal ABI code.  */
};
8741
/* Table of the ABIs accepted by the -mabi= option.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
8746
8747 static int
8748 aarch64_parse_abi (const char *str)
8749 {
8750 unsigned int i;
8751
8752 if (str[0] == '\0')
8753 {
8754 as_bad (_("missing abi name `%s'"), str);
8755 return 0;
8756 }
8757
8758 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
8759 if (strcmp (str, aarch64_abis[i].name) == 0)
8760 {
8761 aarch64_abi = aarch64_abis[i].value;
8762 return 1;
8763 }
8764
8765 as_bad (_("unknown abi `%s'\n"), str);
8766 return 0;
8767 }
8768
/* Long "-m..." options handled by md_parse_option.  The ABI option is
   only meaningful for ELF targets.  Terminated by an all-NULL entry.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
8780
/* Implement md_parse_option: handle the AArch64-specific command-line
   options -EB/-EL, the short options in AARCH64_OPTS and the "-m..."
   long options in AARCH64_LONG_OPTS.  C is the option character and ARG
   its argument (or NULL).  Return non-zero if the option was handled.  */

int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Try an exact match against the short-option table first.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then try a prefix match against the long-option table.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text that follows the
		 matched "m..." prefix (e.g. the "cortex-a57" of
		 "-mcpu=cortex-a57").  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
8848
8849 void
8850 md_show_usage (FILE * fp)
8851 {
8852 struct aarch64_option_table *opt;
8853 struct aarch64_long_option_table *lopt;
8854
8855 fprintf (fp, _(" AArch64-specific assembler options:\n"));
8856
8857 for (opt = aarch64_opts; opt->option != NULL; opt++)
8858 if (opt->help != NULL)
8859 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
8860
8861 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8862 if (lopt->help != NULL)
8863 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
8864
8865 #ifdef OPTION_EB
8866 fprintf (fp, _("\
8867 -EB assemble code for a big-endian cpu\n"));
8868 #endif
8869
8870 #ifdef OPTION_EL
8871 fprintf (fp, _("\
8872 -EL assemble code for a little-endian cpu\n"));
8873 #endif
8874 }
8875
/* Parse a .cpu directive: ".cpu <name>[+ext...]".  Looks the name up in
   AARCH64_CPUS, applies any extension modifiers, and updates
   MCPU_CPU_OPT / CPU_VARIANT for subsequent assembly.  */

static void
s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_cpu_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  /* Collect the CPU name (plus optional "+ext" modifiers) up to the first
     whitespace, NUL-terminating it in place and remembering the
     overwritten character.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry.  */
  for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
	&& strncmp (name, opt->name, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	if (ext != NULL)
	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
	    /* NOTE(review): this early return leaves the NUL written above
	       in place of SAVED_CHAR and skips rest-of-line handling —
	       confirm whether the terminator should be restored first.  */
	    return;

	cpu_variant = *mcpu_cpu_opt;

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
8920
8921
/* Parse a .arch directive: ".arch <name>[+ext...]".  Looks the name up in
   AARCH64_ARCHS, applies any extension modifiers, and updates
   MCPU_CPU_OPT / CPU_VARIANT for subsequent assembly.  */

static void
s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_arch_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  /* Collect the architecture name (plus optional "+ext" modifiers) up to
     the first whitespace, NUL-terminating it in place and remembering the
     overwritten character.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry.  */
  for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
	&& strncmp (name, opt->name, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	if (ext != NULL)
	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
	    /* NOTE(review): this early return leaves the NUL written above
	       in place of SAVED_CHAR and skips rest-of-line handling —
	       confirm whether the terminator should be restored first.  */
	    return;

	cpu_variant = *mcpu_cpu_opt;

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  /* NOTE(review): the trailing '\n' is redundant — as_bad appends a
     newline itself (cf. the "unknown cpu" message above).  */
  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
8967
8968 /* Parse a .arch_extension directive. */
8969
8970 static void
8971 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8972 {
8973 char saved_char;
8974 char *ext = input_line_pointer;;
8975
8976 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8977 input_line_pointer++;
8978 saved_char = *input_line_pointer;
8979 *input_line_pointer = 0;
8980
8981 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8982 return;
8983
8984 cpu_variant = *mcpu_cpu_opt;
8985
8986 *input_line_pointer = saved_char;
8987 demand_empty_rest_of_line ();
8988 }
8989
/* Copy symbol information.  Propagate the AArch64-specific symbol flags
   from SRC to DEST (used by gas when one symbol is set from another).  */

void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}