/* binutils-gdb: gas/config/tc-aarch64.c
   (from the patch "AArch64: warn on unpredictable stlxrb, stlxrh and
   stlxr cases").  */
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2018 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 #ifdef OBJ_ELF
59 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
60 static symbolS *GOT_symbol;
61
62 /* Which ABI to use. */
63 enum aarch64_abi_type
64 {
65 AARCH64_ABI_NONE = 0,
66 AARCH64_ABI_LP64 = 1,
67 AARCH64_ABI_ILP32 = 2
68 };
69
70 #ifndef DEFAULT_ARCH
71 #define DEFAULT_ARCH "aarch64"
72 #endif
73
74 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
75 static const char *default_arch = DEFAULT_ARCH;
76
77 /* AArch64 ABI for the output file. */
78 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
79
80 /* When non-zero, program to a 32-bit model, in which the C data types
81 int, long and all pointer types are 32-bit objects (ILP32); or to a
82 64-bit model, in which the C int type is 32-bits but the C long type
83 and all pointer types are 64-bit objects (LP64). */
84 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
85 #endif
86
87 enum vector_el_type
88 {
89 NT_invtype = -1,
90 NT_b,
91 NT_h,
92 NT_s,
93 NT_d,
94 NT_q,
95 NT_zero,
96 NT_merge
97 };
98
99 /* Bits for DEFINED field in vector_type_el. */
100 #define NTA_HASTYPE 1
101 #define NTA_HASINDEX 2
102 #define NTA_HASVARWIDTH 4
103
104 struct vector_type_el
105 {
106 enum vector_el_type type;
107 unsigned char defined;
108 unsigned width;
109 int64_t index;
110 };
111
112 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
113
114 struct reloc
115 {
116 bfd_reloc_code_real_type type;
117 expressionS exp;
118 int pc_rel;
119 enum aarch64_opnd opnd;
120 uint32_t flags;
121 unsigned need_libopcodes_p : 1;
122 };
123
124 struct aarch64_instruction
125 {
126 /* libopcodes structure for instruction intermediate representation. */
127 aarch64_inst base;
128 /* Record assembly errors found during the parsing. */
129 struct
130 {
131 enum aarch64_operand_error_kind kind;
132 const char *error;
133 } parsing_error;
134 /* The condition that appears in the assembly line. */
135 int cond;
136 /* Relocation information (including the GAS internal fixup). */
137 struct reloc reloc;
138 /* Need to generate an immediate in the literal pool. */
139 unsigned gen_lit_pool : 1;
140 };
141
142 typedef struct aarch64_instruction aarch64_instruction;
143
144 static aarch64_instruction inst;
145
146 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
147 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
148
149 /* Diagnostics inline function utilities.
150
151 These are lightweight utilities which should only be called by parse_operands
152 and other parsers. GAS processes each assembly line by parsing it against
153 instruction template(s), in the case of multiple templates (for the same
154 mnemonic name), those templates are tried one by one until one succeeds or
155 all fail. An assembly line may fail a few templates before being
156 successfully parsed; an error saved here in most cases is not a user error
157 but an error indicating the current template is not the right template.
158 Therefore it is very important that errors can be saved at a low cost during
159 the parsing; we don't want to slow down the whole parsing by recording
160 non-user errors in detail.
161
162 Remember that the objective is to help GAS pick up the most appropriate
163 error message in the case of multiple templates, e.g. FMOV which has 8
164 templates. */
165
166 static inline void
167 clear_error (void)
168 {
169 inst.parsing_error.kind = AARCH64_OPDE_NIL;
170 inst.parsing_error.error = NULL;
171 }
172
173 static inline bfd_boolean
174 error_p (void)
175 {
176 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
177 }
178
/* Return the message text of the diagnostic recorded for the current
   instruction, or NULL if none was attached.  */
static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}
184
/* Return the kind of the diagnostic recorded for the current
   instruction; AARCH64_OPDE_NIL means no error has been recorded.  */
static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}
190
191 static inline void
192 set_error (enum aarch64_operand_error_kind kind, const char *error)
193 {
194 inst.parsing_error.kind = kind;
195 inst.parsing_error.error = error;
196 }
197
/* Record ERROR as a recoverable diagnostic: the parse of the current
   template may still continue.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
203
/* Record a syntax error with no message text; the DESC field of the
   corresponding aarch64_operand entry will be used later to compose
   the user-visible error message.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
211
/* Record ERROR as a syntax error, overwriting any earlier
   diagnostic.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
217
218 static inline void
219 set_first_syntax_error (const char *error)
220 {
221 if (! error_p ())
222 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
223 }
224
/* Record ERROR as a fatal syntax error: no further templates will be
   tried for the current mnemonic.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
230 \f
231 /* Number of littlenums required to hold an extended precision number. */
232 #define MAX_LITTLENUMS 6
233
234 /* Return value for certain parsers when the parsing fails; those parsers
235 return the information of the parsed result, e.g. register number, on
236 success. */
237 #define PARSE_FAIL -1
238
239 /* This is an invalid condition code that means no conditional field is
240 present. */
241 #define COND_ALWAYS 0x10
242
243 typedef struct
244 {
245 const char *template;
246 unsigned long value;
247 } asm_barrier_opt;
248
249 typedef struct
250 {
251 const char *template;
252 uint32_t value;
253 } asm_nzcv;
254
255 struct reloc_entry
256 {
257 char *name;
258 bfd_reloc_code_real_type reloc;
259 };
260
261 /* Macros to define the register types and masks for the purpose
262 of parsing. */
263
264 #undef AARCH64_REG_TYPES
265 #define AARCH64_REG_TYPES \
266 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
267 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
268 BASIC_REG_TYPE(SP_32) /* wsp */ \
269 BASIC_REG_TYPE(SP_64) /* sp */ \
270 BASIC_REG_TYPE(Z_32) /* wzr */ \
271 BASIC_REG_TYPE(Z_64) /* xzr */ \
272 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
273 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
274 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
275 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
276 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
277 BASIC_REG_TYPE(VN) /* v[0-31] */ \
278 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
279 BASIC_REG_TYPE(PN) /* p[0-15] */ \
280 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
281 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
282 /* Typecheck: same, plus SVE registers. */ \
283 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
284 | REG_TYPE(ZN)) \
285 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
286 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
287 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
288 /* Typecheck: same, plus SVE registers. */ \
289 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
290 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
291 | REG_TYPE(ZN)) \
292 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
293 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
294 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
295 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
296 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
298 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
299 /* Typecheck: any [BHSDQ]P FP. */ \
300 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
301 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
302 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
303 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
305 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
306 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
307 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
308 be used for SVE instructions, since Zn and Pn are valid symbols \
309 in other contexts. */ \
310 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
311 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
312 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
313 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
314 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
315 | REG_TYPE(ZN) | REG_TYPE(PN)) \
316 /* Any integer register; used for error messages only. */ \
317 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
318 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
319 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
320 /* Pseudo type to mark the end of the enumerator sequence. */ \
321 BASIC_REG_TYPE(MAX)
322
323 #undef BASIC_REG_TYPE
324 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
325 #undef MULTI_REG_TYPE
326 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
327
328 /* Register type enumerators. */
329 typedef enum aarch64_reg_type_
330 {
331 /* A list of REG_TYPE_*. */
332 AARCH64_REG_TYPES
333 } aarch64_reg_type;
334
335 #undef BASIC_REG_TYPE
336 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
337 #undef REG_TYPE
338 #define REG_TYPE(T) (1 << REG_TYPE_##T)
339 #undef MULTI_REG_TYPE
340 #define MULTI_REG_TYPE(T,V) V,
341
342 /* Structure for a hash table entry for a register. */
343 typedef struct
344 {
345 const char *name;
346 unsigned char number;
347 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
348 unsigned char builtin;
349 } reg_entry;
350
351 /* Values indexed by aarch64_reg_type to assist the type checking. */
352 static const unsigned reg_type_masks[] =
353 {
354 AARCH64_REG_TYPES
355 };
356
357 #undef BASIC_REG_TYPE
358 #undef REG_TYPE
359 #undef MULTI_REG_TYPE
360 #undef AARCH64_REG_TYPES
361
362 /* Diagnostics used when we don't get a register of the expected type.
363 Note: this has to synchronized with aarch64_reg_type definitions
364 above. */
/* Return the diagnostic to issue when a register of type REG_TYPE was
   expected but not found.  The strings are wrapped in N_() so they can
   be translated; as_fatal does not return, so MSG is always set on the
   normal path.  NOTE: the cases must stay in sync with the
   aarch64_reg_type enumerators generated from AARCH64_REG_TYPES.  */
static const char *
get_reg_expected_msg (aarch64_reg_type reg_type)
{
  const char *msg;

  switch (reg_type)
    {
    case REG_TYPE_R_32:
      msg = N_("integer 32-bit register expected");
      break;
    case REG_TYPE_R_64:
      msg = N_("integer 64-bit register expected");
      break;
    case REG_TYPE_R_N:
      msg = N_("integer register expected");
      break;
    case REG_TYPE_R64_SP:
      msg = N_("64-bit integer or SP register expected");
      break;
    case REG_TYPE_SVE_BASE:
      msg = N_("base register expected");
      break;
    case REG_TYPE_R_Z:
      msg = N_("integer or zero register expected");
      break;
    case REG_TYPE_SVE_OFFSET:
      msg = N_("offset register expected");
      break;
    case REG_TYPE_R_SP:
      msg = N_("integer or SP register expected");
      break;
    case REG_TYPE_R_Z_SP:
      msg = N_("integer, zero or SP register expected");
      break;
    case REG_TYPE_FP_B:
      msg = N_("8-bit SIMD scalar register expected");
      break;
    case REG_TYPE_FP_H:
      msg = N_("16-bit SIMD scalar or floating-point half precision "
	       "register expected");
      break;
    case REG_TYPE_FP_S:
      msg = N_("32-bit SIMD scalar or floating-point single precision "
	       "register expected");
      break;
    case REG_TYPE_FP_D:
      msg = N_("64-bit SIMD scalar or floating-point double precision "
	       "register expected");
      break;
    case REG_TYPE_FP_Q:
      msg = N_("128-bit SIMD scalar or floating-point quad precision "
	       "register expected");
      break;
    case REG_TYPE_R_Z_BHSDQ_V:
    case REG_TYPE_R_Z_SP_BHSDQ_VZP:
      msg = N_("register expected");
      break;
    case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
      msg = N_("SIMD scalar or floating-point register expected");
      break;
    case REG_TYPE_VN:		/* any V reg  */
      msg = N_("vector register expected");
      break;
    case REG_TYPE_ZN:
      msg = N_("SVE vector register expected");
      break;
    case REG_TYPE_PN:
      msg = N_("SVE predicate register expected");
      break;
    default:
      /* Multi-class masks with no dedicated message end up here; they
	 indicate an internal inconsistency, hence the fatal error.  */
      as_fatal (_("invalid register type %d"), reg_type);
    }
  return msg;
}
439
440 /* Some well known registers that we refer to directly elsewhere. */
441 #define REG_SP 31
442
443 /* Instructions take 4 bytes in the object file. */
444 #define INSN_SIZE 4
445
446 static struct hash_control *aarch64_ops_hsh;
447 static struct hash_control *aarch64_cond_hsh;
448 static struct hash_control *aarch64_shift_hsh;
449 static struct hash_control *aarch64_sys_regs_hsh;
450 static struct hash_control *aarch64_pstatefield_hsh;
451 static struct hash_control *aarch64_sys_regs_ic_hsh;
452 static struct hash_control *aarch64_sys_regs_dc_hsh;
453 static struct hash_control *aarch64_sys_regs_at_hsh;
454 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
455 static struct hash_control *aarch64_reg_hsh;
456 static struct hash_control *aarch64_barrier_opt_hsh;
457 static struct hash_control *aarch64_nzcv_hsh;
458 static struct hash_control *aarch64_pldop_hsh;
459 static struct hash_control *aarch64_hint_opt_hsh;
460
461 /* Stuff needed to resolve the label ambiguity
462 As:
463 ...
464 label: <insn>
465 may differ from:
466 ...
467 label:
468 <insn> */
469
470 static symbolS *last_label_seen;
471
472 /* Literal pool structure. Held on a per-section
473 and per-sub-section basis. */
474
475 #define MAX_LITERAL_POOL_SIZE 1024
476 typedef struct literal_expression
477 {
478 expressionS exp;
479 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
480 LITTLENUM_TYPE * bignum;
481 } literal_expression;
482
483 typedef struct literal_pool
484 {
485 literal_expression literals[MAX_LITERAL_POOL_SIZE];
486 unsigned int next_free_entry;
487 unsigned int id;
488 symbolS *symbol;
489 segT section;
490 subsegT sub_section;
491 int size;
492 struct literal_pool *next;
493 } literal_pool;
494
495 /* Pointer to a linked list of literal pools. */
496 static literal_pool *list_of_pools = NULL;
497 \f
498 /* Pure syntax. */
499
500 /* This array holds the chars that always start a comment. If the
501 pre-processor is disabled, these aren't very useful. */
502 const char comment_chars[] = "";
503
504 /* This array holds the chars that only start a comment at the beginning of
505 a line. If the line seems to have the form '# 123 filename'
506 .line and .file directives will appear in the pre-processed output. */
507 /* Note that input_file.c hand checks for '#' at the beginning of the
508 first line of the input file. This is because the compiler outputs
509 #NO_APP at the beginning of its output. */
510 /* Also note that comments like this one will always work. */
511 const char line_comment_chars[] = "#";
512
513 const char line_separator_chars[] = ";";
514
515 /* Chars that can be used to separate mant
516 from exp in floating point numbers. */
517 const char EXP_CHARS[] = "eE";
518
519 /* Chars that mean this number is a floating point constant. */
520 /* As in 0f12.456 */
521 /* or 0d1.2345e12 */
522
523 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
524
525 /* Prefix character that indicates the start of an immediate value. */
526 #define is_immediate_prefix(C) ((C) == '#')
527
528 /* Separator character handling. */
529
530 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
531
532 static inline bfd_boolean
533 skip_past_char (char **str, char c)
534 {
535 if (**str == c)
536 {
537 (*str)++;
538 return TRUE;
539 }
540 else
541 return FALSE;
542 }
543
544 #define skip_past_comma(str) skip_past_char (str, ',')
545
546 /* Arithmetic expressions (possibly involving symbols). */
547
548 static bfd_boolean in_my_get_expression_p = FALSE;
549
550 /* Third argument to my_get_expression. */
551 #define GE_NO_PREFIX 0
552 #define GE_OPT_PREFIX 1
553
554 /* Return TRUE if the string pointed by *STR is successfully parsed
555 as an valid expression; *EP will be filled with the information of
556 such an expression. Otherwise return FALSE. */
557
static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  /* Optionally accept a leading '#' immediate prefix, depending on
     PREFIX_MODE (GE_NO_PREFIX or GE_OPT_PREFIX).  */
  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression() parser reads from input_line_pointer, so
     temporarily redirect it to *STR; in_my_get_expression_p lets
     md_operand flag faults back to us via O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#' prefix promised an immediate, so its absence is fatal;
	 otherwise only record the first error seen.  */
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  /* Success: advance the caller's pointer and restore GAS state.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
620
621 /* Turn a string in input_line_pointer into a floating point constant
622 of type TYPE, and store the appropriate bytes in *LITP. The number
623 of LITTLENUMS emitted is stored in *SIZEP. An error message is
624 returned, or NULL on OK. */
625
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* Delegate to the generic IEEE helper; TYPE selects the float
     format, the littlenums are stored at *LITP and their count in
     *SIZEP.  Returns an error message, or NULL on success.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
631
632 /* We handle all bad expressions here, so that we can report the faulty
633 instruction in the error message. */
634 void
635 md_operand (expressionS * exp)
636 {
637 if (in_my_get_expression_p)
638 exp->X_op = O_illegal;
639 }
640
641 /* Immediate values. */
642
643 /* Errors may be set multiple times during parsing or bit encoding
644 (particularly in the Neon bits), but usually the earliest error which is set
645 will be the most meaningful. Avoid overwriting it with later (cascading)
646 errors by calling this function. */
647
/* Record ERROR as a syntax error unless an earlier (and usually more
   meaningful) diagnostic has already been recorded.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
654
655 /* Similar to first_error, but this function accepts formatted error
656 message. */
/* printf-style variant of first_error: formats into a static buffer
   and records the result, but only if no diagnostic exists yet.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The know() assertion documents that messages are expected to
	 fit in the buffer; vsnprintf would truncate otherwise.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
679
680 /* Register parsing. */
681
682 /* Generic register parser which is called by other specialized
683 register parsers.
684 CCP points to what should be the beginning of a register name.
685 If it is indeed a valid register name, advance CCP over it and
686 return the reg_entry structure; otherwise return NULL.
687 It does not issue diagnostics. */
688
689 static reg_entry *
690 parse_reg (char **ccp)
691 {
692 char *start = *ccp;
693 char *p;
694 reg_entry *reg;
695
696 #ifdef REGISTER_PREFIX
697 if (*start != REGISTER_PREFIX)
698 return NULL;
699 start++;
700 #endif
701
702 p = start;
703 if (!ISALPHA (*p) || !is_name_beginner (*p))
704 return NULL;
705
706 do
707 p++;
708 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
709
710 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
711
712 if (!reg)
713 return NULL;
714
715 *ccp = p;
716 return reg;
717 }
718
719 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
720 return FALSE. */
721 static bfd_boolean
722 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
723 {
724 return (reg_type_masks[type] & (1 << reg->type)) != 0;
725 }
726
727 /* Try to parse a base or offset register. Allow SVE base and offset
728 registers if REG_TYPE includes SVE registers. Return the register
729 entry on success, setting *QUALIFIER to the register qualifier.
730 Return null otherwise.
731
732 Note that this function does not issue any diagnostics. */
733
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit GPR, WSP or WZR: W qualifier.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit GPR, SP or XZR: X qualifier.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* SVE Z registers are only accepted when REG_TYPE's mask allows
	 them, and must carry a '.s' or '.d' element-size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Step over the two-character suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
784
785 /* Try to parse a base or offset register. Return the register entry
786 on success, setting *QUALIFIER to the register qualifier. Return null
787 otherwise.
788
789 Note that this function does not issue any diagnostics. */
790
static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept any 32-bit or 64-bit integer register, {W}SP or [WX]ZR;
     SVE registers are excluded by the REG_TYPE_R_Z_SP mask.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
796
797 /* Parse the qualifier of a vector register or vector element of type
798 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
799 succeeds; otherwise return FALSE.
800
801 Accept only one occurrence of:
802 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
803 b h s d q */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE Z/P registers and bare element suffixes (e.g. ".s") have no
     numeric lane count; record width 0 (variable width).  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  /* Only the AdvSIMD lane counts 1, 2, 4, 8 and 16 are meaningful.  */
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  /* Map the element-size letter to its type and size in bits.  */
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only valid for SVE or as a single 128-bit lane.  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* For an explicit lane count, the total vector size must be 64 or
     128 bits, with the additional half-width forms 2h and 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
882
883 /* *STR contains an SVE zero/merge predication suffix. Parse it into
884 *PARSED_TYPE and point *STR at the end of the suffix. */
885
886 static bfd_boolean
887 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
888 {
889 char *ptr = *str;
890
891 /* Skip '/'. */
892 gas_assert (*ptr == '/');
893 ptr++;
894 switch (TOLOWER (*ptr))
895 {
896 case 'z':
897 parsed_type->type = NT_zero;
898 break;
899 case 'm':
900 parsed_type->type = NT_merge;
901 break;
902 default:
903 if (*ptr != '\0' && *ptr != ',')
904 first_error_fmt (_("unexpected character `%c' in predication type"),
905 *ptr);
906 else
907 first_error (_("missing predication type"));
908 return FALSE;
909 }
910 parsed_type->width = 0;
911 *str = ptr + 1;
912 return TRUE;
913 }
914
915 /* Parse a register of the type TYPE.
916
917 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
918 name or the parsed register is not of TYPE.
919
920 Otherwise return the register number, and optionally fill in the actual
921 type of the register in *RTYPE when multiple alternatives were given, and
922 return the register shape and element index information in *TYPEINFO.
923
924 IN_REG_LIST should be set with TRUE if the caller is parsing a register
925 list. */
926
static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start with an "undefined" shape: no type, no index.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the basic class actually parsed.  */
  type = reg->type;

  /* Vector, SVE vector and SVE predicate registers may carry a '.'
     shape suffix; predicates may alternatively carry a '/z' or '/m'
     predication suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register if of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* Optional element index, e.g. v1.4s[2].  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  Note: only the
     diagnostic is recorded here; the register number is still
     returned and the caller is expected to check error_p ().  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1050
1051 /* Parse register.
1052
1053 Return the register number on success; return PARSE_FAIL otherwise.
1054
1055 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1056 the register (e.g. NEON double or quad reg when either has been requested).
1057
1058 If this is a NEON vector register with additional type information, fill
1059 in the struct pointed to by VECTYPE (if non-NULL).
1060
1061 This parser does not handle register list. */
1062
1063 static int
1064 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1065 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1066 {
1067 struct vector_type_el atype;
1068 char *str = *ccp;
1069 int reg = parse_typed_reg (&str, type, rtype, &atype,
1070 /*in_reg_list= */ FALSE);
1071
1072 if (reg == PARSE_FAIL)
1073 return PARSE_FAIL;
1074
1075 if (vectype)
1076 *vectype = atype;
1077
1078 *ccp = str;
1079
1080 return reg;
1081 }
1082
1083 static inline bfd_boolean
1084 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1085 {
1086 return
1087 e1.type == e2.type
1088 && e1.defined == e2.defined
1089 && e1.width == e2.width && e1.index == e2.index;
1090 }
1091
1092 /* This function parses a list of vector registers of type TYPE.
1093 On success, it returns the parsed register list information in the
1094 following encoded format:
1095
1096 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1097 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1098
1099 The information of the register shape and/or index is returned in
1100 *VECTYPE.
1101
1102 It returns PARSE_FAIL if the register list is invalid.
1103
1104 The list contains one to four registers.
1105 Each register can be one of:
1106 <Vt>.<T>[<index>]
1107 <Vt>.<T>
1108 All <T> should be identical.
1109 All <index> should be identical.
1110 There are restrictions on <Vt> numbers which are checked later
1111 (by reg_list_valid_p). */
1112
static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;			/* Last register number parsed.  */
  val_range = -1;		/* First register of the current range.  */
  in_range = 0;			/* Non-zero while handling "Va - Vb".  */
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* A range end below its start is invalid.  Bump VAL_RANGE so
	     that the accumulation loop below does not re-count the
	     start register, which was added on the previous
	     iteration.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* Remember the first register's shape; every later register
	     in the list must match it exactly.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      /* Pack each register of the (possibly one-element) range into
	 RET_VAL, 5 bits per register.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Loop again after a comma, or after '-' (a range); note the comma
     expression sets IN_RANGE before the '-' test.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  if (expect_index)
    {
      /* A trailing [index] applies to the whole list and must be a
	 constant expression.  */
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Encode as documented above: 5-bit register numbers starting at
     bit 2, register count minus one in bits 0-1.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1252
1253 /* Directives: register aliases. */
1254
1255 static reg_entry *
1256 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1257 {
1258 reg_entry *new;
1259 const char *name;
1260
1261 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1262 {
1263 if (new->builtin)
1264 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1265 str);
1266
1267 /* Only warn about a redefinition if it's not defined as the
1268 same register. */
1269 else if (new->number != number || new->type != type)
1270 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1271
1272 return NULL;
1273 }
1274
1275 name = xstrdup (str);
1276 new = XNEW (reg_entry);
1277
1278 new->name = name;
1279 new->number = number;
1280 new->type = type;
1281 new->builtin = FALSE;
1282
1283 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1284 abort ();
1285
1286 return new;
1287 }
1288
1289 /* Look for the .req directive. This is of the form:
1290
1291 new_register_name .req existing_register_name
1292
1293 If we find one, or if it looks sufficiently like one that we want to
1294 handle any error here, return TRUE. Otherwise return FALSE. */
1295
static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side of .req must already be a known register or
     alias.  */
  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only create the case variant when it actually differs from
	 the name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1368
1369 /* Should never be called, as .req goes between the alias and the
1370 register name, not at the beginning of the line. */
1371 static void
1372 s_req (int a ATTRIBUTE_UNUSED)
1373 {
1374 as_bad (_("invalid syntax for .req directive"));
1375 }
1376
1377 /* The .unreq directive deletes an alias which was previously defined
1378 by .req. For example:
1379
1380 my_alias .req r11
1381 .unreq my_alias */
1382
1383 static void
1384 s_unreq (int a ATTRIBUTE_UNUSED)
1385 {
1386 char *name;
1387 char saved_char;
1388
1389 name = input_line_pointer;
1390
1391 while (*input_line_pointer != 0
1392 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1393 ++input_line_pointer;
1394
1395 saved_char = *input_line_pointer;
1396 *input_line_pointer = 0;
1397
1398 if (!*name)
1399 as_bad (_("invalid syntax for .unreq directive"));
1400 else
1401 {
1402 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1403
1404 if (!reg)
1405 as_bad (_("unknown register alias '%s'"), name);
1406 else if (reg->builtin)
1407 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1408 name);
1409 else
1410 {
1411 char *p;
1412 char *nbuf;
1413
1414 hash_delete (aarch64_reg_hsh, name, FALSE);
1415 free ((char *) reg->name);
1416 free (reg);
1417
1418 /* Also locate the all upper case and all lower case versions.
1419 Do not complain if we cannot find one or the other as it
1420 was probably deleted above. */
1421
1422 nbuf = strdup (name);
1423 for (p = nbuf; *p; p++)
1424 *p = TOUPPER (*p);
1425 reg = hash_find (aarch64_reg_hsh, nbuf);
1426 if (reg)
1427 {
1428 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1429 free ((char *) reg->name);
1430 free (reg);
1431 }
1432
1433 for (p = nbuf; *p; p++)
1434 *p = TOLOWER (*p);
1435 reg = hash_find (aarch64_reg_hsh, nbuf);
1436 if (reg)
1437 {
1438 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1439 free ((char *) reg->name);
1440 free (reg);
1441 }
1442
1443 free (nbuf);
1444 }
1445 }
1446
1447 *input_line_pointer = saved_char;
1448 demand_empty_rest_of_line ();
1449 }
1450
1451 /* Directives: Instruction set selection. */
1452
1453 #ifdef OBJ_ELF
1454 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1455 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1456 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1457 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1458
1459 /* Create a new mapping symbol for the transition to STATE. */
1460
static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Pick the AArch64 ELF mapping symbol name for STATE: "$d" marks
     data, "$x" marks A64 instructions.  Both are untyped (see the
     comment above this function).  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Unlink the superseded symbol from the symbol chain.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* An earlier symbol at exactly this offset is replaced by the
	 new one.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1514
1515 /* We must sometimes convert a region marked as code to data during
1516 code alignment, if an odd number of bytes have to be padded. The
1517 code mapping symbol is pushed to an aligned address. */
1518
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The removed symbol was also the first in the frag; clear
	     that reference as well so it is not left dangling.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding region as data, then restore STATE at the first
     offset past the BYTES bytes of padding.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1542
1543 static void mapping_state_2 (enum mstate state, int max_chars);
1544
1545 /* Set the mapping state to STATE. Only call this when about to
1546 emit some STATE bytes to the file. */
1547
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Record the new state and emit its mapping symbol at the current
     output position.  */
  mapping_state_2 (state, 0);
}
1586
1587 /* Same as mapping_state, but MAX_CHARS bytes have already been
1588 allocated. Put the mapping symbol that far back. */
1589
1590 static void
1591 mapping_state_2 (enum mstate state, int max_chars)
1592 {
1593 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1594
1595 if (!SEG_NORMAL (now_seg))
1596 return;
1597
1598 if (mapstate == state)
1599 /* The mapping symbol has already been emitted.
1600 There is nothing else to do. */
1601 return;
1602
1603 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1604 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1605 }
1606 #else
1607 #define mapping_state(x) /* nothing */
1608 #define mapping_state_2(x, y) /* nothing */
1609 #endif
1610
1611 /* Directives: sectioning and alignment. */
1612
1613 static void
1614 s_bss (int ignore ATTRIBUTE_UNUSED)
1615 {
1616 /* We don't support putting frags in the BSS segment, we fake it by
1617 marking in_bss, then looking at s_skip for clues. */
1618 subseg_set (bss_section, 0);
1619 demand_empty_rest_of_line ();
1620 mapping_state (MAP_DATA);
1621 }
1622
1623 static void
1624 s_even (int ignore ATTRIBUTE_UNUSED)
1625 {
1626 /* Never make frag if expect extra pass. */
1627 if (!need_pass_2)
1628 frag_align (1, 0, 0);
1629
1630 record_alignment (now_seg, 1);
1631
1632 demand_empty_rest_of_line ();
1633 }
1634
1635 /* Directives: Literal pools. */
1636
1637 static literal_pool *
1638 find_literal_pool (int size)
1639 {
1640 literal_pool *pool;
1641
1642 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1643 {
1644 if (pool->section == now_seg
1645 && pool->sub_section == now_subseg && pool->size == size)
1646 break;
1647 }
1648
1649 return pool;
1650 }
1651
1652 static literal_pool *
1653 find_or_make_literal_pool (int size)
1654 {
1655 /* Next literal pool ID number. */
1656 static unsigned int latest_pool_num = 1;
1657 literal_pool *pool;
1658
1659 pool = find_literal_pool (size);
1660
1661 if (pool == NULL)
1662 {
1663 /* Create a new pool. */
1664 pool = XNEW (literal_pool);
1665 if (!pool)
1666 return NULL;
1667
1668 /* Currently we always put the literal pool in the current text
1669 section. If we were generating "small" model code where we
1670 knew that all code and initialised data was within 1MB then
1671 we could output literals to mergeable, read-only data
1672 sections. */
1673
1674 pool->next_free_entry = 0;
1675 pool->section = now_seg;
1676 pool->sub_section = now_subseg;
1677 pool->size = size;
1678 pool->next = list_of_pools;
1679 pool->symbol = NULL;
1680
1681 /* Add it to the list. */
1682 list_of_pools = pool;
1683 }
1684
1685 /* New pools, and emptied pools, will have a NULL symbol. */
1686 if (pool->symbol == NULL)
1687 {
1688 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1689 (valueT) 0, &zero_address_frag);
1690 pool->id = latest_pool_num++;
1691 }
1692
1693 /* Done. */
1694 return pool;
1695 }
1696
1697 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1698 Return TRUE on success, otherwise return FALSE. */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constant literals match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic literals match on symbol, offset and op-symbol.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP in place to refer to the pool entry: the pool's
     symbol plus the entry's byte offset within the pool.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1756
1757 /* Can't use symbol_new here, so have to create a symbol and then at
1758 a later date assign it a value. That's what these functions do. */
1759
static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Take a permanent copy of NAME on the notes obstack.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending to a frozen symbol table would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1807
1808
/* Implement the .ltorg/.pool directive: dump every pending literal
   pool for the current (sub)section into the output here.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Dump the pools for each supported entry size: 4, 8 and 16 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 control character keeps the name distinct from
	 anything a user could write.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Bind the pool's placeholder symbol to the current output
	 location.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1867
1868 #ifdef OBJ_ELF
1869 /* Forward declarations for functions below, in the MD interface
1870 section. */
1871 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1872 static struct reloc_table_entry * find_reloc_table_entry (char **);
1873
1874 /* Directives: Data. */
1875 /* N.B. the support for relocation suffix in this directive needs to be
1876 implemented properly. */
1877
/* Implement .word/.long/.xword/.dword; NBYTES is the element size.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* The directive emits data, so switch the mapping state.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A ':' here introduces a relocation suffix.  Such suffixes
	     are not yet implemented for data directives: diagnose and
	     drop the rest of the statement.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1929
1930 #endif /* OBJ_ELF */
1931
1932 /* Output a 32-bit word, but mark as an instruction. */
1933
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      /* Each comma-separated value must be a 32-bit constant.  */
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* emit_expr writes in target data endianness; byte-swap first on
	 big-endian targets so the instruction word comes out in the
	 little-endian instruction order.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1984
1985 #ifdef OBJ_ELF
1986 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
1987
1988 static void
1989 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
1990 {
1991 expressionS exp;
1992
1993 expression (&exp);
1994 frag_grow (4);
1995 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1996 BFD_RELOC_AARCH64_TLSDESC_ADD);
1997
1998 demand_empty_rest_of_line ();
1999 }
2000
2001 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2002
2003 static void
2004 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2005 {
2006 expressionS exp;
2007
2008 /* Since we're just labelling the code, there's no need to define a
2009 mapping symbol. */
2010 expression (&exp);
2011 /* Make sure there is enough room in this frag for the following
2012 blr. This trick only works if the blr follows immediately after
2013 the .tlsdesc directive. */
2014 frag_grow (4);
2015 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2016 BFD_RELOC_AARCH64_TLSDESC_CALL);
2017
2018 demand_empty_rest_of_line ();
2019 }
2020
2021 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2022
2023 static void
2024 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2025 {
2026 expressionS exp;
2027
2028 expression (&exp);
2029 frag_grow (4);
2030 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2031 BFD_RELOC_AARCH64_TLSDESC_LDR);
2032
2033 demand_empty_rest_of_line ();
2034 }
2035 #endif /* OBJ_ELF */
2036
2037 static void s_aarch64_arch (int);
2038 static void s_aarch64_cpu (int);
2039 static void s_aarch64_arch_extension (int);
2040
2041 /* This table describes all the machine specific pseudo-ops the assembler
2042 has to support. The fields are:
2043 pseudo-op name without dot
2044 function to call to execute this pseudo-op
2045 Integer arg to pass to the function. */
2046
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},		/* Alias for .ltorg.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
#ifdef OBJ_ELF
  /* TLS descriptor relocation markers.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* Data directives; the argument is the element size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
#endif
  {0, 0, 0}
};
2070 \f
2071
2072 /* Check whether STR points to a register name followed by a comma or the
2073 end of line; REG_TYPE indicates which register types are checked
2074 against. Return TRUE if STR is such a register name; otherwise return
2075 FALSE. The function does not intend to produce any diagnostics, but since
2076 the register parser aarch64_reg_parse, which is called by this function,
2077 does produce diagnostics, we call clear_error to clear any diagnostics
2078 that may be generated by aarch64_reg_parse.
2079 Also, the function returns FALSE directly if there is any user error
2080 present at the function entry. This prevents the existing diagnostics
2081 state from being spoiled.
2082 The function currently serves parse_constant_immediate and
2083 parse_big_immediate only. */
2084 static bfd_boolean
2085 reg_name_p (char *str, aarch64_reg_type reg_type)
2086 {
2087 int reg;
2088
2089 /* Prevent the diagnostics state from being spoiled. */
2090 if (error_p ())
2091 return FALSE;
2092
2093 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2094
2095 /* Clear the parsing error that may be set by the reg parser. */
2096 clear_error ();
2097
2098 if (reg == PARSE_FAIL)
2099 return FALSE;
2100
2101 skip_whitespace (str);
2102 if (*str == ',' || is_end_of_line[(unsigned int) *str])
2103 return TRUE;
2104
2105 return FALSE;
2106 }
2107
2108 /* Parser functions used exclusively in instruction operands. */
2109
2110 /* Parse an immediate expression which may not be constant.
2111
2112 To prevent the expression parser from pushing a register name
2113 into the symbol table as an undefined symbol, firstly a check is
2114 done to find out whether STR is a register of type REG_TYPE followed
2115 by a comma or the end of line. Return FALSE if STR is such a string. */
2116
2117 static bfd_boolean
2118 parse_immediate_expression (char **str, expressionS *exp,
2119 aarch64_reg_type reg_type)
2120 {
2121 if (reg_name_p (*str, reg_type))
2122 {
2123 set_recoverable_error (_("immediate operand required"));
2124 return FALSE;
2125 }
2126
2127 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2128
2129 if (exp->X_op == O_absent)
2130 {
2131 set_fatal_syntax_error (_("missing immediate expression"));
2132 return FALSE;
2133 }
2134
2135 return TRUE;
2136 }
2137
2138 /* Constant immediate-value read function for use in insn parsing.
2139 STR points to the beginning of the immediate (with the optional
2140 leading #); *VAL receives the value. REG_TYPE says which register
2141 names should be treated as registers rather than as symbolic immediates.
2142
2143 Return TRUE on success; otherwise return FALSE. */
2144
2145 static bfd_boolean
2146 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2147 {
2148 expressionS exp;
2149
2150 if (! parse_immediate_expression (str, &exp, reg_type))
2151 return FALSE;
2152
2153 if (exp.X_op != O_constant)
2154 {
2155 set_syntax_error (_("constant expression required"));
2156 return FALSE;
2157 }
2158
2159 *val = exp.X_add_number;
2160 return TRUE;
2161 }
2162
/* Pack the sign and top fraction/exponent bits of the IEEE single
   encoding IMM into the 8-bit AArch64 FP immediate field.  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0] */
  uint32_t sign = (imm >> (31 - 7)) & 0x80;	/* b[31] -> b[7] */

  return sign | low7;
}
2169
2170 /* Return TRUE if the single-precision floating-point value encoded in IMM
2171 can be expressed in the AArch64 8-bit signed floating-point format with
2172 3-bit exponent and normalized 4 bits of precision; in other words, the
2173 floating-point value must be expressable as
2174 (+/-) n / 16 * power (2, r)
2175 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2176
2177 static bfd_boolean
2178 aarch64_imm_float_p (uint32_t imm)
2179 {
2180 /* If a single-precision floating-point value has the following bit
2181 pattern, it can be expressed in the AArch64 8-bit floating-point
2182 format:
2183
2184 3 32222222 2221111111111
2185 1 09876543 21098765432109876543210
2186 n Eeeeeexx xxxx0000000000000000000
2187
2188 where n, e and each x are either 0 or 1 independently, with
2189 E == ~ e. */
2190
2191 uint32_t pattern;
2192
2193 /* Prepare the pattern for 'Eeeeee'. */
2194 if (((imm >> 30) & 0x1) == 0)
2195 pattern = 0x3e000000;
2196 else
2197 pattern = 0x40000000;
2198
2199 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2200 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2201 }
2202
2203 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2204 as an IEEE float without any loss of precision. Store the value in
2205 *FPWORD if so. */
2206
static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
       if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'.  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  /* Repack the surviving bits into the IEEE single layout.  */
  *fpword = ((high32 & 0xc0000000)	/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));	/* 3 S bits.  */
  return TRUE;
}
2250
2251 /* Return true if we should treat OPERAND as a double-precision
2252 floating-point operand rather than a single-precision one. */
2253 static bfd_boolean
2254 double_precision_operand_p (const aarch64_opnd_info *operand)
2255 {
2256 /* Check for unsuffixed SVE registers, which are allowed
2257 for LDR and STR but not in instructions that require an
2258 immediate. We get better error messages if we arbitrarily
2259 pick one size, parse the immediate normally, and then
2260 report the match failure in the normal way. */
2261 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2262 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2263 }
2264
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  /* An optional '#' may precede the immediate.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* A double-precision encoding is only acceptable if it can be
	     narrowed to a float without precision loss.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision encoding must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* A bare register name is a recoverable error: the caller may
	 re-parse the operand as a register.  */
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal form: let the generic IEEE reader produce the
	 single-precision littlenum encoding.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2340
2341 /* Less-generic immediate-value read function with the possibility of loading
2342 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2343 instructions.
2344
2345 To prevent the expression parser from pushing a register name into the
2346 symbol table as an undefined symbol, a check is firstly done to find
2347 out whether STR is a register of type REG_TYPE followed by a comma or
2348 the end of line. Return FALSE if STR is such a register. */
2349
2350 static bfd_boolean
2351 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2352 {
2353 char *ptr = *str;
2354
2355 if (reg_name_p (ptr, reg_type))
2356 {
2357 set_syntax_error (_("immediate operand required"));
2358 return FALSE;
2359 }
2360
2361 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2362
2363 if (inst.reloc.exp.X_op == O_constant)
2364 *imm = inst.reloc.exp.X_add_number;
2365
2366 *str = ptr;
2367
2368 return TRUE;
2369 }
2370
2371 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2372 if NEED_LIBOPCODES is non-zero, the fixup will need
2373 assistance from the libopcodes. */
2374
2375 static inline void
2376 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2377 const aarch64_opnd_info *operand,
2378 int need_libopcodes_p)
2379 {
2380 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2381 reloc->opnd = operand->type;
2382 if (need_libopcodes_p)
2383 reloc->need_libopcodes_p = 1;
2384 };
2385
2386 /* Return TRUE if the instruction needs to be fixed up later internally by
2387 the GAS; otherwise return FALSE. */
2388
2389 static inline bfd_boolean
2390 aarch64_gas_internal_fixup_p (void)
2391 {
2392 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2393 }
2394
2395 /* Assign the immediate value to the relevant field in *OPERAND if
2396 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2397 needs an internal fixup in a later stage.
2398 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2399 IMM.VALUE that may get assigned with the constant. */
2400 static inline void
2401 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2402 aarch64_opnd_info *operand,
2403 int addr_off_p,
2404 int need_libopcodes_p,
2405 int skip_p)
2406 {
2407 if (reloc->exp.X_op == O_constant)
2408 {
2409 if (addr_off_p)
2410 operand->addr.offset.imm = reloc->exp.X_add_number;
2411 else
2412 operand->imm.value = reloc->exp.X_add_number;
2413 reloc->type = BFD_RELOC_UNUSED;
2414 }
2415 else
2416 {
2417 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2418 /* Tell libopcodes to ignore this operand or not. This is helpful
2419 when one of the operands needs to be fixed up later but we need
2420 libopcodes to check the other operands. */
2421 operand->skip = skip_p;
2422 }
2423 }
2424
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  /* Modifier name as written in assembler source (without colons).  */
  const char *name;
  /* Non-zero if the resulting relocation is PC-relative.  */
  int pc_rel;
  /* Relocation to use for each instruction class this modifier may
     decorate; 0 means the modifier is invalid on that class.  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};
2444
2445 static struct reloc_table_entry reloc_table[] = {
2446 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2447 {"lo12", 0,
2448 0, /* adr_type */
2449 0,
2450 0,
2451 BFD_RELOC_AARCH64_ADD_LO12,
2452 BFD_RELOC_AARCH64_LDST_LO12,
2453 0},
2454
2455 /* Higher 21 bits of pc-relative page offset: ADRP */
2456 {"pg_hi21", 1,
2457 0, /* adr_type */
2458 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2459 0,
2460 0,
2461 0,
2462 0},
2463
2464 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2465 {"pg_hi21_nc", 1,
2466 0, /* adr_type */
2467 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2468 0,
2469 0,
2470 0,
2471 0},
2472
2473 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2474 {"abs_g0", 0,
2475 0, /* adr_type */
2476 0,
2477 BFD_RELOC_AARCH64_MOVW_G0,
2478 0,
2479 0,
2480 0},
2481
2482 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2483 {"abs_g0_s", 0,
2484 0, /* adr_type */
2485 0,
2486 BFD_RELOC_AARCH64_MOVW_G0_S,
2487 0,
2488 0,
2489 0},
2490
2491 /* Less significant bits 0-15 of address/value: MOVK, no check */
2492 {"abs_g0_nc", 0,
2493 0, /* adr_type */
2494 0,
2495 BFD_RELOC_AARCH64_MOVW_G0_NC,
2496 0,
2497 0,
2498 0},
2499
2500 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2501 {"abs_g1", 0,
2502 0, /* adr_type */
2503 0,
2504 BFD_RELOC_AARCH64_MOVW_G1,
2505 0,
2506 0,
2507 0},
2508
2509 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2510 {"abs_g1_s", 0,
2511 0, /* adr_type */
2512 0,
2513 BFD_RELOC_AARCH64_MOVW_G1_S,
2514 0,
2515 0,
2516 0},
2517
2518 /* Less significant bits 16-31 of address/value: MOVK, no check */
2519 {"abs_g1_nc", 0,
2520 0, /* adr_type */
2521 0,
2522 BFD_RELOC_AARCH64_MOVW_G1_NC,
2523 0,
2524 0,
2525 0},
2526
2527 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2528 {"abs_g2", 0,
2529 0, /* adr_type */
2530 0,
2531 BFD_RELOC_AARCH64_MOVW_G2,
2532 0,
2533 0,
2534 0},
2535
2536 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2537 {"abs_g2_s", 0,
2538 0, /* adr_type */
2539 0,
2540 BFD_RELOC_AARCH64_MOVW_G2_S,
2541 0,
2542 0,
2543 0},
2544
2545 /* Less significant bits 32-47 of address/value: MOVK, no check */
2546 {"abs_g2_nc", 0,
2547 0, /* adr_type */
2548 0,
2549 BFD_RELOC_AARCH64_MOVW_G2_NC,
2550 0,
2551 0,
2552 0},
2553
2554 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2555 {"abs_g3", 0,
2556 0, /* adr_type */
2557 0,
2558 BFD_RELOC_AARCH64_MOVW_G3,
2559 0,
2560 0,
2561 0},
2562
2563 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2564 {"prel_g0", 1,
2565 0, /* adr_type */
2566 0,
2567 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2568 0,
2569 0,
2570 0},
2571
2572 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2573 {"prel_g0_nc", 1,
2574 0, /* adr_type */
2575 0,
2576 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2577 0,
2578 0,
2579 0},
2580
2581 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2582 {"prel_g1", 1,
2583 0, /* adr_type */
2584 0,
2585 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2586 0,
2587 0,
2588 0},
2589
2590 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2591 {"prel_g1_nc", 1,
2592 0, /* adr_type */
2593 0,
2594 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2595 0,
2596 0,
2597 0},
2598
2599 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2600 {"prel_g2", 1,
2601 0, /* adr_type */
2602 0,
2603 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2604 0,
2605 0,
2606 0},
2607
2608 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2609 {"prel_g2_nc", 1,
2610 0, /* adr_type */
2611 0,
2612 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2613 0,
2614 0,
2615 0},
2616
2617 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2618 {"prel_g3", 1,
2619 0, /* adr_type */
2620 0,
2621 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2622 0,
2623 0,
2624 0},
2625
2626 /* Get to the page containing GOT entry for a symbol. */
2627 {"got", 1,
2628 0, /* adr_type */
2629 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2630 0,
2631 0,
2632 0,
2633 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2634
2635 /* 12 bit offset into the page containing GOT entry for that symbol. */
2636 {"got_lo12", 0,
2637 0, /* adr_type */
2638 0,
2639 0,
2640 0,
2641 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2642 0},
2643
2644 /* 0-15 bits of address/value: MOVk, no check. */
2645 {"gotoff_g0_nc", 0,
2646 0, /* adr_type */
2647 0,
2648 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2649 0,
2650 0,
2651 0},
2652
2653 /* Most significant bits 16-31 of address/value: MOVZ. */
2654 {"gotoff_g1", 0,
2655 0, /* adr_type */
2656 0,
2657 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2658 0,
2659 0,
2660 0},
2661
2662 /* 15 bit offset into the page containing GOT entry for that symbol. */
2663 {"gotoff_lo15", 0,
2664 0, /* adr_type */
2665 0,
2666 0,
2667 0,
2668 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2669 0},
2670
2671 /* Get to the page containing GOT TLS entry for a symbol */
2672 {"gottprel_g0_nc", 0,
2673 0, /* adr_type */
2674 0,
2675 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2676 0,
2677 0,
2678 0},
2679
2680 /* Get to the page containing GOT TLS entry for a symbol */
2681 {"gottprel_g1", 0,
2682 0, /* adr_type */
2683 0,
2684 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2685 0,
2686 0,
2687 0},
2688
2689 /* Get to the page containing GOT TLS entry for a symbol */
2690 {"tlsgd", 0,
2691 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2692 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2693 0,
2694 0,
2695 0,
2696 0},
2697
2698 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2699 {"tlsgd_lo12", 0,
2700 0, /* adr_type */
2701 0,
2702 0,
2703 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2704 0,
2705 0},
2706
2707 /* Lower 16 bits address/value: MOVk. */
2708 {"tlsgd_g0_nc", 0,
2709 0, /* adr_type */
2710 0,
2711 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2712 0,
2713 0,
2714 0},
2715
2716 /* Most significant bits 16-31 of address/value: MOVZ. */
2717 {"tlsgd_g1", 0,
2718 0, /* adr_type */
2719 0,
2720 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2721 0,
2722 0,
2723 0},
2724
2725 /* Get to the page containing GOT TLS entry for a symbol */
2726 {"tlsdesc", 0,
2727 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2728 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2729 0,
2730 0,
2731 0,
2732 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2733
2734 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2735 {"tlsdesc_lo12", 0,
2736 0, /* adr_type */
2737 0,
2738 0,
2739 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2740 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2741 0},
2742
2743 /* Get to the page containing GOT TLS entry for a symbol.
2744 The same as GD, we allocate two consecutive GOT slots
2745 for module index and module offset, the only difference
2746 with GD is the module offset should be initialized to
2747 zero without any outstanding runtime relocation. */
2748 {"tlsldm", 0,
2749 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2750 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2751 0,
2752 0,
2753 0,
2754 0},
2755
2756 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2757 {"tlsldm_lo12_nc", 0,
2758 0, /* adr_type */
2759 0,
2760 0,
2761 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2762 0,
2763 0},
2764
2765 /* 12 bit offset into the module TLS base address. */
2766 {"dtprel_lo12", 0,
2767 0, /* adr_type */
2768 0,
2769 0,
2770 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2771 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2772 0},
2773
2774 /* Same as dtprel_lo12, no overflow check. */
2775 {"dtprel_lo12_nc", 0,
2776 0, /* adr_type */
2777 0,
2778 0,
2779 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2780 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2781 0},
2782
2783 /* bits[23:12] of offset to the module TLS base address. */
2784 {"dtprel_hi12", 0,
2785 0, /* adr_type */
2786 0,
2787 0,
2788 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2789 0,
2790 0},
2791
2792 /* bits[15:0] of offset to the module TLS base address. */
2793 {"dtprel_g0", 0,
2794 0, /* adr_type */
2795 0,
2796 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2797 0,
2798 0,
2799 0},
2800
2801 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2802 {"dtprel_g0_nc", 0,
2803 0, /* adr_type */
2804 0,
2805 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2806 0,
2807 0,
2808 0},
2809
2810 /* bits[31:16] of offset to the module TLS base address. */
2811 {"dtprel_g1", 0,
2812 0, /* adr_type */
2813 0,
2814 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2815 0,
2816 0,
2817 0},
2818
2819 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2820 {"dtprel_g1_nc", 0,
2821 0, /* adr_type */
2822 0,
2823 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2824 0,
2825 0,
2826 0},
2827
2828 /* bits[47:32] of offset to the module TLS base address. */
2829 {"dtprel_g2", 0,
2830 0, /* adr_type */
2831 0,
2832 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2833 0,
2834 0,
2835 0},
2836
2837 /* Lower 16 bit offset into GOT entry for a symbol */
2838 {"tlsdesc_off_g0_nc", 0,
2839 0, /* adr_type */
2840 0,
2841 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2842 0,
2843 0,
2844 0},
2845
2846 /* Higher 16 bit offset into GOT entry for a symbol */
2847 {"tlsdesc_off_g1", 0,
2848 0, /* adr_type */
2849 0,
2850 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2851 0,
2852 0,
2853 0},
2854
2855 /* Get to the page containing GOT TLS entry for a symbol */
2856 {"gottprel", 0,
2857 0, /* adr_type */
2858 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2859 0,
2860 0,
2861 0,
2862 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2863
2864 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2865 {"gottprel_lo12", 0,
2866 0, /* adr_type */
2867 0,
2868 0,
2869 0,
2870 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2871 0},
2872
2873 /* Get tp offset for a symbol. */
2874 {"tprel", 0,
2875 0, /* adr_type */
2876 0,
2877 0,
2878 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2879 0,
2880 0},
2881
2882 /* Get tp offset for a symbol. */
2883 {"tprel_lo12", 0,
2884 0, /* adr_type */
2885 0,
2886 0,
2887 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2888 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2889 0},
2890
2891 /* Get tp offset for a symbol. */
2892 {"tprel_hi12", 0,
2893 0, /* adr_type */
2894 0,
2895 0,
2896 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2897 0,
2898 0},
2899
2900 /* Get tp offset for a symbol. */
2901 {"tprel_lo12_nc", 0,
2902 0, /* adr_type */
2903 0,
2904 0,
2905 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2906 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2907 0},
2908
2909 /* Most significant bits 32-47 of address/value: MOVZ. */
2910 {"tprel_g2", 0,
2911 0, /* adr_type */
2912 0,
2913 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2914 0,
2915 0,
2916 0},
2917
2918 /* Most significant bits 16-31 of address/value: MOVZ. */
2919 {"tprel_g1", 0,
2920 0, /* adr_type */
2921 0,
2922 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2923 0,
2924 0,
2925 0},
2926
2927 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2928 {"tprel_g1_nc", 0,
2929 0, /* adr_type */
2930 0,
2931 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2932 0,
2933 0,
2934 0},
2935
2936 /* Most significant bits 0-15 of address/value: MOVZ. */
2937 {"tprel_g0", 0,
2938 0, /* adr_type */
2939 0,
2940 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2941 0,
2942 0,
2943 0},
2944
2945 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2946 {"tprel_g0_nc", 0,
2947 0, /* adr_type */
2948 0,
2949 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2950 0,
2951 0,
2952 0},
2953
2954 /* 15bit offset from got entry to base address of GOT table. */
2955 {"gotpage_lo15", 0,
2956 0,
2957 0,
2958 0,
2959 0,
2960 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2961 0},
2962
2963 /* 14bit offset from got entry to base address of GOT table. */
2964 {"gotpage_lo14", 0,
2965 0,
2966 0,
2967 0,
2968 0,
2969 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2970 0},
2971 };
2972
2973 /* Given the address of a pointer pointing to the textual name of a
2974 relocation as may appear in assembler source, attempt to find its
2975 details in reloc_table. The pointer will be updated to the character
2976 after the trailing colon. On failure, NULL will be returned;
2977 otherwise return the reloc_table_entry. */
2978
2979 static struct reloc_table_entry *
2980 find_reloc_table_entry (char **str)
2981 {
2982 unsigned int i;
2983 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2984 {
2985 int length = strlen (reloc_table[i].name);
2986
2987 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2988 && (*str)[length] == ':')
2989 {
2990 *str += (length + 1);
2991 return &reloc_table[i];
2992 }
2993 }
2994
2995 return NULL;
2996 }
2997
/* Mode argument to parse_shift and parser_shifter_operand.  Selects
   which shift/extend modifiers are acceptable in the current operand
   context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3012
/* Parse a <shift> operator on an AArch64 data processing instruction.
   Return TRUE on success; otherwise return FALSE.

   *STR points at the shift mnemonic (e.g. "lsl"); on success it is
   advanced past the whole shifter (operator plus optional amount).
   MODE restricts which operators and amount forms are acceptable; see
   enum parse_shift_mode.  On success, OPERAND->shifter is filled in
   with the operator kind and amount.  */
static bfd_boolean
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan the alphabetic operator name.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return FALSE;
    }

  /* Look the name up in the shift-operator hash table.  */
  shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return FALSE;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only valid in the SIMD modified-immediate context.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return FALSE;
    }

  /* MUL is only valid in the SVE MUL / MUL VL contexts.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return FALSE;
    }

  /* Mode-specific validation of the operator kind.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return FALSE;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return FALSE;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  The amount is absent for a register-offset
     shifter terminated by ']' and for MUL VL (which has no written
     amount at all).  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* Only extend operators may omit the amount, and only when no
	 bare '#' prefix was written.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return FALSE;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return FALSE;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return FALSE;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return TRUE;
}
3185
3186 /* Parse a <shifter_operand> for a data processing instruction:
3187
3188 #<immediate>
3189 #<immediate>, LSL #imm
3190
3191 Validation of immediate operands is deferred to md_apply_fix.
3192
3193 Return TRUE on success; otherwise return FALSE. */
3194
3195 static bfd_boolean
3196 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3197 enum parse_shift_mode mode)
3198 {
3199 char *p;
3200
3201 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3202 return FALSE;
3203
3204 p = *str;
3205
3206 /* Accept an immediate expression. */
3207 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3208 return FALSE;
3209
3210 /* Accept optional LSL for arithmetic immediate values. */
3211 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3212 if (! parse_shift (&p, operand, SHIFTED_LSL))
3213 return FALSE;
3214
3215 /* Not accept any shifter for logical immediate values. */
3216 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3217 && parse_shift (&p, operand, mode))
3218 {
3219 set_syntax_error (_("unexpected shift operator"));
3220 return FALSE;
3221 }
3222
3223 *str = p;
3224 return TRUE;
3225 }
3226
3227 /* Parse a <shifter_operand> for a data processing instruction:
3228
3229 <Rm>
3230 <Rm>, <shift>
3231 #<immediate>
3232 #<immediate>, LSL #imm
3233
3234 where <shift> is handled by parse_shift above, and the last two
3235 cases are handled by the function above.
3236
3237 Validation of immediate operands is deferred to md_apply_fix.
3238
3239 Return TRUE on success; otherwise return FALSE. */
3240
3241 static bfd_boolean
3242 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3243 enum parse_shift_mode mode)
3244 {
3245 const reg_entry *reg;
3246 aarch64_opnd_qualifier_t qualifier;
3247 enum aarch64_operand_class opd_class
3248 = aarch64_get_operand_class (operand->type);
3249
3250 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3251 if (reg)
3252 {
3253 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3254 {
3255 set_syntax_error (_("unexpected register in the immediate operand"));
3256 return FALSE;
3257 }
3258
3259 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3260 {
3261 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3262 return FALSE;
3263 }
3264
3265 operand->reg.regno = reg->number;
3266 operand->qualifier = qualifier;
3267
3268 /* Accept optional shift operation on register. */
3269 if (! skip_past_comma (str))
3270 return TRUE;
3271
3272 if (! parse_shift (str, operand, mode))
3273 return FALSE;
3274
3275 return TRUE;
3276 }
3277 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3278 {
3279 set_syntax_error
3280 (_("integer register expected in the extended/shifted operand "
3281 "register"));
3282 return FALSE;
3283 }
3284
3285 /* We have a shifted immediate variable. */
3286 return parse_shifter_operand_imm (str, operand, mode);
3287 }
3288
/* Parse a <shifter_operand> that may carry a leading relocation
   modifier, e.g. "#:lo12:symbol".  On a match the ADD-class relocation
   of the modifier is recorded in inst.reloc.  Anything without the
   "#:" / ":" introducer is handed to parse_shifter_operand.

   Return TRUE on success; return FALSE otherwise.  */

static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step past the introducer onto the modifier name.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      /* This context uses the ADD-class relocation; a modifier without
	 one is not valid here.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3349
3350 /* Parse all forms of an address expression. Information is written
3351 to *OPERAND and/or inst.reloc.
3352
3353 The A64 instruction set has the following addressing modes:
3354
3355 Offset
3356 [base] // in SIMD ld/st structure
3357 [base{,#0}] // in ld/st exclusive
3358 [base{,#imm}]
3359 [base,Xm{,LSL #imm}]
3360 [base,Xm,SXTX {#imm}]
3361 [base,Wm,(S|U)XTW {#imm}]
3362 Pre-indexed
3363 [base,#imm]!
3364 Post-indexed
3365 [base],#imm
3366 [base],Xm // in SIMD ld/st structure
3367 PC-relative (literal)
3368 label
3369 SVE:
3370 [base,#imm,MUL VL]
3371 [base,Zm.D{,LSL #imm}]
3372 [base,Zm.S,(S|U)XTW {#imm}]
3373 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3374 [Zn.S,#imm]
3375 [Zn.D,#imm]
3376 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3377 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3378 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3379
3380 (As a convenience, the notation "=immediate" is permitted in conjunction
3381 with the pc-relative literal load instructions to automatically place an
3382 immediate value or symbolic address in a nearby literal pool and generate
3383 a hidden label which references it.)
3384
3385 Upon a successful parsing, the address structure in *OPERAND will be
3386 filled in the following way:
3387
3388 .base_regno = <base>
3389 .offset.is_reg // 1 if the offset is a register
3390 .offset.imm = <imm>
3391 .offset.regno = <Rm>
3392
3393 For different addressing modes defined in the A64 ISA:
3394
3395 Offset
3396 .pcrel=0; .preind=1; .postind=0; .writeback=0
3397 Pre-indexed
3398 .pcrel=0; .preind=1; .postind=0; .writeback=1
3399 Post-indexed
3400 .pcrel=0; .preind=0; .postind=1; .writeback=1
3401 PC-relative (literal)
3402 .pcrel=1; .preind=1; .postind=0; .writeback=0
3403
3404 The shift/extension information, if any, will be stored in .shifter.
3405 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3406 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3407 corresponding register.
3408
3409 BASE_TYPE says which types of base register should be accepted and
3410 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3411 is the type of shifter that is allowed for immediate offsets,
3412 or SHIFTED_NONE if none.
3413
3414 In all other respects, it is the caller's responsibility to check
3415 for addressing modes not supported by the instruction, and to set
3416 inst.reloc.type. */
3417
static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  /* Start with no qualifiers; filled in as registers are parsed.  */
  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  /* ADR uses a different relocation from the LDR-literal forms.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  /* Parse the base register; its qualifier is returned through
     BASE_QUALIFIER.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return FALSE;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]   */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* These shifters require a 64-bit (or matching-size)
		 offset register.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return FALSE;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW only goes with a 32-bit offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol>  */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:  */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr>  */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return FALSE;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return FALSE;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      /* Post-index register offset must be a 64-bit GPR.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return FALSE;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  /* Reject [Rn]!   */
	  set_syntax_error (_("missing offset in the pre-indexed address"));
	  return FALSE;
	}

      operand->addr.preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }

  *str = p;
  return TRUE;
}
3690
/* Parse a base AArch64 address (as opposed to an SVE one).  Return TRUE
   on success.  */
static bfd_boolean
parse_address (char **str, aarch64_opnd_info *operand)
{
  aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
  /* Base: 64-bit GPR or SP; offset: GPR or zero register; no immediate
     shifter allowed.  The parsed qualifiers are discarded.  */
  return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
			     REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
}
3700
/* Parse an address in which SVE vector registers and MUL VL are allowed.
   The arguments have the same meaning as for parse_address_main.
   Return TRUE on success.  */
static bfd_boolean
parse_sve_address (char **str, aarch64_opnd_info *operand,
		   aarch64_opnd_qualifier_t *base_qualifier,
		   aarch64_opnd_qualifier_t *offset_qualifier)
{
  /* SVE register classes for base and offset; immediate offsets may be
     scaled by MUL VL.  */
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
			     SHIFTED_MUL_VL);
}
3713
3714 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3715 Return TRUE on success; otherwise return FALSE. */
3716 static bfd_boolean
3717 parse_half (char **str, int *internal_fixup_p)
3718 {
3719 char *p = *str;
3720
3721 skip_past_char (&p, '#');
3722
3723 gas_assert (internal_fixup_p);
3724 *internal_fixup_p = 0;
3725
3726 if (*p == ':')
3727 {
3728 struct reloc_table_entry *entry;
3729
3730 /* Try to parse a relocation. Anything else is an error. */
3731 ++p;
3732 if (!(entry = find_reloc_table_entry (&p)))
3733 {
3734 set_syntax_error (_("unknown relocation modifier"));
3735 return FALSE;
3736 }
3737
3738 if (entry->movw_type == 0)
3739 {
3740 set_syntax_error
3741 (_("this relocation modifier is not allowed on this instruction"));
3742 return FALSE;
3743 }
3744
3745 inst.reloc.type = entry->movw_type;
3746 }
3747 else
3748 *internal_fixup_p = 1;
3749
3750 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3751 return FALSE;
3752
3753 *str = p;
3754 return TRUE;
3755 }
3756
3757 /* Parse an operand for an ADRP instruction:
3758 ADRP <Xd>, <label>
3759 Return TRUE on success; otherwise return FALSE. */
3760
3761 static bfd_boolean
3762 parse_adrp (char **str)
3763 {
3764 char *p;
3765
3766 p = *str;
3767 if (*p == ':')
3768 {
3769 struct reloc_table_entry *entry;
3770
3771 /* Try to parse a relocation. Anything else is an error. */
3772 ++p;
3773 if (!(entry = find_reloc_table_entry (&p)))
3774 {
3775 set_syntax_error (_("unknown relocation modifier"));
3776 return FALSE;
3777 }
3778
3779 if (entry->adrp_type == 0)
3780 {
3781 set_syntax_error
3782 (_("this relocation modifier is not allowed on this instruction"));
3783 return FALSE;
3784 }
3785
3786 inst.reloc.type = entry->adrp_type;
3787 }
3788 else
3789 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3790
3791 inst.reloc.pc_rel = 1;
3792
3793 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3794 return FALSE;
3795
3796 *str = p;
3797 return TRUE;
3798 }
3799
3800 /* Miscellaneous. */
3801
3802 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3803 of SIZE tokens in which index I gives the token for field value I,
3804 or is null if field value I is invalid. REG_TYPE says which register
3805 names should be treated as registers rather than as symbolic immediates.
3806
3807 Return true on success, moving *STR past the operand and storing the
3808 field value in *VAL. */
3809
3810 static int
3811 parse_enum_string (char **str, int64_t *val, const char *const *array,
3812 size_t size, aarch64_reg_type reg_type)
3813 {
3814 expressionS exp;
3815 char *p, *q;
3816 size_t i;
3817
3818 /* Match C-like tokens. */
3819 p = q = *str;
3820 while (ISALNUM (*q))
3821 q++;
3822
3823 for (i = 0; i < size; ++i)
3824 if (array[i]
3825 && strncasecmp (array[i], p, q - p) == 0
3826 && array[i][q - p] == 0)
3827 {
3828 *val = i;
3829 *str = q;
3830 return TRUE;
3831 }
3832
3833 if (!parse_immediate_expression (&p, &exp, reg_type))
3834 return FALSE;
3835
3836 if (exp.X_op == O_constant
3837 && (uint64_t) exp.X_add_number < size)
3838 {
3839 *val = exp.X_add_number;
3840 *str = p;
3841 return TRUE;
3842 }
3843
3844 /* Use the default error for this operand. */
3845 return FALSE;
3846 }
3847
3848 /* Parse an option for a preload instruction. Returns the encoding for the
3849 option, or PARSE_FAIL. */
3850
3851 static int
3852 parse_pldop (char **str)
3853 {
3854 char *p, *q;
3855 const struct aarch64_name_value_pair *o;
3856
3857 p = q = *str;
3858 while (ISALNUM (*q))
3859 q++;
3860
3861 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3862 if (!o)
3863 return PARSE_FAIL;
3864
3865 *str = q;
3866 return o->value;
3867 }
3868
3869 /* Parse an option for a barrier instruction. Returns the encoding for the
3870 option, or PARSE_FAIL. */
3871
3872 static int
3873 parse_barrier (char **str)
3874 {
3875 char *p, *q;
3876 const asm_barrier_opt *o;
3877
3878 p = q = *str;
3879 while (ISALPHA (*q))
3880 q++;
3881
3882 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3883 if (!o)
3884 return PARSE_FAIL;
3885
3886 *str = q;
3887 return o->value;
3888 }
3889
3890 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
3891 return 0 if successful. Otherwise return PARSE_FAIL. */
3892
3893 static int
3894 parse_barrier_psb (char **str,
3895 const struct aarch64_name_value_pair ** hint_opt)
3896 {
3897 char *p, *q;
3898 const struct aarch64_name_value_pair *o;
3899
3900 p = q = *str;
3901 while (ISALPHA (*q))
3902 q++;
3903
3904 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3905 if (!o)
3906 {
3907 set_fatal_syntax_error
3908 ( _("unknown or missing option to PSB"));
3909 return PARSE_FAIL;
3910 }
3911
3912 if (o->value != 0x11)
3913 {
3914 /* PSB only accepts option name 'CSYNC'. */
3915 set_syntax_error
3916 (_("the specified option is not accepted for PSB"));
3917 return PARSE_FAIL;
3918 }
3919
3920 *str = q;
3921 *hint_opt = o;
3922 return 0;
3923 }
3924
3925 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3926 Returns the encoding for the option, or PARSE_FAIL.
3927
3928 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3929 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3930
3931 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3932 field, otherwise as a system register.
3933 */
3934
static int
parse_sys_reg (char **str, struct hash_control *sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_reg *o;
  int value;

  /* Copy the name into BUF, lower-cased; writes stop at 31 characters
     but Q still advances over the whole name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';
  /* Assert that BUF be large enough.  */
  gas_assert (p - buf == q - *str);

  o = hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields into the encoding used for MSR/MRS.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: diagnose (but do not reject) names the selected
	 processor does not support, and warn about deprecated ones.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
3992
3993 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3994 for the option, or NULL. */
3995
3996 static const aarch64_sys_ins_reg *
3997 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3998 {
3999 char *p, *q;
4000 char buf[32];
4001 const aarch64_sys_ins_reg *o;
4002
4003 p = buf;
4004 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4005 if (p < buf + 31)
4006 *p++ = TOLOWER (*q);
4007 *p = '\0';
4008
4009 o = hash_find (sys_ins_regs, buf);
4010 if (!o)
4011 return NULL;
4012
4013 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
4014 as_bad (_("selected processor does not support system register "
4015 "name '%s'"), buf);
4016
4017 *str = q;
4018 return o;
4019 }
4020 \f
/* Parsing helper macros used by the operand parser: each consumes one
   syntactic element at STR and jumps to the local label `failure' on
   error.  */

/* Consume the single character CHR or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of type REGTYPE into VAL or fail.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of type REG_TYPE, storing its
   number and qualifier into INFO, or fail.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or fail.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic name from ARRAY into VAL or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; fail if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4072 \f
/* Encode the 12-bit immediate of an add/sub immediate instruction;
   imm12 occupies bits [21:10].  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  uint32_t field = imm;
  return field << 10;
}
4079
/* Encode the shift-amount selector of an add/sub immediate instruction;
   the field sits at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  uint32_t field = cnt;
  return field << 22;
}
4086
4087
/* Encode the immediate of an ADR instruction: the low two bits go to
   immlo (bits [30:29]) and the remaining bits to immhi (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t lo = imm & 0x3;		/* [1:0]  -> [30:29] */
  uint32_t hi = imm & (0x7ffff << 2);	/* [20:2] -> [23:5]  */
  return (lo << 29) | (hi << 3);
}
4095
/* Encode the 16-bit immediate of a move-wide instruction; imm16 occupies
   bits [20:5].  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  uint32_t field = imm;
  return field << 5;
}
4102
/* Encode the 26-bit offset of an unconditional branch; the offset is
   masked into bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t mask = (1u << 26) - 1;
  return ofs & mask;
}
4109
/* Encode the 19-bit offset of a conditional branch or compare & branch;
   the offset is masked and placed at bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t mask = (1u << 19) - 1;
  return (ofs & mask) << 5;
}
4116
/* Encode the 19-bit offset of a load-literal instruction; the offset is
   masked and placed at bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t mask = (1u << 19) - 1;
  return (ofs & mask) << 5;
}
4123
/* Encode the 14-bit offset of a test & branch instruction; the offset is
   masked and placed at bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t mask = (1u << 14) - 1;
  return (ofs & mask) << 5;
}
4130
/* Encode the 16-bit immediate of SVC/HVC/SMC; imm16 occupies
   bits [20:5].  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  uint32_t field = imm;
  return field << 5;
}
4137
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling the op
   bit (bit 30) of the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
4144
/* Convert a MOVN-form opcode to the MOVZ form by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;
  return opcode | opc_bit;
}
4150
/* Convert a MOVZ-form opcode to the MOVN form by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;
  return opcode & ~opc_bit;
}
4156
4157 /* Overall per-instruction processing. */
4158
4159 /* We need to be able to fix up arbitrary expressions in some statements.
4160 This is so that we can handle symbols that are an arbitrary distance from
4161 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4162 which returns part of an address in a form which will be valid for
4163 a data instruction. We do this by pushing the expression into a symbol
4164 in the expr_section, and creating a fix for that. */
4165
4166 static fixS *
4167 fix_new_aarch64 (fragS * frag,
4168 int where,
4169 short int size, expressionS * exp, int pc_rel, int reloc)
4170 {
4171 fixS *new_fix;
4172
4173 switch (exp->X_op)
4174 {
4175 case O_constant:
4176 case O_symbol:
4177 case O_add:
4178 case O_subtract:
4179 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4180 break;
4181
4182 default:
4183 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4184 pc_rel, reloc);
4185 break;
4186 }
4187 return new_fix;
4188 }
4189 \f
4190 /* Diagnostics on operands errors. */
4191
4192 /* By default, output verbose error message.
4193 Disable the verbose error message by -mno-verbose-error. */
4194 static int verbose_error_p = 1;
4195
4196 #ifdef DEBUG_AARCH64
4197 /* N.B. this is only for the purpose of debugging. */
4198 const char* operand_mismatch_kind_names[] =
4199 {
4200 "AARCH64_OPDE_NIL",
4201 "AARCH64_OPDE_RECOVERABLE",
4202 "AARCH64_OPDE_SYNTAX_ERROR",
4203 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
4204 "AARCH64_OPDE_INVALID_VARIANT",
4205 "AARCH64_OPDE_OUT_OF_RANGE",
4206 "AARCH64_OPDE_UNALIGNED",
4207 "AARCH64_OPDE_REG_LIST",
4208 "AARCH64_OPDE_OTHER_ERROR",
4209 };
4210 #endif /* DEBUG_AARCH64 */
4211
4212 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4213
4214 When multiple errors of different kinds are found in the same assembly
4215 line, only the error of the highest severity will be picked up for
4216 issuing the diagnostics. */
4217
static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The `lhs > rhs' comparison below relies on the enumerators being
     declared in order of increasing severity; assert that ordering.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4232
4233 /* Helper routine to get the mnemonic name from the assembly instruction
4234 line; should only be called for the diagnosis purpose, as there is
4235 string copy operation involved, which may affect the runtime
4236 performance if used in elsewhere. */
4237
static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4261
4262 static void
4263 reset_aarch64_instruction (aarch64_instruction *instruction)
4264 {
4265 memset (instruction, '\0', sizeof (aarch64_instruction));
4266 instruction->reloc.type = BFD_RELOC_UNUSED;
4267 }
4268
4269 /* Data structures storing one user error in the assembly code related to
4270 operands. */
4271
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode template the error is for.  */
  aarch64_operand_error detail;		/* The recorded error itself.  */
  struct operand_error_record *next;	/* Singly-linked list link.  */
};

typedef struct operand_error_record operand_error_record;

/* Head and tail of the list of per-opcode error records for the current
   assembly line.  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4300
4301 /* Initialize the data structure that stores the operand mismatch
4302 information on assembling one line of the assembly code. */
4303 static void
4304 init_operand_error_report (void)
4305 {
4306 if (operand_error_report.head != NULL)
4307 {
4308 gas_assert (operand_error_report.tail != NULL);
4309 operand_error_report.tail->next = free_opnd_error_record_nodes;
4310 free_opnd_error_record_nodes = operand_error_report.head;
4311 operand_error_report.head = NULL;
4312 operand_error_report.tail = NULL;
4313 return;
4314 }
4315 gas_assert (operand_error_report.tail == NULL);
4316 }
4317
4318 /* Return TRUE if some operand error has been recorded during the
4319 parsing of the current assembly line using the opcode *OPCODE;
4320 otherwise return FALSE. */
4321 static inline bfd_boolean
4322 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4323 {
4324 operand_error_record *record = operand_error_report.head;
4325 return record && record->opcode == opcode;
4326 }
4327
4328 /* Add the error record *NEW_RECORD to operand_error_report. The record's
4329 OPCODE field is initialized with OPCODE.
4330 N.B. only one record for each opcode, i.e. the maximum of one error is
4331 recorded for each instruction template. */
4332
static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* RECORD aliases the list head; after the first branch below it is
     the record that will receive NEW_RECORD's detail.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  record->detail = new_record->detail;
}
4379
4380 static inline void
4381 record_operand_error_info (const aarch64_opcode *opcode,
4382 aarch64_operand_error *error_info)
4383 {
4384 operand_error_record record;
4385 record.opcode = opcode;
4386 record.detail = *error_info;
4387 add_operand_error_record (&record);
4388 }
4389
4390 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4391 error message *ERROR, for operand IDX (count from 0). */
4392
4393 static void
4394 record_operand_error (const aarch64_opcode *opcode, int idx,
4395 enum aarch64_operand_error_kind kind,
4396 const char* error)
4397 {
4398 aarch64_operand_error info;
4399 memset(&info, 0, sizeof (info));
4400 info.index = idx;
4401 info.kind = kind;
4402 info.error = error;
4403 info.non_fatal = FALSE;
4404 record_operand_error_info (opcode, &info);
4405 }
4406
4407 static void
4408 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4409 enum aarch64_operand_error_kind kind,
4410 const char* error, const int *extra_data)
4411 {
4412 aarch64_operand_error info;
4413 info.index = idx;
4414 info.kind = kind;
4415 info.error = error;
4416 info.data[0] = extra_data[0];
4417 info.data[1] = extra_data[1];
4418 info.data[2] = extra_data[2];
4419 info.non_fatal = FALSE;
4420 record_operand_error_info (opcode, &info);
4421 }
4422
4423 static void
4424 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4425 const char* error, int lower_bound,
4426 int upper_bound)
4427 {
4428 int data[3] = {lower_bound, upper_bound, 0};
4429 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4430 error, data);
4431 }
4432
4433 /* Remove the operand error record for *OPCODE. */
4434 static void ATTRIBUTE_UNUSED
4435 remove_operand_error_record (const aarch64_opcode *opcode)
4436 {
4437 if (opcode_has_operand_error_p (opcode))
4438 {
4439 operand_error_record* record = operand_error_report.head;
4440 gas_assert (record != NULL && operand_error_report.tail != NULL);
4441 operand_error_report.head = record->next;
4442 record->next = free_opnd_error_record_nodes;
4443 free_opnd_error_record_nodes = record;
4444 if (operand_error_report.head == NULL)
4445 {
4446 gas_assert (operand_error_report.tail == record);
4447 operand_error_report.tail = NULL;
4448 }
4449 }
4450 }
4451
4452 /* Given the instruction in *INSTR, return the index of the best matched
4453 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4454
4455 Return -1 if there is no qualifier sequence; return the first match
4456 if there is multiple matches found. */
4457
4458 static int
4459 find_best_match (const aarch64_inst *instr,
4460 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4461 {
4462 int i, num_opnds, max_num_matched, idx;
4463
4464 num_opnds = aarch64_num_of_operands (instr->opcode);
4465 if (num_opnds == 0)
4466 {
4467 DEBUG_TRACE ("no operand");
4468 return -1;
4469 }
4470
4471 max_num_matched = 0;
4472 idx = 0;
4473
4474 /* For each pattern. */
4475 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4476 {
4477 int j, num_matched;
4478 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4479
4480 /* Most opcodes has much fewer patterns in the list. */
4481 if (empty_qualifier_sequence_p (qualifiers))
4482 {
4483 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4484 break;
4485 }
4486
4487 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4488 if (*qualifiers == instr->operands[j].qualifier)
4489 ++num_matched;
4490
4491 if (num_matched > max_num_matched)
4492 {
4493 max_num_matched = num_matched;
4494 idx = i;
4495 }
4496 }
4497
4498 DEBUG_TRACE ("return with %d", idx);
4499 return idx;
4500 }
4501
4502 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4503 corresponding operands in *INSTR. */
4504
4505 static inline void
4506 assign_qualifier_sequence (aarch64_inst *instr,
4507 const aarch64_opnd_qualifier_t *qualifiers)
4508 {
4509 int i = 0;
4510 int num_opnds = aarch64_num_of_operands (instr->opcode);
4511 gas_assert (num_opnds);
4512 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4513 instr->operands[i].qualifier = *qualifiers;
4514 }
4515
4516 /* Print operands for the diagnosis purpose. */
4517
static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  /* Each operand's text is appended to BUF, preceded by " " for the
     first operand and ", " for the rest.  The caller provides BUF.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL);

      /* Delimiter.  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  */
      strcat (buf, str);
    }
}
4549
4550 /* Send to stderr a string as information. */
4551
static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  /* Prefix the message with the current file/line, when known.  */
  file = as_where (&line);
  if (file)
    {
      if (line != 0)
	fprintf (stderr, "%s:%u: ", file, line);
      else
	fprintf (stderr, "%s: ", file);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4573
/* Output one operand error record.

   RECORD describes a single operand error collected while assembling the
   instruction text STR.  The diagnostic is emitted through as_warn when
   the error is marked non-fatal, otherwise through as_bad.  For
   AARCH64_OPDE_INVALID_VARIANT in verbose mode, the source line is
   re-parsed and re-printed with corrected operand qualifiers to suggest
   valid alternatives.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  /* Index of the offending operand; -1 means "unknown operand".  */
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Route the message through as_warn or as_bad depending on severity.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Parsing must succeed here: the operands were
	     already parsed once when the error was first detected.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail -- this is the invalid-variant
	     case -- hence the assertion on !result.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_(" did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_(" %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_(" other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_(" %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the inclusive lower/upper bound; when they
	 are equal there is only one permitted value.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected number of registers.  */
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] holds the required alignment.  */
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4750
4751 /* Process and output the error message about the operand mismatching.
4752
4753 When this function is called, the operand error information had
4754 been collected for an assembly line and there will be multiple
4755 errors in the case of multiple instruction templates; output the
4756 error message that most closely describes the problem.
4757
4758 The errors to be printed can be filtered on printing all errors
4759 or only non-fatal errors. This distinction has to be made because
4760 the error buffer may already be filled with fatal errors we don't want to
4761 print due to the different instruction templates. */
4762
4763 static void
4764 output_operand_error_report (char *str, bfd_boolean non_fatal_only)
4765 {
4766 int largest_error_pos;
4767 const char *msg = NULL;
4768 enum aarch64_operand_error_kind kind;
4769 operand_error_record *curr;
4770 operand_error_record *head = operand_error_report.head;
4771 operand_error_record *record = NULL;
4772
4773 /* No error to report. */
4774 if (head == NULL)
4775 return;
4776
4777 gas_assert (head != NULL && operand_error_report.tail != NULL);
4778
4779 /* Only one error. */
4780 if (head == operand_error_report.tail)
4781 {
4782 /* If the only error is a non-fatal one and we don't want to print it,
4783 just exit. */
4784 if (!non_fatal_only || head->detail.non_fatal)
4785 {
4786 DEBUG_TRACE ("single opcode entry with error kind: %s",
4787 operand_mismatch_kind_names[head->detail.kind]);
4788 output_operand_error_record (head, str);
4789 }
4790 return;
4791 }
4792
4793 /* Find the error kind of the highest severity. */
4794 DEBUG_TRACE ("multiple opcode entries with error kind");
4795 kind = AARCH64_OPDE_NIL;
4796 for (curr = head; curr != NULL; curr = curr->next)
4797 {
4798 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4799 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4800 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4801 kind = curr->detail.kind;
4802 }
4803 gas_assert (kind != AARCH64_OPDE_NIL);
4804
4805 /* Pick up one of errors of KIND to report. */
4806 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4807 for (curr = head; curr != NULL; curr = curr->next)
4808 {
4809 /* If we don't want to print non-fatal errors then don't consider them
4810 at all. */
4811 if (curr->detail.kind != kind
4812 || (non_fatal_only && !head->detail.non_fatal))
4813 continue;
4814 /* If there are multiple errors, pick up the one with the highest
4815 mismatching operand index. In the case of multiple errors with
4816 the equally highest operand index, pick up the first one or the
4817 first one with non-NULL error message. */
4818 if (curr->detail.index > largest_error_pos
4819 || (curr->detail.index == largest_error_pos && msg == NULL
4820 && curr->detail.error != NULL))
4821 {
4822 largest_error_pos = curr->detail.index;
4823 record = curr;
4824 msg = record->detail.error;
4825 }
4826 }
4827
4828 /* The way errors are collected in the back-end is a bit non-intuitive. But
4829 essentially, because each operand template is tried recursively you may
4830 always have errors collected from the previous tried OPND. These are
4831 usually skipped if there is one successful match. However now with the
4832 non-fatal errors we have to ignore those previously collected hard errors
4833 when we're only interested in printing the non-fatal ones. This condition
4834 prevents us from printing errors that are not appropriate, since we did
4835 match a condition, but it also has warnings that it wants to print. */
4836 if (non_fatal_only && !record)
4837 return;
4838
4839 gas_assert (largest_error_pos != -2 && record != NULL);
4840 DEBUG_TRACE ("Pick up error kind %s to report",
4841 operand_mismatch_kind_names[record->detail.kind]);
4842
4843 /* Output. */
4844 output_operand_error_record (record, str);
4845 }
4846 \f
4847 /* Write an AARCH64 instruction to buf - always little-endian. */
4848 static void
4849 put_aarch64_insn (char *buf, uint32_t insn)
4850 {
4851 unsigned char *where = (unsigned char *) buf;
4852 where[0] = insn;
4853 where[1] = insn >> 8;
4854 where[2] = insn >> 16;
4855 where[3] = insn >> 24;
4856 }
4857
4858 static uint32_t
4859 get_aarch64_insn (char *buf)
4860 {
4861 unsigned char *where = (unsigned char *) buf;
4862 uint32_t result;
4863 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4864 return result;
4865 }
4866
4867 static void
4868 output_inst (struct aarch64_inst *new_inst)
4869 {
4870 char *to = NULL;
4871
4872 to = frag_more (INSN_SIZE);
4873
4874 frag_now->tc_frag_data.recorded = 1;
4875
4876 put_aarch64_insn (to, inst.base.value);
4877
4878 if (inst.reloc.type != BFD_RELOC_UNUSED)
4879 {
4880 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4881 INSN_SIZE, &inst.reloc.exp,
4882 inst.reloc.pc_rel,
4883 inst.reloc.type);
4884 DEBUG_TRACE ("Prepared relocation fix up");
4885 /* Don't check the addend value against the instruction size,
4886 that's the job of our code in md_apply_fix(). */
4887 fixp->fx_no_overflow = 1;
4888 if (new_inst != NULL)
4889 fixp->tc_fix_data.inst = new_inst;
4890 if (aarch64_gas_internal_fixup_p ())
4891 {
4892 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4893 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4894 fixp->fx_addnumber = inst.reloc.flags;
4895 }
4896 }
4897
4898 dwarf2_emit_insn (INSN_SIZE);
4899 }
4900
/* Link together opcodes of the same name.  */

/* Node in a singly-linked list of opcode table entries sharing one
   mnemonic; the lists are looked up by mnemonic via the aarch64_ops_hsh
   hash table (see lookup_mnemonic).  */
struct templates
{
  aarch64_opcode *opcode;	/* One opcode entry for this mnemonic.  */
  struct templates *next;	/* Next entry with the same mnemonic.  */
};

typedef struct templates templates;
4910
4911 static templates *
4912 lookup_mnemonic (const char *start, int len)
4913 {
4914 templates *templ = NULL;
4915
4916 templ = hash_find_n (aarch64_ops_hsh, start, len);
4917 return templ;
4918 }
4919
4920 /* Subroutine of md_assemble, responsible for looking up the primary
4921 opcode from the mnemonic the user wrote. STR points to the
4922 beginning of the mnemonic. */
4923
4924 static templates *
4925 opcode_lookup (char **str)
4926 {
4927 char *end, *base, *dot;
4928 const aarch64_cond *cond;
4929 char condname[16];
4930 int len;
4931
4932 /* Scan up to the end of the mnemonic, which must end in white space,
4933 '.', or end of string. */
4934 dot = 0;
4935 for (base = end = *str; is_part_of_name(*end); end++)
4936 if (*end == '.' && !dot)
4937 dot = end;
4938
4939 if (end == base || dot == base)
4940 return 0;
4941
4942 inst.cond = COND_ALWAYS;
4943
4944 /* Handle a possible condition. */
4945 if (dot)
4946 {
4947 cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
4948 if (cond)
4949 {
4950 inst.cond = cond->value;
4951 *str = end;
4952 }
4953 else
4954 {
4955 *str = dot;
4956 return 0;
4957 }
4958 len = dot - base;
4959 }
4960 else
4961 {
4962 *str = end;
4963 len = end - base;
4964 }
4965
4966 if (inst.cond == COND_ALWAYS)
4967 {
4968 /* Look for unaffixed mnemonic. */
4969 return lookup_mnemonic (base, len);
4970 }
4971 else if (len <= 13)
4972 {
4973 /* append ".c" to mnemonic if conditional */
4974 memcpy (condname, base, len);
4975 memcpy (condname + len, ".c", 2);
4976 base = condname;
4977 len += 2;
4978 return lookup_mnemonic (base, len);
4979 }
4980
4981 return NULL;
4982 }
4983
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL
   (after recording a syntax error via first_error) when the arrangement
   is invalid.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the width-derived offset is
     added to this below.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
  };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate suffixes /z and /m map directly.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Vector element register.  Relies on AARCH64_OPND_QLF_S_B .. _S_Q
	 being laid out in the same order as NT_b .. NT_q.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  Only 4-, 8- and 16-byte total sizes exist.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5054
5055 /* Process an optional operand that is found omitted from the assembly line.
5056 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5057 instruction's opcode entry while IDX is the index of this omitted operand.
5058 */
5059
5060 static void
5061 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
5062 int idx, aarch64_opnd_info *operand)
5063 {
5064 aarch64_insn default_value = get_optional_operand_default_value (opcode);
5065 gas_assert (optional_operand_p (opcode, idx));
5066 gas_assert (!operand->present);
5067
5068 switch (type)
5069 {
5070 case AARCH64_OPND_Rd:
5071 case AARCH64_OPND_Rn:
5072 case AARCH64_OPND_Rm:
5073 case AARCH64_OPND_Rt:
5074 case AARCH64_OPND_Rt2:
5075 case AARCH64_OPND_Rs:
5076 case AARCH64_OPND_Ra:
5077 case AARCH64_OPND_Rt_SYS:
5078 case AARCH64_OPND_Rd_SP:
5079 case AARCH64_OPND_Rn_SP:
5080 case AARCH64_OPND_Rm_SP:
5081 case AARCH64_OPND_Fd:
5082 case AARCH64_OPND_Fn:
5083 case AARCH64_OPND_Fm:
5084 case AARCH64_OPND_Fa:
5085 case AARCH64_OPND_Ft:
5086 case AARCH64_OPND_Ft2:
5087 case AARCH64_OPND_Sd:
5088 case AARCH64_OPND_Sn:
5089 case AARCH64_OPND_Sm:
5090 case AARCH64_OPND_Va:
5091 case AARCH64_OPND_Vd:
5092 case AARCH64_OPND_Vn:
5093 case AARCH64_OPND_Vm:
5094 case AARCH64_OPND_VdD1:
5095 case AARCH64_OPND_VnD1:
5096 operand->reg.regno = default_value;
5097 break;
5098
5099 case AARCH64_OPND_Ed:
5100 case AARCH64_OPND_En:
5101 case AARCH64_OPND_Em:
5102 case AARCH64_OPND_Em16:
5103 case AARCH64_OPND_SM3_IMM2:
5104 operand->reglane.regno = default_value;
5105 break;
5106
5107 case AARCH64_OPND_IDX:
5108 case AARCH64_OPND_BIT_NUM:
5109 case AARCH64_OPND_IMMR:
5110 case AARCH64_OPND_IMMS:
5111 case AARCH64_OPND_SHLL_IMM:
5112 case AARCH64_OPND_IMM_VLSL:
5113 case AARCH64_OPND_IMM_VLSR:
5114 case AARCH64_OPND_CCMP_IMM:
5115 case AARCH64_OPND_FBITS:
5116 case AARCH64_OPND_UIMM4:
5117 case AARCH64_OPND_UIMM3_OP1:
5118 case AARCH64_OPND_UIMM3_OP2:
5119 case AARCH64_OPND_IMM:
5120 case AARCH64_OPND_IMM_2:
5121 case AARCH64_OPND_WIDTH:
5122 case AARCH64_OPND_UIMM7:
5123 case AARCH64_OPND_NZCV:
5124 case AARCH64_OPND_SVE_PATTERN:
5125 case AARCH64_OPND_SVE_PRFOP:
5126 operand->imm.value = default_value;
5127 break;
5128
5129 case AARCH64_OPND_SVE_PATTERN_SCALED:
5130 operand->imm.value = default_value;
5131 operand->shifter.kind = AARCH64_MOD_MUL;
5132 operand->shifter.amount = 1;
5133 break;
5134
5135 case AARCH64_OPND_EXCEPTION:
5136 inst.reloc.type = BFD_RELOC_UNUSED;
5137 break;
5138
5139 case AARCH64_OPND_BARRIER_ISB:
5140 operand->barrier = aarch64_barrier_options + default_value;
5141
5142 default:
5143 break;
5144 }
5145 }
5146
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.

   Validates that the relocation specified on a MOVZ/MOVN/MOVK is
   compatible with the instruction and the register width, and derives
   the implied LSL shift amount (16 bits per relocation "group") for
   operand 1.  On failure a (fatal) syntax error is recorded.  */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  /* Non-zero when the destination register is a W register.  */
  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK only inserts 16 bits; relocations that may alter the other
     bits of the register (signed or PC-relative group relocs, and the
     TLS set-of-three) are rejected for it.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  switch (inst.reloc.type)
    {
    /* Group 0 relocations: LSL #0.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* Group 1 relocations: LSL #16.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* Groups 2 and 3 only make sense for 64-bit registers.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5248
/* A primitive log calculator.  Return log2 of SIZE for SIZE in
   {1, 2, 4, 8, 16}; any other value trips gas_assert and returns
   (unsigned int) -1.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Also reject 0: the original guard only caught SIZE > 16, so
     SIZE == 0 would index ls[-1] (out-of-bounds read).  */
  if (size < 1 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  /* Non-power-of-two sizes are marked -1 in the table.  */
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5264
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.

   The real reloc is selected from a table indexed by (a) which of the
   five LO12 pseudo relocs is pending, computed as an offset from
   BFD_RELOC_AARCH64_LDST_LO12, and (b) the log2 of the transfer size
   implied by operand 1's qualifier.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: the five pseudo relocs, in the same order as their codes.
     Columns: transfer size 8/16/32/64/128 bits.  The TLS rows have no
     128-bit variant (BFD_RELOC_AARCH64_NONE).  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* Infer operand 1's qualifier from operand 0's when it was left
     unspecified by the parser.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* TLS rows stop at 64-bit transfers; only the plain LO12 row has a
     128-bit column.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5343
5344 /* Check whether a register list REGINFO is valid. The registers must be
5345 numbered in increasing order (modulo 32), in increments of one or two.
5346
5347 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5348 increments of two.
5349
5350 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5351
5352 static bfd_boolean
5353 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5354 {
5355 uint32_t i, nb_regs, prev_regno, incr;
5356
5357 nb_regs = 1 + (reginfo & 0x3);
5358 reginfo >>= 2;
5359 prev_regno = reginfo & 0x1f;
5360 incr = accept_alternate ? 2 : 1;
5361
5362 for (i = 1; i < nb_regs; ++i)
5363 {
5364 uint32_t curr_regno;
5365 reginfo >>= 5;
5366 curr_regno = reginfo & 0x1f;
5367 if (curr_regno != ((prev_regno + incr) & 0x1f))
5368 return FALSE;
5369 prev_regno = curr_regno;
5370 }
5371
5372 return TRUE;
5373 }
5374
5375 /* Generic instruction operand parser. This does no encoding and no
5376 semantic validation; it merely squirrels values away in the inst
5377 structure. Returns TRUE or FALSE depending on whether the
5378 specified grammar matched. */
5379
5380 static bfd_boolean
5381 parse_operands (char *str, const aarch64_opcode *opcode)
5382 {
5383 int i;
5384 char *backtrack_pos = 0;
5385 const enum aarch64_opnd *operands = opcode->operands;
5386 aarch64_reg_type imm_reg_type;
5387
5388 clear_error ();
5389 skip_whitespace (str);
5390
5391 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5392 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5393 else
5394 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5395
5396 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5397 {
5398 int64_t val;
5399 const reg_entry *reg;
5400 int comma_skipped_p = 0;
5401 aarch64_reg_type rtype;
5402 struct vector_type_el vectype;
5403 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5404 aarch64_opnd_info *info = &inst.base.operands[i];
5405 aarch64_reg_type reg_type;
5406
5407 DEBUG_TRACE ("parse operand %d", i);
5408
5409 /* Assign the operand code. */
5410 info->type = operands[i];
5411
5412 if (optional_operand_p (opcode, i))
5413 {
5414 /* Remember where we are in case we need to backtrack. */
5415 gas_assert (!backtrack_pos);
5416 backtrack_pos = str;
5417 }
5418
5419 /* Expect comma between operands; the backtrack mechanism will take
5420 care of cases of omitted optional operand. */
5421 if (i > 0 && ! skip_past_char (&str, ','))
5422 {
5423 set_syntax_error (_("comma expected between operands"));
5424 goto failure;
5425 }
5426 else
5427 comma_skipped_p = 1;
5428
5429 switch (operands[i])
5430 {
5431 case AARCH64_OPND_Rd:
5432 case AARCH64_OPND_Rn:
5433 case AARCH64_OPND_Rm:
5434 case AARCH64_OPND_Rt:
5435 case AARCH64_OPND_Rt2:
5436 case AARCH64_OPND_Rs:
5437 case AARCH64_OPND_Ra:
5438 case AARCH64_OPND_Rt_SYS:
5439 case AARCH64_OPND_PAIRREG:
5440 case AARCH64_OPND_SVE_Rm:
5441 po_int_reg_or_fail (REG_TYPE_R_Z);
5442 break;
5443
5444 case AARCH64_OPND_Rd_SP:
5445 case AARCH64_OPND_Rn_SP:
5446 case AARCH64_OPND_SVE_Rn_SP:
5447 case AARCH64_OPND_Rm_SP:
5448 po_int_reg_or_fail (REG_TYPE_R_SP);
5449 break;
5450
5451 case AARCH64_OPND_Rm_EXT:
5452 case AARCH64_OPND_Rm_SFT:
5453 po_misc_or_fail (parse_shifter_operand
5454 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5455 ? SHIFTED_ARITH_IMM
5456 : SHIFTED_LOGIC_IMM)));
5457 if (!info->shifter.operator_present)
5458 {
5459 /* Default to LSL if not present. Libopcodes prefers shifter
5460 kind to be explicit. */
5461 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5462 info->shifter.kind = AARCH64_MOD_LSL;
5463 /* For Rm_EXT, libopcodes will carry out further check on whether
5464 or not stack pointer is used in the instruction (Recall that
5465 "the extend operator is not optional unless at least one of
5466 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5467 }
5468 break;
5469
5470 case AARCH64_OPND_Fd:
5471 case AARCH64_OPND_Fn:
5472 case AARCH64_OPND_Fm:
5473 case AARCH64_OPND_Fa:
5474 case AARCH64_OPND_Ft:
5475 case AARCH64_OPND_Ft2:
5476 case AARCH64_OPND_Sd:
5477 case AARCH64_OPND_Sn:
5478 case AARCH64_OPND_Sm:
5479 case AARCH64_OPND_SVE_VZn:
5480 case AARCH64_OPND_SVE_Vd:
5481 case AARCH64_OPND_SVE_Vm:
5482 case AARCH64_OPND_SVE_Vn:
5483 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5484 if (val == PARSE_FAIL)
5485 {
5486 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5487 goto failure;
5488 }
5489 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5490
5491 info->reg.regno = val;
5492 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5493 break;
5494
5495 case AARCH64_OPND_SVE_Pd:
5496 case AARCH64_OPND_SVE_Pg3:
5497 case AARCH64_OPND_SVE_Pg4_5:
5498 case AARCH64_OPND_SVE_Pg4_10:
5499 case AARCH64_OPND_SVE_Pg4_16:
5500 case AARCH64_OPND_SVE_Pm:
5501 case AARCH64_OPND_SVE_Pn:
5502 case AARCH64_OPND_SVE_Pt:
5503 reg_type = REG_TYPE_PN;
5504 goto vector_reg;
5505
5506 case AARCH64_OPND_SVE_Za_5:
5507 case AARCH64_OPND_SVE_Za_16:
5508 case AARCH64_OPND_SVE_Zd:
5509 case AARCH64_OPND_SVE_Zm_5:
5510 case AARCH64_OPND_SVE_Zm_16:
5511 case AARCH64_OPND_SVE_Zn:
5512 case AARCH64_OPND_SVE_Zt:
5513 reg_type = REG_TYPE_ZN;
5514 goto vector_reg;
5515
5516 case AARCH64_OPND_Va:
5517 case AARCH64_OPND_Vd:
5518 case AARCH64_OPND_Vn:
5519 case AARCH64_OPND_Vm:
5520 reg_type = REG_TYPE_VN;
5521 vector_reg:
5522 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5523 if (val == PARSE_FAIL)
5524 {
5525 first_error (_(get_reg_expected_msg (reg_type)));
5526 goto failure;
5527 }
5528 if (vectype.defined & NTA_HASINDEX)
5529 goto failure;
5530
5531 info->reg.regno = val;
5532 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5533 && vectype.type == NT_invtype)
5534 /* Unqualified Pn and Zn registers are allowed in certain
5535 contexts. Rely on F_STRICT qualifier checking to catch
5536 invalid uses. */
5537 info->qualifier = AARCH64_OPND_QLF_NIL;
5538 else
5539 {
5540 info->qualifier = vectype_to_qualifier (&vectype);
5541 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5542 goto failure;
5543 }
5544 break;
5545
5546 case AARCH64_OPND_VdD1:
5547 case AARCH64_OPND_VnD1:
5548 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5549 if (val == PARSE_FAIL)
5550 {
5551 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5552 goto failure;
5553 }
5554 if (vectype.type != NT_d || vectype.index != 1)
5555 {
5556 set_fatal_syntax_error
5557 (_("the top half of a 128-bit FP/SIMD register is expected"));
5558 goto failure;
5559 }
5560 info->reg.regno = val;
5561 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5562 here; it is correct for the purpose of encoding/decoding since
5563 only the register number is explicitly encoded in the related
5564 instructions, although this appears a bit hacky. */
5565 info->qualifier = AARCH64_OPND_QLF_S_D;
5566 break;
5567
5568 case AARCH64_OPND_SVE_Zm3_INDEX:
5569 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5570 case AARCH64_OPND_SVE_Zm4_INDEX:
5571 case AARCH64_OPND_SVE_Zn_INDEX:
5572 reg_type = REG_TYPE_ZN;
5573 goto vector_reg_index;
5574
5575 case AARCH64_OPND_Ed:
5576 case AARCH64_OPND_En:
5577 case AARCH64_OPND_Em:
5578 case AARCH64_OPND_Em16:
5579 case AARCH64_OPND_SM3_IMM2:
5580 reg_type = REG_TYPE_VN;
5581 vector_reg_index:
5582 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5583 if (val == PARSE_FAIL)
5584 {
5585 first_error (_(get_reg_expected_msg (reg_type)));
5586 goto failure;
5587 }
5588 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5589 goto failure;
5590
5591 info->reglane.regno = val;
5592 info->reglane.index = vectype.index;
5593 info->qualifier = vectype_to_qualifier (&vectype);
5594 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5595 goto failure;
5596 break;
5597
5598 case AARCH64_OPND_SVE_ZnxN:
5599 case AARCH64_OPND_SVE_ZtxN:
5600 reg_type = REG_TYPE_ZN;
5601 goto vector_reg_list;
5602
5603 case AARCH64_OPND_LVn:
5604 case AARCH64_OPND_LVt:
5605 case AARCH64_OPND_LVt_AL:
5606 case AARCH64_OPND_LEt:
5607 reg_type = REG_TYPE_VN;
5608 vector_reg_list:
5609 if (reg_type == REG_TYPE_ZN
5610 && get_opcode_dependent_value (opcode) == 1
5611 && *str != '{')
5612 {
5613 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5614 if (val == PARSE_FAIL)
5615 {
5616 first_error (_(get_reg_expected_msg (reg_type)));
5617 goto failure;
5618 }
5619 info->reglist.first_regno = val;
5620 info->reglist.num_regs = 1;
5621 }
5622 else
5623 {
5624 val = parse_vector_reg_list (&str, reg_type, &vectype);
5625 if (val == PARSE_FAIL)
5626 goto failure;
5627 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5628 {
5629 set_fatal_syntax_error (_("invalid register list"));
5630 goto failure;
5631 }
5632 info->reglist.first_regno = (val >> 2) & 0x1f;
5633 info->reglist.num_regs = (val & 0x3) + 1;
5634 }
5635 if (operands[i] == AARCH64_OPND_LEt)
5636 {
5637 if (!(vectype.defined & NTA_HASINDEX))
5638 goto failure;
5639 info->reglist.has_index = 1;
5640 info->reglist.index = vectype.index;
5641 }
5642 else
5643 {
5644 if (vectype.defined & NTA_HASINDEX)
5645 goto failure;
5646 if (!(vectype.defined & NTA_HASTYPE))
5647 {
5648 if (reg_type == REG_TYPE_ZN)
5649 set_fatal_syntax_error (_("missing type suffix"));
5650 goto failure;
5651 }
5652 }
5653 info->qualifier = vectype_to_qualifier (&vectype);
5654 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5655 goto failure;
5656 break;
5657
5658 case AARCH64_OPND_CRn:
5659 case AARCH64_OPND_CRm:
5660 {
5661 char prefix = *(str++);
5662 if (prefix != 'c' && prefix != 'C')
5663 goto failure;
5664
5665 po_imm_nc_or_fail ();
5666 if (val > 15)
5667 {
5668 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5669 goto failure;
5670 }
5671 info->qualifier = AARCH64_OPND_QLF_CR;
5672 info->imm.value = val;
5673 break;
5674 }
5675
5676 case AARCH64_OPND_SHLL_IMM:
5677 case AARCH64_OPND_IMM_VLSR:
5678 po_imm_or_fail (1, 64);
5679 info->imm.value = val;
5680 break;
5681
5682 case AARCH64_OPND_CCMP_IMM:
5683 case AARCH64_OPND_SIMM5:
5684 case AARCH64_OPND_FBITS:
5685 case AARCH64_OPND_UIMM4:
5686 case AARCH64_OPND_UIMM3_OP1:
5687 case AARCH64_OPND_UIMM3_OP2:
5688 case AARCH64_OPND_IMM_VLSL:
5689 case AARCH64_OPND_IMM:
5690 case AARCH64_OPND_IMM_2:
5691 case AARCH64_OPND_WIDTH:
5692 case AARCH64_OPND_SVE_INV_LIMM:
5693 case AARCH64_OPND_SVE_LIMM:
5694 case AARCH64_OPND_SVE_LIMM_MOV:
5695 case AARCH64_OPND_SVE_SHLIMM_PRED:
5696 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5697 case AARCH64_OPND_SVE_SHRIMM_PRED:
5698 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5699 case AARCH64_OPND_SVE_SIMM5:
5700 case AARCH64_OPND_SVE_SIMM5B:
5701 case AARCH64_OPND_SVE_SIMM6:
5702 case AARCH64_OPND_SVE_SIMM8:
5703 case AARCH64_OPND_SVE_UIMM3:
5704 case AARCH64_OPND_SVE_UIMM7:
5705 case AARCH64_OPND_SVE_UIMM8:
5706 case AARCH64_OPND_SVE_UIMM8_53:
5707 case AARCH64_OPND_IMM_ROT1:
5708 case AARCH64_OPND_IMM_ROT2:
5709 case AARCH64_OPND_IMM_ROT3:
5710 case AARCH64_OPND_SVE_IMM_ROT1:
5711 case AARCH64_OPND_SVE_IMM_ROT2:
5712 po_imm_nc_or_fail ();
5713 info->imm.value = val;
5714 break;
5715
5716 case AARCH64_OPND_SVE_AIMM:
5717 case AARCH64_OPND_SVE_ASIMM:
5718 po_imm_nc_or_fail ();
5719 info->imm.value = val;
5720 skip_whitespace (str);
5721 if (skip_past_comma (&str))
5722 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5723 else
5724 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5725 break;
5726
5727 case AARCH64_OPND_SVE_PATTERN:
5728 po_enum_or_fail (aarch64_sve_pattern_array);
5729 info->imm.value = val;
5730 break;
5731
5732 case AARCH64_OPND_SVE_PATTERN_SCALED:
5733 po_enum_or_fail (aarch64_sve_pattern_array);
5734 info->imm.value = val;
5735 if (skip_past_comma (&str)
5736 && !parse_shift (&str, info, SHIFTED_MUL))
5737 goto failure;
5738 if (!info->shifter.operator_present)
5739 {
5740 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5741 info->shifter.kind = AARCH64_MOD_MUL;
5742 info->shifter.amount = 1;
5743 }
5744 break;
5745
5746 case AARCH64_OPND_SVE_PRFOP:
5747 po_enum_or_fail (aarch64_sve_prfop_array);
5748 info->imm.value = val;
5749 break;
5750
5751 case AARCH64_OPND_UIMM7:
5752 po_imm_or_fail (0, 127);
5753 info->imm.value = val;
5754 break;
5755
5756 case AARCH64_OPND_IDX:
5757 case AARCH64_OPND_MASK:
5758 case AARCH64_OPND_BIT_NUM:
5759 case AARCH64_OPND_IMMR:
5760 case AARCH64_OPND_IMMS:
5761 po_imm_or_fail (0, 63);
5762 info->imm.value = val;
5763 break;
5764
5765 case AARCH64_OPND_IMM0:
5766 po_imm_nc_or_fail ();
5767 if (val != 0)
5768 {
5769 set_fatal_syntax_error (_("immediate zero expected"));
5770 goto failure;
5771 }
5772 info->imm.value = 0;
5773 break;
5774
5775 case AARCH64_OPND_FPIMM0:
5776 {
5777 int qfloat;
5778 bfd_boolean res1 = FALSE, res2 = FALSE;
5779 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5780 it is probably not worth the effort to support it. */
5781 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5782 imm_reg_type))
5783 && (error_p ()
5784 || !(res2 = parse_constant_immediate (&str, &val,
5785 imm_reg_type))))
5786 goto failure;
5787 if ((res1 && qfloat == 0) || (res2 && val == 0))
5788 {
5789 info->imm.value = 0;
5790 info->imm.is_fp = 1;
5791 break;
5792 }
5793 set_fatal_syntax_error (_("immediate zero expected"));
5794 goto failure;
5795 }
5796
5797 case AARCH64_OPND_IMM_MOV:
5798 {
5799 char *saved = str;
5800 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5801 reg_name_p (str, REG_TYPE_VN))
5802 goto failure;
5803 str = saved;
5804 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5805 GE_OPT_PREFIX, 1));
5806 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5807 later. fix_mov_imm_insn will try to determine a machine
5808 instruction (MOVZ, MOVN or ORR) for it and will issue an error
5809 message if the immediate cannot be moved by a single
5810 instruction. */
5811 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5812 inst.base.operands[i].skip = 1;
5813 }
5814 break;
5815
5816 case AARCH64_OPND_SIMD_IMM:
5817 case AARCH64_OPND_SIMD_IMM_SFT:
5818 if (! parse_big_immediate (&str, &val, imm_reg_type))
5819 goto failure;
5820 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5821 /* addr_off_p */ 0,
5822 /* need_libopcodes_p */ 1,
5823 /* skip_p */ 1);
5824 /* Parse shift.
5825 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5826 shift, we don't check it here; we leave the checking to
5827 the libopcodes (operand_general_constraint_met_p). By
5828 doing this, we achieve better diagnostics. */
5829 if (skip_past_comma (&str)
5830 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5831 goto failure;
5832 if (!info->shifter.operator_present
5833 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5834 {
5835 /* Default to LSL if not present. Libopcodes prefers shifter
5836 kind to be explicit. */
5837 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5838 info->shifter.kind = AARCH64_MOD_LSL;
5839 }
5840 break;
5841
5842 case AARCH64_OPND_FPIMM:
5843 case AARCH64_OPND_SIMD_FPIMM:
5844 case AARCH64_OPND_SVE_FPIMM8:
5845 {
5846 int qfloat;
5847 bfd_boolean dp_p;
5848
5849 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5850 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5851 || !aarch64_imm_float_p (qfloat))
5852 {
5853 if (!error_p ())
5854 set_fatal_syntax_error (_("invalid floating-point"
5855 " constant"));
5856 goto failure;
5857 }
5858 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5859 inst.base.operands[i].imm.is_fp = 1;
5860 }
5861 break;
5862
5863 case AARCH64_OPND_SVE_I1_HALF_ONE:
5864 case AARCH64_OPND_SVE_I1_HALF_TWO:
5865 case AARCH64_OPND_SVE_I1_ZERO_ONE:
5866 {
5867 int qfloat;
5868 bfd_boolean dp_p;
5869
5870 dp_p = double_precision_operand_p (&inst.base.operands[0]);
5871 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5872 {
5873 if (!error_p ())
5874 set_fatal_syntax_error (_("invalid floating-point"
5875 " constant"));
5876 goto failure;
5877 }
5878 inst.base.operands[i].imm.value = qfloat;
5879 inst.base.operands[i].imm.is_fp = 1;
5880 }
5881 break;
5882
5883 case AARCH64_OPND_LIMM:
5884 po_misc_or_fail (parse_shifter_operand (&str, info,
5885 SHIFTED_LOGIC_IMM));
5886 if (info->shifter.operator_present)
5887 {
5888 set_fatal_syntax_error
5889 (_("shift not allowed for bitmask immediate"));
5890 goto failure;
5891 }
5892 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5893 /* addr_off_p */ 0,
5894 /* need_libopcodes_p */ 1,
5895 /* skip_p */ 1);
5896 break;
5897
5898 case AARCH64_OPND_AIMM:
5899 if (opcode->op == OP_ADD)
5900 /* ADD may have relocation types. */
5901 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5902 SHIFTED_ARITH_IMM));
5903 else
5904 po_misc_or_fail (parse_shifter_operand (&str, info,
5905 SHIFTED_ARITH_IMM));
5906 switch (inst.reloc.type)
5907 {
5908 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5909 info->shifter.amount = 12;
5910 break;
5911 case BFD_RELOC_UNUSED:
5912 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5913 if (info->shifter.kind != AARCH64_MOD_NONE)
5914 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5915 inst.reloc.pc_rel = 0;
5916 break;
5917 default:
5918 break;
5919 }
5920 info->imm.value = 0;
5921 if (!info->shifter.operator_present)
5922 {
5923 /* Default to LSL if not present. Libopcodes prefers shifter
5924 kind to be explicit. */
5925 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5926 info->shifter.kind = AARCH64_MOD_LSL;
5927 }
5928 break;
5929
5930 case AARCH64_OPND_HALF:
5931 {
5932 /* #<imm16> or relocation. */
5933 int internal_fixup_p;
5934 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5935 if (internal_fixup_p)
5936 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5937 skip_whitespace (str);
5938 if (skip_past_comma (&str))
5939 {
5940 /* {, LSL #<shift>} */
5941 if (! aarch64_gas_internal_fixup_p ())
5942 {
5943 set_fatal_syntax_error (_("can't mix relocation modifier "
5944 "with explicit shift"));
5945 goto failure;
5946 }
5947 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5948 }
5949 else
5950 inst.base.operands[i].shifter.amount = 0;
5951 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5952 inst.base.operands[i].imm.value = 0;
5953 if (! process_movw_reloc_info ())
5954 goto failure;
5955 }
5956 break;
5957
5958 case AARCH64_OPND_EXCEPTION:
5959 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
5960 imm_reg_type));
5961 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5962 /* addr_off_p */ 0,
5963 /* need_libopcodes_p */ 0,
5964 /* skip_p */ 1);
5965 break;
5966
5967 case AARCH64_OPND_NZCV:
5968 {
5969 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5970 if (nzcv != NULL)
5971 {
5972 str += 4;
5973 info->imm.value = nzcv->value;
5974 break;
5975 }
5976 po_imm_or_fail (0, 15);
5977 info->imm.value = val;
5978 }
5979 break;
5980
5981 case AARCH64_OPND_COND:
5982 case AARCH64_OPND_COND1:
5983 {
5984 char *start = str;
5985 do
5986 str++;
5987 while (ISALPHA (*str));
5988 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
5989 if (info->cond == NULL)
5990 {
5991 set_syntax_error (_("invalid condition"));
5992 goto failure;
5993 }
5994 else if (operands[i] == AARCH64_OPND_COND1
5995 && (info->cond->value & 0xe) == 0xe)
5996 {
5997 /* Do not allow AL or NV. */
5998 set_default_error ();
5999 goto failure;
6000 }
6001 }
6002 break;
6003
6004 case AARCH64_OPND_ADDR_ADRP:
6005 po_misc_or_fail (parse_adrp (&str));
6006 /* Clear the value as operand needs to be relocated. */
6007 info->imm.value = 0;
6008 break;
6009
6010 case AARCH64_OPND_ADDR_PCREL14:
6011 case AARCH64_OPND_ADDR_PCREL19:
6012 case AARCH64_OPND_ADDR_PCREL21:
6013 case AARCH64_OPND_ADDR_PCREL26:
6014 po_misc_or_fail (parse_address (&str, info));
6015 if (!info->addr.pcrel)
6016 {
6017 set_syntax_error (_("invalid pc-relative address"));
6018 goto failure;
6019 }
6020 if (inst.gen_lit_pool
6021 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6022 {
6023 /* Only permit "=value" in the literal load instructions.
6024 The literal will be generated by programmer_friendly_fixup. */
6025 set_syntax_error (_("invalid use of \"=immediate\""));
6026 goto failure;
6027 }
6028 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6029 {
6030 set_syntax_error (_("unrecognized relocation suffix"));
6031 goto failure;
6032 }
6033 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6034 {
6035 info->imm.value = inst.reloc.exp.X_add_number;
6036 inst.reloc.type = BFD_RELOC_UNUSED;
6037 }
6038 else
6039 {
6040 info->imm.value = 0;
6041 if (inst.reloc.type == BFD_RELOC_UNUSED)
6042 switch (opcode->iclass)
6043 {
6044 case compbranch:
6045 case condbranch:
6046 /* e.g. CBZ or B.COND */
6047 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6048 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6049 break;
6050 case testbranch:
6051 /* e.g. TBZ */
6052 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6053 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6054 break;
6055 case branch_imm:
6056 /* e.g. B or BL */
6057 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6058 inst.reloc.type =
6059 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6060 : BFD_RELOC_AARCH64_JUMP26;
6061 break;
6062 case loadlit:
6063 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6064 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6065 break;
6066 case pcreladdr:
6067 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6068 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6069 break;
6070 default:
6071 gas_assert (0);
6072 abort ();
6073 }
6074 inst.reloc.pc_rel = 1;
6075 }
6076 break;
6077
6078 case AARCH64_OPND_ADDR_SIMPLE:
6079 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6080 {
6081 /* [<Xn|SP>{, #<simm>}] */
6082 char *start = str;
6083 /* First use the normal address-parsing routines, to get
6084 the usual syntax errors. */
6085 po_misc_or_fail (parse_address (&str, info));
6086 if (info->addr.pcrel || info->addr.offset.is_reg
6087 || !info->addr.preind || info->addr.postind
6088 || info->addr.writeback)
6089 {
6090 set_syntax_error (_("invalid addressing mode"));
6091 goto failure;
6092 }
6093
6094 /* Then retry, matching the specific syntax of these addresses. */
6095 str = start;
6096 po_char_or_fail ('[');
6097 po_reg_or_fail (REG_TYPE_R64_SP);
6098 /* Accept optional ", #0". */
6099 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6100 && skip_past_char (&str, ','))
6101 {
6102 skip_past_char (&str, '#');
6103 if (! skip_past_char (&str, '0'))
6104 {
6105 set_fatal_syntax_error
6106 (_("the optional immediate offset can only be 0"));
6107 goto failure;
6108 }
6109 }
6110 po_char_or_fail (']');
6111 break;
6112 }
6113
6114 case AARCH64_OPND_ADDR_REGOFF:
6115 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6116 po_misc_or_fail (parse_address (&str, info));
6117 regoff_addr:
6118 if (info->addr.pcrel || !info->addr.offset.is_reg
6119 || !info->addr.preind || info->addr.postind
6120 || info->addr.writeback)
6121 {
6122 set_syntax_error (_("invalid addressing mode"));
6123 goto failure;
6124 }
6125 if (!info->shifter.operator_present)
6126 {
6127 /* Default to LSL if not present. Libopcodes prefers shifter
6128 kind to be explicit. */
6129 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6130 info->shifter.kind = AARCH64_MOD_LSL;
6131 }
6132 /* Qualifier to be deduced by libopcodes. */
6133 break;
6134
6135 case AARCH64_OPND_ADDR_SIMM7:
6136 po_misc_or_fail (parse_address (&str, info));
6137 if (info->addr.pcrel || info->addr.offset.is_reg
6138 || (!info->addr.preind && !info->addr.postind))
6139 {
6140 set_syntax_error (_("invalid addressing mode"));
6141 goto failure;
6142 }
6143 if (inst.reloc.type != BFD_RELOC_UNUSED)
6144 {
6145 set_syntax_error (_("relocation not allowed"));
6146 goto failure;
6147 }
6148 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6149 /* addr_off_p */ 1,
6150 /* need_libopcodes_p */ 1,
6151 /* skip_p */ 0);
6152 break;
6153
6154 case AARCH64_OPND_ADDR_SIMM9:
6155 case AARCH64_OPND_ADDR_SIMM9_2:
6156 po_misc_or_fail (parse_address (&str, info));
6157 if (info->addr.pcrel || info->addr.offset.is_reg
6158 || (!info->addr.preind && !info->addr.postind)
6159 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6160 && info->addr.writeback))
6161 {
6162 set_syntax_error (_("invalid addressing mode"));
6163 goto failure;
6164 }
6165 if (inst.reloc.type != BFD_RELOC_UNUSED)
6166 {
6167 set_syntax_error (_("relocation not allowed"));
6168 goto failure;
6169 }
6170 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6171 /* addr_off_p */ 1,
6172 /* need_libopcodes_p */ 1,
6173 /* skip_p */ 0);
6174 break;
6175
6176 case AARCH64_OPND_ADDR_SIMM10:
6177 case AARCH64_OPND_ADDR_OFFSET:
6178 po_misc_or_fail (parse_address (&str, info));
6179 if (info->addr.pcrel || info->addr.offset.is_reg
6180 || !info->addr.preind || info->addr.postind)
6181 {
6182 set_syntax_error (_("invalid addressing mode"));
6183 goto failure;
6184 }
6185 if (inst.reloc.type != BFD_RELOC_UNUSED)
6186 {
6187 set_syntax_error (_("relocation not allowed"));
6188 goto failure;
6189 }
6190 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6191 /* addr_off_p */ 1,
6192 /* need_libopcodes_p */ 1,
6193 /* skip_p */ 0);
6194 break;
6195
6196 case AARCH64_OPND_ADDR_UIMM12:
6197 po_misc_or_fail (parse_address (&str, info));
6198 if (info->addr.pcrel || info->addr.offset.is_reg
6199 || !info->addr.preind || info->addr.writeback)
6200 {
6201 set_syntax_error (_("invalid addressing mode"));
6202 goto failure;
6203 }
6204 if (inst.reloc.type == BFD_RELOC_UNUSED)
6205 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6206 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6207 || (inst.reloc.type
6208 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6209 || (inst.reloc.type
6210 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6211 || (inst.reloc.type
6212 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6213 || (inst.reloc.type
6214 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6215 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6216 /* Leave qualifier to be determined by libopcodes. */
6217 break;
6218
6219 case AARCH64_OPND_SIMD_ADDR_POST:
6220 /* [<Xn|SP>], <Xm|#<amount>> */
6221 po_misc_or_fail (parse_address (&str, info));
6222 if (!info->addr.postind || !info->addr.writeback)
6223 {
6224 set_syntax_error (_("invalid addressing mode"));
6225 goto failure;
6226 }
6227 if (!info->addr.offset.is_reg)
6228 {
6229 if (inst.reloc.exp.X_op == O_constant)
6230 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6231 else
6232 {
6233 set_fatal_syntax_error
6234 (_("writeback value must be an immediate constant"));
6235 goto failure;
6236 }
6237 }
6238 /* No qualifier. */
6239 break;
6240
6241 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6242 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6243 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6244 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6245 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6246 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6247 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6248 case AARCH64_OPND_SVE_ADDR_RI_U6:
6249 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6250 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6251 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6252 /* [X<n>{, #imm, MUL VL}]
6253 [X<n>{, #imm}]
6254 but recognizing SVE registers. */
6255 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6256 &offset_qualifier));
6257 if (base_qualifier != AARCH64_OPND_QLF_X)
6258 {
6259 set_syntax_error (_("invalid addressing mode"));
6260 goto failure;
6261 }
6262 sve_regimm:
6263 if (info->addr.pcrel || info->addr.offset.is_reg
6264 || !info->addr.preind || info->addr.writeback)
6265 {
6266 set_syntax_error (_("invalid addressing mode"));
6267 goto failure;
6268 }
6269 if (inst.reloc.type != BFD_RELOC_UNUSED
6270 || inst.reloc.exp.X_op != O_constant)
6271 {
6272 /* Make sure this has priority over
6273 "invalid addressing mode". */
6274 set_fatal_syntax_error (_("constant offset required"));
6275 goto failure;
6276 }
6277 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6278 break;
6279
6280 case AARCH64_OPND_SVE_ADDR_R:
6281 /* [<Xn|SP>{, <R><m>}]
6282 but recognizing SVE registers. */
6283 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6284 &offset_qualifier));
6285 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6286 {
6287 offset_qualifier = AARCH64_OPND_QLF_X;
6288 info->addr.offset.is_reg = 1;
6289 info->addr.offset.regno = 31;
6290 }
6291 else if (base_qualifier != AARCH64_OPND_QLF_X
6292 || offset_qualifier != AARCH64_OPND_QLF_X)
6293 {
6294 set_syntax_error (_("invalid addressing mode"));
6295 goto failure;
6296 }
6297 goto regoff_addr;
6298
6299 case AARCH64_OPND_SVE_ADDR_RR:
6300 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6301 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6302 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6303 case AARCH64_OPND_SVE_ADDR_RX:
6304 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6305 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6306 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6307 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6308 but recognizing SVE registers. */
6309 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6310 &offset_qualifier));
6311 if (base_qualifier != AARCH64_OPND_QLF_X
6312 || offset_qualifier != AARCH64_OPND_QLF_X)
6313 {
6314 set_syntax_error (_("invalid addressing mode"));
6315 goto failure;
6316 }
6317 goto regoff_addr;
6318
6319 case AARCH64_OPND_SVE_ADDR_RZ:
6320 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6321 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6322 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6323 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6324 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6325 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6326 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6327 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6328 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6329 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6330 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6331 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6332 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6333 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6334 &offset_qualifier));
6335 if (base_qualifier != AARCH64_OPND_QLF_X
6336 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6337 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6338 {
6339 set_syntax_error (_("invalid addressing mode"));
6340 goto failure;
6341 }
6342 info->qualifier = offset_qualifier;
6343 goto regoff_addr;
6344
6345 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6346 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6347 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6348 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6349 /* [Z<n>.<T>{, #imm}] */
6350 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6351 &offset_qualifier));
6352 if (base_qualifier != AARCH64_OPND_QLF_S_S
6353 && base_qualifier != AARCH64_OPND_QLF_S_D)
6354 {
6355 set_syntax_error (_("invalid addressing mode"));
6356 goto failure;
6357 }
6358 info->qualifier = base_qualifier;
6359 goto sve_regimm;
6360
6361 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6362 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6363 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6364 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6365 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6366
6367 We don't reject:
6368
6369 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6370
6371 here since we get better error messages by leaving it to
6372 the qualifier checking routines. */
6373 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6374 &offset_qualifier));
6375 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6376 && base_qualifier != AARCH64_OPND_QLF_S_D)
6377 || offset_qualifier != base_qualifier)
6378 {
6379 set_syntax_error (_("invalid addressing mode"));
6380 goto failure;
6381 }
6382 info->qualifier = base_qualifier;
6383 goto regoff_addr;
6384
6385 case AARCH64_OPND_SYSREG:
6386 {
6387 uint32_t sysreg_flags;
6388 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6389 &sysreg_flags)) == PARSE_FAIL)
6390 {
6391 set_syntax_error (_("unknown or missing system register name"));
6392 goto failure;
6393 }
6394 inst.base.operands[i].sysreg.value = val;
6395 inst.base.operands[i].sysreg.flags = sysreg_flags;
6396 break;
6397 }
6398
6399 case AARCH64_OPND_PSTATEFIELD:
6400 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6401 == PARSE_FAIL)
6402 {
6403 set_syntax_error (_("unknown or missing PSTATE field name"));
6404 goto failure;
6405 }
6406 inst.base.operands[i].pstatefield = val;
6407 break;
6408
6409 case AARCH64_OPND_SYSREG_IC:
6410 inst.base.operands[i].sysins_op =
6411 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6412 goto sys_reg_ins;
6413 case AARCH64_OPND_SYSREG_DC:
6414 inst.base.operands[i].sysins_op =
6415 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6416 goto sys_reg_ins;
6417 case AARCH64_OPND_SYSREG_AT:
6418 inst.base.operands[i].sysins_op =
6419 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6420 goto sys_reg_ins;
6421 case AARCH64_OPND_SYSREG_TLBI:
6422 inst.base.operands[i].sysins_op =
6423 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6424 sys_reg_ins:
6425 if (inst.base.operands[i].sysins_op == NULL)
6426 {
6427 set_fatal_syntax_error ( _("unknown or missing operation name"));
6428 goto failure;
6429 }
6430 break;
6431
6432 case AARCH64_OPND_BARRIER:
6433 case AARCH64_OPND_BARRIER_ISB:
6434 val = parse_barrier (&str);
6435 if (val != PARSE_FAIL
6436 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6437 {
6438 /* ISB only accepts options name 'sy'. */
6439 set_syntax_error
6440 (_("the specified option is not accepted in ISB"));
6441 /* Turn off backtrack as this optional operand is present. */
6442 backtrack_pos = 0;
6443 goto failure;
6444 }
6445 /* This is an extension to accept a 0..15 immediate. */
6446 if (val == PARSE_FAIL)
6447 po_imm_or_fail (0, 15);
6448 info->barrier = aarch64_barrier_options + val;
6449 break;
6450
6451 case AARCH64_OPND_PRFOP:
6452 val = parse_pldop (&str);
6453 /* This is an extension to accept a 0..31 immediate. */
6454 if (val == PARSE_FAIL)
6455 po_imm_or_fail (0, 31);
6456 inst.base.operands[i].prfop = aarch64_prfops + val;
6457 break;
6458
6459 case AARCH64_OPND_BARRIER_PSB:
6460 val = parse_barrier_psb (&str, &(info->hint_option));
6461 if (val == PARSE_FAIL)
6462 goto failure;
6463 break;
6464
6465 default:
6466 as_fatal (_("unhandled operand code %d"), operands[i]);
6467 }
6468
6469 /* If we get here, this operand was successfully parsed. */
6470 inst.base.operands[i].present = 1;
6471 continue;
6472
6473 failure:
6474 /* The parse routine should already have set the error, but in case
6475 not, set a default one here. */
6476 if (! error_p ())
6477 set_default_error ();
6478
6479 if (! backtrack_pos)
6480 goto parse_operands_return;
6481
6482 {
6483 /* We reach here because this operand is marked as optional, and
6484 either no operand was supplied or the operand was supplied but it
6485 was syntactically incorrect. In the latter case we report an
6486 error. In the former case we perform a few more checks before
6487 dropping through to the code to insert the default operand. */
6488
6489 char *tmp = backtrack_pos;
6490 char endchar = END_OF_INSN;
6491
6492 if (i != (aarch64_num_of_operands (opcode) - 1))
6493 endchar = ',';
6494 skip_past_char (&tmp, ',');
6495
6496 if (*tmp != endchar)
6497 /* The user has supplied an operand in the wrong format. */
6498 goto parse_operands_return;
6499
6500 /* Make sure there is not a comma before the optional operand.
6501 For example the fifth operand of 'sys' is optional:
6502
6503 sys #0,c0,c0,#0, <--- wrong
6504 sys #0,c0,c0,#0 <--- correct. */
6505 if (comma_skipped_p && i && endchar == END_OF_INSN)
6506 {
6507 set_fatal_syntax_error
6508 (_("unexpected comma before the omitted optional operand"));
6509 goto parse_operands_return;
6510 }
6511 }
6512
6513 /* Reaching here means we are dealing with an optional operand that is
6514 omitted from the assembly line. */
6515 gas_assert (optional_operand_p (opcode, i));
6516 info->present = 0;
6517 process_omitted_operand (operands[i], opcode, i, info);
6518
6519 /* Try again, skipping the optional operand at backtrack_pos. */
6520 str = backtrack_pos;
6521 backtrack_pos = 0;
6522
6523 /* Clear any error record after the omitted optional operand has been
6524 successfully handled. */
6525 clear_error ();
6526 }
6527
6528 /* Check if we have parsed all the operands. */
6529 if (*str != '\0' && ! error_p ())
6530 {
6531 /* Set I to the index of the last present operand; this is
6532 for the purpose of diagnostics. */
6533 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6534 ;
6535 set_fatal_syntax_error
6536 (_("unexpected characters following instruction"));
6537 }
6538
6539 parse_operands_return:
6540
6541 if (error_p ())
6542 {
6543 DEBUG_TRACE ("parsing FAIL: %s - %s",
6544 operand_mismatch_kind_names[get_error_kind ()],
6545 get_error_message ());
6546 /* Record the operand error properly; this is useful when there
6547 are multiple instruction templates for a mnemonic name, so that
6548 later on, we can select the error that most closely describes
6549 the problem. */
6550 record_operand_error (opcode, i, get_error_kind (),
6551 get_error_message ());
6552 return FALSE;
6553 }
6554 else
6555 {
6556 DEBUG_TRACE ("parsing SUCCESS");
6557 return TRUE;
6558 }
6559 }
6560
/* It does some fix-up to provide some programmer friendly feature while
   keeping the libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register restricts the bit number to 0-31; reject anything
	     larger before relaxing the qualifier.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* Upgrade W to the X form that libopcodes expects; the bit number
	     alone determines which name the disassembler will print.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  /* Pool slot size follows the destination register size, except
	     LDRSW always loads a word from the pool.  */
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	/* The extended-register operand is either the 2nd or 3rd operand of
	   the templates that reach here.  */
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6668
/* Check for loads and stores that will cause unpredictable behavior.
   STR is the assembly text, used only for diagnostics.  Only warnings
   are issued here; the instruction is still assembled.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.
	 Single-register forms: opnds[0] is the transfer register, opnds[1]
	 is the address.  SP as base is explicitly exempt.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;
    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 Pair forms: opnds[0]/opnds[1] are the transfer registers and
	 opnds[2] is the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.  Bit 22 being set
	 distinguishes loads from stores in these pair encodings.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      /* It is unpredictable if the destination and status registers are the
	 same.  The INT_REG class checks on opnds[0] and opnds[1] restrict
	 this to the store-exclusive forms (status, transfer, address).
	 NOTE(review): for those forms opnds[2] is the address operand, so
	 "opnds[2].reg.regno" reads through the operand union; this relies on
	 reg.regno overlaying addr.base_regno -- confirm against the
	 aarch64_opnd_info layout in include/opcode/aarch64.h.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[1].reg.regno
	      || opnds[0].reg.regno == opnds[2].reg.regno))
	as_warn (_("unpredictable: identical transfer and status registers"
		   " --`%s'"),
		 str);

      break;

    default:
      break;
    }
}
6728
6729 /* A wrapper function to interface with libopcodes on encoding and
6730 record the error message if there is any.
6731
6732 Return TRUE on success; otherwise return FALSE. */
6733
6734 static bfd_boolean
6735 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6736 aarch64_insn *code)
6737 {
6738 aarch64_operand_error error_info;
6739 memset (&error_info, '\0', sizeof (error_info));
6740 error_info.kind = AARCH64_OPDE_NIL;
6741 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info)
6742 && !error_info.non_fatal)
6743 return TRUE;
6744
6745 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6746 record_operand_error_info (opcode, &error_info);
6747 return error_info.non_fatal;
6748 }
6749
6750 #ifdef DEBUG_AARCH64
6751 static inline void
6752 dump_opcode_operands (const aarch64_opcode *opcode)
6753 {
6754 int i = 0;
6755 while (opcode->operands[i] != AARCH64_OPND_NIL)
6756 {
6757 aarch64_verbose ("\t\t opnd%d: %s", i,
6758 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
6759 ? aarch64_get_operand_name (opcode->operands[i])
6760 : aarch64_get_operand_desc (opcode->operands[i]));
6761 ++i;
6762 }
6763 }
6764 #endif /* DEBUG_AARCH64 */
6765
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.  */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Look up the mnemonic; on success P is advanced past it.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed from the mnemonic across the reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; any of the three may record errors and
	 fail, in which case the next template is tried.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, TRUE);
	  return;
	}

      template = template->next;
      if (template != NULL)
	{
	  /* Start the next attempt from a clean instruction state.  */
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, FALSE);
}
6902
/* Various frobbings of labels and their addresses.  */

/* Called at the start of every source line; forget any label seen on the
   previous line so md_assemble only aligns labels from the current one.  */
void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
6910
/* Remember SYM as the most recent label (so md_assemble can realign it)
   and emit DWARF line information for it.  */
void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
6918
6919 int
6920 aarch64_data_in_code (void)
6921 {
6922 if (!strncmp (input_line_pointer + 1, "data:", 5))
6923 {
6924 *input_line_pointer = '/';
6925 input_line_pointer += 5;
6926 *input_line_pointer = 0;
6927 return 1;
6928 }
6929
6930 return 0;
6931 }
6932
/* Strip a trailing "/data" suffix from NAME in place (only when something
   precedes the suffix) and return NAME.  */
char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  /* "len > 5" ensures a bare "/data" is left alone.  */
  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
6943 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF creates a canonical entry; REGDEF_ALIAS creates one that is not
   the canonical spelling (last field FALSE).  REGNUM pastes prefix and
   number; REGSET16 covers registers 0-15, REGSET31 0-30, REGSET 0-31.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  x31/w31 are deliberately omitted: register 31 is
     entered explicitly below as SP/WSP and XZR/WZR.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  Only 16 exist, hence REGSET16.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7013
/* Spellings of the NZCV condition-flags operand.  Each letter contributes
   one bit: upper case selects 1, lower case 0, and B() packs them as
   N:Z:C:V from bit 3 down to bit 0.  All 16 case combinations are listed.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

/* The single-letter macros would wreak havoc below; retire them at once.  */
#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7051 \f
7052 /* MD interface: bits in the object file. */
7053
7054 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7055 for use in the a.out file, and stores them in the array pointed to by buf.
7056 This knows about the endian-ness of the target machine and does
7057 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7058 2 (short) and 4 (long) Floating numbers are put out as a series of
7059 LITTLENUMS (shorts, here at least). */
7060
7061 void
7062 md_number_to_chars (char *buf, valueT val, int n)
7063 {
7064 if (target_big_endian)
7065 number_to_chars_bigendian (buf, val, n);
7066 else
7067 number_to_chars_littleendian (buf, val, n);
7068 }
7069
/* MD interface: Sections.  */

/* Estimate the size of a frag before relaxing.  Assume everything fits in
   4 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Every AArch64 instruction is 4 bytes, so both the variable part and
     the returned size estimate are fixed at 4.  */
  fragp->fr_var = 4;
  return 4;
}
7081
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No target-specific rounding is required; keep the size as-is.  */
  return size;
}
7089
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding this frag must produce.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach a 4-byte instruction boundary; these are
     zero-filled and become part of the fixed portion of the frag.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating part is a single NOP pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
7147
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag mapping data, so nothing to do.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}
7160
7161 #else /* OBJ_ELF is defined. */
7162 void
7163 aarch64_init_frag (fragS * fragP, int max_chars)
7164 {
7165 /* Record a mapping symbol for alignment frags. We will delete this
7166 later if the alignment ends up empty. */
7167 if (!fragP->tc_frag_data.recorded)
7168 fragP->tc_frag_data.recorded = 1;
7169
7170 /* PR 21809: Do not set a mapping state for debug sections
7171 - it just confuses other tools. */
7172 if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
7173 return;
7174
7175 switch (fragP->fr_type)
7176 {
7177 case rs_align_test:
7178 case rs_fill:
7179 mapping_state_2 (MAP_DATA, max_chars);
7180 break;
7181 case rs_align:
7182 /* PR 20364: We can get alignment frags in code sections,
7183 so do not just assume that we should use the MAP_DATA state. */
7184 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
7185 break;
7186 case rs_align_code:
7187 mapping_state_2 (MAP_INSN, max_chars);
7188 break;
7189 default:
7190 break;
7191 }
7192 }
7193 \f
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* On entry the CFA is SP with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
7201 #endif /* OBJ_ELF */
7202
7203 /* Convert REGNAME to a DWARF-2 register number. */
7204
7205 int
7206 tc_aarch64_regname_to_dw2regnum (char *regname)
7207 {
7208 const reg_entry *reg = parse_reg (&regname);
7209 if (reg == NULL)
7210 return -1;
7211
7212 switch (reg->type)
7213 {
7214 case REG_TYPE_SP_32:
7215 case REG_TYPE_SP_64:
7216 case REG_TYPE_R_32:
7217 case REG_TYPE_R_64:
7218 return reg->number;
7219
7220 case REG_TYPE_FP_B:
7221 case REG_TYPE_FP_H:
7222 case REG_TYPE_FP_S:
7223 case REG_TYPE_FP_D:
7224 case REG_TYPE_FP_Q:
7225 return reg->number + 64;
7226
7227 default:
7228 break;
7229 }
7230 return -1;
7231 }
7232
/* Implement DWARF2_ADDR_SIZE.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses even though the arch is 64-bit.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7244
/* MD interface: Symbol and relocation handling.  */

/* Return the address within the segment that a PC-relative fixup is
   relative to.  For AArch64 PC-relative fixups applied to instructions
   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     A cross-segment target or a forced relocation means the linker will
     resolve it, so the base collapses to zero here.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
7267
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character prefix test before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
	{
	  /* A user-defined symbol of this name would conflict with the
	     one we are about to synthesize.  */
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
7293
7294 /* Return non-zero if the indicated VALUE has overflowed the maximum
7295 range expressible by a unsigned number with the indicated number of
7296 BITS. */
7297
7298 static bfd_boolean
7299 unsigned_overflow (valueT value, unsigned bits)
7300 {
7301 valueT lim;
7302 if (bits >= sizeof (valueT) * 8)
7303 return FALSE;
7304 lim = (valueT) 1 << bits;
7305 return (value >= lim);
7306 }
7307
7308
7309 /* Return non-zero if the indicated VALUE has overflowed the maximum
7310 range expressible by an signed number with the indicated number of
7311 BITS. */
7312
7313 static bfd_boolean
7314 signed_overflow (offsetT value, unsigned bits)
7315 {
7316 offsetT lim;
7317 if (bits >= sizeof (offsetT) * 8)
7318 return FALSE;
7319 lim = (offsetT) 1 << (bits - 1);
7320 return (value < -lim || value >= lim);
7321 }
7322
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; anything
     not listed has no such counterpart and the conversion is abandoned.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
    return FALSE;

  return TRUE;
}
7384
7385 /* Called by fix_insn to fix a MOV immediate alias instruction.
7386
7387 Operand for a generic move immediate instruction, which is an alias
7388 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
7389 a 32-bit/64-bit immediate value into general register. An assembler error
7390 shall result if the immediate cannot be created by a single one of these
7391 instructions. If there is a choice, then to ensure reversability an
7392 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
7393
7394 static void
7395 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
7396 {
7397 const aarch64_opcode *opcode;
7398
7399 /* Need to check if the destination is SP/ZR. The check has to be done
7400 before any aarch64_replace_opcode. */
7401 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
7402 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
7403
7404 instr->operands[1].imm.value = value;
7405 instr->operands[1].skip = 0;
7406
7407 if (try_mov_wide_p)
7408 {
7409 /* Try the MOVZ alias. */
7410 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
7411 aarch64_replace_opcode (instr, opcode);
7412 if (aarch64_opcode_encode (instr->opcode, instr,
7413 &instr->value, NULL, NULL))
7414 {
7415 put_aarch64_insn (buf, instr->value);
7416 return;
7417 }
7418 /* Try the MOVK alias. */
7419 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
7420 aarch64_replace_opcode (instr, opcode);
7421 if (aarch64_opcode_encode (instr->opcode, instr,
7422 &instr->value, NULL, NULL))
7423 {
7424 put_aarch64_insn (buf, instr->value);
7425 return;
7426 }
7427 }
7428
7429 if (try_mov_bitmask_p)
7430 {
7431 /* Try the ORR alias. */
7432 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
7433 aarch64_replace_opcode (instr, opcode);
7434 if (aarch64_opcode_encode (instr->opcode, instr,
7435 &instr->value, NULL, NULL))
7436 {
7437 put_aarch64_insn (buf, instr->value);
7438 return;
7439 }
7440 }
7441
7442 as_bad_where (fixP->fx_file, fixP->fx_line,
7443 _("immediate cannot be moved by a single instruction"));
7444 }
7445
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
      /* 16-bit immediate patched directly into the instruction word.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_svc_imm (value);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2 2 21111 111111
		  1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6 Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6 Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6 Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6 Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2 221111111111
		  1  098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12    Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12    Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12    Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12    Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only allowed when
	     no bits would be lost and the shifted value fits in imm12.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode via libopcodes, which rejects values
	 not representable as a logical immediate.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* The scaled 12-bit form rejected this offset but the unscaled
	     9-bit LDUR/STUR form accepted it.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7621
7622 /* Apply a fixup (fixP) to segment data, once it has been determined
7623 by our caller that we have all the info we need to fix it up.
7624
7625 Parameter valP is the pointer to the value of the bits. */
7626
void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  A fixup with no symbol
     and no PC-relativity is fully resolved here and emits no reloc.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  Each case either patches the instruction
     bytes in place (when the fix is done, or when the target uses REL so
     the addend lives in the section contents) or merely validates and
     leaves the reloc for the object file.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
         in fact we generate an error because we do not have a reloc
         for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* LDR (literal): 19-bit signed, word-scaled PC-relative offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* ADR: 21-bit signed byte offset, no scaling.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* B.cond: 19-bit signed, word-scaled offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* TBZ/TBNZ: 14-bit signed, word-scaled offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* B/BL: 26-bit signed, word-scaled offset (+-128MiB).  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* The MOVW family share one insertion path; SCALE selects which
       16-bit group of the value goes into the move-wide immediate.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
         aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
         aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale. */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn. */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  A negative value is encoded as
			 the bitwise NOT of its groups.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ. */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations. */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo reloc: resolved to the 32- or 64-bit variant per ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
         aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Pseudo reloc: resolved to the 32- or 64-bit variant per ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
         aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* TLS relocations: mark the symbol thread-local and hand the reloc
       to the linker untouched.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
         aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
         aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
         aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      /* Marker relocations for TLS descriptor call relaxation; nothing
	 to patch here.  */
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported. */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the struct aarch64_inst allocated for this fixup, if any.
     N.B. currently only a limited number of fix-up types actually use
     this field, so the impact on performance should be minimal.  */
  if (fixP->tc_fix_data.inst != NULL)
    free (fixP->tc_fix_data.inst);

  return;
}
8009
8010 /* Translate internal representation of relocation info to BFD target
8011 format. */
8012
8013 arelent *
8014 tc_gen_reloc (asection * section, fixS * fixp)
8015 {
8016 arelent *reloc;
8017 bfd_reloc_code_real_type code;
8018
8019 reloc = XNEW (arelent);
8020
8021 reloc->sym_ptr_ptr = XNEW (asymbol *);
8022 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8023 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8024
8025 if (fixp->fx_pcrel)
8026 {
8027 if (section->use_rela_p)
8028 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8029 else
8030 fixp->fx_offset = reloc->address;
8031 }
8032 reloc->addend = fixp->fx_offset;
8033
8034 code = fixp->fx_r_type;
8035 switch (code)
8036 {
8037 case BFD_RELOC_16:
8038 if (fixp->fx_pcrel)
8039 code = BFD_RELOC_16_PCREL;
8040 break;
8041
8042 case BFD_RELOC_32:
8043 if (fixp->fx_pcrel)
8044 code = BFD_RELOC_32_PCREL;
8045 break;
8046
8047 case BFD_RELOC_64:
8048 if (fixp->fx_pcrel)
8049 code = BFD_RELOC_64_PCREL;
8050 break;
8051
8052 default:
8053 break;
8054 }
8055
8056 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8057 if (reloc->howto == NULL)
8058 {
8059 as_bad_where (fixp->fx_file, fixp->fx_line,
8060 _
8061 ("cannot represent %s relocation in this object file format"),
8062 bfd_get_reloc_code_name (code));
8063 return NULL;
8064 }
8065
8066 return reloc;
8067 }
8068
8069 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8070
8071 void
8072 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8073 {
8074 bfd_reloc_code_real_type type;
8075 int pcrel = 0;
8076
8077 /* Pick a reloc.
8078 FIXME: @@ Should look at CPU word size. */
8079 switch (size)
8080 {
8081 case 1:
8082 type = BFD_RELOC_8;
8083 break;
8084 case 2:
8085 type = BFD_RELOC_16;
8086 break;
8087 case 4:
8088 type = BFD_RELOC_32;
8089 break;
8090 case 8:
8091 type = BFD_RELOC_64;
8092 break;
8093 default:
8094 as_bad (_("cannot do %u-byte relocation"), size);
8095 type = BFD_RELOC_UNUSED;
8096 break;
8097 }
8098
8099 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8100 }
8101
int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
         ilp32_p.  */
      return 0;

    /* GOT-, page- and TLS-related relocations cannot be resolved at
       assembly time; the linker must see them.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  /* Everything else follows the generic policy.  */
  return generic_force_reloc (fixp);
}
8197
8198 #ifdef OBJ_ELF
8199
8200 /* Implement md_after_parse_args. This is the earliest time we need to decide
8201 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8202
8203 void
8204 aarch64_after_parse_args (void)
8205 {
8206 if (aarch64_abi != AARCH64_ABI_NONE)
8207 return;
8208
8209 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8210 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8211 aarch64_abi = AARCH64_ABI_ILP32;
8212 else
8213 aarch64_abi = AARCH64_ABI_LP64;
8214 }
8215
8216 const char *
8217 elf64_aarch64_target_format (void)
8218 {
8219 if (strcmp (TARGET_OS, "cloudabi") == 0)
8220 {
8221 /* FIXME: What to do for ilp32_p ? */
8222 return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
8223 }
8224 if (target_big_endian)
8225 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8226 else
8227 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8228 }
8229
/* Target hook run on each symbol before it is written out; simply
   defers to the generic ELF symbol frobbing.  PUNTP is set by the
   callee when the symbol should be discarded.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8235 #endif
8236
8237 /* MD interface: Finalization. */
8238
8239 /* A good place to do this, although this was probably not intended
8240 for this kind of use. We need to dump the literal pool before
8241 references are made to a null symbol pointer. */
8242
8243 void
8244 aarch64_cleanup (void)
8245 {
8246 literal_pool *pool;
8247
8248 for (pool = list_of_pools; pool; pool = pool->next)
8249 {
8250 /* Put it at the end of the relevant section. */
8251 subseg_set (pool->section, pool->sub_section);
8252 s_ltorg (0);
8253 }
8254 }
8255
8256 #ifdef OBJ_ELF
8257 /* Remove any excess mapping symbols generated for alignment frags in
8258 SEC. We may have created a mapping symbol before a zero byte
8259 alignment; remove it if there's a mapping symbol after the
8260 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to scan for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
         this point.  But if this was variable-sized to start with,
         there will be a fixed-size frag after it.  So don't handle
         next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly at the start of NEXT; walk forward over empty
	 frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
	         one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
	         it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8320 #endif
8321
8322 /* Adjust the symbol table. */
8323
/* Target hook: final symbol-table adjustments before writing the
   object file.  A no-op for non-ELF targets.  */
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8334
/* Insert KEY/VALUE into TABLE.  A failure here means a duplicate key in
   one of the build-time operand/register tables, i.e. an internal error;
   continuing with a corrupt table would only misassemble later, so abort
   instead of printing to stdout and carrying on.  */
static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *hash_err;

  hash_err = hash_insert (table, key, value);
  if (hash_err)
    as_fatal (_("internal error: can't hash `%s': %s"), key, hash_err);
}
8344
8345 static void
8346 fill_instruction_hash_table (void)
8347 {
8348 aarch64_opcode *opcode = aarch64_opcode_table;
8349
8350 while (opcode->name != NULL)
8351 {
8352 templates *templ, *new_templ;
8353 templ = hash_find (aarch64_ops_hsh, opcode->name);
8354
8355 new_templ = XNEW (templates);
8356 new_templ->opcode = opcode;
8357 new_templ->next = NULL;
8358
8359 if (!templ)
8360 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8361 else
8362 {
8363 new_templ->next = templ->next;
8364 templ->next = new_templ;
8365 }
8366 ++opcode;
8367 }
8368 }
8369
/* Copy at most NUM characters of SRC into DST, upper-casing each one,
   stopping early at the source terminator.  DST is always
   NUL-terminated on return.  The index is size_t to match NUM; the
   original `unsigned int' counter could never cover a size_t range.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t i;
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
8378
8379 /* Assume STR point to a lower-case string, allocate, convert and return
8380 the corresponding upper-case string. */
8381 static inline const char*
8382 get_upper_str (const char *str)
8383 {
8384 char *ret;
8385 size_t len = strlen (str);
8386 ret = XNEWVEC (char, len + 1);
8387 convert_to_upper (ret, str, len);
8388 return ret;
8389 }
8390
8391 /* MD interface: Initialization. */
8392
8393 void
8394 md_begin (void)
8395 {
8396 unsigned mach;
8397 unsigned int i;
8398
8399 if ((aarch64_ops_hsh = hash_new ()) == NULL
8400 || (aarch64_cond_hsh = hash_new ()) == NULL
8401 || (aarch64_shift_hsh = hash_new ()) == NULL
8402 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8403 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8404 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8405 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8406 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8407 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8408 || (aarch64_reg_hsh = hash_new ()) == NULL
8409 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8410 || (aarch64_nzcv_hsh = hash_new ()) == NULL
8411 || (aarch64_pldop_hsh = hash_new ()) == NULL
8412 || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8413 as_fatal (_("virtual memory exhausted"));
8414
8415 fill_instruction_hash_table ();
8416
8417 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8418 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8419 (void *) (aarch64_sys_regs + i));
8420
8421 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8422 checked_hash_insert (aarch64_pstatefield_hsh,
8423 aarch64_pstatefields[i].name,
8424 (void *) (aarch64_pstatefields + i));
8425
8426 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8427 checked_hash_insert (aarch64_sys_regs_ic_hsh,
8428 aarch64_sys_regs_ic[i].name,
8429 (void *) (aarch64_sys_regs_ic + i));
8430
8431 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8432 checked_hash_insert (aarch64_sys_regs_dc_hsh,
8433 aarch64_sys_regs_dc[i].name,
8434 (void *) (aarch64_sys_regs_dc + i));
8435
8436 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8437 checked_hash_insert (aarch64_sys_regs_at_hsh,
8438 aarch64_sys_regs_at[i].name,
8439 (void *) (aarch64_sys_regs_at + i));
8440
8441 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8442 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8443 aarch64_sys_regs_tlbi[i].name,
8444 (void *) (aarch64_sys_regs_tlbi + i));
8445
8446 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8447 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8448 (void *) (reg_names + i));
8449
8450 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8451 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8452 (void *) (nzcv_names + i));
8453
8454 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8455 {
8456 const char *name = aarch64_operand_modifiers[i].name;
8457 checked_hash_insert (aarch64_shift_hsh, name,
8458 (void *) (aarch64_operand_modifiers + i));
8459 /* Also hash the name in the upper case. */
8460 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8461 (void *) (aarch64_operand_modifiers + i));
8462 }
8463
8464 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8465 {
8466 unsigned int j;
8467 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8468 the same condition code. */
8469 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8470 {
8471 const char *name = aarch64_conds[i].names[j];
8472 if (name == NULL)
8473 break;
8474 checked_hash_insert (aarch64_cond_hsh, name,
8475 (void *) (aarch64_conds + i));
8476 /* Also hash the name in the upper case. */
8477 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8478 (void *) (aarch64_conds + i));
8479 }
8480 }
8481
8482 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8483 {
8484 const char *name = aarch64_barrier_options[i].name;
8485 /* Skip xx00 - the unallocated values of option. */
8486 if ((i & 0x3) == 0)
8487 continue;
8488 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8489 (void *) (aarch64_barrier_options + i));
8490 /* Also hash the name in the upper case. */
8491 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8492 (void *) (aarch64_barrier_options + i));
8493 }
8494
8495 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8496 {
8497 const char* name = aarch64_prfops[i].name;
8498 /* Skip the unallocated hint encodings. */
8499 if (name == NULL)
8500 continue;
8501 checked_hash_insert (aarch64_pldop_hsh, name,
8502 (void *) (aarch64_prfops + i));
8503 /* Also hash the name in the upper case. */
8504 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8505 (void *) (aarch64_prfops + i));
8506 }
8507
8508 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8509 {
8510 const char* name = aarch64_hint_options[i].name;
8511
8512 checked_hash_insert (aarch64_hint_opt_hsh, name,
8513 (void *) (aarch64_hint_options + i));
8514 /* Also hash the name in the upper case. */
8515 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8516 (void *) (aarch64_hint_options + i));
8517 }
8518
8519 /* Set the cpu variant based on the command-line options. */
8520 if (!mcpu_cpu_opt)
8521 mcpu_cpu_opt = march_cpu_opt;
8522
8523 if (!mcpu_cpu_opt)
8524 mcpu_cpu_opt = &cpu_default;
8525
8526 cpu_variant = *mcpu_cpu_opt;
8527
8528 /* Record the CPU type. */
8529 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8530
8531 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8532 }
8533
8534 /* Command line processing. */
8535
/* Short options recognised by gas for this target: -m<...>.  */
const char *md_shortopts = "m:";

/* -EB/-EL are only offered where they make sense: both for bi-endian
   configurations, otherwise only the one matching the configured
   default endianness.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8560
/* Description of a simple boolean-style command-line option: matching
   OPTION sets *VAR to VALUE.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
8569
/* Table of the simple flag options this target accepts; terminated by
   an all-NULL sentinel entry.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8583
/* An entry in the -mcpu= table: CPU name plus the feature set it
   implies.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
8592
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Each entry pairs a base architecture with the
   extra feature bits the CPU implements on top of it.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
8647
/* An entry in the -march= table: architecture name and its feature
   set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {NULL, AARCH64_ARCH_NONE}
};
8665
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;			/* Name written after '+'.  */
  const aarch64_feature_set value;	/* Feature bits the extension enables.  */
  const aarch64_feature_set require;	/* Feature dependencies.  */
};

/* Extensions recognised in "+ext" / "+noext" suffixes.  REQUIRE lists
   prerequisites: enabling an extension also enables its requirements
   (aarch64_feature_enable_set) and disabling a feature disables
   everything that requires it (aarch64_feature_disable_set).  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO
					 | AARCH64_FEATURE_AES
					 | AARCH64_FEATURE_SHA2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2
					 | AARCH64_FEATURE_SHA3, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
8724
/* An option that takes a sub-argument (e.g. -mcpu=NAME); FUNC is
   handed the text after the matched prefix.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
8732
8733 /* Transitive closure of features depending on set. */
8734 static aarch64_feature_set
8735 aarch64_feature_disable_set (aarch64_feature_set set)
8736 {
8737 const struct aarch64_option_cpu_value_table *opt;
8738 aarch64_feature_set prev = 0;
8739
8740 while (prev != set) {
8741 prev = set;
8742 for (opt = aarch64_features; opt->name != NULL; opt++)
8743 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8744 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8745 }
8746 return set;
8747 }
8748
8749 /* Transitive closure of dependencies of set. */
8750 static aarch64_feature_set
8751 aarch64_feature_enable_set (aarch64_feature_set set)
8752 {
8753 const struct aarch64_option_cpu_value_table *opt;
8754 aarch64_feature_set prev = 0;
8755
8756 while (prev != set) {
8757 prev = set;
8758 for (opt = aarch64_features; opt->name != NULL; opt++)
8759 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8760 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8761 }
8762 return set;
8763 }
8764
8765 static int
8766 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8767 bfd_boolean ext_only)
8768 {
8769 /* We insist on extensions being added before being removed. We achieve
8770 this by using the ADDING_VALUE variable to indicate whether we are
8771 adding an extension (1) or removing it (0) and only allowing it to
8772 change in the order -1 -> 1 -> 0. */
8773 int adding_value = -1;
8774 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8775
8776 /* Copy the feature set, so that we can modify it. */
8777 *ext_set = **opt_p;
8778 *opt_p = ext_set;
8779
8780 while (str != NULL && *str != 0)
8781 {
8782 const struct aarch64_option_cpu_value_table *opt;
8783 const char *ext = NULL;
8784 int optlen;
8785
8786 if (!ext_only)
8787 {
8788 if (*str != '+')
8789 {
8790 as_bad (_("invalid architectural extension"));
8791 return 0;
8792 }
8793
8794 ext = strchr (++str, '+');
8795 }
8796
8797 if (ext != NULL)
8798 optlen = ext - str;
8799 else
8800 optlen = strlen (str);
8801
8802 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
8803 {
8804 if (adding_value != 0)
8805 adding_value = 0;
8806 optlen -= 2;
8807 str += 2;
8808 }
8809 else if (optlen > 0)
8810 {
8811 if (adding_value == -1)
8812 adding_value = 1;
8813 else if (adding_value != 1)
8814 {
8815 as_bad (_("must specify extensions to add before specifying "
8816 "those to remove"));
8817 return FALSE;
8818 }
8819 }
8820
8821 if (optlen == 0)
8822 {
8823 as_bad (_("missing architectural extension"));
8824 return 0;
8825 }
8826
8827 gas_assert (adding_value != -1);
8828
8829 for (opt = aarch64_features; opt->name != NULL; opt++)
8830 if (strncmp (opt->name, str, optlen) == 0)
8831 {
8832 aarch64_feature_set set;
8833
8834 /* Add or remove the extension. */
8835 if (adding_value)
8836 {
8837 set = aarch64_feature_enable_set (opt->value);
8838 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
8839 }
8840 else
8841 {
8842 set = aarch64_feature_disable_set (opt->value);
8843 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
8844 }
8845 break;
8846 }
8847
8848 if (opt->name == NULL)
8849 {
8850 as_bad (_("unknown architectural extension `%s'"), str);
8851 return 0;
8852 }
8853
8854 str = ext;
8855 };
8856
8857 return 1;
8858 }
8859
8860 static int
8861 aarch64_parse_cpu (const char *str)
8862 {
8863 const struct aarch64_cpu_option_table *opt;
8864 const char *ext = strchr (str, '+');
8865 size_t optlen;
8866
8867 if (ext != NULL)
8868 optlen = ext - str;
8869 else
8870 optlen = strlen (str);
8871
8872 if (optlen == 0)
8873 {
8874 as_bad (_("missing cpu name `%s'"), str);
8875 return 0;
8876 }
8877
8878 for (opt = aarch64_cpus; opt->name != NULL; opt++)
8879 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8880 {
8881 mcpu_cpu_opt = &opt->value;
8882 if (ext != NULL)
8883 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
8884
8885 return 1;
8886 }
8887
8888 as_bad (_("unknown cpu `%s'"), str);
8889 return 0;
8890 }
8891
8892 static int
8893 aarch64_parse_arch (const char *str)
8894 {
8895 const struct aarch64_arch_option_table *opt;
8896 const char *ext = strchr (str, '+');
8897 size_t optlen;
8898
8899 if (ext != NULL)
8900 optlen = ext - str;
8901 else
8902 optlen = strlen (str);
8903
8904 if (optlen == 0)
8905 {
8906 as_bad (_("missing architecture name `%s'"), str);
8907 return 0;
8908 }
8909
8910 for (opt = aarch64_archs; opt->name != NULL; opt++)
8911 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8912 {
8913 march_cpu_opt = &opt->value;
8914 if (ext != NULL)
8915 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
8916
8917 return 1;
8918 }
8919
8920 as_bad (_("unknown architecture `%s'\n"), str);
8921 return 0;
8922 }
8923
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;			/* ABI name as given to -mabi=.  */
  enum aarch64_abi_type value;		/* Corresponding internal ABI code.  */
};

/* ABIs accepted by -mabi= (ELF only).  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
8935
8936 static int
8937 aarch64_parse_abi (const char *str)
8938 {
8939 unsigned int i;
8940
8941 if (str[0] == '\0')
8942 {
8943 as_bad (_("missing abi name `%s'"), str);
8944 return 0;
8945 }
8946
8947 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
8948 if (strcmp (str, aarch64_abis[i].name) == 0)
8949 {
8950 aarch64_abi = aarch64_abis[i].value;
8951 return 1;
8952 }
8953
8954 as_bad (_("unknown abi `%s'\n"), str);
8955 return 0;
8956 }
8957
/* Options with a sub-argument; the matched prefix is stripped and the
   remainder passed to FUNC (see md_parse_option).  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
8969
8970 int
8971 md_parse_option (int c, const char *arg)
8972 {
8973 struct aarch64_option_table *opt;
8974 struct aarch64_long_option_table *lopt;
8975
8976 switch (c)
8977 {
8978 #ifdef OPTION_EB
8979 case OPTION_EB:
8980 target_big_endian = 1;
8981 break;
8982 #endif
8983
8984 #ifdef OPTION_EL
8985 case OPTION_EL:
8986 target_big_endian = 0;
8987 break;
8988 #endif
8989
8990 case 'a':
8991 /* Listing option. Just ignore these, we don't support additional
8992 ones. */
8993 return 0;
8994
8995 default:
8996 for (opt = aarch64_opts; opt->option != NULL; opt++)
8997 {
8998 if (c == opt->option[0]
8999 && ((arg == NULL && opt->option[1] == 0)
9000 || streq (arg, opt->option + 1)))
9001 {
9002 /* If the option is deprecated, tell the user. */
9003 if (opt->deprecated != NULL)
9004 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
9005 arg ? arg : "", _(opt->deprecated));
9006
9007 if (opt->var != NULL)
9008 *opt->var = opt->value;
9009
9010 return 1;
9011 }
9012 }
9013
9014 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9015 {
9016 /* These options are expected to have an argument. */
9017 if (c == lopt->option[0]
9018 && arg != NULL
9019 && strncmp (arg, lopt->option + 1,
9020 strlen (lopt->option + 1)) == 0)
9021 {
9022 /* If the option is deprecated, tell the user. */
9023 if (lopt->deprecated != NULL)
9024 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
9025 _(lopt->deprecated));
9026
9027 /* Call the sup-option parser. */
9028 return lopt->func (arg + strlen (lopt->option) - 1);
9029 }
9030 }
9031
9032 return 0;
9033 }
9034
9035 return 1;
9036 }
9037
/* Target hook: print the AArch64-specific option summary to FP for
   --help output.  */
void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  /* Simple flag options that carry help text.  */
  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  /* Options taking a sub-argument.  */
  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
 -EB assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
 -EL assemble code for a little-endian cpu\n"));
#endif
}
9064
9065 /* Parse a .cpu directive. */
9066
9067 static void
9068 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9069 {
9070 const struct aarch64_cpu_option_table *opt;
9071 char saved_char;
9072 char *name;
9073 char *ext;
9074 size_t optlen;
9075
9076 name = input_line_pointer;
9077 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9078 input_line_pointer++;
9079 saved_char = *input_line_pointer;
9080 *input_line_pointer = 0;
9081
9082 ext = strchr (name, '+');
9083
9084 if (ext != NULL)
9085 optlen = ext - name;
9086 else
9087 optlen = strlen (name);
9088
9089 /* Skip the first "all" entry. */
9090 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9091 if (strlen (opt->name) == optlen
9092 && strncmp (name, opt->name, optlen) == 0)
9093 {
9094 mcpu_cpu_opt = &opt->value;
9095 if (ext != NULL)
9096 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9097 return;
9098
9099 cpu_variant = *mcpu_cpu_opt;
9100
9101 *input_line_pointer = saved_char;
9102 demand_empty_rest_of_line ();
9103 return;
9104 }
9105 as_bad (_("unknown cpu `%s'"), name);
9106 *input_line_pointer = saved_char;
9107 ignore_rest_of_line ();
9108 }
9109
9110
9111 /* Parse a .arch directive. */
9112
9113 static void
9114 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9115 {
9116 const struct aarch64_arch_option_table *opt;
9117 char saved_char;
9118 char *name;
9119 char *ext;
9120 size_t optlen;
9121
9122 name = input_line_pointer;
9123 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9124 input_line_pointer++;
9125 saved_char = *input_line_pointer;
9126 *input_line_pointer = 0;
9127
9128 ext = strchr (name, '+');
9129
9130 if (ext != NULL)
9131 optlen = ext - name;
9132 else
9133 optlen = strlen (name);
9134
9135 /* Skip the first "all" entry. */
9136 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9137 if (strlen (opt->name) == optlen
9138 && strncmp (name, opt->name, optlen) == 0)
9139 {
9140 mcpu_cpu_opt = &opt->value;
9141 if (ext != NULL)
9142 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9143 return;
9144
9145 cpu_variant = *mcpu_cpu_opt;
9146
9147 *input_line_pointer = saved_char;
9148 demand_empty_rest_of_line ();
9149 return;
9150 }
9151
9152 as_bad (_("unknown architecture `%s'\n"), name);
9153 *input_line_pointer = saved_char;
9154 ignore_rest_of_line ();
9155 }
9156
9157 /* Parse a .arch_extension directive. */
9158
9159 static void
9160 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9161 {
9162 char saved_char;
9163 char *ext = input_line_pointer;;
9164
9165 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9166 input_line_pointer++;
9167 saved_char = *input_line_pointer;
9168 *input_line_pointer = 0;
9169
9170 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9171 return;
9172
9173 cpu_variant = *mcpu_cpu_opt;
9174
9175 *input_line_pointer = saved_char;
9176 demand_empty_rest_of_line ();
9177 }
9178
/* Copy symbol information.  */

/* Propagate the AArch64-specific symbol flag word from SRC to DEST
   (used when gas clones or aliases a symbol).  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}