1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2021 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
106
107 struct vector_type_el
108 {
109 enum vector_el_type type;
110 unsigned char defined;
111 unsigned width;
112 int64_t index;
113 };
114
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
116
117 struct reloc
118 {
119 bfd_reloc_code_real_type type;
120 expressionS exp;
121 int pc_rel;
122 enum aarch64_opnd opnd;
123 uint32_t flags;
124 unsigned need_libopcodes_p : 1;
125 };
126
127 struct aarch64_instruction
128 {
129 /* libopcodes structure for instruction intermediate representation. */
130 aarch64_inst base;
131 /* Record assembly errors found during the parsing. */
132 struct
133 {
134 enum aarch64_operand_error_kind kind;
135 const char *error;
136 } parsing_error;
137 /* The condition that appears in the assembly line. */
138 int cond;
139 /* Relocation information (including the GAS internal fixup). */
140 struct reloc reloc;
141 /* Need to generate an immediate in the literal pool. */
142 unsigned gen_lit_pool : 1;
143 };
144
145 typedef struct aarch64_instruction aarch64_instruction;
146
147 static aarch64_instruction inst;
148
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s); in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
183 static inline bfd_boolean
184 error_p (void)
185 {
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188
189 static inline const char *
190 get_error_message (void)
191 {
192 return inst.parsing_error.error;
193 }
194
195 static inline enum aarch64_operand_error_kind
196 get_error_kind (void)
197 {
198 return inst.parsing_error.kind;
199 }
200
201 static inline void
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
203 {
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
206 }
207
208 static inline void
209 set_recoverable_error (const char *error)
210 {
211 set_error (AARCH64_OPDE_RECOVERABLE, error);
212 }
213
214 /* Use the DESC field of the corresponding aarch64_operand entry to compose
215 the error message. */
216 static inline void
217 set_default_error (void)
218 {
219 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
220 }
221
222 static inline void
223 set_syntax_error (const char *error)
224 {
225 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
226 }
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
235 static inline void
236 set_fatal_syntax_error (const char *error)
237 {
238 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
239 }
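/* A minimal usage sketch (illustrative only; parse_something below is a
   hypothetical helper, not a function in this file):

     clear_error ();
     if (!parse_something (&str))
       {
         if (! error_p ())
           set_syntax_error (_("operand expected"));
         return PARSE_FAIL;
       }

   The saved message is only reported to the user if every template tried
   for the mnemonic fails.  */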
240 \f
241 /* Return value for certain parsers when the parsing fails; those parsers
242 return the information of the parsed result, e.g. register number, on
243 success. */
244 #define PARSE_FAIL -1
245
246 /* This is an invalid condition code that means no conditional field is
247 present. */
248 #define COND_ALWAYS 0x10
249
250 typedef struct
251 {
252 const char *template;
253 uint32_t value;
254 } asm_nzcv;
255
256 struct reloc_entry
257 {
258 char *name;
259 bfd_reloc_code_real_type reloc;
260 };
261
262 /* Macros to define the register types and masks for the purpose
263 of parsing. */
264
265 #undef AARCH64_REG_TYPES
266 #define AARCH64_REG_TYPES \
267 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
268 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
269 BASIC_REG_TYPE(SP_32) /* wsp */ \
270 BASIC_REG_TYPE(SP_64) /* sp */ \
271 BASIC_REG_TYPE(Z_32) /* wzr */ \
272 BASIC_REG_TYPE(Z_64) /* xzr */ \
273 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
274 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
275 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
276 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
277 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
278 BASIC_REG_TYPE(VN) /* v[0-31] */ \
279 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
280 BASIC_REG_TYPE(PN) /* p[0-15] */ \
281 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
282 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
283 /* Typecheck: same, plus SVE registers. */ \
284 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
285 | REG_TYPE(ZN)) \
286 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
287 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
289 /* Typecheck: same, plus SVE registers. */ \
290 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
291 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
292 | REG_TYPE(ZN)) \
293 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
294 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
295 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
296 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
297 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
298 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
299 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
300 /* Typecheck: any [BHSDQ]P FP. */ \
301 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
302 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
303 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
304 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
306 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
307 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
308 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
309 be used for SVE instructions, since Zn and Pn are valid symbols \
310 in other contexts. */ \
311 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
313 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
314 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
315 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
316 | REG_TYPE(ZN) | REG_TYPE(PN)) \
317 /* Any integer register; used for error messages only. */ \
318 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
320 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
321 /* Pseudo type to mark the end of the enumerator sequence. */ \
322 BASIC_REG_TYPE(MAX)
323
324 #undef BASIC_REG_TYPE
325 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
326 #undef MULTI_REG_TYPE
327 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
328
329 /* Register type enumerators. */
330 typedef enum aarch64_reg_type_
331 {
332 /* A list of REG_TYPE_*. */
333 AARCH64_REG_TYPES
334 } aarch64_reg_type;
335
336 #undef BASIC_REG_TYPE
337 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
338 #undef REG_TYPE
339 #define REG_TYPE(T) (1 << REG_TYPE_##T)
340 #undef MULTI_REG_TYPE
341 #define MULTI_REG_TYPE(T,V) V,
342
343 /* Structure for a hash table entry for a register. */
344 typedef struct
345 {
346 const char *name;
347 unsigned char number;
348 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
349 unsigned char builtin;
350 } reg_entry;
351
352 /* Values indexed by aarch64_reg_type to assist the type checking. */
353 static const unsigned reg_type_masks[] =
354 {
355 AARCH64_REG_TYPES
356 };
357
358 #undef BASIC_REG_TYPE
359 #undef REG_TYPE
360 #undef MULTI_REG_TYPE
361 #undef AARCH64_REG_TYPES
362
363 /* Diagnostics used when we don't get a register of the expected type.
364 Note: this has to be synchronized with the aarch64_reg_type definitions
365 above. */
366 static const char *
367 get_reg_expected_msg (aarch64_reg_type reg_type)
368 {
369 const char *msg;
370
371 switch (reg_type)
372 {
373 case REG_TYPE_R_32:
374 msg = N_("integer 32-bit register expected");
375 break;
376 case REG_TYPE_R_64:
377 msg = N_("integer 64-bit register expected");
378 break;
379 case REG_TYPE_R_N:
380 msg = N_("integer register expected");
381 break;
382 case REG_TYPE_R64_SP:
383 msg = N_("64-bit integer or SP register expected");
384 break;
385 case REG_TYPE_SVE_BASE:
386 msg = N_("base register expected");
387 break;
388 case REG_TYPE_R_Z:
389 msg = N_("integer or zero register expected");
390 break;
391 case REG_TYPE_SVE_OFFSET:
392 msg = N_("offset register expected");
393 break;
394 case REG_TYPE_R_SP:
395 msg = N_("integer or SP register expected");
396 break;
397 case REG_TYPE_R_Z_SP:
398 msg = N_("integer, zero or SP register expected");
399 break;
400 case REG_TYPE_FP_B:
401 msg = N_("8-bit SIMD scalar register expected");
402 break;
403 case REG_TYPE_FP_H:
404 msg = N_("16-bit SIMD scalar or floating-point half precision "
405 "register expected");
406 break;
407 case REG_TYPE_FP_S:
408 msg = N_("32-bit SIMD scalar or floating-point single precision "
409 "register expected");
410 break;
411 case REG_TYPE_FP_D:
412 msg = N_("64-bit SIMD scalar or floating-point double precision "
413 "register expected");
414 break;
415 case REG_TYPE_FP_Q:
416 msg = N_("128-bit SIMD scalar or floating-point quad precision "
417 "register expected");
418 break;
419 case REG_TYPE_R_Z_BHSDQ_V:
420 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
421 msg = N_("register expected");
422 break;
423 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
424 msg = N_("SIMD scalar or floating-point register expected");
425 break;
426 case REG_TYPE_VN: /* any V reg */
427 msg = N_("vector register expected");
428 break;
429 case REG_TYPE_ZN:
430 msg = N_("SVE vector register expected");
431 break;
432 case REG_TYPE_PN:
433 msg = N_("SVE predicate register expected");
434 break;
435 default:
436 as_fatal (_("invalid register type %d"), reg_type);
437 }
438 return msg;
439 }
440
441 /* Some well known registers that we refer to directly elsewhere. */
442 #define REG_SP 31
443 #define REG_ZR 31
444
445 /* Instructions take 4 bytes in the object file. */
446 #define INSN_SIZE 4
447
448 static htab_t aarch64_ops_hsh;
449 static htab_t aarch64_cond_hsh;
450 static htab_t aarch64_shift_hsh;
451 static htab_t aarch64_sys_regs_hsh;
452 static htab_t aarch64_pstatefield_hsh;
453 static htab_t aarch64_sys_regs_ic_hsh;
454 static htab_t aarch64_sys_regs_dc_hsh;
455 static htab_t aarch64_sys_regs_at_hsh;
456 static htab_t aarch64_sys_regs_tlbi_hsh;
457 static htab_t aarch64_sys_regs_sr_hsh;
458 static htab_t aarch64_reg_hsh;
459 static htab_t aarch64_barrier_opt_hsh;
460 static htab_t aarch64_nzcv_hsh;
461 static htab_t aarch64_pldop_hsh;
462 static htab_t aarch64_hint_opt_hsh;
463
464 /* Stuff needed to resolve the label ambiguity
465 As:
466 ...
467 label: <insn>
468 may differ from:
469 ...
470 label:
471 <insn> */
472
473 static symbolS *last_label_seen;
474
475 /* Literal pool structure. Held on a per-section
476 and per-sub-section basis. */
477
478 #define MAX_LITERAL_POOL_SIZE 1024
479 typedef struct literal_expression
480 {
481 expressionS exp;
482 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
483 LITTLENUM_TYPE * bignum;
484 } literal_expression;
485
486 typedef struct literal_pool
487 {
488 literal_expression literals[MAX_LITERAL_POOL_SIZE];
489 unsigned int next_free_entry;
490 unsigned int id;
491 symbolS *symbol;
492 segT section;
493 subsegT sub_section;
494 int size;
495 struct literal_pool *next;
496 } literal_pool;
497
498 /* Pointer to a linked list of literal pools. */
499 static literal_pool *list_of_pools = NULL;
500 \f
501 /* Pure syntax. */
502
503 /* This array holds the chars that always start a comment. If the
504 pre-processor is disabled, these aren't very useful. */
505 const char comment_chars[] = "";
506
507 /* This array holds the chars that only start a comment at the beginning of
508 a line. If the line seems to have the form '# 123 filename'
509 .line and .file directives will appear in the pre-processed output. */
510 /* Note that input_file.c hand checks for '#' at the beginning of the
511 first line of the input file. This is because the compiler outputs
512 #NO_APP at the beginning of its output. */
513 /* Also note that comments like this one will always work. */
514 const char line_comment_chars[] = "#";
515
516 const char line_separator_chars[] = ";";
517
518 /* Chars that can be used to separate mant
519 from exp in floating point numbers. */
520 const char EXP_CHARS[] = "eE";
521
522 /* Chars that mean this number is a floating point constant. */
523 /* As in 0f12.456 */
524 /* or 0d1.2345e12 */
525
526 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhH";
527
528 /* Prefix character that indicates the start of an immediate value. */
529 #define is_immediate_prefix(C) ((C) == '#')
530
531 /* Separator character handling. */
532
533 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
534
535 static inline bfd_boolean
536 skip_past_char (char **str, char c)
537 {
538 if (**str == c)
539 {
540 (*str)++;
541 return TRUE;
542 }
543 else
544 return FALSE;
545 }
546
547 #define skip_past_comma(str) skip_past_char (str, ',')
548
549 /* Arithmetic expressions (possibly involving symbols). */
550
551 static bfd_boolean in_my_get_expression_p = FALSE;
552
553 /* Third argument to my_get_expression. */
554 #define GE_NO_PREFIX 0
555 #define GE_OPT_PREFIX 1
556
557 /* Return TRUE if the string pointed to by *STR is successfully parsed
558 as a valid expression; *EP will be filled with the information of
559 such an expression. Otherwise return FALSE. */
560
561 static bfd_boolean
562 my_get_expression (expressionS * ep, char **str, int prefix_mode,
563 int reject_absent)
564 {
565 char *save_in;
566 segT seg;
567 int prefix_present_p = 0;
568
569 switch (prefix_mode)
570 {
571 case GE_NO_PREFIX:
572 break;
573 case GE_OPT_PREFIX:
574 if (is_immediate_prefix (**str))
575 {
576 (*str)++;
577 prefix_present_p = 1;
578 }
579 break;
580 default:
581 abort ();
582 }
583
584 memset (ep, 0, sizeof (expressionS));
585
586 save_in = input_line_pointer;
587 input_line_pointer = *str;
588 in_my_get_expression_p = TRUE;
589 seg = expression (ep);
590 in_my_get_expression_p = FALSE;
591
592 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
593 {
594 /* We found a bad expression in md_operand(). */
595 *str = input_line_pointer;
596 input_line_pointer = save_in;
597 if (prefix_present_p && ! error_p ())
598 set_fatal_syntax_error (_("bad expression"));
599 else
600 set_first_syntax_error (_("bad expression"));
601 return FALSE;
602 }
603
604 #ifdef OBJ_AOUT
605 if (seg != absolute_section
606 && seg != text_section
607 && seg != data_section
608 && seg != bss_section && seg != undefined_section)
609 {
610 set_syntax_error (_("bad segment"));
611 *str = input_line_pointer;
612 input_line_pointer = save_in;
613 return FALSE;
614 }
615 #else
616 (void) seg;
617 #endif
618
619 *str = input_line_pointer;
620 input_line_pointer = save_in;
621 return TRUE;
622 }
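/* Usage sketch (illustrative only): parsing an optionally '#'-prefixed
   immediate such as "#16" or "16":

     expressionS exp;
     if (my_get_expression (&exp, &str, GE_OPT_PREFIX, 1)
         && exp.X_op == O_constant)
       ...use exp.X_add_number...

   With GE_NO_PREFIX no '#' prefix is skipped; that mode is used where no
   immediate prefix is expected, e.g. the element index parsing in
   parse_typed_reg below.  */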
623
624 /* Turn a string in input_line_pointer into a floating point constant
625 of type TYPE, and store the appropriate bytes in *LITP. The number
626 of LITTLENUMS emitted is stored in *SIZEP. An error message is
627 returned, or NULL on OK. */
628
629 const char *
630 md_atof (int type, char *litP, int *sizeP)
631 {
632 /* If this is a bfloat16 type, then parse it slightly differently -
633 as it does not follow the IEEE standard exactly. */
634 if (type == 'b')
635 {
636 char * t;
637 LITTLENUM_TYPE words[MAX_LITTLENUMS];
638 FLONUM_TYPE generic_float;
639
640 t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float);
641
642 if (t)
643 input_line_pointer = t;
644 else
645 return _("invalid floating point number");
646
647 switch (generic_float.sign)
648 {
649 /* Is +Inf. */
650 case 'P':
651 words[0] = 0x7f80;
652 break;
653
654 /* Is -Inf. */
655 case 'N':
656 words[0] = 0xff80;
657 break;
658
659 /* Is NaN. */
660 /* bfloat16 has two types of NaN - quiet and signalling.
661 Quiet NaNs have bit[6] == 1 && fraction != 0, whereas
662 signalling NaNs have bit[6] == 0 && fraction != 0.
663 We chose this specific encoding as it is the same form
664 as used by other IEEE 754 encodings in GAS. */
665 case 0:
666 words[0] = 0x7fff;
667 break;
668
669 default:
670 break;
671 }
672
673 *sizeP = 2;
674
675 md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));
676
677 return NULL;
678 }
679
680 return ieee_md_atof (type, litP, sizeP, target_big_endian);
681 }
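/* For example, ".bfloat16 1.0" reaches md_atof with TYPE 'b' via the
   md_pseudo_table entry near the end of this file and is emitted as the
   16-bit pattern 0x3f80 (sign 0, exponent 0x7f, fraction 0); only the
   infinity and NaN cases are forced to the fixed patterns above.  The
   0x3f80 value is given as an illustration of the sign/exponent/fraction
   split rather than taken from this file.  */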
682
683 /* We handle all bad expressions here, so that we can report the faulty
684 instruction in the error message. */
685 void
686 md_operand (expressionS * exp)
687 {
688 if (in_my_get_expression_p)
689 exp->X_op = O_illegal;
690 }
691
692 /* Immediate values. */
693
694 /* Errors may be set multiple times during parsing or bit encoding
695 (particularly in the Neon bits), but usually the earliest error which is set
696 will be the most meaningful. Avoid overwriting it with later (cascading)
697 errors by calling this function. */
698
699 static void
700 first_error (const char *error)
701 {
702 if (! error_p ())
703 set_syntax_error (error);
704 }
705
706 /* Similar to first_error, but this function accepts formatted error
707 message. */
708 static void
709 first_error_fmt (const char *format, ...)
710 {
711 va_list args;
712 enum
713 { size = 100 };
714 /* N.B. this single buffer will not cause error messages for different
715 instructions to pollute each other; this is because at the end of
716 processing of each assembly line, the error message, if any, will be
717 collected by as_bad. */
718 static char buffer[size];
719
720 if (! error_p ())
721 {
722 int ret ATTRIBUTE_UNUSED;
723 va_start (args, format);
724 ret = vsnprintf (buffer, size, format, args);
725 know (ret <= size - 1 && ret >= 0);
726 va_end (args);
727 set_syntax_error (buffer);
728 }
729 }
730
731 /* Register parsing. */
732
733 /* Generic register parser which is called by other specialized
734 register parsers.
735 CCP points to what should be the beginning of a register name.
736 If it is indeed a valid register name, advance CCP over it and
737 return the reg_entry structure; otherwise return NULL.
738 It does not issue diagnostics. */
739
740 static reg_entry *
741 parse_reg (char **ccp)
742 {
743 char *start = *ccp;
744 char *p;
745 reg_entry *reg;
746
747 #ifdef REGISTER_PREFIX
748 if (*start != REGISTER_PREFIX)
749 return NULL;
750 start++;
751 #endif
752
753 p = start;
754 if (!ISALPHA (*p) || !is_name_beginner (*p))
755 return NULL;
756
757 do
758 p++;
759 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
760
761 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
762
763 if (!reg)
764 return NULL;
765
766 *ccp = p;
767 return reg;
768 }
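/* Illustrative example: given the input text "x1, [sp]", parse_reg scans
   the name up to the ',' and looks it up in aarch64_reg_hsh, so on
   success *ccp is advanced to point at ", [sp]" and the returned entry
   carries the register number and type (for "x1" this would be number 1
   with type REG_TYPE_R_64, assuming the usual built-in register table
   that is set up later in this file).  */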
769
770 /* Return TRUE if REG->TYPE is a valid member of the register class TYPE;
771 otherwise return FALSE. */
772 static bfd_boolean
773 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
774 {
775 return (reg_type_masks[type] & (1 << reg->type)) != 0;
776 }
777
778 /* Try to parse a base or offset register. Allow SVE base and offset
779 registers if REG_TYPE includes SVE registers. Return the register
780 entry on success, setting *QUALIFIER to the register qualifier.
781 Return null otherwise.
782
783 Note that this function does not issue any diagnostics. */
784
785 static const reg_entry *
786 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
787 aarch64_opnd_qualifier_t *qualifier)
788 {
789 char *str = *ccp;
790 const reg_entry *reg = parse_reg (&str);
791
792 if (reg == NULL)
793 return NULL;
794
795 switch (reg->type)
796 {
797 case REG_TYPE_R_32:
798 case REG_TYPE_SP_32:
799 case REG_TYPE_Z_32:
800 *qualifier = AARCH64_OPND_QLF_W;
801 break;
802
803 case REG_TYPE_R_64:
804 case REG_TYPE_SP_64:
805 case REG_TYPE_Z_64:
806 *qualifier = AARCH64_OPND_QLF_X;
807 break;
808
809 case REG_TYPE_ZN:
810 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
811 || str[0] != '.')
812 return NULL;
813 switch (TOLOWER (str[1]))
814 {
815 case 's':
816 *qualifier = AARCH64_OPND_QLF_S_S;
817 break;
818 case 'd':
819 *qualifier = AARCH64_OPND_QLF_S_D;
820 break;
821 default:
822 return NULL;
823 }
824 str += 2;
825 break;
826
827 default:
828 return NULL;
829 }
830
831 *ccp = str;
832
833 return reg;
834 }
835
836 /* Try to parse a base or offset register. Return the register entry
837 on success, setting *QUALIFIER to the register qualifier. Return null
838 otherwise.
839
840 Note that this function does not issue any diagnostics. */
841
842 static const reg_entry *
843 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
844 {
845 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
846 }
847
848 /* Parse the qualifier of a vector register or vector element of type
849 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
850 succeeds; otherwise return FALSE.
851
852 Accept only one occurrence of:
853 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
854 b h s d q */
855 static bfd_boolean
856 parse_vector_type_for_operand (aarch64_reg_type reg_type,
857 struct vector_type_el *parsed_type, char **str)
858 {
859 char *ptr = *str;
860 unsigned width;
861 unsigned element_size;
862 enum vector_el_type type;
863
864 /* skip '.' */
865 gas_assert (*ptr == '.');
866 ptr++;
867
868 if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
869 {
870 width = 0;
871 goto elt_size;
872 }
873 width = strtoul (ptr, &ptr, 10);
874 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
875 {
876 first_error_fmt (_("bad size %d in vector width specifier"), width);
877 return FALSE;
878 }
879
880 elt_size:
881 switch (TOLOWER (*ptr))
882 {
883 case 'b':
884 type = NT_b;
885 element_size = 8;
886 break;
887 case 'h':
888 type = NT_h;
889 element_size = 16;
890 break;
891 case 's':
892 type = NT_s;
893 element_size = 32;
894 break;
895 case 'd':
896 type = NT_d;
897 element_size = 64;
898 break;
899 case 'q':
900 if (reg_type == REG_TYPE_ZN || width == 1)
901 {
902 type = NT_q;
903 element_size = 128;
904 break;
905 }
906 /* fall through. */
907 default:
908 if (*ptr != '\0')
909 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
910 else
911 first_error (_("missing element size"));
912 return FALSE;
913 }
914 if (width != 0 && width * element_size != 64
915 && width * element_size != 128
916 && !(width == 2 && element_size == 16)
917 && !(width == 4 && element_size == 8))
918 {
919 first_error_fmt (_
920 ("invalid element size %d and vector size combination %c"),
921 width, *ptr);
922 return FALSE;
923 }
924 ptr++;
925
926 parsed_type->type = type;
927 parsed_type->width = width;
928
929 *str = ptr;
930
931 return TRUE;
932 }
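/* Worked example (illustrative): for the suffix ".4s" this returns
   width 4 and type NT_s (4 x 32 bits = 128 bits, accepted by the size
   check above); ".3s" is rejected as a bad vector width and ".4d" by the
   width * element_size check.  For SVE registers (REG_TYPE_ZN/PN) only a
   bare element size such as ".s" is accepted, leaving width as 0.  */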
933
934 /* *STR contains an SVE zero/merge predication suffix. Parse it into
935 *PARSED_TYPE and point *STR at the end of the suffix. */
936
937 static bfd_boolean
938 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
939 {
940 char *ptr = *str;
941
942 /* Skip '/'. */
943 gas_assert (*ptr == '/');
944 ptr++;
945 switch (TOLOWER (*ptr))
946 {
947 case 'z':
948 parsed_type->type = NT_zero;
949 break;
950 case 'm':
951 parsed_type->type = NT_merge;
952 break;
953 default:
954 if (*ptr != '\0' && *ptr != ',')
955 first_error_fmt (_("unexpected character `%c' in predication type"),
956 *ptr);
957 else
958 first_error (_("missing predication type"));
959 return FALSE;
960 }
961 parsed_type->width = 0;
962 *str = ptr + 1;
963 return TRUE;
964 }
965
966 /* Parse a register of the type TYPE.
967
968 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
969 name or the parsed register is not of TYPE.
970
971 Otherwise return the register number, and optionally fill in the actual
972 type of the register in *RTYPE when multiple alternatives were given, and
973 return the register shape and element index information in *TYPEINFO.
974
975 IN_REG_LIST should be set to TRUE if the caller is parsing a register
976 list. */
977
978 static int
979 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
980 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
981 {
982 char *str = *ccp;
983 const reg_entry *reg = parse_reg (&str);
984 struct vector_type_el atype;
985 struct vector_type_el parsetype;
986 bfd_boolean is_typed_vecreg = FALSE;
987
988 atype.defined = 0;
989 atype.type = NT_invtype;
990 atype.width = -1;
991 atype.index = 0;
992
993 if (reg == NULL)
994 {
995 if (typeinfo)
996 *typeinfo = atype;
997 set_default_error ();
998 return PARSE_FAIL;
999 }
1000
1001 if (! aarch64_check_reg_type (reg, type))
1002 {
1003 DEBUG_TRACE ("reg type check failed");
1004 set_default_error ();
1005 return PARSE_FAIL;
1006 }
1007 type = reg->type;
1008
1009 if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
1010 && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
1011 {
1012 if (*str == '.')
1013 {
1014 if (!parse_vector_type_for_operand (type, &parsetype, &str))
1015 return PARSE_FAIL;
1016 }
1017 else
1018 {
1019 if (!parse_predication_for_operand (&parsetype, &str))
1020 return PARSE_FAIL;
1021 }
1022
1023 /* Register is of the form Vn.[bhsdq]. */
1024 is_typed_vecreg = TRUE;
1025
1026 if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
1027 {
1028 /* The width is always variable; we don't allow an integer width
1029 to be specified. */
1030 gas_assert (parsetype.width == 0);
1031 atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
1032 }
1033 else if (parsetype.width == 0)
1034 /* Expect index. In the new scheme we cannot have
1035 Vn.[bhsdq] represent a scalar. Therefore any
1036 Vn.[bhsdq] should have an index following it.
1037 Except in reglists of course. */
1038 atype.defined |= NTA_HASINDEX;
1039 else
1040 atype.defined |= NTA_HASTYPE;
1041
1042 atype.type = parsetype.type;
1043 atype.width = parsetype.width;
1044 }
1045
1046 if (skip_past_char (&str, '['))
1047 {
1048 expressionS exp;
1049
1050 /* Reject Sn[index] syntax. */
1051 if (!is_typed_vecreg)
1052 {
1053 first_error (_("this type of register can't be indexed"));
1054 return PARSE_FAIL;
1055 }
1056
1057 if (in_reg_list)
1058 {
1059 first_error (_("index not allowed inside register list"));
1060 return PARSE_FAIL;
1061 }
1062
1063 atype.defined |= NTA_HASINDEX;
1064
1065 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1066
1067 if (exp.X_op != O_constant)
1068 {
1069 first_error (_("constant expression required"));
1070 return PARSE_FAIL;
1071 }
1072
1073 if (! skip_past_char (&str, ']'))
1074 return PARSE_FAIL;
1075
1076 atype.index = exp.X_add_number;
1077 }
1078 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
1079 {
1080 /* Indexed vector register expected. */
1081 first_error (_("indexed vector register expected"));
1082 return PARSE_FAIL;
1083 }
1084
1085 /* A vector reg Vn should be typed or indexed. */
1086 if (type == REG_TYPE_VN && atype.defined == 0)
1087 {
1088 first_error (_("invalid use of vector register"));
1089 }
1090
1091 if (typeinfo)
1092 *typeinfo = atype;
1093
1094 if (rtype)
1095 *rtype = type;
1096
1097 *ccp = str;
1098
1099 return reg->number;
1100 }
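/* Worked example (illustrative): for the operand text "v2.4s[1]" this
   returns register number 2 with *TYPEINFO holding type NT_s, width 4,
   index 1 and DEFINED == NTA_HASTYPE | NTA_HASINDEX.  A bare "v2" still
   returns the register number but records the error "invalid use of
   vector register" via first_error for the caller to report.  */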
1101
1102 /* Parse register.
1103
1104 Return the register number on success; return PARSE_FAIL otherwise.
1105
1106 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1107 the register (e.g. NEON double or quad reg when either has been requested).
1108
1109 If this is a NEON vector register with additional type information, fill
1110 in the struct pointed to by VECTYPE (if non-NULL).
1111
1112 This parser does not handle register lists. */
1113
1114 static int
1115 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1116 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1117 {
1118 struct vector_type_el atype;
1119 char *str = *ccp;
1120 int reg = parse_typed_reg (&str, type, rtype, &atype,
1121 /*in_reg_list= */ FALSE);
1122
1123 if (reg == PARSE_FAIL)
1124 return PARSE_FAIL;
1125
1126 if (vectype)
1127 *vectype = atype;
1128
1129 *ccp = str;
1130
1131 return reg;
1132 }
1133
1134 static inline bfd_boolean
1135 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1136 {
1137 return
1138 e1.type == e2.type
1139 && e1.defined == e2.defined
1140 && e1.width == e2.width && e1.index == e2.index;
1141 }
1142
1143 /* This function parses a list of vector registers of type TYPE.
1144 On success, it returns the parsed register list information in the
1145 following encoded format:
1146
1147 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1148 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1149
1150 The information of the register shape and/or index is returned in
1151 *VECTYPE.
1152
1153 It returns PARSE_FAIL if the register list is invalid.
1154
1155 The list contains one to four registers.
1156 Each register can be one of:
1157 <Vt>.<T>[<index>]
1158 <Vt>.<T>
1159 All <T> should be identical.
1160 All <index> should be identical.
1161 There are restrictions on <Vt> numbers which are checked later
1162 (by reg_list_valid_p). */
1163
1164 static int
1165 parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1166 struct vector_type_el *vectype)
1167 {
1168 char *str = *ccp;
1169 int nb_regs;
1170 struct vector_type_el typeinfo, typeinfo_first;
1171 int val, val_range;
1172 int in_range;
1173 int ret_val;
1174 int i;
1175 bfd_boolean error = FALSE;
1176 bfd_boolean expect_index = FALSE;
1177
1178 if (*str != '{')
1179 {
1180 set_syntax_error (_("expecting {"));
1181 return PARSE_FAIL;
1182 }
1183 str++;
1184
1185 nb_regs = 0;
1186 typeinfo_first.defined = 0;
1187 typeinfo_first.type = NT_invtype;
1188 typeinfo_first.width = -1;
1189 typeinfo_first.index = 0;
1190 ret_val = 0;
1191 val = -1;
1192 val_range = -1;
1193 in_range = 0;
1194 do
1195 {
1196 if (in_range)
1197 {
1198 str++; /* skip over '-' */
1199 val_range = val;
1200 }
1201 val = parse_typed_reg (&str, type, NULL, &typeinfo,
1202 /*in_reg_list= */ TRUE);
1203 if (val == PARSE_FAIL)
1204 {
1205 set_first_syntax_error (_("invalid vector register in list"));
1206 error = TRUE;
1207 continue;
1208 }
1209 /* reject [bhsd]n */
1210 if (type == REG_TYPE_VN && typeinfo.defined == 0)
1211 {
1212 set_first_syntax_error (_("invalid scalar register in list"));
1213 error = TRUE;
1214 continue;
1215 }
1216
1217 if (typeinfo.defined & NTA_HASINDEX)
1218 expect_index = TRUE;
1219
1220 if (in_range)
1221 {
1222 if (val < val_range)
1223 {
1224 set_first_syntax_error
1225 (_("invalid range in vector register list"));
1226 error = TRUE;
1227 }
1228 val_range++;
1229 }
1230 else
1231 {
1232 val_range = val;
1233 if (nb_regs == 0)
1234 typeinfo_first = typeinfo;
1235 else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1236 {
1237 set_first_syntax_error
1238 (_("type mismatch in vector register list"));
1239 error = TRUE;
1240 }
1241 }
1242 if (! error)
1243 for (i = val_range; i <= val; i++)
1244 {
1245 ret_val |= i << (5 * nb_regs);
1246 nb_regs++;
1247 }
1248 in_range = 0;
1249 }
1250 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1251
1252 skip_whitespace (str);
1253 if (*str != '}')
1254 {
1255 set_first_syntax_error (_("end of vector register list not found"));
1256 error = TRUE;
1257 }
1258 str++;
1259
1260 skip_whitespace (str);
1261
1262 if (expect_index)
1263 {
1264 if (skip_past_char (&str, '['))
1265 {
1266 expressionS exp;
1267
1268 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1269 if (exp.X_op != O_constant)
1270 {
1271 set_first_syntax_error (_("constant expression required."));
1272 error = TRUE;
1273 }
1274 if (! skip_past_char (&str, ']'))
1275 error = TRUE;
1276 else
1277 typeinfo_first.index = exp.X_add_number;
1278 }
1279 else
1280 {
1281 set_first_syntax_error (_("expected index"));
1282 error = TRUE;
1283 }
1284 }
1285
1286 if (nb_regs > 4)
1287 {
1288 set_first_syntax_error (_("too many registers in vector register list"));
1289 error = TRUE;
1290 }
1291 else if (nb_regs == 0)
1292 {
1293 set_first_syntax_error (_("empty vector register list"));
1294 error = TRUE;
1295 }
1296
1297 *ccp = str;
1298 if (! error)
1299 *vectype = typeinfo_first;
1300
1301 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1302 }
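/* Worked example (illustrative): for the list "{v4.4s - v7.4s}" the loop
   above produces nb_regs == 4 and ret_val == 4 | (5 << 5) | (6 << 10)
   | (7 << 15), so the function returns (ret_val << 2) | 3, matching the
   encoded layout documented in the comment before this function.  */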
1303
1304 /* Directives: register aliases. */
1305
1306 static reg_entry *
1307 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1308 {
1309 reg_entry *new;
1310 const char *name;
1311
1312 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1313 {
1314 if (new->builtin)
1315 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1316 str);
1317
1318 /* Only warn about a redefinition if it's not defined as the
1319 same register. */
1320 else if (new->number != number || new->type != type)
1321 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1322
1323 return NULL;
1324 }
1325
1326 name = xstrdup (str);
1327 new = XNEW (reg_entry);
1328
1329 new->name = name;
1330 new->number = number;
1331 new->type = type;
1332 new->builtin = FALSE;
1333
1334 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1335
1336 return new;
1337 }
1338
1339 /* Look for the .req directive. This is of the form:
1340
1341 new_register_name .req existing_register_name
1342
1343 If we find one, or if it looks sufficiently like one that we want to
1344 handle any error here, return TRUE. Otherwise return FALSE. */
1345
1346 static bfd_boolean
1347 create_register_alias (char *newname, char *p)
1348 {
1349 const reg_entry *old;
1350 char *oldname, *nbuf;
1351 size_t nlen;
1352
1353 /* The input scrubber ensures that whitespace after the mnemonic is
1354 collapsed to single spaces. */
1355 oldname = p;
1356 if (strncmp (oldname, " .req ", 6) != 0)
1357 return FALSE;
1358
1359 oldname += 6;
1360 if (*oldname == '\0')
1361 return FALSE;
1362
1363 old = str_hash_find (aarch64_reg_hsh, oldname);
1364 if (!old)
1365 {
1366 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1367 return TRUE;
1368 }
1369
1370 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1371 the desired alias name, and p points to its end. If not, then
1372 the desired alias name is in the global original_case_string. */
1373 #ifdef TC_CASE_SENSITIVE
1374 nlen = p - newname;
1375 #else
1376 newname = original_case_string;
1377 nlen = strlen (newname);
1378 #endif
1379
1380 nbuf = xmemdup0 (newname, nlen);
1381
1382 /* Create aliases under the new name as stated; an all-lowercase
1383 version of the new name; and an all-uppercase version of the new
1384 name. */
1385 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1386 {
1387 for (p = nbuf; *p; p++)
1388 *p = TOUPPER (*p);
1389
1390 if (strncmp (nbuf, newname, nlen))
1391 {
1392 /* If this attempt to create an additional alias fails, do not bother
1393 trying to create the all-lower case alias. We will fail and issue
1394 a second, duplicate error message. This situation arises when the
1395 programmer does something like:
1396 foo .req r0
1397 Foo .req r1
1398 The second .req creates the "Foo" alias but then fails to create
1399 the artificial FOO alias because it has already been created by the
1400 first .req. */
1401 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1402 {
1403 free (nbuf);
1404 return TRUE;
1405 }
1406 }
1407
1408 for (p = nbuf; *p; p++)
1409 *p = TOLOWER (*p);
1410
1411 if (strncmp (nbuf, newname, nlen))
1412 insert_reg_alias (nbuf, old->number, old->type);
1413 }
1414
1415 free (nbuf);
1416 return TRUE;
1417 }
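/* Usage sketch (assembly source, illustrative):

       base .req x19
       mov  base, sp
       .unreq base

   A single ".req" creates the alias as written plus all-upper-case and
   all-lower-case variants ("BASE" and "base" here); the matching
   ".unreq", handled by s_unreq below, deletes all three again.  */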
1418
1419 /* Should never be called, as .req goes between the alias and the
1420 register name, not at the beginning of the line. */
1421 static void
1422 s_req (int a ATTRIBUTE_UNUSED)
1423 {
1424 as_bad (_("invalid syntax for .req directive"));
1425 }
1426
1427 /* The .unreq directive deletes an alias which was previously defined
1428 by .req. For example:
1429
1430 my_alias .req r11
1431 .unreq my_alias */
1432
1433 static void
1434 s_unreq (int a ATTRIBUTE_UNUSED)
1435 {
1436 char *name;
1437 char saved_char;
1438
1439 name = input_line_pointer;
1440
1441 while (*input_line_pointer != 0
1442 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1443 ++input_line_pointer;
1444
1445 saved_char = *input_line_pointer;
1446 *input_line_pointer = 0;
1447
1448 if (!*name)
1449 as_bad (_("invalid syntax for .unreq directive"));
1450 else
1451 {
1452 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1453
1454 if (!reg)
1455 as_bad (_("unknown register alias '%s'"), name);
1456 else if (reg->builtin)
1457 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1458 name);
1459 else
1460 {
1461 char *p;
1462 char *nbuf;
1463
1464 str_hash_delete (aarch64_reg_hsh, name);
1465 free ((char *) reg->name);
1466 free (reg);
1467
1468 /* Also locate the all upper case and all lower case versions.
1469 Do not complain if we cannot find one or the other as it
1470 was probably deleted above. */
1471
1472 nbuf = strdup (name);
1473 for (p = nbuf; *p; p++)
1474 *p = TOUPPER (*p);
1475 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1476 if (reg)
1477 {
1478 str_hash_delete (aarch64_reg_hsh, nbuf);
1479 free ((char *) reg->name);
1480 free (reg);
1481 }
1482
1483 for (p = nbuf; *p; p++)
1484 *p = TOLOWER (*p);
1485 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1486 if (reg)
1487 {
1488 str_hash_delete (aarch64_reg_hsh, nbuf);
1489 free ((char *) reg->name);
1490 free (reg);
1491 }
1492
1493 free (nbuf);
1494 }
1495 }
1496
1497 *input_line_pointer = saved_char;
1498 demand_empty_rest_of_line ();
1499 }
1500
1501 /* Directives: Instruction set selection. */
1502
1503 #ifdef OBJ_ELF
1504 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1505 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1506 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1507 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1508
1509 /* Create a new mapping symbol for the transition to STATE. */
1510
1511 static void
1512 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1513 {
1514 symbolS *symbolP;
1515 const char *symname;
1516 int type;
1517
1518 switch (state)
1519 {
1520 case MAP_DATA:
1521 symname = "$d";
1522 type = BSF_NO_FLAGS;
1523 break;
1524 case MAP_INSN:
1525 symname = "$x";
1526 type = BSF_NO_FLAGS;
1527 break;
1528 default:
1529 abort ();
1530 }
1531
1532 symbolP = symbol_new (symname, now_seg, frag, value);
1533 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1534
1535 /* Save the mapping symbols for future reference. Also check that
1536 we do not place two mapping symbols at the same offset within a
1537 frag. We'll handle overlap between frags in
1538 check_mapping_symbols.
1539
1540 If .fill or another data-filling directive generates zero-sized data,
1541 the mapping symbol for the following code will have the same value
1542 as the one generated for the data filling directive. In this case,
1543 we replace the old symbol with the new one at the same address. */
1544 if (value == 0)
1545 {
1546 if (frag->tc_frag_data.first_map != NULL)
1547 {
1548 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1549 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1550 &symbol_lastP);
1551 }
1552 frag->tc_frag_data.first_map = symbolP;
1553 }
1554 if (frag->tc_frag_data.last_map != NULL)
1555 {
1556 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1557 S_GET_VALUE (symbolP));
1558 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1559 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1560 &symbol_lastP);
1561 }
1562 frag->tc_frag_data.last_map = symbolP;
1563 }
1564
1565 /* We must sometimes convert a region marked as code to data during
1566 code alignment, if an odd number of bytes have to be padded. The
1567 code mapping symbol is pushed to an aligned address. */
1568
1569 static void
1570 insert_data_mapping_symbol (enum mstate state,
1571 valueT value, fragS * frag, offsetT bytes)
1572 {
1573 /* If there was already a mapping symbol, remove it. */
1574 if (frag->tc_frag_data.last_map != NULL
1575 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1576 frag->fr_address + value)
1577 {
1578 symbolS *symp = frag->tc_frag_data.last_map;
1579
1580 if (value == 0)
1581 {
1582 know (frag->tc_frag_data.first_map == symp);
1583 frag->tc_frag_data.first_map = NULL;
1584 }
1585 frag->tc_frag_data.last_map = NULL;
1586 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1587 }
1588
1589 make_mapping_symbol (MAP_DATA, value, frag);
1590 make_mapping_symbol (state, value + bytes, frag);
1591 }
1592
1593 static void mapping_state_2 (enum mstate state, int max_chars);
1594
1595 /* Set the mapping state to STATE. Only call this when about to
1596 emit some STATE bytes to the file. */
1597
1598 void
1599 mapping_state (enum mstate state)
1600 {
1601 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1602
1603 if (state == MAP_INSN)
1604 /* AArch64 instructions require 4-byte alignment. When emitting
1605 instructions into any section, record the appropriate section
1606 alignment. */
1607 record_alignment (now_seg, 2);
1608
1609 if (mapstate == state)
1610 /* The mapping symbol has already been emitted.
1611 There is nothing else to do. */
1612 return;
1613
1614 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1615 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1616 /* Emit MAP_DATA within executable section in order. Otherwise, it will be
1617 evaluated later in the next else. */
1618 return;
1619 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1620 {
1621 /* Only add the symbol if the offset is > 0:
1622 if we're at the first frag, check its size is > 0;
1623 if we're not at the first frag, then for sure
1624 the offset is > 0. */
1625 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1626 const int add_symbol = (frag_now != frag_first)
1627 || (frag_now_fix () > 0);
1628
1629 if (add_symbol)
1630 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1631 }
1632 #undef TRANSITION
1633
1634 mapping_state_2 (state, 0);
1635 }
1636
1637 /* Same as mapping_state, but MAX_CHARS bytes have already been
1638 allocated. Put the mapping symbol that far back. */
1639
1640 static void
1641 mapping_state_2 (enum mstate state, int max_chars)
1642 {
1643 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1644
1645 if (!SEG_NORMAL (now_seg))
1646 return;
1647
1648 if (mapstate == state)
1649 /* The mapping symbol has already been emitted.
1650 There is nothing else to do. */
1651 return;
1652
1653 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1654 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1655 }
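/* Illustrative example of the resulting mapping symbols, assuming OBJ_ELF:

       .text
       add   x0, x0, #1        creates an untyped "$x" at offset 0
       .word 0x12345678        s_aarch64_elf_cons switches to "$d"
       nop                     back to "$x"

   The symbols themselves are created by make_mapping_symbol above.  */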
1656 #else
1657 #define mapping_state(x) /* nothing */
1658 #define mapping_state_2(x, y) /* nothing */
1659 #endif
1660
1661 /* Directives: sectioning and alignment. */
1662
1663 static void
1664 s_bss (int ignore ATTRIBUTE_UNUSED)
1665 {
1666 /* We don't support putting frags in the BSS segment; we fake it by
1667 marking in_bss, then looking at s_skip for clues. */
1668 subseg_set (bss_section, 0);
1669 demand_empty_rest_of_line ();
1670 mapping_state (MAP_DATA);
1671 }
1672
1673 static void
1674 s_even (int ignore ATTRIBUTE_UNUSED)
1675 {
1676 /* Never make a frag if we expect an extra pass. */
1677 if (!need_pass_2)
1678 frag_align (1, 0, 0);
1679
1680 record_alignment (now_seg, 1);
1681
1682 demand_empty_rest_of_line ();
1683 }
1684
1685 /* Directives: Literal pools. */
1686
1687 static literal_pool *
1688 find_literal_pool (int size)
1689 {
1690 literal_pool *pool;
1691
1692 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1693 {
1694 if (pool->section == now_seg
1695 && pool->sub_section == now_subseg && pool->size == size)
1696 break;
1697 }
1698
1699 return pool;
1700 }
1701
1702 static literal_pool *
1703 find_or_make_literal_pool (int size)
1704 {
1705 /* Next literal pool ID number. */
1706 static unsigned int latest_pool_num = 1;
1707 literal_pool *pool;
1708
1709 pool = find_literal_pool (size);
1710
1711 if (pool == NULL)
1712 {
1713 /* Create a new pool. */
1714 pool = XNEW (literal_pool);
1715 if (!pool)
1716 return NULL;
1717
1718 /* Currently we always put the literal pool in the current text
1719 section. If we were generating "small" model code where we
1720 knew that all code and initialised data was within 1MB then
1721 we could output literals to mergeable, read-only data
1722 sections. */
1723
1724 pool->next_free_entry = 0;
1725 pool->section = now_seg;
1726 pool->sub_section = now_subseg;
1727 pool->size = size;
1728 pool->next = list_of_pools;
1729 pool->symbol = NULL;
1730
1731 /* Add it to the list. */
1732 list_of_pools = pool;
1733 }
1734
1735 /* New pools, and emptied pools, will have a NULL symbol. */
1736 if (pool->symbol == NULL)
1737 {
1738 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1739 &zero_address_frag, 0);
1740 pool->id = latest_pool_num++;
1741 }
1742
1743 /* Done. */
1744 return pool;
1745 }
1746
1747 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1748 Return TRUE on success, otherwise return FALSE. */
1749 static bfd_boolean
1750 add_to_lit_pool (expressionS *exp, int size)
1751 {
1752 literal_pool *pool;
1753 unsigned int entry;
1754
1755 pool = find_or_make_literal_pool (size);
1756
1757 /* Check if this literal value is already in the pool. */
1758 for (entry = 0; entry < pool->next_free_entry; entry++)
1759 {
1760 expressionS * litexp = & pool->literals[entry].exp;
1761
1762 if ((litexp->X_op == exp->X_op)
1763 && (exp->X_op == O_constant)
1764 && (litexp->X_add_number == exp->X_add_number)
1765 && (litexp->X_unsigned == exp->X_unsigned))
1766 break;
1767
1768 if ((litexp->X_op == exp->X_op)
1769 && (exp->X_op == O_symbol)
1770 && (litexp->X_add_number == exp->X_add_number)
1771 && (litexp->X_add_symbol == exp->X_add_symbol)
1772 && (litexp->X_op_symbol == exp->X_op_symbol))
1773 break;
1774 }
1775
1776 /* Do we need to create a new entry? */
1777 if (entry == pool->next_free_entry)
1778 {
1779 if (entry >= MAX_LITERAL_POOL_SIZE)
1780 {
1781 set_syntax_error (_("literal pool overflow"));
1782 return FALSE;
1783 }
1784
1785 pool->literals[entry].exp = *exp;
1786 pool->next_free_entry += 1;
1787 if (exp->X_op == O_big)
1788 {
1789 /* PR 16688: Bignums are held in a single global array. We must
1790 copy and preserve that value now, before it is overwritten. */
1791 pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
1792 exp->X_add_number);
1793 memcpy (pool->literals[entry].bignum, generic_bignum,
1794 CHARS_PER_LITTLENUM * exp->X_add_number);
1795 }
1796 else
1797 pool->literals[entry].bignum = NULL;
1798 }
1799
1800 exp->X_op = O_symbol;
1801 exp->X_add_number = ((int) entry) * size;
1802 exp->X_add_symbol = pool->symbol;
1803
1804 return TRUE;
1805 }
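/* Illustrative sketch: two literals of the same value and size (for
   example from two "ldr x0, =0x11223344" style pseudo-loads, which are
   handled elsewhere in this file) share one pool entry; on return the
   caller's expression has been rewritten as pool->symbol + entry * size,
   to be resolved once s_ltorg below places the pool and locates the
   symbol.  */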
1806
1807 /* Can't use symbol_new here, so have to create a symbol and then at
1808 a later date assign it a value. That's what these functions do. */
1809
1810 static void
1811 symbol_locate (symbolS * symbolP,
1812 const char *name,/* It is copied, the caller can modify. */
1813 segT segment, /* Segment identifier (SEG_<something>). */
1814 valueT valu, /* Symbol value. */
1815 fragS * frag) /* Associated fragment. */
1816 {
1817 size_t name_length;
1818 char *preserved_copy_of_name;
1819
1820 name_length = strlen (name) + 1; /* +1 for \0. */
1821 obstack_grow (&notes, name, name_length);
1822 preserved_copy_of_name = obstack_finish (&notes);
1823
1824 #ifdef tc_canonicalize_symbol_name
1825 preserved_copy_of_name =
1826 tc_canonicalize_symbol_name (preserved_copy_of_name);
1827 #endif
1828
1829 S_SET_NAME (symbolP, preserved_copy_of_name);
1830
1831 S_SET_SEGMENT (symbolP, segment);
1832 S_SET_VALUE (symbolP, valu);
1833 symbol_clear_list_pointers (symbolP);
1834
1835 symbol_set_frag (symbolP, frag);
1836
1837 /* Link to end of symbol chain. */
1838 {
1839 extern int symbol_table_frozen;
1840
1841 if (symbol_table_frozen)
1842 abort ();
1843 }
1844
1845 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1846
1847 obj_symbol_new_hook (symbolP);
1848
1849 #ifdef tc_symbol_new_hook
1850 tc_symbol_new_hook (symbolP);
1851 #endif
1852
1853 #ifdef DEBUG_SYMS
1854 verify_symbol_chain (symbol_rootP, symbol_lastP);
1855 #endif /* DEBUG_SYMS */
1856 }
1857
1858
1859 static void
1860 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1861 {
1862 unsigned int entry;
1863 literal_pool *pool;
1864 char sym_name[20];
1865 int align;
1866
1867 for (align = 2; align <= 4; align++)
1868 {
1869 int size = 1 << align;
1870
1871 pool = find_literal_pool (size);
1872 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1873 continue;
1874
1875 /* Align the pool, as it will be accessed with word (or larger) loads.
1876 Only make a frag if we have to. */
1877 if (!need_pass_2)
1878 frag_align (align, 0, 0);
1879
1880 mapping_state (MAP_DATA);
1881
1882 record_alignment (now_seg, align);
1883
1884 sprintf (sym_name, "$$lit_\002%x", pool->id);
1885
1886 symbol_locate (pool->symbol, sym_name, now_seg,
1887 (valueT) frag_now_fix (), frag_now);
1888 symbol_table_insert (pool->symbol);
1889
1890 for (entry = 0; entry < pool->next_free_entry; entry++)
1891 {
1892 expressionS * exp = & pool->literals[entry].exp;
1893
1894 if (exp->X_op == O_big)
1895 {
1896 /* PR 16688: Restore the global bignum value. */
1897 gas_assert (pool->literals[entry].bignum != NULL);
1898 memcpy (generic_bignum, pool->literals[entry].bignum,
1899 CHARS_PER_LITTLENUM * exp->X_add_number);
1900 }
1901
1902 /* First output the expression in the instruction to the pool. */
1903 emit_expr (exp, size); /* .word|.xword */
1904
1905 if (exp->X_op == O_big)
1906 {
1907 free (pool->literals[entry].bignum);
1908 pool->literals[entry].bignum = NULL;
1909 }
1910 }
1911
1912 /* Mark the pool as empty. */
1913 pool->next_free_entry = 0;
1914 pool->symbol = NULL;
1915 }
1916 }
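/* Usage sketch (assembly source, illustrative):

       ldr   x0, =some_external_symbol
       ret
       .ltorg

   ".ltorg" (or its alias ".pool") emits any pending literal pools at this
   point, one per entry size (4, 8 or 16 bytes), each aligned to its size
   and then marked empty so that a later ".ltorg" starts afresh.  */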
1917
1918 #ifdef OBJ_ELF
1919 /* Forward declarations for functions below, in the MD interface
1920 section. */
1921 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1922 static struct reloc_table_entry * find_reloc_table_entry (char **);
1923
1924 /* Directives: Data. */
1925 /* N.B. the support for relocation suffix in this directive needs to be
1926 implemented properly. */
1927
1928 static void
1929 s_aarch64_elf_cons (int nbytes)
1930 {
1931 expressionS exp;
1932
1933 #ifdef md_flush_pending_output
1934 md_flush_pending_output ();
1935 #endif
1936
1937 if (is_it_end_of_statement ())
1938 {
1939 demand_empty_rest_of_line ();
1940 return;
1941 }
1942
1943 #ifdef md_cons_align
1944 md_cons_align (nbytes);
1945 #endif
1946
1947 mapping_state (MAP_DATA);
1948 do
1949 {
1950 struct reloc_table_entry *reloc;
1951
1952 expression (&exp);
1953
1954 if (exp.X_op != O_symbol)
1955 emit_expr (&exp, (unsigned int) nbytes);
1956 else
1957 {
1958 skip_past_char (&input_line_pointer, '#');
1959 if (skip_past_char (&input_line_pointer, ':'))
1960 {
1961 reloc = find_reloc_table_entry (&input_line_pointer);
1962 if (reloc == NULL)
1963 as_bad (_("unrecognized relocation suffix"));
1964 else
1965 as_bad (_("unimplemented relocation suffix"));
1966 ignore_rest_of_line ();
1967 return;
1968 }
1969 else
1970 emit_expr (&exp, (unsigned int) nbytes);
1971 }
1972 }
1973 while (*input_line_pointer++ == ',');
1974
1975 /* Put terminator back into stream. */
1976 input_line_pointer--;
1977 demand_empty_rest_of_line ();
1978 }
1979
1980 /* Mark a symbol as following the variant PCS convention. */
1981
1982 static void
1983 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
1984 {
1985 char *name;
1986 char c;
1987 symbolS *sym;
1988 asymbol *bfdsym;
1989 elf_symbol_type *elfsym;
1990
1991 c = get_symbol_name (&name);
1992 if (!*name)
1993 as_bad (_("Missing symbol name in directive"));
1994 sym = symbol_find_or_make (name);
1995 restore_line_pointer (c);
1996 demand_empty_rest_of_line ();
1997 bfdsym = symbol_get_bfdsym (sym);
1998 elfsym = elf_symbol_from (bfdsym);
1999 gas_assert (elfsym);
2000 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2001 }
2002 #endif /* OBJ_ELF */
2003
2004 /* Output a 32-bit word, but mark as an instruction. */
2005
2006 static void
2007 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
2008 {
2009 expressionS exp;
2010
2011 #ifdef md_flush_pending_output
2012 md_flush_pending_output ();
2013 #endif
2014
2015 if (is_it_end_of_statement ())
2016 {
2017 demand_empty_rest_of_line ();
2018 return;
2019 }
2020
2021 /* Sections are assumed to start aligned. In an executable section, there is no
2022 MAP_DATA symbol pending, so we only align the address during the
2023 MAP_DATA --> MAP_INSN transition.
2024 For other sections, this is not guaranteed. */
2025 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2026 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
2027 frag_align_code (2, 0);
2028
2029 #ifdef OBJ_ELF
2030 mapping_state (MAP_INSN);
2031 #endif
2032
2033 do
2034 {
2035 expression (&exp);
2036 if (exp.X_op != O_constant)
2037 {
2038 as_bad (_("constant expression required"));
2039 ignore_rest_of_line ();
2040 return;
2041 }
2042
2043 if (target_big_endian)
2044 {
2045 unsigned int val = exp.X_add_number;
2046 exp.X_add_number = SWAP_32 (val);
2047 }
2048 emit_expr (&exp, 4);
2049 }
2050 while (*input_line_pointer++ == ',');
2051
2052 /* Put terminator back into stream. */
2053 input_line_pointer--;
2054 demand_empty_rest_of_line ();
2055 }
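/* For illustration (not part of the upstream source): ".inst 0xd503201f"
emits the 32-bit NOP encoding and marks it with a MAP_INSN mapping symbol,
whereas ".word 0xd503201f" would emit the same bytes under MAP_DATA. The
operand must be a constant expression; a symbolic value is rejected with
"constant expression required". */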
2056
2057 static void
2058 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2059 {
2060 demand_empty_rest_of_line ();
2061 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2062 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2063 }
2064
2065 #ifdef OBJ_ELF
2066 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2067
2068 static void
2069 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2070 {
2071 expressionS exp;
2072
2073 expression (&exp);
2074 frag_grow (4);
2075 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2076 BFD_RELOC_AARCH64_TLSDESC_ADD);
2077
2078 demand_empty_rest_of_line ();
2079 }
2080
2081 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2082
2083 static void
2084 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2085 {
2086 expressionS exp;
2087
2088 /* Since we're just labelling the code, there's no need to define a
2089 mapping symbol. */
2090 expression (&exp);
2091 /* Make sure there is enough room in this frag for the following
2092 blr. This trick only works if the blr follows immediately after
2093 the .tlsdesccall directive. */
2094 frag_grow (4);
2095 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2096 BFD_RELOC_AARCH64_TLSDESC_CALL);
2097
2098 demand_empty_rest_of_line ();
2099 }
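/* An illustrative TLS descriptor sequence using the directive above (the
symbol name "var" is hypothetical):

adrp x0, :tlsdesc:var
ldr x1, [x0, #:tlsdesc_lo12:var]
add x0, x0, #:tlsdesc_lo12:var
.tlsdesccall var
blr x1

The .tlsdesccall directive attaches BFD_RELOC_AARCH64_TLSDESC_CALL to the
following BLR so that the linker can relax the whole sequence. */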
2100
2101 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2102
2103 static void
2104 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2105 {
2106 expressionS exp;
2107
2108 expression (&exp);
2109 frag_grow (4);
2110 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2111 BFD_RELOC_AARCH64_TLSDESC_LDR);
2112
2113 demand_empty_rest_of_line ();
2114 }
2115 #endif /* OBJ_ELF */
2116
2117 static void s_aarch64_arch (int);
2118 static void s_aarch64_cpu (int);
2119 static void s_aarch64_arch_extension (int);
2120
2121 /* This table describes all the machine specific pseudo-ops the assembler
2122 has to support. The fields are:
2123 pseudo-op name without dot
2124 function to call to execute this pseudo-op
2125 Integer arg to pass to the function. */
2126
2127 const pseudo_typeS md_pseudo_table[] = {
2128 /* Never called because '.req' does not start a line. */
2129 {"req", s_req, 0},
2130 {"unreq", s_unreq, 0},
2131 {"bss", s_bss, 0},
2132 {"even", s_even, 0},
2133 {"ltorg", s_ltorg, 0},
2134 {"pool", s_ltorg, 0},
2135 {"cpu", s_aarch64_cpu, 0},
2136 {"arch", s_aarch64_arch, 0},
2137 {"arch_extension", s_aarch64_arch_extension, 0},
2138 {"inst", s_aarch64_inst, 0},
2139 {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
2140 #ifdef OBJ_ELF
2141 {"tlsdescadd", s_tlsdescadd, 0},
2142 {"tlsdesccall", s_tlsdesccall, 0},
2143 {"tlsdescldr", s_tlsdescldr, 0},
2144 {"word", s_aarch64_elf_cons, 4},
2145 {"long", s_aarch64_elf_cons, 4},
2146 {"xword", s_aarch64_elf_cons, 8},
2147 {"dword", s_aarch64_elf_cons, 8},
2148 {"variant_pcs", s_variant_pcs, 0},
2149 #endif
2150 {"float16", float_cons, 'h'},
2151 {"bfloat16", float_cons, 'b'},
2152 {0, 0, 0}
2153 };
2154 \f
2155
2156 /* Check whether STR points to a register name followed by a comma or the
2157 end of the line; REG_TYPE indicates which register types are checked
2158 against. Return TRUE if STR is such a register name; otherwise return
2159 FALSE. The function is not intended to produce any diagnostics, but since
2160 the register parser aarch64_reg_parse, which is called by this function,
2161 does produce diagnostics, we call clear_error to clear any diagnostics
2162 that may be generated by aarch64_reg_parse.
2163 Also, the function returns FALSE directly if there is any user error
2164 present at the function entry. This prevents the existing diagnostics
2165 state from being spoiled.
2166 The function currently serves parse_constant_immediate and
2167 parse_big_immediate only. */
2168 static bfd_boolean
2169 reg_name_p (char *str, aarch64_reg_type reg_type)
2170 {
2171 int reg;
2172
2173 /* Prevent the diagnostics state from being spoiled. */
2174 if (error_p ())
2175 return FALSE;
2176
2177 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2178
2179 /* Clear the parsing error that may be set by the reg parser. */
2180 clear_error ();
2181
2182 if (reg == PARSE_FAIL)
2183 return FALSE;
2184
2185 skip_whitespace (str);
2186 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2187 return TRUE;
2188
2189 return FALSE;
2190 }
2191
2192 /* Parser functions used exclusively in instruction operands. */
2193
2194 /* Parse an immediate expression which may not be constant.
2195
2196 To prevent the expression parser from pushing a register name
2197 into the symbol table as an undefined symbol, a check is first
2198 made to find out whether STR is a register of type REG_TYPE followed
2199 by a comma or the end of the line. Return FALSE if STR is such a string. */
2200
2201 static bfd_boolean
2202 parse_immediate_expression (char **str, expressionS *exp,
2203 aarch64_reg_type reg_type)
2204 {
2205 if (reg_name_p (*str, reg_type))
2206 {
2207 set_recoverable_error (_("immediate operand required"));
2208 return FALSE;
2209 }
2210
2211 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2212
2213 if (exp->X_op == O_absent)
2214 {
2215 set_fatal_syntax_error (_("missing immediate expression"));
2216 return FALSE;
2217 }
2218
2219 return TRUE;
2220 }
2221
2222 /* Constant immediate-value read function for use in insn parsing.
2223 STR points to the beginning of the immediate (with the optional
2224 leading #); *VAL receives the value. REG_TYPE says which register
2225 names should be treated as registers rather than as symbolic immediates.
2226
2227 Return TRUE on success; otherwise return FALSE. */
2228
2229 static bfd_boolean
2230 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2231 {
2232 expressionS exp;
2233
2234 if (! parse_immediate_expression (str, &exp, reg_type))
2235 return FALSE;
2236
2237 if (exp.X_op != O_constant)
2238 {
2239 set_syntax_error (_("constant expression required"));
2240 return FALSE;
2241 }
2242
2243 *val = exp.X_add_number;
2244 return TRUE;
2245 }
2246
2247 static uint32_t
2248 encode_imm_float_bits (uint32_t imm)
2249 {
2250 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2251 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2252 }
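/* A worked example: the single-precision bit pattern for 1.0f is
0x3f800000; encode_imm_float_bits (0x3f800000) extracts bits [25:19]
(0x70) and bit [31] (0), giving the 8-bit immediate 0x70, which is the
FMOV encoding of #1.0. For -1.0f (0xbf800000) the sign bit is added and
the result is 0xf0. */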
2253
2254 /* Return TRUE if the single-precision floating-point value encoded in IMM
2255 can be expressed in the AArch64 8-bit signed floating-point format with
2256 3-bit exponent and normalized 4 bits of precision; in other words, the
2257 floating-point value must be expressible as
2258 (+/-) n / 16 * power (2, r)
2259 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2260
2261 static bfd_boolean
2262 aarch64_imm_float_p (uint32_t imm)
2263 {
2264 /* If a single-precision floating-point value has the following bit
2265 pattern, it can be expressed in the AArch64 8-bit floating-point
2266 format:
2267
2268 3 32222222 2221111111111
2269 1 09876543 21098765432109876543210
2270 n Eeeeeexx xxxx0000000000000000000
2271
2272 where n, e and each x are either 0 or 1 independently, with
2273 E == ~ e. */
2274
2275 uint32_t pattern;
2276
2277 /* Prepare the pattern for 'Eeeeee'. */
2278 if (((imm >> 30) & 0x1) == 0)
2279 pattern = 0x3e000000;
2280 else
2281 pattern = 0x40000000;
2282
2283 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2284 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2285 }
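/* For example, 1.0f (0x3f800000) passes the check: its low 19 bits are
zero, bit 30 is 0 so PATTERN is 0x3e000000, and bits 25-29 are all ones,
i.e. the complement of bit 30. 0.1f (0x3dcccccd) fails because its low
19 bits are not zero. */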
2286
2287 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2288 as an IEEE float without any loss of precision. Store the value in
2289 *FPWORD if so. */
2290
2291 static bfd_boolean
2292 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2293 {
2294 /* If a double-precision floating-point value has the following bit
2295 pattern, it can be expressed in a float:
2296
2297 6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2298 3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2299 n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2300
2301 -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
2302 if Eeee_eeee != 1111_1111
2303
2304 where n, e, s and S are either 0 or 1 independently and where ~ is the
2305 inverse of E. */
2306
2307 uint32_t pattern;
2308 uint32_t high32 = imm >> 32;
2309 uint32_t low32 = imm;
2310
2311 /* Lower 29 bits need to be 0s. */
2312 if ((imm & 0x1fffffff) != 0)
2313 return FALSE;
2314
2315 /* Prepare the pattern for 'Eeeeeeeee'. */
2316 if (((high32 >> 30) & 0x1) == 0)
2317 pattern = 0x38000000;
2318 else
2319 pattern = 0x40000000;
2320
2321 /* Check E~~~. */
2322 if ((high32 & 0x78000000) != pattern)
2323 return FALSE;
2324
2325 /* Check Eeee_eeee != 1111_1111. */
2326 if ((high32 & 0x7ff00000) == 0x47f00000)
2327 return FALSE;
2328
2329 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */
2330 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */
2331 | (low32 >> 29)); /* 3 S bits. */
2332 return TRUE;
2333 }
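/* A worked example: 1.0 as an IEEE double is 0x3ff0000000000000. Its low
29 bits are zero and the exponent checks pass, so the function stores
0x3f800000 (1.0f) in *FPWORD. 0.1 (0x3fb999999999999a) is rejected
because its low 29 bits are non-zero. */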
2334
2335 /* Return true if we should treat OPERAND as a double-precision
2336 floating-point operand rather than a single-precision one. */
2337 static bfd_boolean
2338 double_precision_operand_p (const aarch64_opnd_info *operand)
2339 {
2340 /* Check for unsuffixed SVE registers, which are allowed
2341 for LDR and STR but not in instructions that require an
2342 immediate. We get better error messages if we arbitrarily
2343 pick one size, parse the immediate normally, and then
2344 report the match failure in the normal way. */
2345 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2346 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2347 }
2348
2349 /* Parse a floating-point immediate. Return TRUE on success and return the
2350 value in *IMMED in the format of IEEE754 single-precision encoding.
2351 *CCP points to the start of the string; DP_P is TRUE when the immediate
2352 is expected to be in double-precision (N.B. this only matters when
2353 hexadecimal representation is involved). REG_TYPE says which register
2354 names should be treated as registers rather than as symbolic immediates.
2355
2356 This routine accepts any IEEE float; it is up to the callers to reject
2357 invalid ones. */
2358
2359 static bfd_boolean
2360 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
2361 aarch64_reg_type reg_type)
2362 {
2363 char *str = *ccp;
2364 char *fpnum;
2365 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2366 int64_t val = 0;
2367 unsigned fpword = 0;
2368 bfd_boolean hex_p = FALSE;
2369
2370 skip_past_char (&str, '#');
2371
2372 fpnum = str;
2373 skip_whitespace (fpnum);
2374
2375 if (strncmp (fpnum, "0x", 2) == 0)
2376 {
2377 /* Support the hexadecimal representation of the IEEE754 encoding.
2378 Double-precision is expected when DP_P is TRUE, otherwise the
2379 representation should be in single-precision. */
2380 if (! parse_constant_immediate (&str, &val, reg_type))
2381 goto invalid_fp;
2382
2383 if (dp_p)
2384 {
2385 if (!can_convert_double_to_float (val, &fpword))
2386 goto invalid_fp;
2387 }
2388 else if ((uint64_t) val > 0xffffffff)
2389 goto invalid_fp;
2390 else
2391 fpword = val;
2392
2393 hex_p = TRUE;
2394 }
2395 else if (reg_name_p (str, reg_type))
2396 {
2397 set_recoverable_error (_("immediate operand required"));
2398 return FALSE;
2399 }
2400
2401 if (! hex_p)
2402 {
2403 int i;
2404
2405 if ((str = atof_ieee (str, 's', words)) == NULL)
2406 goto invalid_fp;
2407
2408 /* Our FP word must be 32 bits (single-precision FP). */
2409 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2410 {
2411 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2412 fpword |= words[i];
2413 }
2414 }
2415
2416 *immed = fpword;
2417 *ccp = str;
2418 return TRUE;
2419
2420 invalid_fp:
2421 set_fatal_syntax_error (_("invalid floating-point constant"));
2422 return FALSE;
2423 }
2424
2425 /* Less-generic immediate-value read function with the possibility of loading
2426 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2427 instructions.
2428
2429 To prevent the expression parser from pushing a register name into the
2430 symbol table as an undefined symbol, a check is first made to find
2431 out whether STR is a register of type REG_TYPE followed by a comma or
2432 the end of the line. Return FALSE if STR is such a register. */
2433
2434 static bfd_boolean
2435 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2436 {
2437 char *ptr = *str;
2438
2439 if (reg_name_p (ptr, reg_type))
2440 {
2441 set_syntax_error (_("immediate operand required"));
2442 return FALSE;
2443 }
2444
2445 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2446
2447 if (inst.reloc.exp.X_op == O_constant)
2448 *imm = inst.reloc.exp.X_add_number;
2449
2450 *str = ptr;
2451
2452 return TRUE;
2453 }
2454
2455 /* Record in *RELOC that operand *OPERAND needs a GAS internal fixup.
2456 If NEED_LIBOPCODES_P is non-zero, the fixup will need
2457 assistance from libopcodes. */
2458
2459 static inline void
2460 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2461 const aarch64_opnd_info *operand,
2462 int need_libopcodes_p)
2463 {
2464 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2465 reloc->opnd = operand->type;
2466 if (need_libopcodes_p)
2467 reloc->need_libopcodes_p = 1;
2468 }
2469
2470 /* Return TRUE if the instruction needs to be fixed up later internally by
2471 GAS; otherwise return FALSE. */
2472
2473 static inline bfd_boolean
2474 aarch64_gas_internal_fixup_p (void)
2475 {
2476 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2477 }
2478
2479 /* Assign the immediate value to the relevant field in *OPERAND if
2480 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2481 needs an internal fixup in a later stage.
2482 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2483 IMM.VALUE that may get assigned with the constant. */
2484 static inline void
2485 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2486 aarch64_opnd_info *operand,
2487 int addr_off_p,
2488 int need_libopcodes_p,
2489 int skip_p)
2490 {
2491 if (reloc->exp.X_op == O_constant)
2492 {
2493 if (addr_off_p)
2494 operand->addr.offset.imm = reloc->exp.X_add_number;
2495 else
2496 operand->imm.value = reloc->exp.X_add_number;
2497 reloc->type = BFD_RELOC_UNUSED;
2498 }
2499 else
2500 {
2501 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2502 /* Tell libopcodes to ignore this operand or not. This is helpful
2503 when one of the operands needs to be fixed up later but we need
2504 libopcodes to check the other operands. */
2505 operand->skip = skip_p;
2506 }
2507 }
2508
2509 /* Relocation modifiers. Each entry in the table contains the textual
2510 name for the relocation which may be placed before a symbol used as
2511 a load/store offset, or add immediate. It must be surrounded by a
2512 leading and trailing colon, for example:
2513
2514 ldr x0, [x1, #:rello:varsym]
2515 add x0, x1, #:rello:varsym */
2516
2517 struct reloc_table_entry
2518 {
2519 const char *name;
2520 int pc_rel;
2521 bfd_reloc_code_real_type adr_type;
2522 bfd_reloc_code_real_type adrp_type;
2523 bfd_reloc_code_real_type movw_type;
2524 bfd_reloc_code_real_type add_type;
2525 bfd_reloc_code_real_type ldst_type;
2526 bfd_reloc_code_real_type ld_literal_type;
2527 };
2528
2529 static struct reloc_table_entry reloc_table[] = {
2530 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2531 {"lo12", 0,
2532 0, /* adr_type */
2533 0,
2534 0,
2535 BFD_RELOC_AARCH64_ADD_LO12,
2536 BFD_RELOC_AARCH64_LDST_LO12,
2537 0},
2538
2539 /* Higher 21 bits of pc-relative page offset: ADRP */
2540 {"pg_hi21", 1,
2541 0, /* adr_type */
2542 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2543 0,
2544 0,
2545 0,
2546 0},
2547
2548 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2549 {"pg_hi21_nc", 1,
2550 0, /* adr_type */
2551 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2552 0,
2553 0,
2554 0,
2555 0},
2556
2557 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2558 {"abs_g0", 0,
2559 0, /* adr_type */
2560 0,
2561 BFD_RELOC_AARCH64_MOVW_G0,
2562 0,
2563 0,
2564 0},
2565
2566 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2567 {"abs_g0_s", 0,
2568 0, /* adr_type */
2569 0,
2570 BFD_RELOC_AARCH64_MOVW_G0_S,
2571 0,
2572 0,
2573 0},
2574
2575 /* Less significant bits 0-15 of address/value: MOVK, no check */
2576 {"abs_g0_nc", 0,
2577 0, /* adr_type */
2578 0,
2579 BFD_RELOC_AARCH64_MOVW_G0_NC,
2580 0,
2581 0,
2582 0},
2583
2584 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2585 {"abs_g1", 0,
2586 0, /* adr_type */
2587 0,
2588 BFD_RELOC_AARCH64_MOVW_G1,
2589 0,
2590 0,
2591 0},
2592
2593 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2594 {"abs_g1_s", 0,
2595 0, /* adr_type */
2596 0,
2597 BFD_RELOC_AARCH64_MOVW_G1_S,
2598 0,
2599 0,
2600 0},
2601
2602 /* Less significant bits 16-31 of address/value: MOVK, no check */
2603 {"abs_g1_nc", 0,
2604 0, /* adr_type */
2605 0,
2606 BFD_RELOC_AARCH64_MOVW_G1_NC,
2607 0,
2608 0,
2609 0},
2610
2611 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2612 {"abs_g2", 0,
2613 0, /* adr_type */
2614 0,
2615 BFD_RELOC_AARCH64_MOVW_G2,
2616 0,
2617 0,
2618 0},
2619
2620 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2621 {"abs_g2_s", 0,
2622 0, /* adr_type */
2623 0,
2624 BFD_RELOC_AARCH64_MOVW_G2_S,
2625 0,
2626 0,
2627 0},
2628
2629 /* Less significant bits 32-47 of address/value: MOVK, no check */
2630 {"abs_g2_nc", 0,
2631 0, /* adr_type */
2632 0,
2633 BFD_RELOC_AARCH64_MOVW_G2_NC,
2634 0,
2635 0,
2636 0},
2637
2638 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2639 {"abs_g3", 0,
2640 0, /* adr_type */
2641 0,
2642 BFD_RELOC_AARCH64_MOVW_G3,
2643 0,
2644 0,
2645 0},
2646
2647 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2648 {"prel_g0", 1,
2649 0, /* adr_type */
2650 0,
2651 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2652 0,
2653 0,
2654 0},
2655
2656 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2657 {"prel_g0_nc", 1,
2658 0, /* adr_type */
2659 0,
2660 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2661 0,
2662 0,
2663 0},
2664
2665 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2666 {"prel_g1", 1,
2667 0, /* adr_type */
2668 0,
2669 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2670 0,
2671 0,
2672 0},
2673
2674 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2675 {"prel_g1_nc", 1,
2676 0, /* adr_type */
2677 0,
2678 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2679 0,
2680 0,
2681 0},
2682
2683 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2684 {"prel_g2", 1,
2685 0, /* adr_type */
2686 0,
2687 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2688 0,
2689 0,
2690 0},
2691
2692 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2693 {"prel_g2_nc", 1,
2694 0, /* adr_type */
2695 0,
2696 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2697 0,
2698 0,
2699 0},
2700
2701 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2702 {"prel_g3", 1,
2703 0, /* adr_type */
2704 0,
2705 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2706 0,
2707 0,
2708 0},
2709
2710 /* Get to the page containing GOT entry for a symbol. */
2711 {"got", 1,
2712 0, /* adr_type */
2713 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2714 0,
2715 0,
2716 0,
2717 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2718
2719 /* 12 bit offset into the page containing GOT entry for that symbol. */
2720 {"got_lo12", 0,
2721 0, /* adr_type */
2722 0,
2723 0,
2724 0,
2725 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2726 0},
2727
2728 /* Bits 0-15 of address/value: MOVK, no check. */
2729 {"gotoff_g0_nc", 0,
2730 0, /* adr_type */
2731 0,
2732 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2733 0,
2734 0,
2735 0},
2736
2737 /* Most significant bits 16-31 of address/value: MOVZ. */
2738 {"gotoff_g1", 0,
2739 0, /* adr_type */
2740 0,
2741 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2742 0,
2743 0,
2744 0},
2745
2746 /* 15 bit offset into the page containing GOT entry for that symbol. */
2747 {"gotoff_lo15", 0,
2748 0, /* adr_type */
2749 0,
2750 0,
2751 0,
2752 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2753 0},
2754
2755 /* Get to the page containing GOT TLS entry for a symbol */
2756 {"gottprel_g0_nc", 0,
2757 0, /* adr_type */
2758 0,
2759 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2760 0,
2761 0,
2762 0},
2763
2764 /* Get to the page containing GOT TLS entry for a symbol */
2765 {"gottprel_g1", 0,
2766 0, /* adr_type */
2767 0,
2768 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2769 0,
2770 0,
2771 0},
2772
2773 /* Get to the page containing GOT TLS entry for a symbol */
2774 {"tlsgd", 0,
2775 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2776 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2777 0,
2778 0,
2779 0,
2780 0},
2781
2782 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2783 {"tlsgd_lo12", 0,
2784 0, /* adr_type */
2785 0,
2786 0,
2787 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2788 0,
2789 0},
2790
2791 /* Lower 16 bits of address/value: MOVK. */
2792 {"tlsgd_g0_nc", 0,
2793 0, /* adr_type */
2794 0,
2795 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2796 0,
2797 0,
2798 0},
2799
2800 /* Most significant bits 16-31 of address/value: MOVZ. */
2801 {"tlsgd_g1", 0,
2802 0, /* adr_type */
2803 0,
2804 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2805 0,
2806 0,
2807 0},
2808
2809 /* Get to the page containing GOT TLS entry for a symbol */
2810 {"tlsdesc", 0,
2811 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2812 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2813 0,
2814 0,
2815 0,
2816 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2817
2818 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2819 {"tlsdesc_lo12", 0,
2820 0, /* adr_type */
2821 0,
2822 0,
2823 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2824 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2825 0},
2826
2827 /* Get to the page containing GOT TLS entry for a symbol.
2828 As with GD, we allocate two consecutive GOT slots for the
2829 module index and module offset; the only difference from
2830 GD is that the module offset should be initialized to
2831 zero without any outstanding runtime relocation. */
2832 {"tlsldm", 0,
2833 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2834 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2835 0,
2836 0,
2837 0,
2838 0},
2839
2840 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2841 {"tlsldm_lo12_nc", 0,
2842 0, /* adr_type */
2843 0,
2844 0,
2845 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2846 0,
2847 0},
2848
2849 /* 12 bit offset into the module TLS base address. */
2850 {"dtprel_lo12", 0,
2851 0, /* adr_type */
2852 0,
2853 0,
2854 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2855 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2856 0},
2857
2858 /* Same as dtprel_lo12, no overflow check. */
2859 {"dtprel_lo12_nc", 0,
2860 0, /* adr_type */
2861 0,
2862 0,
2863 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2864 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2865 0},
2866
2867 /* bits[23:12] of offset to the module TLS base address. */
2868 {"dtprel_hi12", 0,
2869 0, /* adr_type */
2870 0,
2871 0,
2872 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2873 0,
2874 0},
2875
2876 /* bits[15:0] of offset to the module TLS base address. */
2877 {"dtprel_g0", 0,
2878 0, /* adr_type */
2879 0,
2880 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2881 0,
2882 0,
2883 0},
2884
2885 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2886 {"dtprel_g0_nc", 0,
2887 0, /* adr_type */
2888 0,
2889 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2890 0,
2891 0,
2892 0},
2893
2894 /* bits[31:16] of offset to the module TLS base address. */
2895 {"dtprel_g1", 0,
2896 0, /* adr_type */
2897 0,
2898 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2899 0,
2900 0,
2901 0},
2902
2903 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2904 {"dtprel_g1_nc", 0,
2905 0, /* adr_type */
2906 0,
2907 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2908 0,
2909 0,
2910 0},
2911
2912 /* bits[47:32] of offset to the module TLS base address. */
2913 {"dtprel_g2", 0,
2914 0, /* adr_type */
2915 0,
2916 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2917 0,
2918 0,
2919 0},
2920
2921 /* Lower 16 bit offset into GOT entry for a symbol */
2922 {"tlsdesc_off_g0_nc", 0,
2923 0, /* adr_type */
2924 0,
2925 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2926 0,
2927 0,
2928 0},
2929
2930 /* Higher 16 bit offset into GOT entry for a symbol */
2931 {"tlsdesc_off_g1", 0,
2932 0, /* adr_type */
2933 0,
2934 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2935 0,
2936 0,
2937 0},
2938
2939 /* Get to the page containing GOT TLS entry for a symbol */
2940 {"gottprel", 0,
2941 0, /* adr_type */
2942 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2943 0,
2944 0,
2945 0,
2946 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2947
2948 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2949 {"gottprel_lo12", 0,
2950 0, /* adr_type */
2951 0,
2952 0,
2953 0,
2954 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2955 0},
2956
2957 /* Get tp offset for a symbol. */
2958 {"tprel", 0,
2959 0, /* adr_type */
2960 0,
2961 0,
2962 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2963 0,
2964 0},
2965
2966 /* Get tp offset for a symbol. */
2967 {"tprel_lo12", 0,
2968 0, /* adr_type */
2969 0,
2970 0,
2971 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2972 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2973 0},
2974
2975 /* Get tp offset for a symbol. */
2976 {"tprel_hi12", 0,
2977 0, /* adr_type */
2978 0,
2979 0,
2980 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2981 0,
2982 0},
2983
2984 /* Get tp offset for a symbol. */
2985 {"tprel_lo12_nc", 0,
2986 0, /* adr_type */
2987 0,
2988 0,
2989 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2990 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2991 0},
2992
2993 /* Most significant bits 32-47 of address/value: MOVZ. */
2994 {"tprel_g2", 0,
2995 0, /* adr_type */
2996 0,
2997 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2998 0,
2999 0,
3000 0},
3001
3002 /* Most significant bits 16-31 of address/value: MOVZ. */
3003 {"tprel_g1", 0,
3004 0, /* adr_type */
3005 0,
3006 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3007 0,
3008 0,
3009 0},
3010
3011 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
3012 {"tprel_g1_nc", 0,
3013 0, /* adr_type */
3014 0,
3015 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3016 0,
3017 0,
3018 0},
3019
3020 /* Most significant bits 0-15 of address/value: MOVZ. */
3021 {"tprel_g0", 0,
3022 0, /* adr_type */
3023 0,
3024 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3025 0,
3026 0,
3027 0},
3028
3029 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3030 {"tprel_g0_nc", 0,
3031 0, /* adr_type */
3032 0,
3033 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3034 0,
3035 0,
3036 0},
3037
3038 /* 15-bit offset from GOT entry to base address of GOT table. */
3039 {"gotpage_lo15", 0,
3040 0,
3041 0,
3042 0,
3043 0,
3044 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3045 0},
3046
3047 /* 14-bit offset from GOT entry to base address of GOT table. */
3048 {"gotpage_lo14", 0,
3049 0,
3050 0,
3051 0,
3052 0,
3053 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3054 0},
3055 };
3056
3057 /* Given the address of a pointer pointing to the textual name of a
3058 relocation as may appear in assembler source, attempt to find its
3059 details in reloc_table. The pointer will be updated to the character
3060 after the trailing colon. On failure, NULL will be returned;
3061 otherwise return the reloc_table_entry. */
3062
3063 static struct reloc_table_entry *
3064 find_reloc_table_entry (char **str)
3065 {
3066 unsigned int i;
3067 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3068 {
3069 int length = strlen (reloc_table[i].name);
3070
3071 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3072 && (*str)[length] == ':')
3073 {
3074 *str += (length + 1);
3075 return &reloc_table[i];
3076 }
3077 }
3078
3079 return NULL;
3080 }
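/* For example, if *STR points at "lo12:sym", the function returns the
"lo12" entry above and advances *STR past the trailing colon so that it
points at "sym". The match is case-insensitive. */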
3081
3082 /* Mode argument to parse_shift and parse_shifter_operand. */
3083 enum parse_shift_mode
3084 {
3085 SHIFTED_NONE, /* no shifter allowed */
3086 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
3087 "#imm{,lsl #n}" */
3088 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
3089 "#imm" */
3090 SHIFTED_LSL, /* bare "lsl #n" */
3091 SHIFTED_MUL, /* bare "mul #n" */
3092 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
3093 SHIFTED_MUL_VL, /* "mul vl" */
3094 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
3095 };
3096
3097 /* Parse a <shift> operator on an AArch64 data processing instruction.
3098 Return TRUE on success; otherwise return FALSE. */
3099 static bfd_boolean
3100 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3101 {
3102 const struct aarch64_name_value_pair *shift_op;
3103 enum aarch64_modifier_kind kind;
3104 expressionS exp;
3105 int exp_has_prefix;
3106 char *s = *str;
3107 char *p = s;
3108
3109 for (p = *str; ISALPHA (*p); p++)
3110 ;
3111
3112 if (p == *str)
3113 {
3114 set_syntax_error (_("shift expression expected"));
3115 return FALSE;
3116 }
3117
3118 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3119
3120 if (shift_op == NULL)
3121 {
3122 set_syntax_error (_("shift operator expected"));
3123 return FALSE;
3124 }
3125
3126 kind = aarch64_get_operand_modifier (shift_op);
3127
3128 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3129 {
3130 set_syntax_error (_("invalid use of 'MSL'"));
3131 return FALSE;
3132 }
3133
3134 if (kind == AARCH64_MOD_MUL
3135 && mode != SHIFTED_MUL
3136 && mode != SHIFTED_MUL_VL)
3137 {
3138 set_syntax_error (_("invalid use of 'MUL'"));
3139 return FALSE;
3140 }
3141
3142 switch (mode)
3143 {
3144 case SHIFTED_LOGIC_IMM:
3145 if (aarch64_extend_operator_p (kind))
3146 {
3147 set_syntax_error (_("extending shift is not permitted"));
3148 return FALSE;
3149 }
3150 break;
3151
3152 case SHIFTED_ARITH_IMM:
3153 if (kind == AARCH64_MOD_ROR)
3154 {
3155 set_syntax_error (_("'ROR' shift is not permitted"));
3156 return FALSE;
3157 }
3158 break;
3159
3160 case SHIFTED_LSL:
3161 if (kind != AARCH64_MOD_LSL)
3162 {
3163 set_syntax_error (_("only 'LSL' shift is permitted"));
3164 return FALSE;
3165 }
3166 break;
3167
3168 case SHIFTED_MUL:
3169 if (kind != AARCH64_MOD_MUL)
3170 {
3171 set_syntax_error (_("only 'MUL' is permitted"));
3172 return FALSE;
3173 }
3174 break;
3175
3176 case SHIFTED_MUL_VL:
3177 /* "MUL VL" consists of two separate tokens. Require the first
3178 token to be "MUL" and look for a following "VL". */
3179 if (kind == AARCH64_MOD_MUL)
3180 {
3181 skip_whitespace (p);
3182 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3183 {
3184 p += 2;
3185 kind = AARCH64_MOD_MUL_VL;
3186 break;
3187 }
3188 }
3189 set_syntax_error (_("only 'MUL VL' is permitted"));
3190 return FALSE;
3191
3192 case SHIFTED_REG_OFFSET:
3193 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3194 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3195 {
3196 set_fatal_syntax_error
3197 (_("invalid shift for the register offset addressing mode"));
3198 return FALSE;
3199 }
3200 break;
3201
3202 case SHIFTED_LSL_MSL:
3203 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3204 {
3205 set_syntax_error (_("invalid shift operator"));
3206 return FALSE;
3207 }
3208 break;
3209
3210 default:
3211 abort ();
3212 }
3213
3214 /* Whitespace can appear here if the next thing is a bare digit. */
3215 skip_whitespace (p);
3216
3217 /* Parse shift amount. */
3218 exp_has_prefix = 0;
3219 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3220 exp.X_op = O_absent;
3221 else
3222 {
3223 if (is_immediate_prefix (*p))
3224 {
3225 p++;
3226 exp_has_prefix = 1;
3227 }
3228 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3229 }
3230 if (kind == AARCH64_MOD_MUL_VL)
3231 /* For consistency, give MUL VL the same shift amount as an implicit
3232 MUL #1. */
3233 operand->shifter.amount = 1;
3234 else if (exp.X_op == O_absent)
3235 {
3236 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3237 {
3238 set_syntax_error (_("missing shift amount"));
3239 return FALSE;
3240 }
3241 operand->shifter.amount = 0;
3242 }
3243 else if (exp.X_op != O_constant)
3244 {
3245 set_syntax_error (_("constant shift amount required"));
3246 return FALSE;
3247 }
3248 /* For parsing purposes, MUL #n has no inherent range. The range
3249 depends on the operand and will be checked by operand-specific
3250 routines. */
3251 else if (kind != AARCH64_MOD_MUL
3252 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3253 {
3254 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3255 return FALSE;
3256 }
3257 else
3258 {
3259 operand->shifter.amount = exp.X_add_number;
3260 operand->shifter.amount_present = 1;
3261 }
3262
3263 operand->shifter.operator_present = 1;
3264 operand->shifter.kind = kind;
3265
3266 *str = p;
3267 return TRUE;
3268 }
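/* Examples of the syntax accepted above (illustrative): "lsl #3" for
SHIFTED_LSL, "uxtw #2" for SHIFTED_REG_OFFSET, "msl #8" for
SHIFTED_LSL_MSL, and "mul vl" for SHIFTED_MUL_VL, where "mul vl" is
recorded with an implicit shift amount of 1. */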
3269
3270 /* Parse a <shifter_operand> for a data processing instruction:
3271
3272 #<immediate>
3273 #<immediate>, LSL #imm
3274
3275 Validation of immediate operands is deferred to md_apply_fix.
3276
3277 Return TRUE on success; otherwise return FALSE. */
3278
3279 static bfd_boolean
3280 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3281 enum parse_shift_mode mode)
3282 {
3283 char *p;
3284
3285 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3286 return FALSE;
3287
3288 p = *str;
3289
3290 /* Accept an immediate expression. */
3291 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3292 return FALSE;
3293
3294 /* Accept optional LSL for arithmetic immediate values. */
3295 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3296 if (! parse_shift (&p, operand, SHIFTED_LSL))
3297 return FALSE;
3298
3299 /* Do not accept any shifter for logical immediate values. */
3300 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3301 && parse_shift (&p, operand, mode))
3302 {
3303 set_syntax_error (_("unexpected shift operator"));
3304 return FALSE;
3305 }
3306
3307 *str = p;
3308 return TRUE;
3309 }
3310
3311 /* Parse a <shifter_operand> for a data processing instruction:
3312
3313 <Rm>
3314 <Rm>, <shift>
3315 #<immediate>
3316 #<immediate>, LSL #imm
3317
3318 where <shift> is handled by parse_shift above, and the last two
3319 cases are handled by the function above.
3320
3321 Validation of immediate operands is deferred to md_apply_fix.
3322
3323 Return TRUE on success; otherwise return FALSE. */
3324
3325 static bfd_boolean
3326 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3327 enum parse_shift_mode mode)
3328 {
3329 const reg_entry *reg;
3330 aarch64_opnd_qualifier_t qualifier;
3331 enum aarch64_operand_class opd_class
3332 = aarch64_get_operand_class (operand->type);
3333
3334 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3335 if (reg)
3336 {
3337 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3338 {
3339 set_syntax_error (_("unexpected register in the immediate operand"));
3340 return FALSE;
3341 }
3342
3343 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3344 {
3345 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3346 return FALSE;
3347 }
3348
3349 operand->reg.regno = reg->number;
3350 operand->qualifier = qualifier;
3351
3352 /* Accept optional shift operation on register. */
3353 if (! skip_past_comma (str))
3354 return TRUE;
3355
3356 if (! parse_shift (str, operand, mode))
3357 return FALSE;
3358
3359 return TRUE;
3360 }
3361 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3362 {
3363 set_syntax_error
3364 (_("integer register expected in the extended/shifted operand "
3365 "register"));
3366 return FALSE;
3367 }
3368
3369 /* We have a shifted immediate variable. */
3370 return parse_shifter_operand_imm (str, operand, mode);
3371 }
3372
3373 /* Return TRUE on success; return FALSE otherwise. */
3374
3375 static bfd_boolean
3376 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3377 enum parse_shift_mode mode)
3378 {
3379 char *p = *str;
3380
3381 /* Determine if we have the sequence of characters #: or just :
3382 coming next. If we do, then we check for a :rello: relocation
3383 modifier. If we don't, punt the whole lot to
3384 parse_shifter_operand. */
3385
3386 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3387 {
3388 struct reloc_table_entry *entry;
3389
3390 if (p[0] == '#')
3391 p += 2;
3392 else
3393 p++;
3394 *str = p;
3395
3396 /* Try to parse a relocation. Anything else is an error. */
3397 if (!(entry = find_reloc_table_entry (str)))
3398 {
3399 set_syntax_error (_("unknown relocation modifier"));
3400 return FALSE;
3401 }
3402
3403 if (entry->add_type == 0)
3404 {
3405 set_syntax_error
3406 (_("this relocation modifier is not allowed on this instruction"));
3407 return FALSE;
3408 }
3409
3410 /* Save str before we decompose it. */
3411 p = *str;
3412
3413 /* Next, we parse the expression. */
3414 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3415 return FALSE;
3416
3417 /* Record the relocation type (use the ADD variant here). */
3418 inst.reloc.type = entry->add_type;
3419 inst.reloc.pc_rel = entry->pc_rel;
3420
3421 /* If str is empty, we've reached the end; stop here. */
3422 if (**str == '\0')
3423 return TRUE;
3424
3425 /* Otherwise, we have a shifted reloc modifier, so rewind to
3426 recover the variable name and continue parsing for the shifter. */
3427 *str = p;
3428 return parse_shifter_operand_imm (str, operand, mode);
3429 }
3430
3431 return parse_shifter_operand (str, operand, mode);
3432 }
3433
3434 /* Parse all forms of an address expression. Information is written
3435 to *OPERAND and/or inst.reloc.
3436
3437 The A64 instruction set has the following addressing modes:
3438
3439 Offset
3440 [base] // in SIMD ld/st structure
3441 [base{,#0}] // in ld/st exclusive
3442 [base{,#imm}]
3443 [base,Xm{,LSL #imm}]
3444 [base,Xm,SXTX {#imm}]
3445 [base,Wm,(S|U)XTW {#imm}]
3446 Pre-indexed
3447 [base]! // in ldraa/ldrab exclusive
3448 [base,#imm]!
3449 Post-indexed
3450 [base],#imm
3451 [base],Xm // in SIMD ld/st structure
3452 PC-relative (literal)
3453 label
3454 SVE:
3455 [base,#imm,MUL VL]
3456 [base,Zm.D{,LSL #imm}]
3457 [base,Zm.S,(S|U)XTW {#imm}]
3458 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3459 [Zn.S,#imm]
3460 [Zn.D,#imm]
3461 [Zn.S{, Xm}]
3462 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3463 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3464 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3465
3466 (As a convenience, the notation "=immediate" is permitted in conjunction
3467 with the pc-relative literal load instructions to automatically place an
3468 immediate value or symbolic address in a nearby literal pool and generate
3469 a hidden label which references it.)
3470
3471 Upon a successful parsing, the address structure in *OPERAND will be
3472 filled in the following way:
3473
3474 .base_regno = <base>
3475 .offset.is_reg // 1 if the offset is a register
3476 .offset.imm = <imm>
3477 .offset.regno = <Rm>
3478
3479 For different addressing modes defined in the A64 ISA:
3480
3481 Offset
3482 .pcrel=0; .preind=1; .postind=0; .writeback=0
3483 Pre-indexed
3484 .pcrel=0; .preind=1; .postind=0; .writeback=1
3485 Post-indexed
3486 .pcrel=0; .preind=0; .postind=1; .writeback=1
3487 PC-relative (literal)
3488 .pcrel=1; .preind=1; .postind=0; .writeback=0
3489
3490 The shift/extension information, if any, will be stored in .shifter.
3491 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3492 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3493 corresponding register.
3494
3495 BASE_TYPE says which types of base register should be accepted and
3496 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3497 is the type of shifter that is allowed for immediate offsets,
3498 or SHIFTED_NONE if none.
3499
3500 In all other respects, it is the caller's responsibility to check
3501 for addressing modes not supported by the instruction, and to set
3502 inst.reloc.type. */
3503
3504 static bfd_boolean
3505 parse_address_main (char **str, aarch64_opnd_info *operand,
3506 aarch64_opnd_qualifier_t *base_qualifier,
3507 aarch64_opnd_qualifier_t *offset_qualifier,
3508 aarch64_reg_type base_type, aarch64_reg_type offset_type,
3509 enum parse_shift_mode imm_shift_mode)
3510 {
3511 char *p = *str;
3512 const reg_entry *reg;
3513 expressionS *exp = &inst.reloc.exp;
3514
3515 *base_qualifier = AARCH64_OPND_QLF_NIL;
3516 *offset_qualifier = AARCH64_OPND_QLF_NIL;
3517 if (! skip_past_char (&p, '['))
3518 {
3519 /* =immediate or label. */
3520 operand->addr.pcrel = 1;
3521 operand->addr.preind = 1;
3522
3523 /* #:<reloc_op>:<symbol> */
3524 skip_past_char (&p, '#');
3525 if (skip_past_char (&p, ':'))
3526 {
3527 bfd_reloc_code_real_type ty;
3528 struct reloc_table_entry *entry;
3529
3530 /* Try to parse a relocation modifier. Anything else is
3531 an error. */
3532 entry = find_reloc_table_entry (&p);
3533 if (! entry)
3534 {
3535 set_syntax_error (_("unknown relocation modifier"));
3536 return FALSE;
3537 }
3538
3539 switch (operand->type)
3540 {
3541 case AARCH64_OPND_ADDR_PCREL21:
3542 /* adr */
3543 ty = entry->adr_type;
3544 break;
3545
3546 default:
3547 ty = entry->ld_literal_type;
3548 break;
3549 }
3550
3551 if (ty == 0)
3552 {
3553 set_syntax_error
3554 (_("this relocation modifier is not allowed on this "
3555 "instruction"));
3556 return FALSE;
3557 }
3558
3559 /* #:<reloc_op>: */
3560 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3561 {
3562 set_syntax_error (_("invalid relocation expression"));
3563 return FALSE;
3564 }
3565
3566 /* #:<reloc_op>:<expr> */
3567 /* Record the relocation type. */
3568 inst.reloc.type = ty;
3569 inst.reloc.pc_rel = entry->pc_rel;
3570 }
3571 else
3572 {
3573
3574 if (skip_past_char (&p, '='))
3575 /* =immediate; need to generate the literal in the literal pool. */
3576 inst.gen_lit_pool = 1;
3577
3578 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3579 {
3580 set_syntax_error (_("invalid address"));
3581 return FALSE;
3582 }
3583 }
3584
3585 *str = p;
3586 return TRUE;
3587 }
3588
3589 /* [ */
3590
3591 reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3592 if (!reg || !aarch64_check_reg_type (reg, base_type))
3593 {
3594 set_syntax_error (_(get_reg_expected_msg (base_type)));
3595 return FALSE;
3596 }
3597 operand->addr.base_regno = reg->number;
3598
3599 /* [Xn */
3600 if (skip_past_comma (&p))
3601 {
3602 /* [Xn, */
3603 operand->addr.preind = 1;
3604
3605 reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3606 if (reg)
3607 {
3608 if (!aarch64_check_reg_type (reg, offset_type))
3609 {
3610 set_syntax_error (_(get_reg_expected_msg (offset_type)));
3611 return FALSE;
3612 }
3613
3614 /* [Xn,Rm */
3615 operand->addr.offset.regno = reg->number;
3616 operand->addr.offset.is_reg = 1;
3617 /* Shifted index. */
3618 if (skip_past_comma (&p))
3619 {
3620 /* [Xn,Rm, */
3621 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3622 /* Use the diagnostics set in parse_shift, so do not set a
3623 new error message here. */
3624 return FALSE;
3625 }
3626 /* We only accept:
3627 [base,Xm] # For vector plus scalar SVE2 indexing.
3628 [base,Xm{,LSL #imm}]
3629 [base,Xm,SXTX {#imm}]
3630 [base,Wm,(S|U)XTW {#imm}] */
3631 if (operand->shifter.kind == AARCH64_MOD_NONE
3632 || operand->shifter.kind == AARCH64_MOD_LSL
3633 || operand->shifter.kind == AARCH64_MOD_SXTX)
3634 {
3635 if (*offset_qualifier == AARCH64_OPND_QLF_W)
3636 {
3637 set_syntax_error (_("invalid use of 32-bit register offset"));
3638 return FALSE;
3639 }
3640 if (aarch64_get_qualifier_esize (*base_qualifier)
3641 != aarch64_get_qualifier_esize (*offset_qualifier)
3642 && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
3643 || *base_qualifier != AARCH64_OPND_QLF_S_S
3644 || *offset_qualifier != AARCH64_OPND_QLF_X))
3645 {
3646 set_syntax_error (_("offset has different size from base"));
3647 return FALSE;
3648 }
3649 }
3650 else if (*offset_qualifier == AARCH64_OPND_QLF_X)
3651 {
3652 set_syntax_error (_("invalid use of 64-bit register offset"));
3653 return FALSE;
3654 }
3655 }
3656 else
3657 {
3658 /* [Xn,#:<reloc_op>:<symbol> */
3659 skip_past_char (&p, '#');
3660 if (skip_past_char (&p, ':'))
3661 {
3662 struct reloc_table_entry *entry;
3663
3664 /* Try to parse a relocation modifier. Anything else is
3665 an error. */
3666 if (!(entry = find_reloc_table_entry (&p)))
3667 {
3668 set_syntax_error (_("unknown relocation modifier"));
3669 return FALSE;
3670 }
3671
3672 if (entry->ldst_type == 0)
3673 {
3674 set_syntax_error
3675 (_("this relocation modifier is not allowed on this "
3676 "instruction"));
3677 return FALSE;
3678 }
3679
3680 /* [Xn,#:<reloc_op>: */
3681 /* We now have the group relocation table entry corresponding to
3682 the name in the assembler source. Next, we parse the
3683 expression. */
3684 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3685 {
3686 set_syntax_error (_("invalid relocation expression"));
3687 return FALSE;
3688 }
3689
3690 /* [Xn,#:<reloc_op>:<expr> */
3691 /* Record the load/store relocation type. */
3692 inst.reloc.type = entry->ldst_type;
3693 inst.reloc.pc_rel = entry->pc_rel;
3694 }
3695 else
3696 {
3697 if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3698 {
3699 set_syntax_error (_("invalid expression in the address"));
3700 return FALSE;
3701 }
3702 /* [Xn,<expr> */
3703 if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
3704 /* [Xn,<expr>,<shifter> */
3705 if (! parse_shift (&p, operand, imm_shift_mode))
3706 return FALSE;
3707 }
3708 }
3709 }
3710
3711 if (! skip_past_char (&p, ']'))
3712 {
3713 set_syntax_error (_("']' expected"));
3714 return FALSE;
3715 }
3716
3717 if (skip_past_char (&p, '!'))
3718 {
3719 if (operand->addr.preind && operand->addr.offset.is_reg)
3720 {
3721 set_syntax_error (_("register offset not allowed in pre-indexed "
3722 "addressing mode"));
3723 return FALSE;
3724 }
3725 /* [Xn]! */
3726 operand->addr.writeback = 1;
3727 }
3728 else if (skip_past_comma (&p))
3729 {
3730 /* [Xn], */
3731 operand->addr.postind = 1;
3732 operand->addr.writeback = 1;
3733
3734 if (operand->addr.preind)
3735 {
3736 set_syntax_error (_("cannot combine pre- and post-indexing"));
3737 return FALSE;
3738 }
3739
3740 reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
3741 if (reg)
3742 {
3743 /* [Xn],Xm */
3744 if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3745 {
3746 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3747 return FALSE;
3748 }
3749
3750 operand->addr.offset.regno = reg->number;
3751 operand->addr.offset.is_reg = 1;
3752 }
3753 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3754 {
3755 /* [Xn],#expr */
3756 set_syntax_error (_("invalid expression in the address"));
3757 return FALSE;
3758 }
3759 }
3760
3761 /* If at this point neither .preind nor .postind is set, we have a
3762 bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
3763 ldrab; accept [Rn] as a shorthand for [Rn,#0].
3764 For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
3765 [Zn.<T>, xzr]. */
3766 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3767 {
3768 if (operand->addr.writeback)
3769 {
3770 if (operand->type == AARCH64_OPND_ADDR_SIMM10)
3771 {
3772 /* Accept [Rn]! as a shorthand for [Rn,#0]! */
3773 operand->addr.offset.is_reg = 0;
3774 operand->addr.offset.imm = 0;
3775 operand->addr.preind = 1;
3776 }
3777 else
3778 {
3779 /* Reject [Rn]! */
3780 set_syntax_error (_("missing offset in the pre-indexed address"));
3781 return FALSE;
3782 }
3783 }
3784 else
3785 {
3786 operand->addr.preind = 1;
3787 if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
3788 {
3789 operand->addr.offset.is_reg = 1;
3790 operand->addr.offset.regno = REG_ZR;
3791 *offset_qualifier = AARCH64_OPND_QLF_X;
3792 }
3793 else
3794 {
3795 inst.reloc.exp.X_op = O_constant;
3796 inst.reloc.exp.X_add_number = 0;
3797 }
3798 }
3799 }
3800
3801 *str = p;
3802 return TRUE;
3803 }
3804
3805 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3806 on success. */
3807 static bfd_boolean
3808 parse_address (char **str, aarch64_opnd_info *operand)
3809 {
3810 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3811 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3812 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3813 }
3814
3815 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3816 The arguments have the same meaning as for parse_address_main.
3817 Return TRUE on success. */
3818 static bfd_boolean
3819 parse_sve_address (char **str, aarch64_opnd_info *operand,
3820 aarch64_opnd_qualifier_t *base_qualifier,
3821 aarch64_opnd_qualifier_t *offset_qualifier)
3822 {
3823 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3824 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3825 SHIFTED_MUL_VL);
3826 }
3827
3828 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3829 Return TRUE on success; otherwise return FALSE. */
3830 static bfd_boolean
3831 parse_half (char **str, int *internal_fixup_p)
3832 {
3833 char *p = *str;
3834
3835 skip_past_char (&p, '#');
3836
3837 gas_assert (internal_fixup_p);
3838 *internal_fixup_p = 0;
3839
3840 if (*p == ':')
3841 {
3842 struct reloc_table_entry *entry;
3843
3844 /* Try to parse a relocation. Anything else is an error. */
3845 ++p;
3846 if (!(entry = find_reloc_table_entry (&p)))
3847 {
3848 set_syntax_error (_("unknown relocation modifier"));
3849 return FALSE;
3850 }
3851
3852 if (entry->movw_type == 0)
3853 {
3854 set_syntax_error
3855 (_("this relocation modifier is not allowed on this instruction"));
3856 return FALSE;
3857 }
3858
3859 inst.reloc.type = entry->movw_type;
3860 }
3861 else
3862 *internal_fixup_p = 1;
3863
3864 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3865 return FALSE;
3866
3867 *str = p;
3868 return TRUE;
3869 }
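/* For example, "#:abs_g1:sym" selects the movw_type of the "abs_g1" entry
(BFD_RELOC_AARCH64_MOVW_G1), while a plain "#1234" sets *INTERNAL_FIXUP_P
so that the immediate is encoded by GAS itself ("sym" is a hypothetical
symbol). */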
3870
3871 /* Parse an operand for an ADRP instruction:
3872 ADRP <Xd>, <label>
3873 Return TRUE on success; otherwise return FALSE. */
3874
3875 static bfd_boolean
3876 parse_adrp (char **str)
3877 {
3878 char *p;
3879
3880 p = *str;
3881 if (*p == ':')
3882 {
3883 struct reloc_table_entry *entry;
3884
3885 /* Try to parse a relocation. Anything else is an error. */
3886 ++p;
3887 if (!(entry = find_reloc_table_entry (&p)))
3888 {
3889 set_syntax_error (_("unknown relocation modifier"));
3890 return FALSE;
3891 }
3892
3893 if (entry->adrp_type == 0)
3894 {
3895 set_syntax_error
3896 (_("this relocation modifier is not allowed on this instruction"));
3897 return FALSE;
3898 }
3899
3900 inst.reloc.type = entry->adrp_type;
3901 }
3902 else
3903 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3904
3905 inst.reloc.pc_rel = 1;
3906
3907 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3908 return FALSE;
3909
3910 *str = p;
3911 return TRUE;
3912 }
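/* For example, "adrp x0, :got:sym" uses the adrp_type of the "got" entry
(BFD_RELOC_AARCH64_ADR_GOT_PAGE), whereas a plain "adrp x0, sym" falls
back to BFD_RELOC_AARCH64_ADR_HI21_PCREL ("sym" is hypothetical). */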
3913
3914 /* Miscellaneous. */
3915
3916 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3917 of SIZE tokens in which index I gives the token for field value I,
3918 or is null if field value I is invalid. REG_TYPE says which register
3919 names should be treated as registers rather than as symbolic immediates.
3920
3921 Return true on success, moving *STR past the operand and storing the
3922 field value in *VAL. */
3923
3924 static int
3925 parse_enum_string (char **str, int64_t *val, const char *const *array,
3926 size_t size, aarch64_reg_type reg_type)
3927 {
3928 expressionS exp;
3929 char *p, *q;
3930 size_t i;
3931
3932 /* Match C-like tokens. */
3933 p = q = *str;
3934 while (ISALNUM (*q))
3935 q++;
3936
3937 for (i = 0; i < size; ++i)
3938 if (array[i]
3939 && strncasecmp (array[i], p, q - p) == 0
3940 && array[i][q - p] == 0)
3941 {
3942 *val = i;
3943 *str = q;
3944 return TRUE;
3945 }
3946
3947 if (!parse_immediate_expression (&p, &exp, reg_type))
3948 return FALSE;
3949
3950 if (exp.X_op == O_constant
3951 && (uint64_t) exp.X_add_number < size)
3952 {
3953 *val = exp.X_add_number;
3954 *str = p;
3955 return TRUE;
3956 }
3957
3958 /* Use the default error for this operand. */
3959 return FALSE;
3960 }
3961
3962 /* Parse an option for a preload instruction. Returns the encoding for the
3963 option, or PARSE_FAIL. */
3964
3965 static int
3966 parse_pldop (char **str)
3967 {
3968 char *p, *q;
3969 const struct aarch64_name_value_pair *o;
3970
3971 p = q = *str;
3972 while (ISALNUM (*q))
3973 q++;
3974
3975 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
3976 if (!o)
3977 return PARSE_FAIL;
3978
3979 *str = q;
3980 return o->value;
3981 }
3982
3983 /* Parse an option for a barrier instruction. Returns the encoding for the
3984 option, or PARSE_FAIL. */
3985
3986 static int
3987 parse_barrier (char **str)
3988 {
3989 char *p, *q;
3990 const struct aarch64_name_value_pair *o;
3991
3992 p = q = *str;
3993 while (ISALPHA (*q))
3994 q++;
3995
3996 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3997 if (!o)
3998 return PARSE_FAIL;
3999
4000 *str = q;
4001 return o->value;
4002 }
4003
4004 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4005 and return 0 if successful. Otherwise return PARSE_FAIL. */
4006
4007 static int
4008 parse_barrier_psb (char **str,
4009 const struct aarch64_name_value_pair ** hint_opt)
4010 {
4011 char *p, *q;
4012 const struct aarch64_name_value_pair *o;
4013
4014 p = q = *str;
4015 while (ISALPHA (*q))
4016 q++;
4017
4018 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4019 if (!o)
4020 {
4021 set_fatal_syntax_error
4022 ( _("unknown or missing option to PSB/TSB"));
4023 return PARSE_FAIL;
4024 }
4025
4026 if (o->value != 0x11)
4027 {
4028 /* PSB only accepts option name 'CSYNC'. */
4029 set_syntax_error
4030 (_("the specified option is not accepted for PSB/TSB"));
4031 return PARSE_FAIL;
4032 }
4033
4034 *str = q;
4035 *hint_opt = o;
4036 return 0;
4037 }
4038
4039 /* Parse an operand for CSR (CSRE instruction). */
4040
4041 static int
4042 parse_csr_operand (char **str)
4043 {
4044 char *p, *q;
4045
4046 p = q = *str;
4047 while (ISALPHA (*q))
4048 q++;
4049
4050 /* The instruction has only one operand, PDEC, which encodes the Rt field
4051 of the operation as 0b11111. */
4052 if (strcasecmp(p, "pdec"))
4053 {
4054 set_syntax_error (_("CSR instruction accepts only PDEC"));
4055 return PARSE_FAIL;
4056 }
4057
4058 *str = q;
4059 return 0;
4060 }
4061
4062 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4063 and return 0 if successful. Otherwise return PARSE_FAIL. */
4064
4065 static int
4066 parse_bti_operand (char **str,
4067 const struct aarch64_name_value_pair ** hint_opt)
4068 {
4069 char *p, *q;
4070 const struct aarch64_name_value_pair *o;
4071
4072 p = q = *str;
4073 while (ISALPHA (*q))
4074 q++;
4075
4076 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4077 if (!o)
4078 {
4079 set_fatal_syntax_error
4080 ( _("unknown option to BTI"));
4081 return PARSE_FAIL;
4082 }
4083
4084 switch (o->value)
4085 {
4086 /* Valid BTI operands. */
4087 case HINT_OPD_C:
4088 case HINT_OPD_J:
4089 case HINT_OPD_JC:
4090 break;
4091
4092 default:
4093 set_syntax_error
4094 (_("unknown option to BTI"));
4095 return PARSE_FAIL;
4096 }
4097
4098 *str = q;
4099 *hint_opt = o;
4100 return 0;
4101 }
4102
4103 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4104 Returns the encoding for the option, or PARSE_FAIL.
4105
4106 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4107 implementation-defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4108
4109 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4110 field, otherwise as a system register.
4111 */
4112
4113 static int
4114 parse_sys_reg (char **str, htab_t sys_regs,
4115 int imple_defined_p, int pstatefield_p,
4116 uint32_t* flags)
4117 {
4118 char *p, *q;
4119 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4120 const aarch64_sys_reg *o;
4121 int value;
4122
4123 p = buf;
4124 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4125 if (p < buf + (sizeof (buf) - 1))
4126 *p++ = TOLOWER (*q);
4127 *p = '\0';
4128
4129 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4130 valid system register. This is enforced by construction of the hash
4131 table. */
4132 if (p - buf != q - *str)
4133 return PARSE_FAIL;
4134
4135 o = str_hash_find (sys_regs, buf);
4136 if (!o)
4137 {
4138 if (!imple_defined_p)
4139 return PARSE_FAIL;
4140 else
4141 {
4142 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
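/* As an illustration of the packing computed below (the name is made up,
   not a real register): "s2_1_c3_c4_5" would give
   (2 << 14) | (1 << 11) | (3 << 7) | (4 << 3) | 5 == 0x89a5.  */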
4143 unsigned int op0, op1, cn, cm, op2;
4144
4145 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
4146 != 5)
4147 return PARSE_FAIL;
4148 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
4149 return PARSE_FAIL;
4150 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
4151 if (flags)
4152 *flags = 0;
4153 }
4154 }
4155 else
4156 {
4157 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
4158 as_bad (_("selected processor does not support PSTATE field "
4159 "name '%s'"), buf);
4160 if (!pstatefield_p
4161 && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
4162 o->value, o->flags, o->features))
4163 as_bad (_("selected processor does not support system register "
4164 "name '%s'"), buf);
4165 if (aarch64_sys_reg_deprecated_p (o->flags))
4166 as_warn (_("system register name '%s' is deprecated and may be "
4167 "removed in a future release"), buf);
4168 value = o->value;
4169 if (flags)
4170 *flags = o->flags;
4171 }
4172
4173 *str = q;
4174 return value;
4175 }
4176
4177 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4178 for the option, or NULL. */
4179
4180 static const aarch64_sys_ins_reg *
4181 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4182 {
4183 char *p, *q;
4184 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4185 const aarch64_sys_ins_reg *o;
4186
4187 p = buf;
4188 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4189 if (p < buf + (sizeof (buf) - 1))
4190 *p++ = TOLOWER (*q);
4191 *p = '\0';
4192
4193 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4194 valid system register. This is enforced by construction of the hash
4195 table. */
4196 if (p - buf != q - *str)
4197 return NULL;
4198
4199 o = str_hash_find (sys_ins_regs, buf);
4200 if (!o)
4201 return NULL;
4202
4203 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4204 o->name, o->value, o->flags, 0))
4205 as_bad (_("selected processor does not support system register "
4206 "name '%s'"), buf);
4207 if (aarch64_sys_reg_deprecated_p (o->flags))
4208 as_warn (_("system register name '%s' is deprecated and may be "
4209 "removed in a future release"), buf);
4210
4211 *str = q;
4212 return o;
4213 }
4214 \f
4215 #define po_char_or_fail(chr) do { \
4216 if (! skip_past_char (&str, chr)) \
4217 goto failure; \
4218 } while (0)
4219
4220 #define po_reg_or_fail(regtype) do { \
4221 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
4222 if (val == PARSE_FAIL) \
4223 { \
4224 set_default_error (); \
4225 goto failure; \
4226 } \
4227 } while (0)
4228
4229 #define po_int_reg_or_fail(reg_type) do { \
4230 reg = aarch64_reg_parse_32_64 (&str, &qualifier); \
4231 if (!reg || !aarch64_check_reg_type (reg, reg_type)) \
4232 { \
4233 set_default_error (); \
4234 goto failure; \
4235 } \
4236 info->reg.regno = reg->number; \
4237 info->qualifier = qualifier; \
4238 } while (0)
4239
4240 #define po_imm_nc_or_fail() do { \
4241 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4242 goto failure; \
4243 } while (0)
4244
4245 #define po_imm_or_fail(min, max) do { \
4246 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4247 goto failure; \
4248 if (val < min || val > max) \
4249 { \
4250 set_fatal_syntax_error (_("immediate value out of range "\
4251 #min " to "#max)); \
4252 goto failure; \
4253 } \
4254 } while (0)
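/* Note that #min and #max stringize the macro arguments, so e.g.
   po_imm_or_fail (0, 63) reports "immediate value out of range 0 to 63".  */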
4255
4256 #define po_enum_or_fail(array) do { \
4257 if (!parse_enum_string (&str, &val, array, \
4258 ARRAY_SIZE (array), imm_reg_type)) \
4259 goto failure; \
4260 } while (0)
4261
4262 #define po_misc_or_fail(expr) do { \
4263 if (!expr) \
4264 goto failure; \
4265 } while (0)
4266 \f
4267 /* encode the 12-bit imm field of Add/sub immediate */
4268 static inline uint32_t
4269 encode_addsub_imm (uint32_t imm)
4270 {
4271 return imm << 10;
4272 }
4273
4274 /* encode the shift amount field of Add/sub immediate */
4275 static inline uint32_t
4276 encode_addsub_imm_shift_amount (uint32_t cnt)
4277 {
4278 return cnt << 22;
4279 }
4280
4281
4282 /* encode the imm field of Adr instruction */
4283 static inline uint32_t
4284 encode_adr_imm (uint32_t imm)
4285 {
4286 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
4287 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
4288 }
4289
4290 /* encode the immediate field of Move wide immediate */
4291 static inline uint32_t
4292 encode_movw_imm (uint32_t imm)
4293 {
4294 return imm << 5;
4295 }
4296
4297 /* encode the 26-bit offset of unconditional branch */
4298 static inline uint32_t
4299 encode_branch_ofs_26 (uint32_t ofs)
4300 {
4301 return ofs & ((1 << 26) - 1);
4302 }
4303
4304 /* encode the 19-bit offset of conditional branch and compare & branch */
4305 static inline uint32_t
4306 encode_cond_branch_ofs_19 (uint32_t ofs)
4307 {
4308 return (ofs & ((1 << 19) - 1)) << 5;
4309 }
4310
4311 /* encode the 19-bit offset of ld literal */
4312 static inline uint32_t
4313 encode_ld_lit_ofs_19 (uint32_t ofs)
4314 {
4315 return (ofs & ((1 << 19) - 1)) << 5;
4316 }
4317
4318 /* Encode the 14-bit offset of test & branch. */
4319 static inline uint32_t
4320 encode_tst_branch_ofs_14 (uint32_t ofs)
4321 {
4322 return (ofs & ((1 << 14) - 1)) << 5;
4323 }
4324
4325 /* Encode the 16-bit imm field of svc/hvc/smc. */
4326 static inline uint32_t
4327 encode_svc_imm (uint32_t imm)
4328 {
4329 return imm << 5;
4330 }
4331
4332 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
4333 static inline uint32_t
4334 reencode_addsub_switch_add_sub (uint32_t opcode)
4335 {
4336 return opcode ^ (1 << 30);
4337 }
4338
4339 static inline uint32_t
4340 reencode_movzn_to_movz (uint32_t opcode)
4341 {
4342 return opcode | (1 << 30);
4343 }
4344
4345 static inline uint32_t
4346 reencode_movzn_to_movn (uint32_t opcode)
4347 {
4348 return opcode & ~(1 << 30);
4349 }
4350
4351 /* Overall per-instruction processing. */
4352
4353 /* We need to be able to fix up arbitrary expressions in some statements.
4354 This is so that we can handle symbols that are an arbitrary distance from
4355 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4356 which returns part of an address in a form which will be valid for
4357 a data instruction. We do this by pushing the expression into a symbol
4358 in the expr_section, and creating a fix for that. */
4359
4360 static fixS *
4361 fix_new_aarch64 (fragS * frag,
4362 int where,
4363 short int size,
4364 expressionS * exp,
4365 int pc_rel,
4366 int reloc)
4367 {
4368 fixS *new_fix;
4369
4370 switch (exp->X_op)
4371 {
4372 case O_constant:
4373 case O_symbol:
4374 case O_add:
4375 case O_subtract:
4376 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4377 break;
4378
4379 default:
4380 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4381 pc_rel, reloc);
4382 break;
4383 }
4384 return new_fix;
4385 }
4386 \f
4387 /* Diagnostics on operand errors. */
4388
4389 /* By default, output verbose error message.
4390 Disable the verbose error message by -mno-verbose-error. */
4391 static int verbose_error_p = 1;
4392
4393 #ifdef DEBUG_AARCH64
4394 /* N.B. this is only for the purpose of debugging. */
4395 const char* operand_mismatch_kind_names[] =
4396 {
4397 "AARCH64_OPDE_NIL",
4398 "AARCH64_OPDE_RECOVERABLE",
4399 "AARCH64_OPDE_SYNTAX_ERROR",
4400 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
4401 "AARCH64_OPDE_INVALID_VARIANT",
4402 "AARCH64_OPDE_OUT_OF_RANGE",
4403 "AARCH64_OPDE_UNALIGNED",
4404 "AARCH64_OPDE_REG_LIST",
4405 "AARCH64_OPDE_OTHER_ERROR",
4406 };
4407 #endif /* DEBUG_AARCH64 */
4408
4409 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4410
4411 When multiple errors of different kinds are found in the same assembly
4412 line, only the error of the highest severity will be picked up for
4413 issuing the diagnostics. */
4414
4415 static inline bfd_boolean
4416 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
4417 enum aarch64_operand_error_kind rhs)
4418 {
4419 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
4420 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
4421 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
4422 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
4423 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
4424 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
4425 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
4426 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
4427 return lhs > rhs;
4428 }
4429
4430 /* Helper routine to get the mnemonic name from the assembly instruction
4431 line; it should only be called for diagnostic purposes, as there is a
4432 string copy operation involved, which may affect the runtime
4433 performance if used elsewhere. */
4434
4435 static const char*
4436 get_mnemonic_name (const char *str)
4437 {
4438 static char mnemonic[32];
4439 char *ptr;
4440
4441 /* Get the first 31 bytes and assume that the full name is included. */
4442 strncpy (mnemonic, str, 31);
4443 mnemonic[31] = '\0';
4444
4445 /* Scan up to the end of the mnemonic, which must end in white space,
4446 '.', or end of string. */
4447 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
4448 ;
4449
4450 *ptr = '\0';
4451
4452 /* Append '...' to the truncated long name. */
4453 if (ptr - mnemonic == 31)
4454 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
4455
4456 return mnemonic;
4457 }
4458
4459 static void
4460 reset_aarch64_instruction (aarch64_instruction *instruction)
4461 {
4462 memset (instruction, '\0', sizeof (aarch64_instruction));
4463 instruction->reloc.type = BFD_RELOC_UNUSED;
4464 }
4465
4466 /* Data structures storing one user error in the assembly code related to
4467 operands. */
4468
4469 struct operand_error_record
4470 {
4471 const aarch64_opcode *opcode;
4472 aarch64_operand_error detail;
4473 struct operand_error_record *next;
4474 };
4475
4476 typedef struct operand_error_record operand_error_record;
4477
4478 struct operand_errors
4479 {
4480 operand_error_record *head;
4481 operand_error_record *tail;
4482 };
4483
4484 typedef struct operand_errors operand_errors;
4485
4486 /* Top-level data structure reporting user errors for the current line of
4487 the assembly code.
4488 The way md_assemble works is that all opcodes sharing the same mnemonic
4489 name are iterated to find a match to the assembly line. In this data
4490 structure, each such opcode will have one operand_error_record
4491 allocated and inserted. In other words, excessive errors related to
4492 a single opcode are disregarded. */
4493 operand_errors operand_error_report;
4494
4495 /* Free record nodes. */
4496 static operand_error_record *free_opnd_error_record_nodes = NULL;
4497
4498 /* Initialize the data structure that stores the operand mismatch
4499 information on assembling one line of the assembly code. */
4500 static void
4501 init_operand_error_report (void)
4502 {
4503 if (operand_error_report.head != NULL)
4504 {
4505 gas_assert (operand_error_report.tail != NULL);
4506 operand_error_report.tail->next = free_opnd_error_record_nodes;
4507 free_opnd_error_record_nodes = operand_error_report.head;
4508 operand_error_report.head = NULL;
4509 operand_error_report.tail = NULL;
4510 return;
4511 }
4512 gas_assert (operand_error_report.tail == NULL);
4513 }
4514
4515 /* Return TRUE if some operand error has been recorded during the
4516 parsing of the current assembly line using the opcode *OPCODE;
4517 otherwise return FALSE. */
4518 static inline bfd_boolean
4519 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4520 {
4521 operand_error_record *record = operand_error_report.head;
4522 return record && record->opcode == opcode;
4523 }
4524
4525 /* Add the error record *NEW_RECORD to operand_error_report. The record's
4526 OPCODE field is initialized with OPCODE.
4527 N.B. there is only one record for each opcode, i.e. a maximum of one error is
4528 recorded for each instruction template. */
4529
4530 static void
4531 add_operand_error_record (const operand_error_record* new_record)
4532 {
4533 const aarch64_opcode *opcode = new_record->opcode;
4534 operand_error_record* record = operand_error_report.head;
4535
4536 /* The record may have been created for this opcode. If not, we need
4537 to prepare one. */
4538 if (! opcode_has_operand_error_p (opcode))
4539 {
4540 /* Get one empty record. */
4541 if (free_opnd_error_record_nodes == NULL)
4542 {
4543 record = XNEW (operand_error_record);
4544 }
4545 else
4546 {
4547 record = free_opnd_error_record_nodes;
4548 free_opnd_error_record_nodes = record->next;
4549 }
4550 record->opcode = opcode;
4551 /* Insert at the head. */
4552 record->next = operand_error_report.head;
4553 operand_error_report.head = record;
4554 if (operand_error_report.tail == NULL)
4555 operand_error_report.tail = record;
4556 }
4557 else if (record->detail.kind != AARCH64_OPDE_NIL
4558 && record->detail.index <= new_record->detail.index
4559 && operand_error_higher_severity_p (record->detail.kind,
4560 new_record->detail.kind))
4561 {
4562 /* In the case of multiple errors found on operands related to a
4563 single opcode, only record the error of the leftmost operand and
4564 only if the error is of higher severity. */
4565 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
4566 " the existing error %s on operand %d",
4567 operand_mismatch_kind_names[new_record->detail.kind],
4568 new_record->detail.index,
4569 operand_mismatch_kind_names[record->detail.kind],
4570 record->detail.index);
4571 return;
4572 }
4573
4574 record->detail = new_record->detail;
4575 }
4576
4577 static inline void
4578 record_operand_error_info (const aarch64_opcode *opcode,
4579 aarch64_operand_error *error_info)
4580 {
4581 operand_error_record record;
4582 record.opcode = opcode;
4583 record.detail = *error_info;
4584 add_operand_error_record (&record);
4585 }
4586
4587 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4588 error message *ERROR, for operand IDX (counting from 0). */
4589
4590 static void
4591 record_operand_error (const aarch64_opcode *opcode, int idx,
4592 enum aarch64_operand_error_kind kind,
4593 const char* error)
4594 {
4595 aarch64_operand_error info;
4596 memset(&info, 0, sizeof (info));
4597 info.index = idx;
4598 info.kind = kind;
4599 info.error = error;
4600 info.non_fatal = FALSE;
4601 record_operand_error_info (opcode, &info);
4602 }
4603
4604 static void
4605 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4606 enum aarch64_operand_error_kind kind,
4607 const char* error, const int *extra_data)
4608 {
4609 aarch64_operand_error info;
4610 info.index = idx;
4611 info.kind = kind;
4612 info.error = error;
4613 info.data[0] = extra_data[0];
4614 info.data[1] = extra_data[1];
4615 info.data[2] = extra_data[2];
4616 info.non_fatal = FALSE;
4617 record_operand_error_info (opcode, &info);
4618 }
4619
4620 static void
4621 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4622 const char* error, int lower_bound,
4623 int upper_bound)
4624 {
4625 int data[3] = {lower_bound, upper_bound, 0};
4626 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4627 error, data);
4628 }
4629
4630 /* Remove the operand error record for *OPCODE. */
4631 static void ATTRIBUTE_UNUSED
4632 remove_operand_error_record (const aarch64_opcode *opcode)
4633 {
4634 if (opcode_has_operand_error_p (opcode))
4635 {
4636 operand_error_record* record = operand_error_report.head;
4637 gas_assert (record != NULL && operand_error_report.tail != NULL);
4638 operand_error_report.head = record->next;
4639 record->next = free_opnd_error_record_nodes;
4640 free_opnd_error_record_nodes = record;
4641 if (operand_error_report.head == NULL)
4642 {
4643 gas_assert (operand_error_report.tail == record);
4644 operand_error_report.tail = NULL;
4645 }
4646 }
4647 }
4648
4649 /* Given the instruction in *INSTR, return the index of the best matched
4650 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4651
4652 Return -1 if there is no qualifier sequence; return the first match
4653 if multiple matches are found. */
4654
4655 static int
4656 find_best_match (const aarch64_inst *instr,
4657 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4658 {
4659 int i, num_opnds, max_num_matched, idx;
4660
4661 num_opnds = aarch64_num_of_operands (instr->opcode);
4662 if (num_opnds == 0)
4663 {
4664 DEBUG_TRACE ("no operand");
4665 return -1;
4666 }
4667
4668 max_num_matched = 0;
4669 idx = 0;
4670
4671 /* For each pattern. */
4672 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4673 {
4674 int j, num_matched;
4675 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4676
4677 /* Most opcodes have far fewer patterns in the list. */
4678 if (empty_qualifier_sequence_p (qualifiers))
4679 {
4680 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4681 break;
4682 }
4683
4684 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4685 if (*qualifiers == instr->operands[j].qualifier)
4686 ++num_matched;
4687
4688 if (num_matched > max_num_matched)
4689 {
4690 max_num_matched = num_matched;
4691 idx = i;
4692 }
4693 }
4694
4695 DEBUG_TRACE ("return with %d", idx);
4696 return idx;
4697 }
4698
4699 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4700 corresponding operands in *INSTR. */
4701
4702 static inline void
4703 assign_qualifier_sequence (aarch64_inst *instr,
4704 const aarch64_opnd_qualifier_t *qualifiers)
4705 {
4706 int i = 0;
4707 int num_opnds = aarch64_num_of_operands (instr->opcode);
4708 gas_assert (num_opnds);
4709 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4710 instr->operands[i].qualifier = *qualifiers;
4711 }
4712
4713 /* Print operands for diagnostic purposes. */
4714
4715 static void
4716 print_operands (char *buf, const aarch64_opcode *opcode,
4717 const aarch64_opnd_info *opnds)
4718 {
4719 int i;
4720
4721 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4722 {
4723 char str[128];
4724
4725 /* We rely mainly on the opcode's operand info; however, we also look into
4726 the inst->operands to support the printing of the optional
4727 operand.
4728 The two operand codes should be the same in all cases, apart from
4729 when the operand can be optional. */
4730 if (opcode->operands[i] == AARCH64_OPND_NIL
4731 || opnds[i].type == AARCH64_OPND_NIL)
4732 break;
4733
4734 /* Generate the operand string in STR. */
4735 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4736 NULL, cpu_variant);
4737
4738 /* Delimiter. */
4739 if (str[0] != '\0')
4740 strcat (buf, i == 0 ? " " : ", ");
4741
4742 /* Append the operand string. */
4743 strcat (buf, str);
4744 }
4745 }
4746
4747 /* Send a string to stderr as information. */
4748
4749 static void
4750 output_info (const char *format, ...)
4751 {
4752 const char *file;
4753 unsigned int line;
4754 va_list args;
4755
4756 file = as_where (&line);
4757 if (file)
4758 {
4759 if (line != 0)
4760 fprintf (stderr, "%s:%u: ", file, line);
4761 else
4762 fprintf (stderr, "%s: ", file);
4763 }
4764 fprintf (stderr, _("Info: "));
4765 va_start (args, format);
4766 vfprintf (stderr, format, args);
4767 va_end (args);
4768 (void) putc ('\n', stderr);
4769 }
4770
4771 /* Output one operand error record. */
4772
4773 static void
4774 output_operand_error_record (const operand_error_record *record, char *str)
4775 {
4776 const aarch64_operand_error *detail = &record->detail;
4777 int idx = detail->index;
4778 const aarch64_opcode *opcode = record->opcode;
4779 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4780 : AARCH64_OPND_NIL);
4781
4782 typedef void (*handler_t)(const char *format, ...);
4783 handler_t handler = detail->non_fatal ? as_warn : as_bad;
4784
4785 switch (detail->kind)
4786 {
4787 case AARCH64_OPDE_NIL:
4788 gas_assert (0);
4789 break;
4790 case AARCH64_OPDE_SYNTAX_ERROR:
4791 case AARCH64_OPDE_RECOVERABLE:
4792 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4793 case AARCH64_OPDE_OTHER_ERROR:
4794 /* Use the prepared error message if there is one, otherwise use the
4795 operand description string to describe the error. */
4796 if (detail->error != NULL)
4797 {
4798 if (idx < 0)
4799 handler (_("%s -- `%s'"), detail->error, str);
4800 else
4801 handler (_("%s at operand %d -- `%s'"),
4802 detail->error, idx + 1, str);
4803 }
4804 else
4805 {
4806 gas_assert (idx >= 0);
4807 handler (_("operand %d must be %s -- `%s'"), idx + 1,
4808 aarch64_get_operand_desc (opd_code), str);
4809 }
4810 break;
4811
4812 case AARCH64_OPDE_INVALID_VARIANT:
4813 handler (_("operand mismatch -- `%s'"), str);
4814 if (verbose_error_p)
4815 {
4816 /* We will try to correct the erroneous instruction and also provide
4817 more information e.g. all other valid variants.
4818
4819 The string representation of the corrected instruction and other
4820 valid variants are generated by
4821
4822 1) obtaining the intermediate representation of the erroneous
4823 instruction;
4824 2) manipulating the IR, e.g. replacing the operand qualifier;
4825 3) printing out the instruction by calling the printer functions
4826 shared with the disassembler.
4827
4828 The limitation of this method is that the exact input assembly
4829 line cannot be accurately reproduced in some cases, for example an
4830 optional operand present in the actual assembly line will be
4831 omitted in the output; likewise for the optional syntax rules,
4832 e.g. the # before the immediate. Another limitation is that the
4833 assembly symbols and relocation operations in the assembly line
4834 currently cannot be printed out in the error report. Last but not
4835 least, when other errors co-exist with this error, the
4836 'corrected' instruction may still be incorrect, e.g. given
4837 'ldnp h0,h1,[x0,#6]!'
4838 this diagnosis will provide the version:
4839 'ldnp s0,s1,[x0,#6]!'
4840 which is still not right. */
4841 size_t len = strlen (get_mnemonic_name (str));
4842 int i, qlf_idx;
4843 bfd_boolean result;
4844 char buf[2048];
4845 aarch64_inst *inst_base = &inst.base;
4846 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4847
4848 /* Init inst. */
4849 reset_aarch64_instruction (&inst);
4850 inst_base->opcode = opcode;
4851
4852 /* Reset the error report so that there is no side effect on the
4853 following operand parsing. */
4854 init_operand_error_report ();
4855
4856 /* Fill inst. */
4857 result = parse_operands (str + len, opcode)
4858 && programmer_friendly_fixup (&inst);
4859 gas_assert (result);
4860 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4861 NULL, NULL, insn_sequence);
4862 gas_assert (!result);
4863
4864 /* Find the best matching qualifier sequence. */
4865 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4866 gas_assert (qlf_idx > -1);
4867
4868 /* Assign the qualifiers. */
4869 assign_qualifier_sequence (inst_base,
4870 opcode->qualifiers_list[qlf_idx]);
4871
4872 /* Print the hint. */
4873 output_info (_(" did you mean this?"));
4874 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4875 print_operands (buf, opcode, inst_base->operands);
4876 output_info (_(" %s"), buf);
4877
4878 /* Print out other variant(s) if there is any. */
4879 if (qlf_idx != 0
4880 || !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4881 output_info (_(" other valid variant(s):"));
4882
4883 /* For each pattern. */
4884 qualifiers_list = opcode->qualifiers_list;
4885 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4886 {
4887 /* Most opcodes have far fewer patterns in the list.
4888 The first NIL qualifier indicates the end of the list. */
4889 if (empty_qualifier_sequence_p (*qualifiers_list))
4890 break;
4891
4892 if (i != qlf_idx)
4893 {
4894 /* Mnemonic name. */
4895 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4896
4897 /* Assign the qualifiers. */
4898 assign_qualifier_sequence (inst_base, *qualifiers_list);
4899
4900 /* Print instruction. */
4901 print_operands (buf, opcode, inst_base->operands);
4902
4903 output_info (_(" %s"), buf);
4904 }
4905 }
4906 }
4907 break;
4908
4909 case AARCH64_OPDE_UNTIED_OPERAND:
4910 handler (_("operand %d must be the same register as operand 1 -- `%s'"),
4911 detail->index + 1, str);
4912 break;
4913
4914 case AARCH64_OPDE_OUT_OF_RANGE:
4915 if (detail->data[0] != detail->data[1])
4916 handler (_("%s out of range %d to %d at operand %d -- `%s'"),
4917 detail->error ? detail->error : _("immediate value"),
4918 detail->data[0], detail->data[1], idx + 1, str);
4919 else
4920 handler (_("%s must be %d at operand %d -- `%s'"),
4921 detail->error ? detail->error : _("immediate value"),
4922 detail->data[0], idx + 1, str);
4923 break;
4924
4925 case AARCH64_OPDE_REG_LIST:
4926 if (detail->data[0] == 1)
4927 handler (_("invalid number of registers in the list; "
4928 "only 1 register is expected at operand %d -- `%s'"),
4929 idx + 1, str);
4930 else
4931 handler (_("invalid number of registers in the list; "
4932 "%d registers are expected at operand %d -- `%s'"),
4933 detail->data[0], idx + 1, str);
4934 break;
4935
4936 case AARCH64_OPDE_UNALIGNED:
4937 handler (_("immediate value must be a multiple of "
4938 "%d at operand %d -- `%s'"),
4939 detail->data[0], idx + 1, str);
4940 break;
4941
4942 default:
4943 gas_assert (0);
4944 break;
4945 }
4946 }
4947
4948 /* Process and output the error message about the operand mismatching.
4949
4950 When this function is called, the operand error information had
4951 been collected for an assembly line and there will be multiple
4952 errors in the case of multiple instruction templates; output the
4953 error message that most closely describes the problem.
4954
4955 The errors to be printed can be filtered to print either all errors
4956 or only the non-fatal ones. This distinction has to be made because
4957 the error buffer may already be filled with fatal errors we don't want to
4958 print due to the different instruction templates. */
4959
4960 static void
4961 output_operand_error_report (char *str, bfd_boolean non_fatal_only)
4962 {
4963 int largest_error_pos;
4964 const char *msg = NULL;
4965 enum aarch64_operand_error_kind kind;
4966 operand_error_record *curr;
4967 operand_error_record *head = operand_error_report.head;
4968 operand_error_record *record = NULL;
4969
4970 /* No error to report. */
4971 if (head == NULL)
4972 return;
4973
4974 gas_assert (head != NULL && operand_error_report.tail != NULL);
4975
4976 /* Only one error. */
4977 if (head == operand_error_report.tail)
4978 {
4979 /* If the only error is a non-fatal one and we don't want to print it,
4980 just exit. */
4981 if (!non_fatal_only || head->detail.non_fatal)
4982 {
4983 DEBUG_TRACE ("single opcode entry with error kind: %s",
4984 operand_mismatch_kind_names[head->detail.kind]);
4985 output_operand_error_record (head, str);
4986 }
4987 return;
4988 }
4989
4990 /* Find the error kind of the highest severity. */
4991 DEBUG_TRACE ("multiple opcode entries with error kind");
4992 kind = AARCH64_OPDE_NIL;
4993 for (curr = head; curr != NULL; curr = curr->next)
4994 {
4995 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4996 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4997 if (operand_error_higher_severity_p (curr->detail.kind, kind)
4998 && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
4999 kind = curr->detail.kind;
5000 }
5001
5002 gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
5003
5004 /* Pick up one of the errors of KIND to report. */
5005 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
5006 for (curr = head; curr != NULL; curr = curr->next)
5007 {
5008 /* If we don't want to print non-fatal errors then don't consider them
5009 at all. */
5010 if (curr->detail.kind != kind
5011 || (non_fatal_only && !curr->detail.non_fatal))
5012 continue;
5013 /* If there are multiple errors, pick up the one with the highest
5014 mismatching operand index. In the case of multiple errors with
5015 the equally highest operand index, pick up the first one or the
5016 first one with a non-NULL error message. */
5017 if (curr->detail.index > largest_error_pos
5018 || (curr->detail.index == largest_error_pos && msg == NULL
5019 && curr->detail.error != NULL))
5020 {
5021 largest_error_pos = curr->detail.index;
5022 record = curr;
5023 msg = record->detail.error;
5024 }
5025 }
5026
5027 /* The way errors are collected in the back-end is a bit non-intuitive. But
5028 essentially, because each operand template is tried recursively, you may
5029 always have errors collected from the previously tried operand. These are
5030 usually skipped if there is one successful match. However, now with the
5031 non-fatal errors we have to ignore those previously collected hard errors
5032 when we're only interested in printing the non-fatal ones. This condition
5033 prevents us from printing errors that are not appropriate, since we did
5034 match a template, but it also produced warnings that it wants to print. */
5035 if (non_fatal_only && !record)
5036 return;
5037
5038 gas_assert (largest_error_pos != -2 && record != NULL);
5039 DEBUG_TRACE ("Pick up error kind %s to report",
5040 operand_mismatch_kind_names[record->detail.kind]);
5041
5042 /* Output. */
5043 output_operand_error_record (record, str);
5044 }
5045 \f
5046 /* Write an AARCH64 instruction to buf - always little-endian. */
5047 static void
5048 put_aarch64_insn (char *buf, uint32_t insn)
5049 {
5050 unsigned char *where = (unsigned char *) buf;
5051 where[0] = insn;
5052 where[1] = insn >> 8;
5053 where[2] = insn >> 16;
5054 where[3] = insn >> 24;
5055 }
5056
5057 static uint32_t
5058 get_aarch64_insn (char *buf)
5059 {
5060 unsigned char *where = (unsigned char *) buf;
5061 uint32_t result;
5062 result = ((where[0] | (where[1] << 8) | (where[2] << 16)
5063 | ((uint32_t) where[3] << 24)));
5064 return result;
5065 }
5066
5067 static void
5068 output_inst (struct aarch64_inst *new_inst)
5069 {
5070 char *to = NULL;
5071
5072 to = frag_more (INSN_SIZE);
5073
5074 frag_now->tc_frag_data.recorded = 1;
5075
5076 put_aarch64_insn (to, inst.base.value);
5077
5078 if (inst.reloc.type != BFD_RELOC_UNUSED)
5079 {
5080 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
5081 INSN_SIZE, &inst.reloc.exp,
5082 inst.reloc.pc_rel,
5083 inst.reloc.type);
5084 DEBUG_TRACE ("Prepared relocation fix up");
5085 /* Don't check the addend value against the instruction size,
5086 that's the job of our code in md_apply_fix(). */
5087 fixp->fx_no_overflow = 1;
5088 if (new_inst != NULL)
5089 fixp->tc_fix_data.inst = new_inst;
5090 if (aarch64_gas_internal_fixup_p ())
5091 {
5092 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
5093 fixp->tc_fix_data.opnd = inst.reloc.opnd;
5094 fixp->fx_addnumber = inst.reloc.flags;
5095 }
5096 }
5097
5098 dwarf2_emit_insn (INSN_SIZE);
5099 }
5100
5101 /* Link together opcodes of the same name. */
5102
5103 struct templates
5104 {
5105 aarch64_opcode *opcode;
5106 struct templates *next;
5107 };
5108
5109 typedef struct templates templates;
5110
5111 static templates *
5112 lookup_mnemonic (const char *start, int len)
5113 {
5114 templates *templ = NULL;
5115
5116 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5117 return templ;
5118 }
5119
5120 /* Subroutine of md_assemble, responsible for looking up the primary
5121 opcode from the mnemonic the user wrote. STR points to the
5122 beginning of the mnemonic. */
5123
5124 static templates *
5125 opcode_lookup (char **str)
5126 {
5127 char *end, *base, *dot;
5128 const aarch64_cond *cond;
5129 char condname[16];
5130 int len;
5131
5132 /* Scan up to the end of the mnemonic, which must end in white space,
5133 '.', or end of string. */
5134 dot = 0;
5135 for (base = end = *str; is_part_of_name(*end); end++)
5136 if (*end == '.' && !dot)
5137 dot = end;
5138
5139 if (end == base || dot == base)
5140 return 0;
5141
5142 inst.cond = COND_ALWAYS;
5143
5144 /* Handle a possible condition. */
5145 if (dot)
5146 {
5147 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5148 if (cond)
5149 {
5150 inst.cond = cond->value;
5151 *str = end;
5152 }
5153 else
5154 {
5155 *str = dot;
5156 return 0;
5157 }
5158 len = dot - base;
5159 }
5160 else
5161 {
5162 *str = end;
5163 len = end - base;
5164 }
5165
5166 if (inst.cond == COND_ALWAYS)
5167 {
5168 /* Look for unaffixed mnemonic. */
5169 return lookup_mnemonic (base, len);
5170 }
5171 else if (len <= 13)
5172 {
5173 /* append ".c" to mnemonic if conditional */
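/* For example, "b.eq" is split into the base "b" and the condition "eq";
   the table lookup is then done on "b.c" with inst.cond already set from
   the condition hash (a sketch of the flow, assuming that ".c" is how the
   conditional templates are named in the opcode table).  */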
5174 memcpy (condname, base, len);
5175 memcpy (condname + len, ".c", 2);
5176 base = condname;
5177 len += 2;
5178 return lookup_mnemonic (base, len);
5179 }
5180
5181 return NULL;
5182 }
5183
5184 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5185 to a corresponding operand qualifier. */
5186
5187 static inline aarch64_opnd_qualifier_t
5188 vectype_to_qualifier (const struct vector_type_el *vectype)
5189 {
5190 /* Element size in bytes indexed by vector_el_type. */
5191 const unsigned char ele_size[5]
5192 = {1, 2, 4, 8, 16};
5193 const unsigned int ele_base [5] =
5194 {
5195 AARCH64_OPND_QLF_V_4B,
5196 AARCH64_OPND_QLF_V_2H,
5197 AARCH64_OPND_QLF_V_2S,
5198 AARCH64_OPND_QLF_V_1D,
5199 AARCH64_OPND_QLF_V_1Q
5200 };
5201
5202 if (!vectype->defined || vectype->type == NT_invtype)
5203 goto vectype_conversion_fail;
5204
5205 if (vectype->type == NT_zero)
5206 return AARCH64_OPND_QLF_P_Z;
5207 if (vectype->type == NT_merge)
5208 return AARCH64_OPND_QLF_P_M;
5209
5210 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
5211
5212 if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
5213 {
5214 /* Special case S_4B. */
5215 if (vectype->type == NT_b && vectype->width == 4)
5216 return AARCH64_OPND_QLF_S_4B;
5217
5218 /* Special case S_2H. */
5219 if (vectype->type == NT_h && vectype->width == 2)
5220 return AARCH64_OPND_QLF_S_2H;
5221
5222 /* Vector element register. */
5223 return AARCH64_OPND_QLF_S_B + vectype->type;
5224 }
5225 else
5226 {
5227 /* Vector register. */
5228 int reg_size = ele_size[vectype->type] * vectype->width;
5229 unsigned offset;
5230 unsigned shift;
5231 if (reg_size != 16 && reg_size != 8 && reg_size != 4)
5232 goto vectype_conversion_fail;
5233
5234 /* The conversion is by calculating the offset from the base operand
5235 qualifier for the vector type. The operand qualifiers are regular
5236 enough that the offset can be established by shifting the vector width by
5237 a vector-type dependent amount. */
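/* For example, ".4s" has type NT_s and width 4: reg_size is 4 * 4 == 16,
   shift is 2, so the result is ele_base[NT_s] + (4 >> 2), i.e. one step
   past AARCH64_OPND_QLF_V_2S (which, given the enum ordering the
   assertion below depends on, is AARCH64_OPND_QLF_V_4S).  */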
5238 shift = 0;
5239 if (vectype->type == NT_b)
5240 shift = 3;
5241 else if (vectype->type == NT_h || vectype->type == NT_s)
5242 shift = 2;
5243 else if (vectype->type >= NT_d)
5244 shift = 1;
5245 else
5246 gas_assert (0);
5247
5248 offset = ele_base [vectype->type] + (vectype->width >> shift);
5249 gas_assert (AARCH64_OPND_QLF_V_4B <= offset
5250 && offset <= AARCH64_OPND_QLF_V_1Q);
5251 return offset;
5252 }
5253
5254 vectype_conversion_fail:
5255 first_error (_("bad vector arrangement type"));
5256 return AARCH64_OPND_QLF_NIL;
5257 }
5258
5259 /* Process an optional operand that has been omitted from the assembly line.
5260 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5261 instruction's opcode entry while IDX is the index of this omitted operand.
5262 */
5263
5264 static void
5265 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
5266 int idx, aarch64_opnd_info *operand)
5267 {
5268 aarch64_insn default_value = get_optional_operand_default_value (opcode);
5269 gas_assert (optional_operand_p (opcode, idx));
5270 gas_assert (!operand->present);
5271
5272 switch (type)
5273 {
5274 case AARCH64_OPND_Rd:
5275 case AARCH64_OPND_Rn:
5276 case AARCH64_OPND_Rm:
5277 case AARCH64_OPND_Rt:
5278 case AARCH64_OPND_Rt2:
5279 case AARCH64_OPND_Rt_LS64:
5280 case AARCH64_OPND_Rt_SP:
5281 case AARCH64_OPND_Rs:
5282 case AARCH64_OPND_Ra:
5283 case AARCH64_OPND_Rt_SYS:
5284 case AARCH64_OPND_Rd_SP:
5285 case AARCH64_OPND_Rn_SP:
5286 case AARCH64_OPND_Rm_SP:
5287 case AARCH64_OPND_Fd:
5288 case AARCH64_OPND_Fn:
5289 case AARCH64_OPND_Fm:
5290 case AARCH64_OPND_Fa:
5291 case AARCH64_OPND_Ft:
5292 case AARCH64_OPND_Ft2:
5293 case AARCH64_OPND_Sd:
5294 case AARCH64_OPND_Sn:
5295 case AARCH64_OPND_Sm:
5296 case AARCH64_OPND_Va:
5297 case AARCH64_OPND_Vd:
5298 case AARCH64_OPND_Vn:
5299 case AARCH64_OPND_Vm:
5300 case AARCH64_OPND_VdD1:
5301 case AARCH64_OPND_VnD1:
5302 operand->reg.regno = default_value;
5303 break;
5304
5305 case AARCH64_OPND_Ed:
5306 case AARCH64_OPND_En:
5307 case AARCH64_OPND_Em:
5308 case AARCH64_OPND_Em16:
5309 case AARCH64_OPND_SM3_IMM2:
5310 operand->reglane.regno = default_value;
5311 break;
5312
5313 case AARCH64_OPND_IDX:
5314 case AARCH64_OPND_BIT_NUM:
5315 case AARCH64_OPND_IMMR:
5316 case AARCH64_OPND_IMMS:
5317 case AARCH64_OPND_SHLL_IMM:
5318 case AARCH64_OPND_IMM_VLSL:
5319 case AARCH64_OPND_IMM_VLSR:
5320 case AARCH64_OPND_CCMP_IMM:
5321 case AARCH64_OPND_FBITS:
5322 case AARCH64_OPND_UIMM4:
5323 case AARCH64_OPND_UIMM3_OP1:
5324 case AARCH64_OPND_UIMM3_OP2:
5325 case AARCH64_OPND_IMM:
5326 case AARCH64_OPND_IMM_2:
5327 case AARCH64_OPND_WIDTH:
5328 case AARCH64_OPND_UIMM7:
5329 case AARCH64_OPND_NZCV:
5330 case AARCH64_OPND_SVE_PATTERN:
5331 case AARCH64_OPND_SVE_PRFOP:
5332 operand->imm.value = default_value;
5333 break;
5334
5335 case AARCH64_OPND_SVE_PATTERN_SCALED:
5336 operand->imm.value = default_value;
5337 operand->shifter.kind = AARCH64_MOD_MUL;
5338 operand->shifter.amount = 1;
5339 break;
5340
5341 case AARCH64_OPND_EXCEPTION:
5342 inst.reloc.type = BFD_RELOC_UNUSED;
5343 break;
5344
5345 case AARCH64_OPND_BARRIER_ISB:
5346 operand->barrier = aarch64_barrier_options + default_value;
5347 break;
5348
5349 case AARCH64_OPND_BTI_TARGET:
5350 operand->hint_option = aarch64_hint_options + default_value;
5351 break;
5352
5353 default:
5354 break;
5355 }
5356 }
5357
5358 /* Process the relocation type for move wide instructions.
5359 Return TRUE on success; otherwise return FALSE. */
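/* For instance, an instruction such as "movk x0, #:abs_g1_nc:sym"
   (assuming the :abs_g1_nc: relocation operator) parses to
   BFD_RELOC_AARCH64_MOVW_G1_NC, for which this function sets a shifter
   amount of 16.  */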
5360
5361 static bfd_boolean
5362 process_movw_reloc_info (void)
5363 {
5364 int is32;
5365 unsigned shift;
5366
5367 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
5368
5369 if (inst.base.opcode->op == OP_MOVK)
5370 switch (inst.reloc.type)
5371 {
5372 case BFD_RELOC_AARCH64_MOVW_G0_S:
5373 case BFD_RELOC_AARCH64_MOVW_G1_S:
5374 case BFD_RELOC_AARCH64_MOVW_G2_S:
5375 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
5376 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
5377 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
5378 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
5379 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5380 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5381 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5382 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5383 set_syntax_error
5384 (_("the specified relocation type is not allowed for MOVK"));
5385 return FALSE;
5386 default:
5387 break;
5388 }
5389
5390 switch (inst.reloc.type)
5391 {
5392 case BFD_RELOC_AARCH64_MOVW_G0:
5393 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5394 case BFD_RELOC_AARCH64_MOVW_G0_S:
5395 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5396 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
5397 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
5398 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5399 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5400 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5401 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
5402 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5403 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5404 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5405 shift = 0;
5406 break;
5407 case BFD_RELOC_AARCH64_MOVW_G1:
5408 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5409 case BFD_RELOC_AARCH64_MOVW_G1_S:
5410 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5411 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
5412 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
5413 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5414 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5415 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5416 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
5417 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
5418 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5419 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5420 shift = 16;
5421 break;
5422 case BFD_RELOC_AARCH64_MOVW_G2:
5423 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5424 case BFD_RELOC_AARCH64_MOVW_G2_S:
5425 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
5426 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
5427 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
5428 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5429 if (is32)
5430 {
5431 set_fatal_syntax_error
5432 (_("the specified relocation type is not allowed for 32-bit "
5433 "register"));
5434 return FALSE;
5435 }
5436 shift = 32;
5437 break;
5438 case BFD_RELOC_AARCH64_MOVW_G3:
5439 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
5440 if (is32)
5441 {
5442 set_fatal_syntax_error
5443 (_("the specified relocation type is not allowed for 32-bit "
5444 "register"));
5445 return FALSE;
5446 }
5447 shift = 48;
5448 break;
5449 default:
5450 /* More cases should be added when more MOVW-related relocation types
5451 are supported in GAS. */
5452 gas_assert (aarch64_gas_internal_fixup_p ());
5453 /* The shift amount should have already been set by the parser. */
5454 return TRUE;
5455 }
5456 inst.base.operands[1].shifter.amount = shift;
5457 return TRUE;
5458 }
5459
5460 /* A primitive log calculator. */
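/* It maps an element size of 1, 2, 4, 8 or 16 bytes to its base-2 log
   (0, 1, 2, 3 or 4); any other size trips the assertions below.  */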
5461
5462 static inline unsigned int
5463 get_logsz (unsigned int size)
5464 {
5465 const unsigned char ls[16] =
5466 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
5467 if (size > 16)
5468 {
5469 gas_assert (0);
5470 return -1;
5471 }
5472 gas_assert (ls[size - 1] != (unsigned char)-1);
5473 return ls[size - 1];
5474 }
5475
5476 /* Determine and return the real reloc type code for an instruction
5477 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
5478
5479 static inline bfd_reloc_code_real_type
5480 ldst_lo12_determine_real_reloc_type (void)
5481 {
5482 unsigned logsz;
5483 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
5484 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
5485
5486 const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
5487 {
5488 BFD_RELOC_AARCH64_LDST8_LO12,
5489 BFD_RELOC_AARCH64_LDST16_LO12,
5490 BFD_RELOC_AARCH64_LDST32_LO12,
5491 BFD_RELOC_AARCH64_LDST64_LO12,
5492 BFD_RELOC_AARCH64_LDST128_LO12
5493 },
5494 {
5495 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
5496 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
5497 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
5498 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
5499 BFD_RELOC_AARCH64_NONE
5500 },
5501 {
5502 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
5503 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
5504 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
5505 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
5506 BFD_RELOC_AARCH64_NONE
5507 },
5508 {
5509 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
5510 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
5511 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
5512 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
5513 BFD_RELOC_AARCH64_NONE
5514 },
5515 {
5516 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
5517 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
5518 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
5519 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
5520 BFD_RELOC_AARCH64_NONE
5521 }
5522 };
5523
5524 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5525 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5526 || (inst.reloc.type
5527 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
5528 || (inst.reloc.type
5529 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
5530 || (inst.reloc.type
5531 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
5532 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
5533
5534 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
5535 opd1_qlf =
5536 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
5537 1, opd0_qlf, 0);
5538 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
5539
5540 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
5541 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5542 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
5543 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
5544 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
5545 gas_assert (logsz <= 3);
5546 else
5547 gas_assert (logsz <= 4);
5548
5549 /* In reloc.c, these pseudo relocation types should be defined in the same
5550 order as the reloc_ldst_lo12 array above, because the array index
5551 calculation below relies on this. */
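/* For example, a load/store whose second operand has a D-register
   qualifier (element size 8, so logsz 3) together with the plain
   BFD_RELOC_AARCH64_LDST_LO12 pseudo reloc selects reloc_ldst_lo12[0][3],
   i.e. BFD_RELOC_AARCH64_LDST64_LO12.  */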
5552 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
5553 }
5554
5555 /* Check whether a register list REGINFO is valid. The registers must be
5556 numbered in increasing order (modulo 32), in increments of one or two.
5557
5558 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5559 increments of two.
5560
5561 Return FALSE if such a register list is invalid, otherwise return TRUE. */
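/* The layout decoded here: bits [1:0] hold the number of registers minus
   one, and each subsequent 5-bit field holds a register number, the first
   register occupying bits [6:2].  As a purely illustrative example, a list
   of the three registers V4-V6 would be encoded as
   (6 << 12) | (5 << 7) | (4 << 2) | 2.  */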
5562
5563 static bfd_boolean
5564 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5565 {
5566 uint32_t i, nb_regs, prev_regno, incr;
5567
5568 nb_regs = 1 + (reginfo & 0x3);
5569 reginfo >>= 2;
5570 prev_regno = reginfo & 0x1f;
5571 incr = accept_alternate ? 2 : 1;
5572
5573 for (i = 1; i < nb_regs; ++i)
5574 {
5575 uint32_t curr_regno;
5576 reginfo >>= 5;
5577 curr_regno = reginfo & 0x1f;
5578 if (curr_regno != ((prev_regno + incr) & 0x1f))
5579 return FALSE;
5580 prev_regno = curr_regno;
5581 }
5582
5583 return TRUE;
5584 }
5585
5586 /* Generic instruction operand parser. This does no encoding and no
5587 semantic validation; it merely squirrels values away in the inst
5588 structure. Returns TRUE or FALSE depending on whether the
5589 specified grammar matched. */
5590
5591 static bfd_boolean
5592 parse_operands (char *str, const aarch64_opcode *opcode)
5593 {
5594 int i;
5595 char *backtrack_pos = 0;
5596 const enum aarch64_opnd *operands = opcode->operands;
5597 aarch64_reg_type imm_reg_type;
5598
5599 clear_error ();
5600 skip_whitespace (str);
5601
5602 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5603 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5604 else
5605 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5606
5607 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5608 {
5609 int64_t val;
5610 const reg_entry *reg;
5611 int comma_skipped_p = 0;
5612 aarch64_reg_type rtype;
5613 struct vector_type_el vectype;
5614 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5615 aarch64_opnd_info *info = &inst.base.operands[i];
5616 aarch64_reg_type reg_type;
5617
5618 DEBUG_TRACE ("parse operand %d", i);
5619
5620 /* Assign the operand code. */
5621 info->type = operands[i];
5622
5623 if (optional_operand_p (opcode, i))
5624 {
5625 /* Remember where we are in case we need to backtrack. */
5626 gas_assert (!backtrack_pos);
5627 backtrack_pos = str;
5628 }
5629
5630 /* Expect comma between operands; the backtrack mechanism will take
5631 care of cases of omitted optional operand. */
5632 if (i > 0 && ! skip_past_char (&str, ','))
5633 {
5634 set_syntax_error (_("comma expected between operands"));
5635 goto failure;
5636 }
5637 else
5638 comma_skipped_p = 1;
5639
5640 switch (operands[i])
5641 {
5642 case AARCH64_OPND_Rd:
5643 case AARCH64_OPND_Rn:
5644 case AARCH64_OPND_Rm:
5645 case AARCH64_OPND_Rt:
5646 case AARCH64_OPND_Rt2:
5647 case AARCH64_OPND_Rs:
5648 case AARCH64_OPND_Ra:
5649 case AARCH64_OPND_Rt_LS64:
5650 case AARCH64_OPND_Rt_SYS:
5651 case AARCH64_OPND_PAIRREG:
5652 case AARCH64_OPND_SVE_Rm:
5653 po_int_reg_or_fail (REG_TYPE_R_Z);
5654
5655 /* In LS64 load/store instructions the Rt register number must be even
5656 and <= 22. */
5657 if (operands[i] == AARCH64_OPND_Rt_LS64)
5658 {
5659 /* We've already checked that this is a valid register.
5660 This checks that the register number (Rt) is not undefined for LS64
5661 instructions:
5662 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
5663 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
5664 {
5665 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
5666 goto failure;
5667 }
5668 }
5669 break;
5670
5671 case AARCH64_OPND_Rd_SP:
5672 case AARCH64_OPND_Rn_SP:
5673 case AARCH64_OPND_Rt_SP:
5674 case AARCH64_OPND_SVE_Rn_SP:
5675 case AARCH64_OPND_Rm_SP:
5676 po_int_reg_or_fail (REG_TYPE_R_SP);
5677 break;
5678
5679 case AARCH64_OPND_Rm_EXT:
5680 case AARCH64_OPND_Rm_SFT:
5681 po_misc_or_fail (parse_shifter_operand
5682 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5683 ? SHIFTED_ARITH_IMM
5684 : SHIFTED_LOGIC_IMM)));
5685 if (!info->shifter.operator_present)
5686 {
5687 /* Default to LSL if not present. Libopcodes prefers shifter
5688 kind to be explicit. */
5689 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5690 info->shifter.kind = AARCH64_MOD_LSL;
5691 /* For Rm_EXT, libopcodes will carry out a further check on whether
5692 or not the stack pointer is used in the instruction (recall that
5693 "the extend operator is not optional unless at least one of
5694 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5695 }
5696 break;
5697
5698 case AARCH64_OPND_Fd:
5699 case AARCH64_OPND_Fn:
5700 case AARCH64_OPND_Fm:
5701 case AARCH64_OPND_Fa:
5702 case AARCH64_OPND_Ft:
5703 case AARCH64_OPND_Ft2:
5704 case AARCH64_OPND_Sd:
5705 case AARCH64_OPND_Sn:
5706 case AARCH64_OPND_Sm:
5707 case AARCH64_OPND_SVE_VZn:
5708 case AARCH64_OPND_SVE_Vd:
5709 case AARCH64_OPND_SVE_Vm:
5710 case AARCH64_OPND_SVE_Vn:
5711 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5712 if (val == PARSE_FAIL)
5713 {
5714 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5715 goto failure;
5716 }
5717 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5718
5719 info->reg.regno = val;
5720 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5721 break;
5722
5723 case AARCH64_OPND_SVE_Pd:
5724 case AARCH64_OPND_SVE_Pg3:
5725 case AARCH64_OPND_SVE_Pg4_5:
5726 case AARCH64_OPND_SVE_Pg4_10:
5727 case AARCH64_OPND_SVE_Pg4_16:
5728 case AARCH64_OPND_SVE_Pm:
5729 case AARCH64_OPND_SVE_Pn:
5730 case AARCH64_OPND_SVE_Pt:
5731 reg_type = REG_TYPE_PN;
5732 goto vector_reg;
5733
5734 case AARCH64_OPND_SVE_Za_5:
5735 case AARCH64_OPND_SVE_Za_16:
5736 case AARCH64_OPND_SVE_Zd:
5737 case AARCH64_OPND_SVE_Zm_5:
5738 case AARCH64_OPND_SVE_Zm_16:
5739 case AARCH64_OPND_SVE_Zn:
5740 case AARCH64_OPND_SVE_Zt:
5741 reg_type = REG_TYPE_ZN;
5742 goto vector_reg;
5743
5744 case AARCH64_OPND_Va:
5745 case AARCH64_OPND_Vd:
5746 case AARCH64_OPND_Vn:
5747 case AARCH64_OPND_Vm:
5748 reg_type = REG_TYPE_VN;
5749 vector_reg:
5750 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5751 if (val == PARSE_FAIL)
5752 {
5753 first_error (_(get_reg_expected_msg (reg_type)));
5754 goto failure;
5755 }
5756 if (vectype.defined & NTA_HASINDEX)
5757 goto failure;
5758
5759 info->reg.regno = val;
5760 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5761 && vectype.type == NT_invtype)
5762 /* Unqualified Pn and Zn registers are allowed in certain
5763 contexts. Rely on F_STRICT qualifier checking to catch
5764 invalid uses. */
5765 info->qualifier = AARCH64_OPND_QLF_NIL;
5766 else
5767 {
5768 info->qualifier = vectype_to_qualifier (&vectype);
5769 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5770 goto failure;
5771 }
5772 break;
5773
5774 case AARCH64_OPND_VdD1:
5775 case AARCH64_OPND_VnD1:
5776 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5777 if (val == PARSE_FAIL)
5778 {
5779 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5780 goto failure;
5781 }
5782 if (vectype.type != NT_d || vectype.index != 1)
5783 {
5784 set_fatal_syntax_error
5785 (_("the top half of a 128-bit FP/SIMD register is expected"));
5786 goto failure;
5787 }
5788 info->reg.regno = val;
5789 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
5790 here; this is correct for the purpose of encoding/decoding since
5791 only the register number is explicitly encoded in the related
5792 instructions, although this appears a bit hacky. */
5793 info->qualifier = AARCH64_OPND_QLF_S_D;
5794 break;
5795
5796 case AARCH64_OPND_SVE_Zm3_INDEX:
5797 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5798 case AARCH64_OPND_SVE_Zm3_11_INDEX:
5799 case AARCH64_OPND_SVE_Zm4_11_INDEX:
5800 case AARCH64_OPND_SVE_Zm4_INDEX:
5801 case AARCH64_OPND_SVE_Zn_INDEX:
5802 reg_type = REG_TYPE_ZN;
5803 goto vector_reg_index;
5804
5805 case AARCH64_OPND_Ed:
5806 case AARCH64_OPND_En:
5807 case AARCH64_OPND_Em:
5808 case AARCH64_OPND_Em16:
5809 case AARCH64_OPND_SM3_IMM2:
5810 reg_type = REG_TYPE_VN;
5811 vector_reg_index:
5812 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5813 if (val == PARSE_FAIL)
5814 {
5815 first_error (_(get_reg_expected_msg (reg_type)));
5816 goto failure;
5817 }
5818 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5819 goto failure;
5820
5821 info->reglane.regno = val;
5822 info->reglane.index = vectype.index;
5823 info->qualifier = vectype_to_qualifier (&vectype);
5824 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5825 goto failure;
5826 break;
5827
5828 case AARCH64_OPND_SVE_ZnxN:
5829 case AARCH64_OPND_SVE_ZtxN:
5830 reg_type = REG_TYPE_ZN;
5831 goto vector_reg_list;
5832
5833 case AARCH64_OPND_LVn:
5834 case AARCH64_OPND_LVt:
5835 case AARCH64_OPND_LVt_AL:
5836 case AARCH64_OPND_LEt:
5837 reg_type = REG_TYPE_VN;
5838 vector_reg_list:
5839 if (reg_type == REG_TYPE_ZN
5840 && get_opcode_dependent_value (opcode) == 1
5841 && *str != '{')
5842 {
5843 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5844 if (val == PARSE_FAIL)
5845 {
5846 first_error (_(get_reg_expected_msg (reg_type)));
5847 goto failure;
5848 }
5849 info->reglist.first_regno = val;
5850 info->reglist.num_regs = 1;
5851 }
5852 else
5853 {
5854 val = parse_vector_reg_list (&str, reg_type, &vectype);
5855 if (val == PARSE_FAIL)
5856 goto failure;
5857
5858 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5859 {
5860 set_fatal_syntax_error (_("invalid register list"));
5861 goto failure;
5862 }
5863
5864 if (vectype.width != 0 && *str != ',')
5865 {
5866 set_fatal_syntax_error
5867 (_("expected element type rather than vector type"));
5868 goto failure;
5869 }
5870
5871 info->reglist.first_regno = (val >> 2) & 0x1f;
5872 info->reglist.num_regs = (val & 0x3) + 1;
5873 }
5874 if (operands[i] == AARCH64_OPND_LEt)
5875 {
5876 if (!(vectype.defined & NTA_HASINDEX))
5877 goto failure;
5878 info->reglist.has_index = 1;
5879 info->reglist.index = vectype.index;
5880 }
5881 else
5882 {
5883 if (vectype.defined & NTA_HASINDEX)
5884 goto failure;
5885 if (!(vectype.defined & NTA_HASTYPE))
5886 {
5887 if (reg_type == REG_TYPE_ZN)
5888 set_fatal_syntax_error (_("missing type suffix"));
5889 goto failure;
5890 }
5891 }
5892 info->qualifier = vectype_to_qualifier (&vectype);
5893 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5894 goto failure;
5895 break;
5896
5897 case AARCH64_OPND_CRn:
5898 case AARCH64_OPND_CRm:
5899 {
5900 char prefix = *(str++);
5901 if (prefix != 'c' && prefix != 'C')
5902 goto failure;
5903
5904 po_imm_nc_or_fail ();
5905 if (val > 15)
5906 {
5907 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5908 goto failure;
5909 }
5910 info->qualifier = AARCH64_OPND_QLF_CR;
5911 info->imm.value = val;
5912 break;
5913 }
5914
5915 case AARCH64_OPND_SHLL_IMM:
5916 case AARCH64_OPND_IMM_VLSR:
5917 po_imm_or_fail (1, 64);
5918 info->imm.value = val;
5919 break;
5920
5921 case AARCH64_OPND_CCMP_IMM:
5922 case AARCH64_OPND_SIMM5:
5923 case AARCH64_OPND_FBITS:
5924 case AARCH64_OPND_TME_UIMM16:
5925 case AARCH64_OPND_UIMM4:
5926 case AARCH64_OPND_UIMM4_ADDG:
5927 case AARCH64_OPND_UIMM10:
5928 case AARCH64_OPND_UIMM3_OP1:
5929 case AARCH64_OPND_UIMM3_OP2:
5930 case AARCH64_OPND_IMM_VLSL:
5931 case AARCH64_OPND_IMM:
5932 case AARCH64_OPND_IMM_2:
5933 case AARCH64_OPND_WIDTH:
5934 case AARCH64_OPND_SVE_INV_LIMM:
5935 case AARCH64_OPND_SVE_LIMM:
5936 case AARCH64_OPND_SVE_LIMM_MOV:
5937 case AARCH64_OPND_SVE_SHLIMM_PRED:
5938 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5939 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
5940 case AARCH64_OPND_SVE_SHRIMM_PRED:
5941 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5942 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
5943 case AARCH64_OPND_SVE_SIMM5:
5944 case AARCH64_OPND_SVE_SIMM5B:
5945 case AARCH64_OPND_SVE_SIMM6:
5946 case AARCH64_OPND_SVE_SIMM8:
5947 case AARCH64_OPND_SVE_UIMM3:
5948 case AARCH64_OPND_SVE_UIMM7:
5949 case AARCH64_OPND_SVE_UIMM8:
5950 case AARCH64_OPND_SVE_UIMM8_53:
5951 case AARCH64_OPND_IMM_ROT1:
5952 case AARCH64_OPND_IMM_ROT2:
5953 case AARCH64_OPND_IMM_ROT3:
5954 case AARCH64_OPND_SVE_IMM_ROT1:
5955 case AARCH64_OPND_SVE_IMM_ROT2:
5956 case AARCH64_OPND_SVE_IMM_ROT3:
5957 po_imm_nc_or_fail ();
5958 info->imm.value = val;
5959 break;
5960
5961 case AARCH64_OPND_SVE_AIMM:
5962 case AARCH64_OPND_SVE_ASIMM:
5963 po_imm_nc_or_fail ();
5964 info->imm.value = val;
5965 skip_whitespace (str);
5966 if (skip_past_comma (&str))
5967 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5968 else
5969 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5970 break;
5971
5972 case AARCH64_OPND_SVE_PATTERN:
5973 po_enum_or_fail (aarch64_sve_pattern_array);
5974 info->imm.value = val;
5975 break;
5976
5977 case AARCH64_OPND_SVE_PATTERN_SCALED:
5978 po_enum_or_fail (aarch64_sve_pattern_array);
5979 info->imm.value = val;
5980 if (skip_past_comma (&str)
5981 && !parse_shift (&str, info, SHIFTED_MUL))
5982 goto failure;
5983 if (!info->shifter.operator_present)
5984 {
5985 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5986 info->shifter.kind = AARCH64_MOD_MUL;
5987 info->shifter.amount = 1;
5988 }
5989 break;
5990
5991 case AARCH64_OPND_SVE_PRFOP:
5992 po_enum_or_fail (aarch64_sve_prfop_array);
5993 info->imm.value = val;
5994 break;
5995
5996 case AARCH64_OPND_UIMM7:
5997 po_imm_or_fail (0, 127);
5998 info->imm.value = val;
5999 break;
6000
6001 case AARCH64_OPND_IDX:
6002 case AARCH64_OPND_MASK:
6003 case AARCH64_OPND_BIT_NUM:
6004 case AARCH64_OPND_IMMR:
6005 case AARCH64_OPND_IMMS:
6006 po_imm_or_fail (0, 63);
6007 info->imm.value = val;
6008 break;
6009
6010 case AARCH64_OPND_IMM0:
6011 po_imm_nc_or_fail ();
6012 if (val != 0)
6013 {
6014 set_fatal_syntax_error (_("immediate zero expected"));
6015 goto failure;
6016 }
6017 info->imm.value = 0;
6018 break;
6019
6020 case AARCH64_OPND_FPIMM0:
6021 {
6022 int qfloat;
6023 bfd_boolean res1 = FALSE, res2 = FALSE;
6024 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6025 it is probably not worth the effort to support it. */
6026 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
6027 imm_reg_type))
6028 && (error_p ()
6029 || !(res2 = parse_constant_immediate (&str, &val,
6030 imm_reg_type))))
6031 goto failure;
6032 if ((res1 && qfloat == 0) || (res2 && val == 0))
6033 {
6034 info->imm.value = 0;
6035 info->imm.is_fp = 1;
6036 break;
6037 }
6038 set_fatal_syntax_error (_("immediate zero expected"));
6039 goto failure;
6040 }
6041
6042 case AARCH64_OPND_IMM_MOV:
6043 {
6044 char *saved = str;
6045 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6046 reg_name_p (str, REG_TYPE_VN))
6047 goto failure;
6048 str = saved;
6049 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6050 GE_OPT_PREFIX, 1));
6051 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6052 later. fix_mov_imm_insn will try to determine a machine
6053 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6054 message if the immediate cannot be moved by a single
6055 instruction. */
6056 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6057 inst.base.operands[i].skip = 1;
6058 }
6059 break;
6060
6061 case AARCH64_OPND_SIMD_IMM:
6062 case AARCH64_OPND_SIMD_IMM_SFT:
6063 if (! parse_big_immediate (&str, &val, imm_reg_type))
6064 goto failure;
6065 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6066 /* addr_off_p */ 0,
6067 /* need_libopcodes_p */ 1,
6068 /* skip_p */ 1);
6069 /* Parse shift.
6070 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6071 shift, we don't check it here; we leave the checking to
6072 the libopcodes (operand_general_constraint_met_p). By
6073 doing this, we achieve better diagnostics. */
6074 if (skip_past_comma (&str)
6075 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6076 goto failure;
6077 if (!info->shifter.operator_present
6078 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6079 {
6080 /* Default to LSL if not present. Libopcodes prefers shifter
6081 kind to be explicit. */
6082 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6083 info->shifter.kind = AARCH64_MOD_LSL;
6084 }
6085 break;
6086
6087 case AARCH64_OPND_FPIMM:
6088 case AARCH64_OPND_SIMD_FPIMM:
6089 case AARCH64_OPND_SVE_FPIMM8:
6090 {
6091 int qfloat;
6092 bfd_boolean dp_p;
6093
6094 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6095 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6096 || !aarch64_imm_float_p (qfloat))
6097 {
6098 if (!error_p ())
6099 set_fatal_syntax_error (_("invalid floating-point"
6100 " constant"));
6101 goto failure;
6102 }
6103 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6104 inst.base.operands[i].imm.is_fp = 1;
6105 }
6106 break;
6107
6108 case AARCH64_OPND_SVE_I1_HALF_ONE:
6109 case AARCH64_OPND_SVE_I1_HALF_TWO:
6110 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6111 {
6112 int qfloat;
6113 bfd_boolean dp_p;
6114
6115 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6116 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6117 {
6118 if (!error_p ())
6119 set_fatal_syntax_error (_("invalid floating-point"
6120 " constant"));
6121 goto failure;
6122 }
6123 inst.base.operands[i].imm.value = qfloat;
6124 inst.base.operands[i].imm.is_fp = 1;
6125 }
6126 break;
6127
6128 case AARCH64_OPND_LIMM:
6129 po_misc_or_fail (parse_shifter_operand (&str, info,
6130 SHIFTED_LOGIC_IMM));
6131 if (info->shifter.operator_present)
6132 {
6133 set_fatal_syntax_error
6134 (_("shift not allowed for bitmask immediate"));
6135 goto failure;
6136 }
6137 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6138 /* addr_off_p */ 0,
6139 /* need_libopcodes_p */ 1,
6140 /* skip_p */ 1);
6141 break;
6142
6143 case AARCH64_OPND_AIMM:
6144 if (opcode->op == OP_ADD)
6145 /* ADD may have relocation types. */
6146 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6147 SHIFTED_ARITH_IMM));
6148 else
6149 po_misc_or_fail (parse_shifter_operand (&str, info,
6150 SHIFTED_ARITH_IMM));
6151 switch (inst.reloc.type)
6152 {
6153 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6154 info->shifter.amount = 12;
6155 break;
6156 case BFD_RELOC_UNUSED:
6157 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6158 if (info->shifter.kind != AARCH64_MOD_NONE)
6159 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6160 inst.reloc.pc_rel = 0;
6161 break;
6162 default:
6163 break;
6164 }
6165 info->imm.value = 0;
6166 if (!info->shifter.operator_present)
6167 {
6168 /* Default to LSL if not present. Libopcodes prefers shifter
6169 kind to be explicit. */
6170 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6171 info->shifter.kind = AARCH64_MOD_LSL;
6172 }
6173 break;
6174
6175 case AARCH64_OPND_HALF:
6176 {
6177 /* #<imm16> or relocation. */
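/* Illustrative examples (assumed syntax, not from the original source):
   "movz x0, #0x1234, lsl #16" takes the plain-immediate path, while
   "movz x0, #:abs_g1:sym" records a relocation modifier and therefore
   must not carry an explicit shift (see the check below).  */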
6178 int internal_fixup_p;
6179 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6180 if (internal_fixup_p)
6181 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6182 skip_whitespace (str);
6183 if (skip_past_comma (&str))
6184 {
6185 /* {, LSL #<shift>} */
6186 if (! aarch64_gas_internal_fixup_p ())
6187 {
6188 set_fatal_syntax_error (_("can't mix relocation modifier "
6189 "with explicit shift"));
6190 goto failure;
6191 }
6192 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6193 }
6194 else
6195 inst.base.operands[i].shifter.amount = 0;
6196 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6197 inst.base.operands[i].imm.value = 0;
6198 if (! process_movw_reloc_info ())
6199 goto failure;
6200 }
6201 break;
6202
6203 case AARCH64_OPND_EXCEPTION:
6204 case AARCH64_OPND_UNDEFINED:
6205 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6206 imm_reg_type));
6207 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6208 /* addr_off_p */ 0,
6209 /* need_libopcodes_p */ 0,
6210 /* skip_p */ 1);
6211 break;
6212
6213 case AARCH64_OPND_NZCV:
6214 {
6215 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6216 if (nzcv != NULL)
6217 {
6218 str += 4;
6219 info->imm.value = nzcv->value;
6220 break;
6221 }
6222 po_imm_or_fail (0, 15);
6223 info->imm.value = val;
6224 }
6225 break;
6226
6227 case AARCH64_OPND_COND:
6228 case AARCH64_OPND_COND1:
6229 {
6230 char *start = str;
6231 do
6232 str++;
6233 while (ISALPHA (*str));
6234 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6235 if (info->cond == NULL)
6236 {
6237 set_syntax_error (_("invalid condition"));
6238 goto failure;
6239 }
6240 else if (operands[i] == AARCH64_OPND_COND1
6241 && (info->cond->value & 0xe) == 0xe)
6242 {
6243 /* Do not allow AL or NV. */
6244 set_default_error ();
6245 goto failure;
6246 }
6247 }
6248 break;
6249
6250 case AARCH64_OPND_ADDR_ADRP:
6251 po_misc_or_fail (parse_adrp (&str));
6252 /* Clear the value as operand needs to be relocated. */
6253 info->imm.value = 0;
6254 break;
6255
6256 case AARCH64_OPND_ADDR_PCREL14:
6257 case AARCH64_OPND_ADDR_PCREL19:
6258 case AARCH64_OPND_ADDR_PCREL21:
6259 case AARCH64_OPND_ADDR_PCREL26:
6260 po_misc_or_fail (parse_address (&str, info));
6261 if (!info->addr.pcrel)
6262 {
6263 set_syntax_error (_("invalid pc-relative address"));
6264 goto failure;
6265 }
6266 if (inst.gen_lit_pool
6267 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6268 {
6269 /* Only permit "=value" in the literal load instructions.
6270 The literal will be generated by programmer_friendly_fixup. */
6271 set_syntax_error (_("invalid use of \"=immediate\""));
6272 goto failure;
6273 }
6274 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6275 {
6276 set_syntax_error (_("unrecognized relocation suffix"));
6277 goto failure;
6278 }
6279 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6280 {
6281 info->imm.value = inst.reloc.exp.X_add_number;
6282 inst.reloc.type = BFD_RELOC_UNUSED;
6283 }
6284 else
6285 {
6286 info->imm.value = 0;
6287 if (inst.reloc.type == BFD_RELOC_UNUSED)
6288 switch (opcode->iclass)
6289 {
6290 case compbranch:
6291 case condbranch:
6292 /* e.g. CBZ or B.COND */
6293 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6294 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6295 break;
6296 case testbranch:
6297 /* e.g. TBZ */
6298 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6299 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6300 break;
6301 case branch_imm:
6302 /* e.g. B or BL */
6303 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6304 inst.reloc.type =
6305 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6306 : BFD_RELOC_AARCH64_JUMP26;
6307 break;
6308 case loadlit:
6309 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6310 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6311 break;
6312 case pcreladdr:
6313 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6314 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6315 break;
6316 default:
6317 gas_assert (0);
6318 abort ();
6319 }
6320 inst.reloc.pc_rel = 1;
6321 }
6322 break;
6323
6324 case AARCH64_OPND_ADDR_SIMPLE:
6325 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6326 {
6327 /* [<Xn|SP>{, #<simm>}] */
6328 char *start = str;
6329 /* First use the normal address-parsing routines, to get
6330 the usual syntax errors. */
6331 po_misc_or_fail (parse_address (&str, info));
6332 if (info->addr.pcrel || info->addr.offset.is_reg
6333 || !info->addr.preind || info->addr.postind
6334 || info->addr.writeback)
6335 {
6336 set_syntax_error (_("invalid addressing mode"));
6337 goto failure;
6338 }
6339
6340 /* Then retry, matching the specific syntax of these addresses. */
6341 str = start;
6342 po_char_or_fail ('[');
6343 po_reg_or_fail (REG_TYPE_R64_SP);
6344 /* Accept optional ", #0". */
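/* Illustrative (assumed syntax): both "ldxr x0, [x1]" and
   "ldxr x0, [x1, #0]" are accepted by this operand.  */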
6345 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6346 && skip_past_char (&str, ','))
6347 {
6348 skip_past_char (&str, '#');
6349 if (! skip_past_char (&str, '0'))
6350 {
6351 set_fatal_syntax_error
6352 (_("the optional immediate offset can only be 0"));
6353 goto failure;
6354 }
6355 }
6356 po_char_or_fail (']');
6357 break;
6358 }
6359
6360 case AARCH64_OPND_ADDR_REGOFF:
6361 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6362 po_misc_or_fail (parse_address (&str, info));
6363 regoff_addr:
6364 if (info->addr.pcrel || !info->addr.offset.is_reg
6365 || !info->addr.preind || info->addr.postind
6366 || info->addr.writeback)
6367 {
6368 set_syntax_error (_("invalid addressing mode"));
6369 goto failure;
6370 }
6371 if (!info->shifter.operator_present)
6372 {
6373 /* Default to LSL if not present. Libopcodes prefers shifter
6374 kind to be explicit. */
6375 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6376 info->shifter.kind = AARCH64_MOD_LSL;
6377 }
6378 /* Qualifier to be deduced by libopcodes. */
6379 break;
6380
6381 case AARCH64_OPND_ADDR_SIMM7:
6382 po_misc_or_fail (parse_address (&str, info));
6383 if (info->addr.pcrel || info->addr.offset.is_reg
6384 || (!info->addr.preind && !info->addr.postind))
6385 {
6386 set_syntax_error (_("invalid addressing mode"));
6387 goto failure;
6388 }
6389 if (inst.reloc.type != BFD_RELOC_UNUSED)
6390 {
6391 set_syntax_error (_("relocation not allowed"));
6392 goto failure;
6393 }
6394 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6395 /* addr_off_p */ 1,
6396 /* need_libopcodes_p */ 1,
6397 /* skip_p */ 0);
6398 break;
6399
6400 case AARCH64_OPND_ADDR_SIMM9:
6401 case AARCH64_OPND_ADDR_SIMM9_2:
6402 case AARCH64_OPND_ADDR_SIMM11:
6403 case AARCH64_OPND_ADDR_SIMM13:
6404 po_misc_or_fail (parse_address (&str, info));
6405 if (info->addr.pcrel || info->addr.offset.is_reg
6406 || (!info->addr.preind && !info->addr.postind)
6407 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6408 && info->addr.writeback))
6409 {
6410 set_syntax_error (_("invalid addressing mode"));
6411 goto failure;
6412 }
6413 if (inst.reloc.type != BFD_RELOC_UNUSED)
6414 {
6415 set_syntax_error (_("relocation not allowed"));
6416 goto failure;
6417 }
6418 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6419 /* addr_off_p */ 1,
6420 /* need_libopcodes_p */ 1,
6421 /* skip_p */ 0);
6422 break;
6423
6424 case AARCH64_OPND_ADDR_SIMM10:
6425 case AARCH64_OPND_ADDR_OFFSET:
6426 po_misc_or_fail (parse_address (&str, info));
6427 if (info->addr.pcrel || info->addr.offset.is_reg
6428 || !info->addr.preind || info->addr.postind)
6429 {
6430 set_syntax_error (_("invalid addressing mode"));
6431 goto failure;
6432 }
6433 if (inst.reloc.type != BFD_RELOC_UNUSED)
6434 {
6435 set_syntax_error (_("relocation not allowed"));
6436 goto failure;
6437 }
6438 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6439 /* addr_off_p */ 1,
6440 /* need_libopcodes_p */ 1,
6441 /* skip_p */ 0);
6442 break;
6443
6444 case AARCH64_OPND_ADDR_UIMM12:
6445 po_misc_or_fail (parse_address (&str, info));
6446 if (info->addr.pcrel || info->addr.offset.is_reg
6447 || !info->addr.preind || info->addr.writeback)
6448 {
6449 set_syntax_error (_("invalid addressing mode"));
6450 goto failure;
6451 }
6452 if (inst.reloc.type == BFD_RELOC_UNUSED)
6453 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6454 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6455 || (inst.reloc.type
6456 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6457 || (inst.reloc.type
6458 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6459 || (inst.reloc.type
6460 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6461 || (inst.reloc.type
6462 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6463 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6464 /* Leave qualifier to be determined by libopcodes. */
6465 break;
6466
6467 case AARCH64_OPND_SIMD_ADDR_POST:
6468 /* [<Xn|SP>], <Xm|#<amount>> */
6469 po_misc_or_fail (parse_address (&str, info));
6470 if (!info->addr.postind || !info->addr.writeback)
6471 {
6472 set_syntax_error (_("invalid addressing mode"));
6473 goto failure;
6474 }
6475 if (!info->addr.offset.is_reg)
6476 {
6477 if (inst.reloc.exp.X_op == O_constant)
6478 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6479 else
6480 {
6481 set_fatal_syntax_error
6482 (_("writeback value must be an immediate constant"));
6483 goto failure;
6484 }
6485 }
6486 /* No qualifier. */
6487 break;
6488
6489 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6490 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
6491 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6492 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6493 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6494 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6495 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6496 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6497 case AARCH64_OPND_SVE_ADDR_RI_U6:
6498 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6499 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6500 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6501 /* [X<n>{, #imm, MUL VL}]
6502 [X<n>{, #imm}]
6503 but recognizing SVE registers. */
6504 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6505 &offset_qualifier));
6506 if (base_qualifier != AARCH64_OPND_QLF_X)
6507 {
6508 set_syntax_error (_("invalid addressing mode"));
6509 goto failure;
6510 }
6511 sve_regimm:
6512 if (info->addr.pcrel || info->addr.offset.is_reg
6513 || !info->addr.preind || info->addr.writeback)
6514 {
6515 set_syntax_error (_("invalid addressing mode"));
6516 goto failure;
6517 }
6518 if (inst.reloc.type != BFD_RELOC_UNUSED
6519 || inst.reloc.exp.X_op != O_constant)
6520 {
6521 /* Make sure this has priority over
6522 "invalid addressing mode". */
6523 set_fatal_syntax_error (_("constant offset required"));
6524 goto failure;
6525 }
6526 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6527 break;
6528
6529 case AARCH64_OPND_SVE_ADDR_R:
6530 /* [<Xn|SP>{, <R><m>}]
6531 but recognizing SVE registers. */
6532 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6533 &offset_qualifier));
6534 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6535 {
6536 offset_qualifier = AARCH64_OPND_QLF_X;
6537 info->addr.offset.is_reg = 1;
6538 info->addr.offset.regno = 31;
6539 }
6540 else if (base_qualifier != AARCH64_OPND_QLF_X
6541 || offset_qualifier != AARCH64_OPND_QLF_X)
6542 {
6543 set_syntax_error (_("invalid addressing mode"));
6544 goto failure;
6545 }
6546 goto regoff_addr;
6547
6548 case AARCH64_OPND_SVE_ADDR_RR:
6549 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6550 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6551 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6552 case AARCH64_OPND_SVE_ADDR_RX:
6553 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6554 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6555 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6556 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6557 but recognizing SVE registers. */
6558 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6559 &offset_qualifier));
6560 if (base_qualifier != AARCH64_OPND_QLF_X
6561 || offset_qualifier != AARCH64_OPND_QLF_X)
6562 {
6563 set_syntax_error (_("invalid addressing mode"));
6564 goto failure;
6565 }
6566 goto regoff_addr;
6567
6568 case AARCH64_OPND_SVE_ADDR_RZ:
6569 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6570 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6571 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6572 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6573 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6574 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6575 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6576 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6577 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6578 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6579 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6580 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6581 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6582 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6583 &offset_qualifier));
6584 if (base_qualifier != AARCH64_OPND_QLF_X
6585 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6586 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6587 {
6588 set_syntax_error (_("invalid addressing mode"));
6589 goto failure;
6590 }
6591 info->qualifier = offset_qualifier;
6592 goto regoff_addr;
6593
6594 case AARCH64_OPND_SVE_ADDR_ZX:
6595 /* [Zn.<T>{, <Xm>}]. */
6596 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6597 &offset_qualifier));
6598 /* Things to check:
6599 base_qualifier either S_S or S_D
6600 offset_qualifier must be X
6601 */
6602 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6603 && base_qualifier != AARCH64_OPND_QLF_S_D)
6604 || offset_qualifier != AARCH64_OPND_QLF_X)
6605 {
6606 set_syntax_error (_("invalid addressing mode"));
6607 goto failure;
6608 }
6609 info->qualifier = base_qualifier;
6610 if (!info->addr.offset.is_reg || info->addr.pcrel
6611 || !info->addr.preind || info->addr.writeback
6612 || info->shifter.operator_present != 0)
6613 {
6614 set_syntax_error (_("invalid addressing mode"));
6615 goto failure;
6616 }
6617 info->shifter.kind = AARCH64_MOD_LSL;
6618 break;
6619
6620
6621 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6622 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6623 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6624 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6625 /* [Z<n>.<T>{, #imm}] */
6626 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6627 &offset_qualifier));
6628 if (base_qualifier != AARCH64_OPND_QLF_S_S
6629 && base_qualifier != AARCH64_OPND_QLF_S_D)
6630 {
6631 set_syntax_error (_("invalid addressing mode"));
6632 goto failure;
6633 }
6634 info->qualifier = base_qualifier;
6635 goto sve_regimm;
6636
6637 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6638 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6639 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6640 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6641 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6642
6643 We don't reject:
6644
6645 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6646
6647 here since we get better error messages by leaving it to
6648 the qualifier checking routines. */
6649 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6650 &offset_qualifier));
6651 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6652 && base_qualifier != AARCH64_OPND_QLF_S_D)
6653 || offset_qualifier != base_qualifier)
6654 {
6655 set_syntax_error (_("invalid addressing mode"));
6656 goto failure;
6657 }
6658 info->qualifier = base_qualifier;
6659 goto regoff_addr;
6660
6661 case AARCH64_OPND_SYSREG:
6662 {
6663 uint32_t sysreg_flags;
6664 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6665 &sysreg_flags)) == PARSE_FAIL)
6666 {
6667 set_syntax_error (_("unknown or missing system register name"));
6668 goto failure;
6669 }
6670 inst.base.operands[i].sysreg.value = val;
6671 inst.base.operands[i].sysreg.flags = sysreg_flags;
6672 break;
6673 }
6674
6675 case AARCH64_OPND_PSTATEFIELD:
6676 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6677 == PARSE_FAIL)
6678 {
6679 set_syntax_error (_("unknown or missing PSTATE field name"));
6680 goto failure;
6681 }
6682 inst.base.operands[i].pstatefield = val;
6683 break;
6684
6685 case AARCH64_OPND_SYSREG_IC:
6686 inst.base.operands[i].sysins_op =
6687 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6688 goto sys_reg_ins;
6689
6690 case AARCH64_OPND_SYSREG_DC:
6691 inst.base.operands[i].sysins_op =
6692 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6693 goto sys_reg_ins;
6694
6695 case AARCH64_OPND_SYSREG_AT:
6696 inst.base.operands[i].sysins_op =
6697 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6698 goto sys_reg_ins;
6699
6700 case AARCH64_OPND_SYSREG_SR:
6701 inst.base.operands[i].sysins_op =
6702 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6703 goto sys_reg_ins;
6704
6705 case AARCH64_OPND_SYSREG_TLBI:
6706 inst.base.operands[i].sysins_op =
6707 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6708 sys_reg_ins:
6709 if (inst.base.operands[i].sysins_op == NULL)
6710 {
6711 set_fatal_syntax_error ( _("unknown or missing operation name"));
6712 goto failure;
6713 }
6714 break;
6715
6716 case AARCH64_OPND_BARRIER:
6717 case AARCH64_OPND_BARRIER_ISB:
6718 val = parse_barrier (&str);
6719 if (val != PARSE_FAIL
6720 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6721 {
6722 /* ISB only accepts the option name 'sy'. */
6723 set_syntax_error
6724 (_("the specified option is not accepted in ISB"));
6725 /* Turn off backtrack as this optional operand is present. */
6726 backtrack_pos = 0;
6727 goto failure;
6728 }
6729 if (val != PARSE_FAIL
6730 && operands[i] == AARCH64_OPND_BARRIER)
6731 {
6732 /* Regular barriers accept options CRm (C0-C15).
6733 DSB nXS barrier variant accepts values > 15. */
6734 if (val < 0 || val > 15)
6735 {
6736 set_syntax_error (_("the specified option is not accepted in DSB"));
6737 goto failure;
6738 }
6739 }
6740 /* This is an extension to accept a 0..15 immediate. */
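/* Illustrative (assumption): "dsb #15" is accepted here as an equivalent
   of "dsb sy", since CRm value 0xf names the SY option.  */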
6741 if (val == PARSE_FAIL)
6742 po_imm_or_fail (0, 15);
6743 info->barrier = aarch64_barrier_options + val;
6744 break;
6745
6746 case AARCH64_OPND_BARRIER_DSB_NXS:
6747 val = parse_barrier (&str);
6748 if (val != PARSE_FAIL)
6749 {
6750 /* DSB nXS barrier variant accepts only <option>nXS qualifiers. */
6751 if (!(val == 16 || val == 20 || val == 24 || val == 28))
6752 {
6753 set_syntax_error (_("the specified option is not accepted in DSB"));
6754 /* Turn off backtrack as this optional operand is present. */
6755 backtrack_pos = 0;
6756 goto failure;
6757 }
6758 }
6759 else
6760 {
6761 /* DSB nXS barrier variant accepts a 5-bit unsigned immediate, with
6762 possible values 16, 20, 24 or 28, encoded in val<3:2>. */
6763 if (! parse_constant_immediate (&str, &val, imm_reg_type))
6764 goto failure;
6765 if (!(val == 16 || val == 20 || val == 24 || val == 28))
6766 {
6767 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
6768 goto failure;
6769 }
6770 }
6771 /* Option index is encoded as 2-bit value in val<3:2>. */
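/* Illustrative: the arithmetic below maps val 16 -> index 0, 20 -> 1,
   24 -> 2 and 28 -> 3 into aarch64_barrier_dsb_nxs_options.  */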
6772 val = (val >> 2) - 4;
6773 info->barrier = aarch64_barrier_dsb_nxs_options + val;
6774 break;
6775
6776 case AARCH64_OPND_PRFOP:
6777 val = parse_pldop (&str);
6778 /* This is an extension to accept a 0..31 immediate. */
6779 if (val == PARSE_FAIL)
6780 po_imm_or_fail (0, 31);
6781 inst.base.operands[i].prfop = aarch64_prfops + val;
6782 break;
6783
6784 case AARCH64_OPND_BARRIER_PSB:
6785 val = parse_barrier_psb (&str, &(info->hint_option));
6786 if (val == PARSE_FAIL)
6787 goto failure;
6788 break;
6789
6790 case AARCH64_OPND_BTI_TARGET:
6791 val = parse_bti_operand (&str, &(info->hint_option));
6792 if (val == PARSE_FAIL)
6793 goto failure;
6794 break;
6795
6796 case AARCH64_OPND_CSRE_CSR:
6797 val = parse_csr_operand (&str);
6798 if (val == PARSE_FAIL)
6799 goto failure;
6800 break;
6801
6802 default:
6803 as_fatal (_("unhandled operand code %d"), operands[i]);
6804 }
6805
6806 /* If we get here, this operand was successfully parsed. */
6807 inst.base.operands[i].present = 1;
6808 continue;
6809
6810 failure:
6811 /* The parse routine should already have set the error, but in case
6812 not, set a default one here. */
6813 if (! error_p ())
6814 set_default_error ();
6815
6816 if (! backtrack_pos)
6817 goto parse_operands_return;
6818
6819 {
6820 /* We reach here because this operand is marked as optional, and
6821 either no operand was supplied or the operand was supplied but it
6822 was syntactically incorrect. In the latter case we report an
6823 error. In the former case we perform a few more checks before
6824 dropping through to the code to insert the default operand. */
6825
6826 char *tmp = backtrack_pos;
6827 char endchar = END_OF_INSN;
6828
6829 if (i != (aarch64_num_of_operands (opcode) - 1))
6830 endchar = ',';
6831 skip_past_char (&tmp, ',');
6832
6833 if (*tmp != endchar)
6834 /* The user has supplied an operand in the wrong format. */
6835 goto parse_operands_return;
6836
6837 /* Make sure there is not a comma before the optional operand.
6838 For example the fifth operand of 'sys' is optional:
6839
6840 sys #0,c0,c0,#0, <--- wrong
6841 sys #0,c0,c0,#0 <--- correct. */
6842 if (comma_skipped_p && i && endchar == END_OF_INSN)
6843 {
6844 set_fatal_syntax_error
6845 (_("unexpected comma before the omitted optional operand"));
6846 goto parse_operands_return;
6847 }
6848 }
6849
6850 /* Reaching here means we are dealing with an optional operand that is
6851 omitted from the assembly line. */
6852 gas_assert (optional_operand_p (opcode, i));
6853 info->present = 0;
6854 process_omitted_operand (operands[i], opcode, i, info);
6855
6856 /* Try again, skipping the optional operand at backtrack_pos. */
6857 str = backtrack_pos;
6858 backtrack_pos = 0;
6859
6860 /* Clear any error record after the omitted optional operand has been
6861 successfully handled. */
6862 clear_error ();
6863 }
6864
6865 /* Check if we have parsed all the operands. */
6866 if (*str != '\0' && ! error_p ())
6867 {
6868 /* Set I to the index of the last present operand; this is
6869 for the purpose of diagnostics. */
6870 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6871 ;
6872 set_fatal_syntax_error
6873 (_("unexpected characters following instruction"));
6874 }
6875
6876 parse_operands_return:
6877
6878 if (error_p ())
6879 {
6880 DEBUG_TRACE ("parsing FAIL: %s - %s",
6881 operand_mismatch_kind_names[get_error_kind ()],
6882 get_error_message ());
6883 /* Record the operand error properly; this is useful when there
6884 are multiple instruction templates for a mnemonic name, so that
6885 later on, we can select the error that most closely describes
6886 the problem. */
6887 record_operand_error (opcode, i, get_error_kind (),
6888 get_error_message ());
6889 return FALSE;
6890 }
6891 else
6892 {
6893 DEBUG_TRACE ("parsing SUCCESS");
6894 return TRUE;
6895 }
6896 }
6897
6898 /* Perform some fix-ups to provide programmer-friendly features while
6899 keeping libopcodes happy, i.e. libopcodes only accepts
6900 the preferred architectural syntax.
6901 Return FALSE if there is any failure; otherwise return TRUE. */
6902
6903 static bfd_boolean
6904 programmer_friendly_fixup (aarch64_instruction *instr)
6905 {
6906 aarch64_inst *base = &instr->base;
6907 const aarch64_opcode *opcode = base->opcode;
6908 enum aarch64_op op = opcode->op;
6909 aarch64_opnd_info *operands = base->operands;
6910
6911 DEBUG_TRACE ("enter");
6912
6913 switch (opcode->iclass)
6914 {
6915 case testbranch:
6916 /* TBNZ Xn|Wn, #uimm6, label
6917 Test and Branch Not Zero: conditionally jumps to label if bit number
6918 uimm6 in register Xn is not zero. The bit number implies the width of
6919 the register, which may be written as, and should be disassembled as, Wn
6920 if uimm6 is less than 32. */
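/* Illustrative examples (assumed syntax): "tbz w0, #40, lbl" is rejected
   below because the bit number must be 0-31 when Wn is written, while
   "tbz w0, #3, lbl" is accepted and encoded using the X-register form.  */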
6921 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
6922 {
6923 if (operands[1].imm.value >= 32)
6924 {
6925 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
6926 0, 31);
6927 return FALSE;
6928 }
6929 operands[0].qualifier = AARCH64_OPND_QLF_X;
6930 }
6931 break;
6932 case loadlit:
6933 /* LDR Wt, label | =value
6934 As a convenience, assemblers will typically permit the notation
6935 "=value" in conjunction with the pc-relative literal load instructions
6936 to automatically place an immediate value or symbolic address in a
6937 nearby literal pool and generate a hidden label which references it.
6938 ISREG has been set to 0 in the case of =value. */
6939 if (instr->gen_lit_pool
6940 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
6941 {
6942 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
6943 if (op == OP_LDRSW_LIT)
6944 size = 4;
6945 if (instr->reloc.exp.X_op != O_constant
6946 && instr->reloc.exp.X_op != O_big
6947 && instr->reloc.exp.X_op != O_symbol)
6948 {
6949 record_operand_error (opcode, 1,
6950 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
6951 _("constant expression expected"));
6952 return FALSE;
6953 }
6954 if (! add_to_lit_pool (&instr->reloc.exp, size))
6955 {
6956 record_operand_error (opcode, 1,
6957 AARCH64_OPDE_OTHER_ERROR,
6958 _("literal pool insertion failed"));
6959 return FALSE;
6960 }
6961 }
6962 break;
6963 case log_shift:
6964 case bitfield:
6965 /* UXT[BHW] Wd, Wn
6966 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
6967 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
6968 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
6969 A programmer-friendly assembler should accept a destination Xd in
6970 place of Wd; however, that is not the preferred form for disassembly.
6971 */
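/* Illustrative (assumed syntax): "uxtb x1, w2" is accepted here and
   treated as "uxtb w1, w2".  */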
6972 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
6973 && operands[1].qualifier == AARCH64_OPND_QLF_W
6974 && operands[0].qualifier == AARCH64_OPND_QLF_X)
6975 operands[0].qualifier = AARCH64_OPND_QLF_W;
6976 break;
6977
6978 case addsub_ext:
6979 {
6980 /* In the 64-bit form, the final register operand is written as Wm
6981 for all but the (possibly omitted) UXTX/LSL and SXTX
6982 operators.
6983 As a programmer-friendly assembler, we accept e.g.
6984 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
6985 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
6986 int idx = aarch64_operand_index (opcode->operands,
6987 AARCH64_OPND_Rm_EXT);
6988 gas_assert (idx == 1 || idx == 2);
6989 if (operands[0].qualifier == AARCH64_OPND_QLF_X
6990 && operands[idx].qualifier == AARCH64_OPND_QLF_X
6991 && operands[idx].shifter.kind != AARCH64_MOD_LSL
6992 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
6993 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
6994 operands[idx].qualifier = AARCH64_OPND_QLF_W;
6995 }
6996 break;
6997
6998 default:
6999 break;
7000 }
7001
7002 DEBUG_TRACE ("exit with SUCCESS");
7003 return TRUE;
7004 }
7005
7006 /* Check for loads and stores that will cause unpredictable behavior. */
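/* Illustrative examples (assumed syntax): "ldr x0, [x0], #8" triggers the
   writeback warning below, and "ldp x0, x0, [x1]" triggers the
   register-pair warning.  */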
7007
7008 static void
7009 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
7010 {
7011 aarch64_inst *base = &instr->base;
7012 const aarch64_opcode *opcode = base->opcode;
7013 const aarch64_opnd_info *opnds = base->operands;
7014 switch (opcode->iclass)
7015 {
7016 case ldst_pos:
7017 case ldst_imm9:
7018 case ldst_imm10:
7019 case ldst_unscaled:
7020 case ldst_unpriv:
7021 /* Loading/storing the base register is unpredictable if writeback. */
7022 if ((aarch64_get_operand_class (opnds[0].type)
7023 == AARCH64_OPND_CLASS_INT_REG)
7024 && opnds[0].reg.regno == opnds[1].addr.base_regno
7025 && opnds[1].addr.base_regno != REG_SP
7026 /* Exempt STG/STZG/ST2G/STZ2G. */
7027 && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
7028 && opnds[1].addr.writeback)
7029 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
7030 break;
7031
7032 case ldstpair_off:
7033 case ldstnapair_offs:
7034 case ldstpair_indexed:
7035 /* Loading/storing the base register is unpredictable if writeback. */
7036 if ((aarch64_get_operand_class (opnds[0].type)
7037 == AARCH64_OPND_CLASS_INT_REG)
7038 && (opnds[0].reg.regno == opnds[2].addr.base_regno
7039 || opnds[1].reg.regno == opnds[2].addr.base_regno)
7040 && opnds[2].addr.base_regno != REG_SP
7041 /* Exempt STGP. */
7042 && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
7043 && opnds[2].addr.writeback)
7044 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
7045 /* Load operations must load different registers. */
7046 if ((opcode->opcode & (1 << 22))
7047 && opnds[0].reg.regno == opnds[1].reg.regno)
7048 as_warn (_("unpredictable load of register pair -- `%s'"), str);
7049 break;
7050
7051 case ldstexcl:
7052 /* It is unpredictable if the destination and status registers are the
7053 same. */
7054 if ((aarch64_get_operand_class (opnds[0].type)
7055 == AARCH64_OPND_CLASS_INT_REG)
7056 && (aarch64_get_operand_class (opnds[1].type)
7057 == AARCH64_OPND_CLASS_INT_REG)
7058 && (opnds[0].reg.regno == opnds[1].reg.regno
7059 || opnds[0].reg.regno == opnds[2].reg.regno))
7060 as_warn (_("unpredictable: identical transfer and status registers"
7061 " --`%s'"),
7062 str);
7063
7064 break;
7065
7066 default:
7067 break;
7068 }
7069 }
7070
7071 static void
7072 force_automatic_sequence_close (void)
7073 {
7074 if (now_instr_sequence.instr)
7075 {
7076 as_warn (_("previous `%s' sequence has not been closed"),
7077 now_instr_sequence.instr->opcode->name);
7078 init_insn_sequence (NULL, &now_instr_sequence);
7079 }
7080 }
7081
7082 /* A wrapper function to interface with libopcodes on encoding and
7083 record the error message if there is any.
7084
7085 Return TRUE on success; otherwise return FALSE. */
7086
7087 static bfd_boolean
7088 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7089 aarch64_insn *code)
7090 {
7091 aarch64_operand_error error_info;
7092 memset (&error_info, '\0', sizeof (error_info));
7093 error_info.kind = AARCH64_OPDE_NIL;
7094 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7095 && !error_info.non_fatal)
7096 return TRUE;
7097
7098 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7099 record_operand_error_info (opcode, &error_info);
7100 return error_info.non_fatal;
7101 }
7102
7103 #ifdef DEBUG_AARCH64
7104 static inline void
7105 dump_opcode_operands (const aarch64_opcode *opcode)
7106 {
7107 int i = 0;
7108 while (opcode->operands[i] != AARCH64_OPND_NIL)
7109 {
7110 aarch64_verbose ("\t\t opnd%d: %s", i,
7111 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7112 ? aarch64_get_operand_name (opcode->operands[i])
7113 : aarch64_get_operand_desc (opcode->operands[i]));
7114 ++i;
7115 }
7116 }
7117 #endif /* DEBUG_AARCH64 */
7118
7119 /* This is the guts of the machine-dependent assembler. STR points to a
7120 machine dependent instruction. This function is supposed to emit
7121 the frags/bytes it assembles to. */
7122
7123 void
7124 md_assemble (char *str)
7125 {
7126 char *p = str;
7127 templates *template;
7128 aarch64_opcode *opcode;
7129 aarch64_inst *inst_base;
7130 unsigned saved_cond;
7131
7132 /* Align the previous label if needed. */
7133 if (last_label_seen != NULL)
7134 {
7135 symbol_set_frag (last_label_seen, frag_now);
7136 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
7137 S_SET_SEGMENT (last_label_seen, now_seg);
7138 }
7139
7140 /* Update the current insn_sequence from the segment. */
7141 insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;
7142
7143 inst.reloc.type = BFD_RELOC_UNUSED;
7144
7145 DEBUG_TRACE ("\n\n");
7146 DEBUG_TRACE ("==============================");
7147 DEBUG_TRACE ("Enter md_assemble with %s", str);
7148
7149 template = opcode_lookup (&p);
7150 if (!template)
7151 {
7152 /* It wasn't an instruction, but it might be a register alias of
7153 the form 'alias .req reg'. */
7154 if (!create_register_alias (str, p))
7155 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
7156 str);
7157 return;
7158 }
7159
7160 skip_whitespace (p);
7161 if (*p == ',')
7162 {
7163 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
7164 get_mnemonic_name (str), str);
7165 return;
7166 }
7167
7168 init_operand_error_report ();
7169
7170 /* Sections are assumed to start aligned. In an executable section, there is no
7171 MAP_DATA symbol pending, so we only align the address during
7172 the MAP_DATA --> MAP_INSN transition.
7173 For other sections, this is not guaranteed. */
7174 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
7175 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
7176 frag_align_code (2, 0);
7177
7178 saved_cond = inst.cond;
7179 reset_aarch64_instruction (&inst);
7180 inst.cond = saved_cond;
7181
7182 /* Iterate through all opcode entries with the same mnemonic name. */
7183 do
7184 {
7185 opcode = template->opcode;
7186
7187 DEBUG_TRACE ("opcode %s found", opcode->name);
7188 #ifdef DEBUG_AARCH64
7189 if (debug_dump)
7190 dump_opcode_operands (opcode);
7191 #endif /* DEBUG_AARCH64 */
7192
7193 mapping_state (MAP_INSN);
7194
7195 inst_base = &inst.base;
7196 inst_base->opcode = opcode;
7197
7198 /* Truly conditionally executed instructions, e.g. b.cond. */
7199 if (opcode->flags & F_COND)
7200 {
7201 gas_assert (inst.cond != COND_ALWAYS);
7202 inst_base->cond = get_cond_from_value (inst.cond);
7203 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
7204 }
7205 else if (inst.cond != COND_ALWAYS)
7206 {
7207 /* We shouldn't arrive here: the assembly looks like a
7208 conditional instruction but the opcode found is unconditional. */
7209 gas_assert (0);
7210 continue;
7211 }
7212
7213 if (parse_operands (p, opcode)
7214 && programmer_friendly_fixup (&inst)
7215 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
7216 {
7217 /* Check that this instruction is supported for this CPU. */
7218 if (!opcode->avariant
7219 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
7220 {
7221 as_bad (_("selected processor does not support `%s'"), str);
7222 return;
7223 }
7224
7225 warn_unpredictable_ldst (&inst, str);
7226
7227 if (inst.reloc.type == BFD_RELOC_UNUSED
7228 || !inst.reloc.need_libopcodes_p)
7229 output_inst (NULL);
7230 else
7231 {
7232 /* If there is relocation generated for the instruction,
7233 store the instruction information for the future fix-up. */
7234 struct aarch64_inst *copy;
7235 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
7236 copy = XNEW (struct aarch64_inst);
7237 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
7238 output_inst (copy);
7239 }
7240
7241 /* Issue non-fatal messages if any. */
7242 output_operand_error_report (str, TRUE);
7243 return;
7244 }
7245
7246 template = template->next;
7247 if (template != NULL)
7248 {
7249 reset_aarch64_instruction (&inst);
7250 inst.cond = saved_cond;
7251 }
7252 }
7253 while (template != NULL);
7254
7255 /* Issue the error messages if any. */
7256 output_operand_error_report (str, FALSE);
7257 }
7258
7259 /* Various frobbings of labels and their addresses. */
7260
7261 void
7262 aarch64_start_line_hook (void)
7263 {
7264 last_label_seen = NULL;
7265 }
7266
7267 void
7268 aarch64_frob_label (symbolS * sym)
7269 {
7270 last_label_seen = sym;
7271
7272 dwarf2_emit_label (sym);
7273 }
7274
7275 void
7276 aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
7277 {
7278 /* Check to see if we have a block to close. */
7279 force_automatic_sequence_close ();
7280 }
7281
7282 int
7283 aarch64_data_in_code (void)
7284 {
7285 if (!strncmp (input_line_pointer + 1, "data:", 5))
7286 {
7287 *input_line_pointer = '/';
7288 input_line_pointer += 5;
7289 *input_line_pointer = 0;
7290 return 1;
7291 }
7292
7293 return 0;
7294 }
7295
7296 char *
7297 aarch64_canonicalize_symbol_name (char *name)
7298 {
7299 int len;
7300
7301 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
7302 *(name + len - 5) = 0;
7303
7304 return name;
7305 }
7306 \f
7307 /* Table of all register names defined by default. The user can
7308 define additional names with .req. Note that all register names
7309 should appear in both upper and lowercase variants. Some registers
7310 also have mixed-case names. */
7311
7312 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
7313 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
7314 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
7315 #define REGSET16(p,t) \
7316 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
7317 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
7318 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
7319 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
7320 #define REGSET31(p,t) \
7321 REGSET16(p, t), \
7322 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
7323 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
7324 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
7325 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
7326 #define REGSET(p,t) \
7327 REGSET31(p,t), REGNUM(p,31,t)
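/* For example, REGSET31 (x, R_64) expands to entries x0..x30; register
   number 31 is excluded there because it is named separately below as
   sp/wsp or xzr/wzr depending on context.  */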
7328
7329 /* These go into aarch64_reg_hsh hash-table. */
7330 static const reg_entry reg_names[] = {
7331 /* Integer registers. */
7332 REGSET31 (x, R_64), REGSET31 (X, R_64),
7333 REGSET31 (w, R_32), REGSET31 (W, R_32),
7334
7335 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
7336 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
7337 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
7338 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
7339 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
7340 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
7341
7342 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
7343 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
7344
7345 /* Floating-point single precision registers. */
7346 REGSET (s, FP_S), REGSET (S, FP_S),
7347
7348 /* Floating-point double precision registers. */
7349 REGSET (d, FP_D), REGSET (D, FP_D),
7350
7351 /* Floating-point half precision registers. */
7352 REGSET (h, FP_H), REGSET (H, FP_H),
7353
7354 /* Floating-point byte precision registers. */
7355 REGSET (b, FP_B), REGSET (B, FP_B),
7356
7357 /* Floating-point quad precision registers. */
7358 REGSET (q, FP_Q), REGSET (Q, FP_Q),
7359
7360 /* FP/SIMD registers. */
7361 REGSET (v, VN), REGSET (V, VN),
7362
7363 /* SVE vector registers. */
7364 REGSET (z, ZN), REGSET (Z, ZN),
7365
7366 /* SVE predicate registers. */
7367 REGSET16 (p, PN), REGSET16 (P, PN)
7368 };
7369
7370 #undef REGDEF
7371 #undef REGDEF_ALIAS
7372 #undef REGNUM
7373 #undef REGSET16
7374 #undef REGSET31
7375 #undef REGSET
7376
7377 #define N 1
7378 #define n 0
7379 #define Z 1
7380 #define z 0
7381 #define C 1
7382 #define c 0
7383 #define V 1
7384 #define v 0
7385 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
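/* Illustrative: with the defines above, "nZCv" maps to
   B (n, Z, C, v) = 0b0110, i.e. the Z and C flags set.  */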
7386 static const asm_nzcv nzcv_names[] = {
7387 {"nzcv", B (n, z, c, v)},
7388 {"nzcV", B (n, z, c, V)},
7389 {"nzCv", B (n, z, C, v)},
7390 {"nzCV", B (n, z, C, V)},
7391 {"nZcv", B (n, Z, c, v)},
7392 {"nZcV", B (n, Z, c, V)},
7393 {"nZCv", B (n, Z, C, v)},
7394 {"nZCV", B (n, Z, C, V)},
7395 {"Nzcv", B (N, z, c, v)},
7396 {"NzcV", B (N, z, c, V)},
7397 {"NzCv", B (N, z, C, v)},
7398 {"NzCV", B (N, z, C, V)},
7399 {"NZcv", B (N, Z, c, v)},
7400 {"NZcV", B (N, Z, c, V)},
7401 {"NZCv", B (N, Z, C, v)},
7402 {"NZCV", B (N, Z, C, V)}
7403 };
7404
7405 #undef N
7406 #undef n
7407 #undef Z
7408 #undef z
7409 #undef C
7410 #undef c
7411 #undef V
7412 #undef v
7413 #undef B
7414 \f
7415 /* MD interface: bits in the object file. */
7416
7417 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7418 for use in the a.out file, and store them in the array pointed to by buf.
7419 This knows about the endianness of the target machine and does
7420 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
7421 2 (short) and 4 (long). Floating-point numbers are put out as a series of
7422 LITTLENUMS (shorts, here at least). */
7423
7424 void
7425 md_number_to_chars (char *buf, valueT val, int n)
7426 {
7427 if (target_big_endian)
7428 number_to_chars_bigendian (buf, val, n);
7429 else
7430 number_to_chars_littleendian (buf, val, n);
7431 }
7432
7433 /* MD interface: Sections. */
7434
7435 /* Estimate the size of a frag before relaxing. Assume everything fits in
7436 4 bytes. */
7437
7438 int
7439 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
7440 {
7441 fragp->fr_var = 4;
7442 return 4;
7443 }
7444
7445 /* Round up a section size to the appropriate boundary. */
7446
7447 valueT
7448 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
7449 {
7450 return size;
7451 }
7452
7453 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7454 of an rs_align_code fragment.
7455
7456 Here we fill the frag with the appropriate info for padding the
7457 output stream. The resulting frag will consist of a fixed (fr_fix)
7458 and of a repeating (fr_var) part.
7459
7460 The fixed content is always emitted before the repeating content and
7461 these two parts are used as follows in constructing the output:
7462 - the fixed part will be used to align to a valid instruction word
7463 boundary, in case that we start at a misaligned address; as no
7464 executable instruction can live at the misaligned location, we
7465 simply fill with zeros;
7466 - the variable part will be used to cover the remaining padding and
7467 we fill using the AArch64 NOP instruction.
7468
7469 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7470 enough storage space for up to 3 bytes of padding back to a valid
7471 instruction alignment and exactly 4 bytes to store the NOP pattern. */
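/* Illustrative example: if 6 bytes of padding are needed starting 2 bytes
   past a 4-byte boundary, the frag contributes 2 zero bytes (the fixed
   part) followed by one 4-byte NOP (the repeating part).  */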
7472
7473 void
7474 aarch64_handle_align (fragS * fragP)
7475 {
7476 /* NOP = d503201f */
7477 /* AArch64 instructions are always little-endian. */
7478 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
7479
7480 int bytes, fix, noop_size;
7481 char *p;
7482
7483 if (fragP->fr_type != rs_align_code)
7484 return;
7485
7486 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
7487 p = fragP->fr_literal + fragP->fr_fix;
7488
7489 #ifdef OBJ_ELF
7490 gas_assert (fragP->tc_frag_data.recorded);
7491 #endif
7492
7493 noop_size = sizeof (aarch64_noop);
7494
7495 fix = bytes & (noop_size - 1);
7496 if (fix)
7497 {
7498 #ifdef OBJ_ELF
7499 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
7500 #endif
7501 memset (p, 0, fix);
7502 p += fix;
7503 fragP->fr_fix += fix;
7504 }
7505
7506 if (noop_size)
7507 memcpy (p, aarch64_noop, noop_size);
7508 fragP->fr_var = noop_size;
7509 }
7510
7511 /* Perform target specific initialisation of a frag.
7512 Note - despite the name this initialisation is not done when the frag
7513 is created, but only when its type is assigned. A frag can be created
7514 and used a long time before its type is set, so beware of assuming that
7515 this initialisation is performed first. */
7516
7517 #ifndef OBJ_ELF
7518 void
7519 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
7520 int max_chars ATTRIBUTE_UNUSED)
7521 {
7522 }
7523
7524 #else /* OBJ_ELF is defined. */
7525 void
7526 aarch64_init_frag (fragS * fragP, int max_chars)
7527 {
7528 /* Record a mapping symbol for alignment frags. We will delete this
7529 later if the alignment ends up empty. */
7530 if (!fragP->tc_frag_data.recorded)
7531 fragP->tc_frag_data.recorded = 1;
7532
7533 /* PR 21809: Do not set a mapping state for debug sections
7534 - it just confuses other tools. */
7535 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
7536 return;
7537
7538 switch (fragP->fr_type)
7539 {
7540 case rs_align_test:
7541 case rs_fill:
7542 mapping_state_2 (MAP_DATA, max_chars);
7543 break;
7544 case rs_align:
7545 /* PR 20364: We can get alignment frags in code sections,
7546 so do not just assume that we should use the MAP_DATA state. */
7547 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
7548 break;
7549 case rs_align_code:
7550 mapping_state_2 (MAP_INSN, max_chars);
7551 break;
7552 default:
7553 break;
7554 }
7555 }
7556 \f
7557 /* Initialize the DWARF-2 unwind information for this procedure. */
7558
7559 void
7560 tc_aarch64_frame_initial_instructions (void)
7561 {
7562 cfi_add_CFA_def_cfa (REG_SP, 0);
7563 }
7564 #endif /* OBJ_ELF */
7565
7566 /* Convert REGNAME to a DWARF-2 register number. */
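/* Illustrative mappings implied by the code below: "x29" -> 29,
   "sp" -> 31, "d8" -> 72 (FP/SIMD registers start at 64).  */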
7567
7568 int
7569 tc_aarch64_regname_to_dw2regnum (char *regname)
7570 {
7571 const reg_entry *reg = parse_reg (&regname);
7572 if (reg == NULL)
7573 return -1;
7574
7575 switch (reg->type)
7576 {
7577 case REG_TYPE_SP_32:
7578 case REG_TYPE_SP_64:
7579 case REG_TYPE_R_32:
7580 case REG_TYPE_R_64:
7581 return reg->number;
7582
7583 case REG_TYPE_FP_B:
7584 case REG_TYPE_FP_H:
7585 case REG_TYPE_FP_S:
7586 case REG_TYPE_FP_D:
7587 case REG_TYPE_FP_Q:
7588 return reg->number + 64;
7589
7590 default:
7591 break;
7592 }
7593 return -1;
7594 }
7595
7596 /* Implement DWARF2_ADDR_SIZE. */
7597
7598 int
7599 aarch64_dwarf2_addr_size (void)
7600 {
7601 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7602 if (ilp32_p)
7603 return 4;
7604 #endif
7605 return bfd_arch_bits_per_address (stdoutput) / 8;
7606 }
7607
7608 /* MD interface: Symbol and relocation handling. */
7609
7610 /* Return the address within the segment that a PC-relative fixup is
7611 relative to. For AArch64, PC-relative fixups applied to instructions
7612 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7613
7614 long
7615 md_pcrel_from_section (fixS * fixP, segT seg)
7616 {
7617 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7618
7619 /* If this is pc-relative and we are going to emit a relocation
7620 then we just want to put out any pipeline compensation that the linker
7621 will need. Otherwise we want to use the calculated base. */
7622 if (fixP->fx_pcrel
7623 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7624 || aarch64_force_relocation (fixP)))
7625 base = 0;
7626
7627 /* AArch64 should be consistent for all pc-relative relocations. */
7628 return base + AARCH64_PCREL_OFFSET;
7629 }
7630
7631 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
7632 Otherwise we have no need to default values of symbols. */
7633
7634 symbolS *
7635 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
7636 {
7637 #ifdef OBJ_ELF
7638 if (name[0] == '_' && name[1] == 'G'
7639 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
7640 {
7641 if (!GOT_symbol)
7642 {
7643 if (symbol_find (name))
7644 as_bad (_("GOT already in the symbol table"));
7645
7646 GOT_symbol = symbol_new (name, undefined_section,
7647 &zero_address_frag, 0);
7648 }
7649
7650 return GOT_symbol;
7651 }
7652 #endif
7653
7654 return 0;
7655 }
7656
7657 /* Return non-zero if the indicated VALUE has overflowed the maximum
7658 range expressible by an unsigned number with the indicated number of
7659 BITS. */
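/* Illustrative: unsigned_overflow (0x10000, 16) is TRUE, while
   unsigned_overflow (0xffff, 16) is FALSE.  */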
7660
7661 static bfd_boolean
7662 unsigned_overflow (valueT value, unsigned bits)
7663 {
7664 valueT lim;
7665 if (bits >= sizeof (valueT) * 8)
7666 return FALSE;
7667 lim = (valueT) 1 << bits;
7668 return (value >= lim);
7669 }
7670
7671
7672 /* Return non-zero if the indicated VALUE has overflowed the maximum
7673 range expressible by a signed number with the indicated number of
7674 BITS. */
7675
7676 static bfd_boolean
7677 signed_overflow (offsetT value, unsigned bits)
7678 {
7679 offsetT lim;
7680 if (bits >= sizeof (offsetT) * 8)
7681 return FALSE;
7682 lim = (offsetT) 1 << (bits - 1);
7683 return (value < -lim || value >= lim);
7684 }
7685
7686 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7687 unsigned immediate offset load/store instruction, try to encode it as
7688 an unscaled, 9-bit, signed immediate offset load/store instruction.
7689 Return TRUE if it is successful; otherwise return FALSE.
7690
7691 As a programmer-friendly feature, LDUR/STUR instructions can be generated
7692 in response to the standard LDR/STR mnemonics when the immediate offset is
7693 unambiguous, i.e. when it is negative or unaligned. */
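/* Illustrative (assumed syntax): "ldr x0, [x1, #-8]" (negative offset) and
   "ldr w0, [x1, #1]" (unaligned offset) cannot use the scaled
   unsigned-offset form, so they are re-encoded here as LDUR.  */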
7694
7695 static bfd_boolean
7696 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
7697 {
7698 int idx;
7699 enum aarch64_op new_op;
7700 const aarch64_opcode *new_opcode;
7701
7702 gas_assert (instr->opcode->iclass == ldst_pos);
7703
7704 switch (instr->opcode->op)
7705 {
7706 case OP_LDRB_POS:new_op = OP_LDURB; break;
7707 case OP_STRB_POS: new_op = OP_STURB; break;
7708 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
7709 case OP_LDRH_POS: new_op = OP_LDURH; break;
7710 case OP_STRH_POS: new_op = OP_STURH; break;
7711 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
7712 case OP_LDR_POS: new_op = OP_LDUR; break;
7713 case OP_STR_POS: new_op = OP_STUR; break;
7714 case OP_LDRF_POS: new_op = OP_LDURV; break;
7715 case OP_STRF_POS: new_op = OP_STURV; break;
7716 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
7717 case OP_PRFM_POS: new_op = OP_PRFUM; break;
7718 default: new_op = OP_NIL; break;
7719 }
7720
7721 if (new_op == OP_NIL)
7722 return FALSE;
7723
7724 new_opcode = aarch64_get_opcode (new_op);
7725 gas_assert (new_opcode != NULL);
7726
7727 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
7728 instr->opcode->op, new_opcode->op);
7729
7730 aarch64_replace_opcode (instr, new_opcode);
7731
7732 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
7733 qualifier matching may fail because the out-of-date qualifier will
7734 prevent the operand from being updated with a new and correct qualifier. */
7735 idx = aarch64_operand_index (instr->opcode->operands,
7736 AARCH64_OPND_ADDR_SIMM9);
7737 gas_assert (idx == 1);
7738 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
7739
7740 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
7741
7742 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
7743 insn_sequence))
7744 return FALSE;
7745
7746 return TRUE;
7747 }
7748
7749 /* Called by fix_insn to fix a MOV immediate alias instruction.
7750
7751 Operand for a generic move immediate instruction, which is an alias
7752 instruction that generates a single MOVZ, MOVN or ORR instruction to load
7753 a 32-bit/64-bit immediate value into a general register. An assembler error
7754 shall result if the immediate cannot be created by a single one of these
7755 instructions. If there is a choice, then to ensure reversibility an
7756 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
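/* Illustrative examples (assumed syntax, not from the original source):
   "mov x0, #0x20000" resolves to MOVZ, "mov x0, #-65536" to MOVN, and
   "mov x0, #0x5555555555555555" to the ORR bitmask form.  */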
7757
7758 static void
7759 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
7760 {
7761 const aarch64_opcode *opcode;
7762
7763 /* Need to check if the destination is SP/ZR. The check has to be done
7764 before any aarch64_replace_opcode. */
7765 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
7766 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
7767
7768 instr->operands[1].imm.value = value;
7769 instr->operands[1].skip = 0;
7770
7771 if (try_mov_wide_p)
7772 {
7773 /* Try the MOVZ alias. */
7774 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
7775 aarch64_replace_opcode (instr, opcode);
7776 if (aarch64_opcode_encode (instr->opcode, instr,
7777 &instr->value, NULL, NULL, insn_sequence))
7778 {
7779 put_aarch64_insn (buf, instr->value);
7780 return;
7781 }
7782 /* Try the MOVN alias. */
7783 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
7784 aarch64_replace_opcode (instr, opcode);
7785 if (aarch64_opcode_encode (instr->opcode, instr,
7786 &instr->value, NULL, NULL, insn_sequence))
7787 {
7788 put_aarch64_insn (buf, instr->value);
7789 return;
7790 }
7791 }
7792
7793 if (try_mov_bitmask_p)
7794 {
7795 /* Try the ORR alias. */
7796 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
7797 aarch64_replace_opcode (instr, opcode);
7798 if (aarch64_opcode_encode (instr->opcode, instr,
7799 &instr->value, NULL, NULL, insn_sequence))
7800 {
7801 put_aarch64_insn (buf, instr->value);
7802 return;
7803 }
7804 }
7805
7806 as_bad_where (fixP->fx_file, fixP->fx_line,
7807 _("immediate cannot be moved by a single instruction"));
7808 }
7809
7810 /* An immediate-related instruction operand may use a symbol in the
7811 assembly, e.g.
7812
7813 mov w0, u32
7814 .set u32, 0x00ffff00
7815
7816 At the time when the assembly instruction is parsed, a referenced symbol,
7817 like 'u32' in the above example may not have been seen; a fixS is created
7818 in such a case and is handled here after symbols have been resolved.
7819 The instruction is fixed up with VALUE using the information in *FIXP plus
7820 extra information in FLAGS.
7821
7822 This function is called by md_apply_fix to fix up instructions that need
7823 a fix-up as described above but do not involve any linker-time relocation. */
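/* Continuing the example above (illustrative value): once u32 resolves to
   0x00ffff00, the set bits span two 16-bit halves, so neither the MOVZ nor
   the MOVN form applies and the value is encoded via the ORR (bitmask
   immediate) alias in fix_mov_imm_insn.  */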
7824
7825 static void
7826 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
7827 {
7828 int idx;
7829 uint32_t insn;
7830 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7831 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
7832 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
7833
7834 if (new_inst)
7835 {
7836 /* Now the instruction is about to be fixed-up, so the operand that
7837 was previously marked as 'ignored' needs to be unmarked in order
7838 to get the encoding done properly. */
7839 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7840 new_inst->operands[idx].skip = 0;
7841 }
7842
7843 gas_assert (opnd != AARCH64_OPND_NIL);
7844
7845 switch (opnd)
7846 {
7847 case AARCH64_OPND_EXCEPTION:
7848 case AARCH64_OPND_UNDEFINED:
7849 if (unsigned_overflow (value, 16))
7850 as_bad_where (fixP->fx_file, fixP->fx_line,
7851 _("immediate out of range"));
7852 insn = get_aarch64_insn (buf);
7853 insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
7854 put_aarch64_insn (buf, insn);
7855 break;
7856
7857 case AARCH64_OPND_AIMM:
7858 /* ADD or SUB with immediate.
7859 NOTE this assumes we come here with an add/sub shifted reg encoding
7860 3 322|2222|2 2 2 21111 111111
7861 1 098|7654|3 2 1 09876 543210 98765 43210
7862 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
7863 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
7864 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
7865 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
7866 ->
7867 3 322|2222|2 2 221111111111
7868 1 098|7654|3 2 109876543210 98765 43210
7869 11000000 sf 001|0001|shift imm12 Rn Rd ADD
7870 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
7871 51000000 sf 101|0001|shift imm12 Rn Rd SUB
7872 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
7873 Fields sf Rn Rd are already set. */
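/* Worked examples with illustrative values: a resolved VALUE of -1 flips
   ADD to SUB (or vice versa) and becomes #1; a VALUE of 0x5000, which does
   not fit in 12 bits but has its low 12 bits clear, is encoded as imm12 = 5
   with the LSL #12 shift flag, provided the user wrote no explicit shift.  */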
7874 insn = get_aarch64_insn (buf);
7875 if (value < 0)
7876 {
7877 /* Add <-> sub. */
7878 insn = reencode_addsub_switch_add_sub (insn);
7879 value = -value;
7880 }
7881
7882 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
7883 && unsigned_overflow (value, 12))
7884 {
7885 /* Try to shift the value by 12 to make it fit. */
7886 if (((value >> 12) << 12) == value
7887 && ! unsigned_overflow (value, 12 + 12))
7888 {
7889 value >>= 12;
7890 insn |= encode_addsub_imm_shift_amount (1);
7891 }
7892 }
7893
7894 if (unsigned_overflow (value, 12))
7895 as_bad_where (fixP->fx_file, fixP->fx_line,
7896 _("immediate out of range"));
7897
7898 insn |= encode_addsub_imm (value);
7899
7900 put_aarch64_insn (buf, insn);
7901 break;
7902
7903 case AARCH64_OPND_SIMD_IMM:
7904 case AARCH64_OPND_SIMD_IMM_SFT:
7905 case AARCH64_OPND_LIMM:
7906 /* Bit mask immediate. */
7907 gas_assert (new_inst != NULL);
7908 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7909 new_inst->operands[idx].imm.value = value;
7910 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7911 &new_inst->value, NULL, NULL, insn_sequence))
7912 put_aarch64_insn (buf, new_inst->value);
7913 else
7914 as_bad_where (fixP->fx_file, fixP->fx_line,
7915 _("invalid immediate"));
7916 break;
7917
7918 case AARCH64_OPND_HALF:
7919 /* 16-bit unsigned immediate. */
7920 if (unsigned_overflow (value, 16))
7921 as_bad_where (fixP->fx_file, fixP->fx_line,
7922 _("immediate out of range"));
7923 insn = get_aarch64_insn (buf);
7924 insn |= encode_movw_imm (value & 0xffff);
7925 put_aarch64_insn (buf, insn);
7926 break;
7927
7928 case AARCH64_OPND_IMM_MOV:
7929 /* Operand for a generic move immediate instruction, which is
7930 an alias instruction that generates a single MOVZ, MOVN or ORR
7931 instruction to load a 32-bit/64-bit immediate value into a general
7932 register. An assembler error shall result if the immediate cannot be
7933 created by a single one of these instructions. If there is a choice,
7934 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
7935 and MOVZ or MOVN to ORR. */
7936 gas_assert (new_inst != NULL);
7937 fix_mov_imm_insn (fixP, buf, new_inst, value);
7938 break;
7939
7940 case AARCH64_OPND_ADDR_SIMM7:
7941 case AARCH64_OPND_ADDR_SIMM9:
7942 case AARCH64_OPND_ADDR_SIMM9_2:
7943 case AARCH64_OPND_ADDR_SIMM10:
7944 case AARCH64_OPND_ADDR_UIMM12:
7945 case AARCH64_OPND_ADDR_SIMM11:
7946 case AARCH64_OPND_ADDR_SIMM13:
7947 /* Immediate offset in an address. */
7948 insn = get_aarch64_insn (buf);
7949
7950 gas_assert (new_inst != NULL && new_inst->value == insn);
7951 gas_assert (new_inst->opcode->operands[1] == opnd
7952 || new_inst->opcode->operands[2] == opnd);
7953
7954 /* Get the index of the address operand. */
7955 if (new_inst->opcode->operands[1] == opnd)
7956 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
7957 idx = 1;
7958 else
7959 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
7960 idx = 2;
7961
7962 /* Update the resolved offset value. */
7963 new_inst->operands[idx].addr.offset.imm = value;
7964
7965 /* Encode/fix-up. */
7966 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7967 &new_inst->value, NULL, NULL, insn_sequence))
7968 {
7969 put_aarch64_insn (buf, new_inst->value);
7970 break;
7971 }
7972 else if (new_inst->opcode->iclass == ldst_pos
7973 && try_to_encode_as_unscaled_ldst (new_inst))
7974 {
7975 put_aarch64_insn (buf, new_inst->value);
7976 break;
7977 }
7978
7979 as_bad_where (fixP->fx_file, fixP->fx_line,
7980 _("immediate offset out of range"));
7981 break;
7982
7983 default:
7984 gas_assert (0);
7985 as_fatal (_("unhandled operand code %d"), opnd);
7986 }
7987 }
7988
7989 /* Apply a fixup (fixP) to segment data, once it has been determined
7990 by our caller that we have all the info we need to fix it up.
7991
7992 Parameter valP is the pointer to the value of the bits. */
7993
7994 void
7995 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7996 {
7997 offsetT value = *valP;
7998 uint32_t insn;
7999 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
8000 int scale;
8001 unsigned flags = fixP->fx_addnumber;
8002
8003 DEBUG_TRACE ("\n\n");
8004 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
8005 DEBUG_TRACE ("Enter md_apply_fix");
8006
8007 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
8008
8009 /* Note whether this will delete the relocation. */
8010
8011 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
8012 fixP->fx_done = 1;
8013
8014 /* Process the relocations. */
8015 switch (fixP->fx_r_type)
8016 {
8017 case BFD_RELOC_NONE:
8018 /* This will need to go in the object file. */
8019 fixP->fx_done = 0;
8020 break;
8021
8022 case BFD_RELOC_8:
8023 case BFD_RELOC_8_PCREL:
8024 if (fixP->fx_done || !seg->use_rela_p)
8025 md_number_to_chars (buf, value, 1);
8026 break;
8027
8028 case BFD_RELOC_16:
8029 case BFD_RELOC_16_PCREL:
8030 if (fixP->fx_done || !seg->use_rela_p)
8031 md_number_to_chars (buf, value, 2);
8032 break;
8033
8034 case BFD_RELOC_32:
8035 case BFD_RELOC_32_PCREL:
8036 if (fixP->fx_done || !seg->use_rela_p)
8037 md_number_to_chars (buf, value, 4);
8038 break;
8039
8040 case BFD_RELOC_64:
8041 case BFD_RELOC_64_PCREL:
8042 if (fixP->fx_done || !seg->use_rela_p)
8043 md_number_to_chars (buf, value, 8);
8044 break;
8045
8046 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
8047 /* We claim that these fixups have been processed here, even if
8048 in fact we generate an error because we do not have a reloc
8049 for them, so tc_gen_reloc() will reject them. */
8050 fixP->fx_done = 1;
8051 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
8052 {
8053 as_bad_where (fixP->fx_file, fixP->fx_line,
8054 _("undefined symbol %s used as an immediate value"),
8055 S_GET_NAME (fixP->fx_addsy));
8056 goto apply_fix_return;
8057 }
8058 fix_insn (fixP, flags, value);
8059 break;
8060
8061 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
8062 if (fixP->fx_done || !seg->use_rela_p)
8063 {
8064 if (value & 3)
8065 as_bad_where (fixP->fx_file, fixP->fx_line,
8066 _("pc-relative load offset not word aligned"));
8067 if (signed_overflow (value, 21))
8068 as_bad_where (fixP->fx_file, fixP->fx_line,
8069 _("pc-relative load offset out of range"));
8070 insn = get_aarch64_insn (buf);
8071 insn |= encode_ld_lit_ofs_19 (value >> 2);
8072 put_aarch64_insn (buf, insn);
8073 }
8074 break;
8075
8076 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
8077 if (fixP->fx_done || !seg->use_rela_p)
8078 {
8079 if (signed_overflow (value, 21))
8080 as_bad_where (fixP->fx_file, fixP->fx_line,
8081 _("pc-relative address offset out of range"));
8082 insn = get_aarch64_insn (buf);
8083 insn |= encode_adr_imm (value);
8084 put_aarch64_insn (buf, insn);
8085 }
8086 break;
8087
8088 case BFD_RELOC_AARCH64_BRANCH19:
8089 if (fixP->fx_done || !seg->use_rela_p)
8090 {
8091 if (value & 3)
8092 as_bad_where (fixP->fx_file, fixP->fx_line,
8093 _("conditional branch target not word aligned"));
8094 if (signed_overflow (value, 21))
8095 as_bad_where (fixP->fx_file, fixP->fx_line,
8096 _("conditional branch out of range"));
8097 insn = get_aarch64_insn (buf);
8098 insn |= encode_cond_branch_ofs_19 (value >> 2);
8099 put_aarch64_insn (buf, insn);
8100 }
8101 break;
8102
8103 case BFD_RELOC_AARCH64_TSTBR14:
8104 if (fixP->fx_done || !seg->use_rela_p)
8105 {
8106 if (value & 3)
8107 as_bad_where (fixP->fx_file, fixP->fx_line,
8108 _("conditional branch target not word aligned"));
8109 if (signed_overflow (value, 16))
8110 as_bad_where (fixP->fx_file, fixP->fx_line,
8111 _("conditional branch out of range"));
8112 insn = get_aarch64_insn (buf);
8113 insn |= encode_tst_branch_ofs_14 (value >> 2);
8114 put_aarch64_insn (buf, insn);
8115 }
8116 break;
8117
8118 case BFD_RELOC_AARCH64_CALL26:
8119 case BFD_RELOC_AARCH64_JUMP26:
8120 if (fixP->fx_done || !seg->use_rela_p)
8121 {
8122 if (value & 3)
8123 as_bad_where (fixP->fx_file, fixP->fx_line,
8124 _("branch target not word aligned"));
8125 if (signed_overflow (value, 28))
8126 as_bad_where (fixP->fx_file, fixP->fx_line,
8127 _("branch out of range"));
8128 insn = get_aarch64_insn (buf);
8129 insn |= encode_branch_ofs_26 (value >> 2);
8130 put_aarch64_insn (buf, insn);
8131 }
8132 break;
8133
8134 case BFD_RELOC_AARCH64_MOVW_G0:
8135 case BFD_RELOC_AARCH64_MOVW_G0_NC:
8136 case BFD_RELOC_AARCH64_MOVW_G0_S:
8137 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
8138 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8139 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
8140 scale = 0;
8141 goto movw_common;
8142 case BFD_RELOC_AARCH64_MOVW_G1:
8143 case BFD_RELOC_AARCH64_MOVW_G1_NC:
8144 case BFD_RELOC_AARCH64_MOVW_G1_S:
8145 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8146 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8147 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
8148 scale = 16;
8149 goto movw_common;
8150 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8151 scale = 0;
8152 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8153 /* Should always be exported to object file, see
8154 aarch64_force_relocation(). */
8155 gas_assert (!fixP->fx_done);
8156 gas_assert (seg->use_rela_p);
8157 goto movw_common;
8158 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8159 scale = 16;
8160 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8161 /* Should always be exported to object file, see
8162 aarch64_force_relocation(). */
8163 gas_assert (!fixP->fx_done);
8164 gas_assert (seg->use_rela_p);
8165 goto movw_common;
8166 case BFD_RELOC_AARCH64_MOVW_G2:
8167 case BFD_RELOC_AARCH64_MOVW_G2_NC:
8168 case BFD_RELOC_AARCH64_MOVW_G2_S:
8169 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8170 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
8171 scale = 32;
8172 goto movw_common;
8173 case BFD_RELOC_AARCH64_MOVW_G3:
8174 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
8175 scale = 48;
8176 movw_common:
8177 if (fixP->fx_done || !seg->use_rela_p)
8178 {
8179 insn = get_aarch64_insn (buf);
8180
8181 if (!fixP->fx_done)
8182 {
8183 /* REL signed addend must fit in 16 bits */
8184 if (signed_overflow (value, 16))
8185 as_bad_where (fixP->fx_file, fixP->fx_line,
8186 _("offset out of range"));
8187 }
8188 else
8189 {
8190 /* Check for overflow and scale. */
8191 switch (fixP->fx_r_type)
8192 {
8193 case BFD_RELOC_AARCH64_MOVW_G0:
8194 case BFD_RELOC_AARCH64_MOVW_G1:
8195 case BFD_RELOC_AARCH64_MOVW_G2:
8196 case BFD_RELOC_AARCH64_MOVW_G3:
8197 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8198 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8199 if (unsigned_overflow (value, scale + 16))
8200 as_bad_where (fixP->fx_file, fixP->fx_line,
8201 _("unsigned value out of range"));
8202 break;
8203 case BFD_RELOC_AARCH64_MOVW_G0_S:
8204 case BFD_RELOC_AARCH64_MOVW_G1_S:
8205 case BFD_RELOC_AARCH64_MOVW_G2_S:
8206 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8207 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8208 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8209 /* NOTE: We can only come here with movz or movn. */
8210 if (signed_overflow (value, scale + 16))
8211 as_bad_where (fixP->fx_file, fixP->fx_line,
8212 _("signed value out of range"));
8213 if (value < 0)
8214 {
8215 /* Force use of MOVN. */
8216 value = ~value;
8217 insn = reencode_movzn_to_movn (insn);
8218 }
8219 else
8220 {
8221 /* Force use of MOVZ. */
8222 insn = reencode_movzn_to_movz (insn);
8223 }
8224 break;
8225 default:
8226 /* Unchecked relocations. */
8227 break;
8228 }
8229 value >>= scale;
8230 }
8231
8232 /* Insert value into MOVN/MOVZ/MOVK instruction. */
8233 insn |= encode_movw_imm (value & 0xffff);
8234
8235 put_aarch64_insn (buf, insn);
8236 }
8237 break;
8238
8239 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8240 fixP->fx_r_type = (ilp32_p
8241 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
8242 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
8243 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8244 /* Should always be exported to object file, see
8245 aarch64_force_relocation(). */
8246 gas_assert (!fixP->fx_done);
8247 gas_assert (seg->use_rela_p);
8248 break;
8249
8250 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8251 fixP->fx_r_type = (ilp32_p
8252 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
8253 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
8254 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8255 /* Should always be exported to object file, see
8256 aarch64_force_relocation(). */
8257 gas_assert (!fixP->fx_done);
8258 gas_assert (seg->use_rela_p);
8259 break;
8260
8261 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8262 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8263 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8264 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8265 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8266 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8267 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8268 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8269 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8270 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8271 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8272 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8273 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8274 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8275 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8276 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8277 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8278 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8279 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8280 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8281 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8282 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8283 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8284 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8285 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8286 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8287 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8288 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8289 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8290 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8291 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8292 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8293 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8294 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8295 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8296 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8297 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8298 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8299 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8300 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8301 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8302 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8303 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8304 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8305 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8306 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8307 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8308 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8309 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8310 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8311 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8312 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8313 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8314 /* Should always be exported to object file, see
8315 aarch64_force_relocation(). */
8316 gas_assert (!fixP->fx_done);
8317 gas_assert (seg->use_rela_p);
8318 break;
8319
8320 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8321 /* Should always be exported to object file, see
8322 aarch64_force_relocation(). */
8323 fixP->fx_r_type = (ilp32_p
8324 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
8325 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
8326 gas_assert (!fixP->fx_done);
8327 gas_assert (seg->use_rela_p);
8328 break;
8329
8330 case BFD_RELOC_AARCH64_ADD_LO12:
8331 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8332 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8333 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8334 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8335 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8336 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8337 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8338 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8339 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8340 case BFD_RELOC_AARCH64_LDST128_LO12:
8341 case BFD_RELOC_AARCH64_LDST16_LO12:
8342 case BFD_RELOC_AARCH64_LDST32_LO12:
8343 case BFD_RELOC_AARCH64_LDST64_LO12:
8344 case BFD_RELOC_AARCH64_LDST8_LO12:
8345 /* Should always be exported to object file, see
8346 aarch64_force_relocation(). */
8347 gas_assert (!fixP->fx_done);
8348 gas_assert (seg->use_rela_p);
8349 break;
8350
8351 case BFD_RELOC_AARCH64_TLSDESC_ADD:
8352 case BFD_RELOC_AARCH64_TLSDESC_CALL:
8353 case BFD_RELOC_AARCH64_TLSDESC_LDR:
8354 break;
8355
8356 case BFD_RELOC_UNUSED:
8357 /* An error will already have been reported. */
8358 break;
8359
8360 default:
8361 as_bad_where (fixP->fx_file, fixP->fx_line,
8362 _("unexpected %s fixup"),
8363 bfd_get_reloc_code_name (fixP->fx_r_type));
8364 break;
8365 }
8366
8367 apply_fix_return:
8368 /* Free the allocated struct aarch64_inst.
8369 N.B. currently only a very limited number of fix-up types actually use
8370 this field, so the impact on performance should be minimal. */
8371 free (fixP->tc_fix_data.inst);
8372
8373 return;
8374 }
8375
8376 /* Translate internal representation of relocation info to BFD target
8377 format. */
8378
8379 arelent *
8380 tc_gen_reloc (asection * section, fixS * fixp)
8381 {
8382 arelent *reloc;
8383 bfd_reloc_code_real_type code;
8384
8385 reloc = XNEW (arelent);
8386
8387 reloc->sym_ptr_ptr = XNEW (asymbol *);
8388 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8389 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8390
8391 if (fixp->fx_pcrel)
8392 {
8393 if (section->use_rela_p)
8394 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8395 else
8396 fixp->fx_offset = reloc->address;
8397 }
8398 reloc->addend = fixp->fx_offset;
8399
8400 code = fixp->fx_r_type;
8401 switch (code)
8402 {
8403 case BFD_RELOC_16:
8404 if (fixp->fx_pcrel)
8405 code = BFD_RELOC_16_PCREL;
8406 break;
8407
8408 case BFD_RELOC_32:
8409 if (fixp->fx_pcrel)
8410 code = BFD_RELOC_32_PCREL;
8411 break;
8412
8413 case BFD_RELOC_64:
8414 if (fixp->fx_pcrel)
8415 code = BFD_RELOC_64_PCREL;
8416 break;
8417
8418 default:
8419 break;
8420 }
8421
8422 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8423 if (reloc->howto == NULL)
8424 {
8425 as_bad_where (fixp->fx_file, fixp->fx_line,
8426 _
8427 ("cannot represent %s relocation in this object file format"),
8428 bfd_get_reloc_code_name (code));
8429 return NULL;
8430 }
8431
8432 return reloc;
8433 }
8434
8435 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8436
8437 void
8438 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8439 {
8440 bfd_reloc_code_real_type type;
8441 int pcrel = 0;
8442
8443 /* Pick a reloc.
8444 FIXME: @@ Should look at CPU word size. */
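/* For example, a symbolic ".word expr" arrives here with SIZE == 4 and is
   given BFD_RELOC_32, while ".xword expr" arrives with SIZE == 8 and is
   given BFD_RELOC_64 (illustrative directives; any expression that cons
   cannot resolve immediately takes this path).  */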
8445 switch (size)
8446 {
8447 case 1:
8448 type = BFD_RELOC_8;
8449 break;
8450 case 2:
8451 type = BFD_RELOC_16;
8452 break;
8453 case 4:
8454 type = BFD_RELOC_32;
8455 break;
8456 case 8:
8457 type = BFD_RELOC_64;
8458 break;
8459 default:
8460 as_bad (_("cannot do %u-byte relocation"), size);
8461 type = BFD_RELOC_UNUSED;
8462 break;
8463 }
8464
8465 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8466 }
8467
8468 int
8469 aarch64_force_relocation (struct fix *fixp)
8470 {
8471 switch (fixp->fx_r_type)
8472 {
8473 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
8474 /* Perform these "immediate" internal relocations
8475 even if the symbol is extern or weak. */
8476 return 0;
8477
8478 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8479 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8480 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8481 /* Pseudo relocs that need to be fixed up according to
8482 ilp32_p. */
8483 return 0;
8484
8485 case BFD_RELOC_AARCH64_ADD_LO12:
8486 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8487 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8488 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8489 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8490 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8491 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8492 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8493 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8494 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8495 case BFD_RELOC_AARCH64_LDST128_LO12:
8496 case BFD_RELOC_AARCH64_LDST16_LO12:
8497 case BFD_RELOC_AARCH64_LDST32_LO12:
8498 case BFD_RELOC_AARCH64_LDST64_LO12:
8499 case BFD_RELOC_AARCH64_LDST8_LO12:
8500 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8501 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8502 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8503 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8504 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8505 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8506 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8507 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8508 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8509 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8510 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8511 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8512 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8513 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8514 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8515 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8516 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8517 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8518 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8519 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8520 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8521 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8522 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8523 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8524 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8525 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8526 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8527 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8528 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8529 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8530 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8531 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8532 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8533 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8534 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8535 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8536 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8537 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8538 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8539 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8540 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8541 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8542 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8543 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8544 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8545 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8546 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8547 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8548 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8549 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8550 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8551 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8552 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8553 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8554 /* Always leave these relocations for the linker. */
8555 return 1;
8556
8557 default:
8558 break;
8559 }
8560
8561 return generic_force_reloc (fixp);
8562 }
8563
8564 #ifdef OBJ_ELF
8565
8566 /* Implement md_after_parse_args. This is the earliest time we need to decide
8567 the ABI. If no -mabi is specified, the ABI is decided by the target triplet. */
8568
8569 void
8570 aarch64_after_parse_args (void)
8571 {
8572 if (aarch64_abi != AARCH64_ABI_NONE)
8573 return;
8574
8575 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
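/* For example, a DEFAULT_ARCH of "aarch64:32" (the 7 characters of
   "aarch64" followed by ":32") selects ILP32 here, while a plain "aarch64"
   falls through to LP64.  */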
8576 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8577 aarch64_abi = AARCH64_ABI_ILP32;
8578 else
8579 aarch64_abi = AARCH64_ABI_LP64;
8580 }
8581
8582 const char *
8583 elf64_aarch64_target_format (void)
8584 {
8585 #ifdef TE_CLOUDABI
8586 /* FIXME: What to do for ilp32_p ? */
8587 if (target_big_endian)
8588 return "elf64-bigaarch64-cloudabi";
8589 else
8590 return "elf64-littleaarch64-cloudabi";
8591 #else
8592 if (target_big_endian)
8593 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8594 else
8595 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8596 #endif
8597 }
8598
8599 void
8600 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
8601 {
8602 elf_frob_symbol (symp, puntp);
8603 }
8604 #endif
8605
8606 /* MD interface: Finalization. */
8607
8608 /* A good place to do this, although this was probably not intended
8609 for this kind of use. We need to dump the literal pool before
8610 references are made to a null symbol pointer. */
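/* For instance (illustrative input), "ldr x0, =0x1122334455" queues a
   literal pool entry; any pool that has not been flushed by an explicit
   .ltorg by the end of assembly is emitted here, at the end of its
   section.  */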
8611
8612 void
8613 aarch64_cleanup (void)
8614 {
8615 literal_pool *pool;
8616
8617 for (pool = list_of_pools; pool; pool = pool->next)
8618 {
8619 /* Put it at the end of the relevant section. */
8620 subseg_set (pool->section, pool->sub_section);
8621 s_ltorg (0);
8622 }
8623 }
8624
8625 #ifdef OBJ_ELF
8626 /* Remove any excess mapping symbols generated for alignment frags in
8627 SEC. We may have created a mapping symbol before a zero byte
8628 alignment; remove it if there's a mapping symbol after the
8629 alignment. */
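/* A sketch of the situation handled here (assumed example): an alignment
   that turns out to need zero bytes of padding may have left a "$d" or "$x"
   mapping symbol at the end of a frag; if the following frag begins with
   its own mapping symbol, the earlier one is redundant and is removed.  */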
8630 static void
8631 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
8632 void *dummy ATTRIBUTE_UNUSED)
8633 {
8634 segment_info_type *seginfo = seg_info (sec);
8635 fragS *fragp;
8636
8637 if (seginfo == NULL || seginfo->frchainP == NULL)
8638 return;
8639
8640 for (fragp = seginfo->frchainP->frch_root;
8641 fragp != NULL; fragp = fragp->fr_next)
8642 {
8643 symbolS *sym = fragp->tc_frag_data.last_map;
8644 fragS *next = fragp->fr_next;
8645
8646 /* Variable-sized frags have been converted to fixed size by
8647 this point. But if this was variable-sized to start with,
8648 there will be a fixed-size frag after it. So don't handle
8649 next == NULL. */
8650 if (sym == NULL || next == NULL)
8651 continue;
8652
8653 if (S_GET_VALUE (sym) < next->fr_address)
8654 /* Not at the end of this frag. */
8655 continue;
8656 know (S_GET_VALUE (sym) == next->fr_address);
8657
8658 do
8659 {
8660 if (next->tc_frag_data.first_map != NULL)
8661 {
8662 /* Next frag starts with a mapping symbol. Discard this
8663 one. */
8664 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
8665 break;
8666 }
8667
8668 if (next->fr_next == NULL)
8669 {
8670 /* This mapping symbol is at the end of the section. Discard
8671 it. */
8672 know (next->fr_fix == 0 && next->fr_var == 0);
8673 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
8674 break;
8675 }
8676
8677 /* As long as we have empty frags without any mapping symbols,
8678 keep looking. */
8679 /* If the next frag is non-empty and does not start with a
8680 mapping symbol, then this mapping symbol is required. */
8681 if (next->fr_address != next->fr_next->fr_address)
8682 break;
8683
8684 next = next->fr_next;
8685 }
8686 while (next != NULL);
8687 }
8688 }
8689 #endif
8690
8691 /* Adjust the symbol table. */
8692
8693 void
8694 aarch64_adjust_symtab (void)
8695 {
8696 #ifdef OBJ_ELF
8697 /* Remove any overlapping mapping symbols generated by alignment frags. */
8698 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
8699 /* Now do generic ELF adjustments. */
8700 elf_adjust_symtab ();
8701 #endif
8702 }
8703
8704 static void
8705 checked_hash_insert (htab_t table, const char *key, void *value)
8706 {
8707 str_hash_insert (table, key, value, 0);
8708 }
8709
8710 static void
8711 sysreg_hash_insert (htab_t table, const char *key, void *value)
8712 {
8713 gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
8714 checked_hash_insert (table, key, value);
8715 }
8716
8717 static void
8718 fill_instruction_hash_table (void)
8719 {
8720 aarch64_opcode *opcode = aarch64_opcode_table;
8721
8722 while (opcode->name != NULL)
8723 {
8724 templates *templ, *new_templ;
8725 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
8726
8727 new_templ = XNEW (templates);
8728 new_templ->opcode = opcode;
8729 new_templ->next = NULL;
8730
8731 if (!templ)
8732 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8733 else
8734 {
8735 new_templ->next = templ->next;
8736 templ->next = new_templ;
8737 }
8738 ++opcode;
8739 }
8740 }
8741
8742 static inline void
8743 convert_to_upper (char *dst, const char *src, size_t num)
8744 {
8745 unsigned int i;
8746 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8747 *dst = TOUPPER (*src);
8748 *dst = '\0';
8749 }
8750
8751 /* Assume STR points to a lower-case string; allocate, convert and return
8752 the corresponding upper-case string. */
8753 static inline const char*
8754 get_upper_str (const char *str)
8755 {
8756 char *ret;
8757 size_t len = strlen (str);
8758 ret = XNEWVEC (char, len + 1);
8759 convert_to_upper (ret, str, len);
8760 return ret;
8761 }
8762
8763 /* MD interface: Initialization. */
8764
8765 void
8766 md_begin (void)
8767 {
8768 unsigned mach;
8769 unsigned int i;
8770
8771 aarch64_ops_hsh = str_htab_create ();
8772 aarch64_cond_hsh = str_htab_create ();
8773 aarch64_shift_hsh = str_htab_create ();
8774 aarch64_sys_regs_hsh = str_htab_create ();
8775 aarch64_pstatefield_hsh = str_htab_create ();
8776 aarch64_sys_regs_ic_hsh = str_htab_create ();
8777 aarch64_sys_regs_dc_hsh = str_htab_create ();
8778 aarch64_sys_regs_at_hsh = str_htab_create ();
8779 aarch64_sys_regs_tlbi_hsh = str_htab_create ();
8780 aarch64_sys_regs_sr_hsh = str_htab_create ();
8781 aarch64_reg_hsh = str_htab_create ();
8782 aarch64_barrier_opt_hsh = str_htab_create ();
8783 aarch64_nzcv_hsh = str_htab_create ();
8784 aarch64_pldop_hsh = str_htab_create ();
8785 aarch64_hint_opt_hsh = str_htab_create ();
8786
8787 fill_instruction_hash_table ();
8788
8789 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8790 sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8791 (void *) (aarch64_sys_regs + i));
8792
8793 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8794 sysreg_hash_insert (aarch64_pstatefield_hsh,
8795 aarch64_pstatefields[i].name,
8796 (void *) (aarch64_pstatefields + i));
8797
8798 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8799 sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
8800 aarch64_sys_regs_ic[i].name,
8801 (void *) (aarch64_sys_regs_ic + i));
8802
8803 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8804 sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
8805 aarch64_sys_regs_dc[i].name,
8806 (void *) (aarch64_sys_regs_dc + i));
8807
8808 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8809 sysreg_hash_insert (aarch64_sys_regs_at_hsh,
8810 aarch64_sys_regs_at[i].name,
8811 (void *) (aarch64_sys_regs_at + i));
8812
8813 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8814 sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
8815 aarch64_sys_regs_tlbi[i].name,
8816 (void *) (aarch64_sys_regs_tlbi + i));
8817
8818 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
8819 sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
8820 aarch64_sys_regs_sr[i].name,
8821 (void *) (aarch64_sys_regs_sr + i));
8822
8823 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8824 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8825 (void *) (reg_names + i));
8826
8827 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8828 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8829 (void *) (nzcv_names + i));
8830
8831 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8832 {
8833 const char *name = aarch64_operand_modifiers[i].name;
8834 checked_hash_insert (aarch64_shift_hsh, name,
8835 (void *) (aarch64_operand_modifiers + i));
8836 /* Also hash the name in the upper case. */
8837 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8838 (void *) (aarch64_operand_modifiers + i));
8839 }
8840
8841 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8842 {
8843 unsigned int j;
8844 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8845 the same condition code. */
8846 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8847 {
8848 const char *name = aarch64_conds[i].names[j];
8849 if (name == NULL)
8850 break;
8851 checked_hash_insert (aarch64_cond_hsh, name,
8852 (void *) (aarch64_conds + i));
8853 /* Also hash the name in the upper case. */
8854 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8855 (void *) (aarch64_conds + i));
8856 }
8857 }
8858
8859 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8860 {
8861 const char *name = aarch64_barrier_options[i].name;
8862 /* Skip xx00 - the unallocated values of option. */
8863 if ((i & 0x3) == 0)
8864 continue;
8865 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8866 (void *) (aarch64_barrier_options + i));
8867 /* Also hash the name in the upper case. */
8868 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8869 (void *) (aarch64_barrier_options + i));
8870 }
8871
8872 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
8873 {
8874 const char *name = aarch64_barrier_dsb_nxs_options[i].name;
8875 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8876 (void *) (aarch64_barrier_dsb_nxs_options + i));
8877 /* Also hash the name in the upper case. */
8878 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8879 (void *) (aarch64_barrier_dsb_nxs_options + i));
8880 }
8881
8882 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8883 {
8884 const char* name = aarch64_prfops[i].name;
8885 /* Skip the unallocated hint encodings. */
8886 if (name == NULL)
8887 continue;
8888 checked_hash_insert (aarch64_pldop_hsh, name,
8889 (void *) (aarch64_prfops + i));
8890 /* Also hash the name in the upper case. */
8891 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8892 (void *) (aarch64_prfops + i));
8893 }
8894
8895 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8896 {
8897 const char* name = aarch64_hint_options[i].name;
8898 const char* upper_name = get_upper_str(name);
8899
8900 checked_hash_insert (aarch64_hint_opt_hsh, name,
8901 (void *) (aarch64_hint_options + i));
8902
8903 /* Also hash the name in the upper case if not the same. */
8904 if (strcmp (name, upper_name) != 0)
8905 checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
8906 (void *) (aarch64_hint_options + i));
8907 }
8908
8909 /* Set the cpu variant based on the command-line options. */
8910 if (!mcpu_cpu_opt)
8911 mcpu_cpu_opt = march_cpu_opt;
8912
8913 if (!mcpu_cpu_opt)
8914 mcpu_cpu_opt = &cpu_default;
8915
8916 cpu_variant = *mcpu_cpu_opt;
8917
8918 /* Record the CPU type. */
8919 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8920
8921 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8922 }
8923
8924 /* Command line processing. */
8925
8926 const char *md_shortopts = "m:";
8927
8928 #ifdef AARCH64_BI_ENDIAN
8929 #define OPTION_EB (OPTION_MD_BASE + 0)
8930 #define OPTION_EL (OPTION_MD_BASE + 1)
8931 #else
8932 #if TARGET_BYTES_BIG_ENDIAN
8933 #define OPTION_EB (OPTION_MD_BASE + 0)
8934 #else
8935 #define OPTION_EL (OPTION_MD_BASE + 1)
8936 #endif
8937 #endif
8938
8939 struct option md_longopts[] = {
8940 #ifdef OPTION_EB
8941 {"EB", no_argument, NULL, OPTION_EB},
8942 #endif
8943 #ifdef OPTION_EL
8944 {"EL", no_argument, NULL, OPTION_EL},
8945 #endif
8946 {NULL, no_argument, NULL, 0}
8947 };
8948
8949 size_t md_longopts_size = sizeof (md_longopts);
8950
8951 struct aarch64_option_table
8952 {
8953 const char *option; /* Option name to match. */
8954 const char *help; /* Help information. */
8955 int *var; /* Variable to change. */
8956 int value; /* What to change it to. */
8957 char *deprecated; /* If non-null, print this message. */
8958 };
8959
8960 static struct aarch64_option_table aarch64_opts[] = {
8961 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
8962 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
8963 NULL},
8964 #ifdef DEBUG_AARCH64
8965 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
8966 #endif /* DEBUG_AARCH64 */
8967 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
8968 NULL},
8969 {"mno-verbose-error", N_("do not output verbose error messages"),
8970 &verbose_error_p, 0, NULL},
8971 {NULL, NULL, NULL, 0, NULL}
8972 };
8973
8974 struct aarch64_cpu_option_table
8975 {
8976 const char *name;
8977 const aarch64_feature_set value;
8978 /* The canonical name of the CPU, or NULL to use NAME converted to upper
8979 case. */
8980 const char *canonical_name;
8981 };
8982
8983 /* This list should, at a minimum, contain all the cpu names
8984 recognized by GCC. */
8985 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
8986 {"all", AARCH64_ANY, NULL},
8987 {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
8988 AARCH64_FEATURE_CRC), "Cortex-A34"},
8989 {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
8990 AARCH64_FEATURE_CRC), "Cortex-A35"},
8991 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
8992 AARCH64_FEATURE_CRC), "Cortex-A53"},
8993 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
8994 AARCH64_FEATURE_CRC), "Cortex-A57"},
8995 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
8996 AARCH64_FEATURE_CRC), "Cortex-A72"},
8997 {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
8998 AARCH64_FEATURE_CRC), "Cortex-A73"},
8999 {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9000 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9001 "Cortex-A55"},
9002 {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9003 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9004 "Cortex-A75"},
9005 {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9006 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
9007 "Cortex-A76"},
9008 {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9009 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9010 | AARCH64_FEATURE_DOTPROD
9011 | AARCH64_FEATURE_SSBS),
9012 "Cortex-A76AE"},
9013 {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9014 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9015 | AARCH64_FEATURE_DOTPROD
9016 | AARCH64_FEATURE_SSBS),
9017 "Cortex-A77"},
9018 {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9019 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9020 | AARCH64_FEATURE_DOTPROD
9021 | AARCH64_FEATURE_SSBS),
9022 "Cortex-A65"},
9023 {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9024 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
9025 | AARCH64_FEATURE_DOTPROD
9026 | AARCH64_FEATURE_SSBS),
9027 "Cortex-A65AE"},
9028 {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9029 AARCH64_FEATURE_F16
9030 | AARCH64_FEATURE_RCPC
9031 | AARCH64_FEATURE_DOTPROD
9032 | AARCH64_FEATURE_SSBS
9033 | AARCH64_FEATURE_PROFILE),
9034 "Cortex-A78"},
9035 {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9036 AARCH64_FEATURE_F16
9037 | AARCH64_FEATURE_RCPC
9038 | AARCH64_FEATURE_DOTPROD
9039 | AARCH64_FEATURE_SSBS
9040 | AARCH64_FEATURE_PROFILE),
9041 "Cortex-A78AE"},
9042 {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9043 AARCH64_FEATURE_DOTPROD
9044 | AARCH64_FEATURE_F16
9045 | AARCH64_FEATURE_FLAGM
9046 | AARCH64_FEATURE_PAC
9047 | AARCH64_FEATURE_PROFILE
9048 | AARCH64_FEATURE_RCPC
9049 | AARCH64_FEATURE_SSBS),
9050 "Cortex-A78C"},
9051 {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9052 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9053 | AARCH64_FEATURE_DOTPROD
9054 | AARCH64_FEATURE_PROFILE),
9055 "Ares"},
9056 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
9057 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
9058 "Samsung Exynos M1"},
9059 {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
9060 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
9061 | AARCH64_FEATURE_RDMA),
9062 "Qualcomm Falkor"},
9063 {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9064 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9065 | AARCH64_FEATURE_DOTPROD
9066 | AARCH64_FEATURE_SSBS),
9067 "Neoverse E1"},
9068 {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9069 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
9070 | AARCH64_FEATURE_DOTPROD
9071 | AARCH64_FEATURE_PROFILE),
9072 "Neoverse N1"},
9073 {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
9074 AARCH64_FEATURE_BFLOAT16
9075 | AARCH64_FEATURE_I8MM
9076 | AARCH64_FEATURE_F16
9077 | AARCH64_FEATURE_SVE
9078 | AARCH64_FEATURE_SVE2
9079 | AARCH64_FEATURE_SVE2_BITPERM
9080 | AARCH64_FEATURE_MEMTAG
9081 | AARCH64_FEATURE_RNG),
9082 "Neoverse N2"},
9083 {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
9084 AARCH64_FEATURE_PROFILE
9085 | AARCH64_FEATURE_CVADP
9086 | AARCH64_FEATURE_SVE
9087 | AARCH64_FEATURE_SSBS
9088 | AARCH64_FEATURE_RNG
9089 | AARCH64_FEATURE_F16
9090 | AARCH64_FEATURE_BFLOAT16
9091 | AARCH64_FEATURE_I8MM), "Neoverse V1"},
9092 {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
9093 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
9094 | AARCH64_FEATURE_RDMA),
9095 "Qualcomm QDF24XX"},
9096 {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
9097 AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
9098 "Qualcomm Saphira"},
9099 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
9100 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
9101 "Cavium ThunderX"},
9102 {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
9103 AARCH64_FEATURE_CRYPTO),
9104 "Broadcom Vulcan"},
9105 /* The 'xgene-1' name is an older spelling of 'xgene1'; it was used
9106 in earlier releases and is retained here only as an alias for
9107 'xgene1'. */
9108 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
9109 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
9110 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
9111 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
9112 {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
9113 {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9114 AARCH64_FEATURE_F16
9115 | AARCH64_FEATURE_RCPC
9116 | AARCH64_FEATURE_DOTPROD
9117 | AARCH64_FEATURE_SSBS
9118 | AARCH64_FEATURE_PROFILE),
9119 "Cortex-X1"},
9120 {"generic", AARCH64_ARCH_V8, NULL},
9121
9122 {NULL, AARCH64_ARCH_NONE, NULL}
9123 };
9124
9125 struct aarch64_arch_option_table
9126 {
9127 const char *name;
9128 const aarch64_feature_set value;
9129 };
9130
9131 /* This list should, at a minimum, contain all the architecture names
9132 recognized by GCC. */
9133 static const struct aarch64_arch_option_table aarch64_archs[] = {
9134 {"all", AARCH64_ANY},
9135 {"armv8-a", AARCH64_ARCH_V8},
9136 {"armv8.1-a", AARCH64_ARCH_V8_1},
9137 {"armv8.2-a", AARCH64_ARCH_V8_2},
9138 {"armv8.3-a", AARCH64_ARCH_V8_3},
9139 {"armv8.4-a", AARCH64_ARCH_V8_4},
9140 {"armv8.5-a", AARCH64_ARCH_V8_5},
9141 {"armv8.6-a", AARCH64_ARCH_V8_6},
9142 {"armv8.7-a", AARCH64_ARCH_V8_7},
9143 {"armv8-r", AARCH64_ARCH_V8_R},
9144 {NULL, AARCH64_ARCH_NONE}
9145 };
9146
9147 /* ISA extensions. */
9148 struct aarch64_option_cpu_value_table
9149 {
9150 const char *name;
9151 const aarch64_feature_set value;
9152 const aarch64_feature_set require; /* Feature dependencies. */
9153 };
9154
9155 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
9156 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
9157 AARCH64_ARCH_NONE},
9158 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
9159 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
9160 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
9161 AARCH64_ARCH_NONE},
9162 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
9163 AARCH64_ARCH_NONE},
9164 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
9165 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
9166 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
9167 AARCH64_ARCH_NONE},
9168 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
9169 AARCH64_ARCH_NONE},
9170 {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
9171 AARCH64_ARCH_NONE},
9172 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
9173 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
9174 {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
9175 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
9176 {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
9177 AARCH64_FEATURE (AARCH64_FEATURE_FP
9178 | AARCH64_FEATURE_F16, 0)},
9179 {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
9180 AARCH64_ARCH_NONE},
9181 {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
9182 AARCH64_FEATURE (AARCH64_FEATURE_F16
9183 | AARCH64_FEATURE_SIMD
9184 | AARCH64_FEATURE_COMPNUM, 0)},
9185 {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
9186 AARCH64_ARCH_NONE},
9187 {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
9188 AARCH64_FEATURE (AARCH64_FEATURE_F16
9189 | AARCH64_FEATURE_SIMD, 0)},
9190 {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
9191 AARCH64_ARCH_NONE},
9192 {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
9193 AARCH64_ARCH_NONE},
9194 {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
9195 AARCH64_ARCH_NONE},
9196 {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
9197 AARCH64_ARCH_NONE},
9198 {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
9199 AARCH64_ARCH_NONE},
9200 {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
9201 AARCH64_ARCH_NONE},
9202 {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
9203 AARCH64_ARCH_NONE},
9204 {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
9205 AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
9206 {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
9207 AARCH64_ARCH_NONE},
9208 {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
9209 AARCH64_ARCH_NONE},
9210 {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
9211 AARCH64_ARCH_NONE},
9212 {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
9213 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9214 {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
9215 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9216 | AARCH64_FEATURE_SM4, 0)},
9217 {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
9218 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9219 | AARCH64_FEATURE_AES, 0)},
9220 {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
9221 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9222 | AARCH64_FEATURE_SHA3, 0)},
9223 {"sve2-bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
9224 AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
9225 {"bf16", AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
9226 AARCH64_ARCH_NONE},
9227 {"i8mm", AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
9228 AARCH64_ARCH_NONE},
9229 {"f32mm", AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
9230 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9231 {"f64mm", AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
9232 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9233 {"csre", AARCH64_FEATURE (AARCH64_FEATURE_CSRE, 0),
9234 AARCH64_ARCH_NONE},
9235 {"ls64", AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
9236 AARCH64_ARCH_NONE},
9237 {"flagm", AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
9238 AARCH64_ARCH_NONE},
9239 {"pauth", AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
9240 AARCH64_ARCH_NONE},
9241 {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
9242 };
9243
9244 struct aarch64_long_option_table
9245 {
9246 const char *option; /* Substring to match. */
9247 const char *help; /* Help information. */
9248 int (*func) (const char *subopt); /* Function to decode sub-option. */
9249 char *deprecated; /* If non-null, print this message. */
9250 };
9251
9252 /* Transitive closure of features depending on set. */
9253 static aarch64_feature_set
9254 aarch64_feature_disable_set (aarch64_feature_set set)
9255 {
9256 const struct aarch64_option_cpu_value_table *opt;
9257 aarch64_feature_set prev = 0;
9258
9259 while (prev != set) {
9260 prev = set;
9261 for (opt = aarch64_features; opt->name != NULL; opt++)
9262 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9263 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9264 }
9265 return set;
9266 }
9267
9268 /* Transitive closure of dependencies of set. */
9269 static aarch64_feature_set
9270 aarch64_feature_enable_set (aarch64_feature_set set)
9271 {
9272 const struct aarch64_option_cpu_value_table *opt;
9273 aarch64_feature_set prev = 0;
9274
9275 while (prev != set) {
9276 prev = set;
9277 for (opt = aarch64_features; opt->name != NULL; opt++)
9278 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9279 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9280 }
9281 return set;
9282 }
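/* For example, based on the dependency table above: enabling "sve"
   transitively enables "fp16", "simd" and "compnum", and hence "fp";
   conversely, disabling "fp" also disables everything that requires it,
   such as "simd", "fp16" and (transitively) "sve".  */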
9283
9284 static int
9285 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9286 bfd_boolean ext_only)
9287 {
9288 /* We insist on extensions being added before being removed. We achieve
9289 this by using the ADDING_VALUE variable to indicate whether we are
9290 adding an extension (1) or removing it (0) and only allowing it to
9291 change in the order -1 -> 1 -> 0. */
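  /* For instance (illustrative extension strings), "+crc+nocrypto" is
     accepted, whereas "+nocrypto+crc" is rejected because an extension is
     being added after one has already been removed.  */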
9292 int adding_value = -1;
9293 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9294
9295 /* Copy the feature set, so that we can modify it. */
9296 *ext_set = **opt_p;
9297 *opt_p = ext_set;
9298
9299 while (str != NULL && *str != 0)
9300 {
9301 const struct aarch64_option_cpu_value_table *opt;
9302 const char *ext = NULL;
9303 int optlen;
9304
9305 if (!ext_only)
9306 {
9307 if (*str != '+')
9308 {
9309 as_bad (_("invalid architectural extension"));
9310 return 0;
9311 }
9312
9313 ext = strchr (++str, '+');
9314 }
9315
9316 if (ext != NULL)
9317 optlen = ext - str;
9318 else
9319 optlen = strlen (str);
9320
9321 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
9322 {
9323 if (adding_value != 0)
9324 adding_value = 0;
9325 optlen -= 2;
9326 str += 2;
9327 }
9328 else if (optlen > 0)
9329 {
9330 if (adding_value == -1)
9331 adding_value = 1;
9332 else if (adding_value != 1)
9333 {
9334 as_bad (_("must specify extensions to add before specifying "
9335 "those to remove"));
9336 return 0;
9337 }
9338 }
9339
9340 if (optlen == 0)
9341 {
9342 as_bad (_("missing architectural extension"));
9343 return 0;
9344 }
9345
9346 gas_assert (adding_value != -1);
9347
9348 for (opt = aarch64_features; opt->name != NULL; opt++)
9349 if (strncmp (opt->name, str, optlen) == 0)
9350 {
9351 aarch64_feature_set set;
9352
9353 /* Add or remove the extension. */
9354 if (adding_value)
9355 {
9356 set = aarch64_feature_enable_set (opt->value);
9357 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9358 }
9359 else
9360 {
9361 set = aarch64_feature_disable_set (opt->value);
9362 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9363 }
9364 break;
9365 }
9366
9367 if (opt->name == NULL)
9368 {
9369 as_bad (_("unknown architectural extension `%s'"), str);
9370 return 0;
9371 }
9372
9373 str = ext;
9374 }
9375
9376 return 1;
9377 }
9378
9379 static int
9380 aarch64_parse_cpu (const char *str)
9381 {
9382 const struct aarch64_cpu_option_table *opt;
9383 const char *ext = strchr (str, '+');
9384 size_t optlen;
9385
9386 if (ext != NULL)
9387 optlen = ext - str;
9388 else
9389 optlen = strlen (str);
9390
9391 if (optlen == 0)
9392 {
9393 as_bad (_("missing cpu name `%s'"), str);
9394 return 0;
9395 }
9396
9397 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9398 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9399 {
9400 mcpu_cpu_opt = &opt->value;
9401 if (ext != NULL)
9402 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
9403
9404 return 1;
9405 }
9406
9407 as_bad (_("unknown cpu `%s'"), str);
9408 return 0;
9409 }
9410
9411 static int
9412 aarch64_parse_arch (const char *str)
9413 {
9414 const struct aarch64_arch_option_table *opt;
9415 const char *ext = strchr (str, '+');
9416 size_t optlen;
9417
9418 if (ext != NULL)
9419 optlen = ext - str;
9420 else
9421 optlen = strlen (str);
9422
9423 if (optlen == 0)
9424 {
9425 as_bad (_("missing architecture name `%s'"), str);
9426 return 0;
9427 }
9428
9429 for (opt = aarch64_archs; opt->name != NULL; opt++)
9430 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9431 {
9432 march_cpu_opt = &opt->value;
9433 if (ext != NULL)
9434 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
9435
9436 return 1;
9437 }
9438
9439 as_bad (_("unknown architecture `%s'\n"), str);
9440 return 0;
9441 }
9442
9443 /* ABIs. */
9444 struct aarch64_option_abi_value_table
9445 {
9446 const char *name;
9447 enum aarch64_abi_type value;
9448 };
9449
9450 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
9451 {"ilp32", AARCH64_ABI_ILP32},
9452 {"lp64", AARCH64_ABI_LP64},
9453 };
9454
9455 static int
9456 aarch64_parse_abi (const char *str)
9457 {
9458 unsigned int i;
9459
9460 if (str[0] == '\0')
9461 {
9462 as_bad (_("missing abi name `%s'"), str);
9463 return 0;
9464 }
9465
9466 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9467 if (strcmp (str, aarch64_abis[i].name) == 0)
9468 {
9469 aarch64_abi = aarch64_abis[i].value;
9470 return 1;
9471 }
9472
9473 as_bad (_("unknown abi `%s'\n"), str);
9474 return 0;
9475 }
9476
9477 static struct aarch64_long_option_table aarch64_long_opts[] = {
9478 #ifdef OBJ_ELF
9479 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
9480 aarch64_parse_abi, NULL},
9481 #endif /* OBJ_ELF */
9482 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
9483 aarch64_parse_cpu, NULL},
9484 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
9485 aarch64_parse_arch, NULL},
9486 {NULL, NULL, 0, NULL}
9487 };
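/* Typical command lines accepted by the options above (illustrative):
     as -mcpu=cortex-a72+crc file.s
     as -march=armv8.2-a+sve -mabi=lp64 file.s
   Anything after a '+' is passed on to aarch64_parse_features.  */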
9488
9489 int
9490 md_parse_option (int c, const char *arg)
9491 {
9492 struct aarch64_option_table *opt;
9493 struct aarch64_long_option_table *lopt;
9494
9495 switch (c)
9496 {
9497 #ifdef OPTION_EB
9498 case OPTION_EB:
9499 target_big_endian = 1;
9500 break;
9501 #endif
9502
9503 #ifdef OPTION_EL
9504 case OPTION_EL:
9505 target_big_endian = 0;
9506 break;
9507 #endif
9508
9509 case 'a':
9510 /* Listing option.  Just ignore these; we don't support additional
9511 ones. */
9512 return 0;
9513
9514 default:
9515 for (opt = aarch64_opts; opt->option != NULL; opt++)
9516 {
9517 if (c == opt->option[0]
9518 && ((arg == NULL && opt->option[1] == 0)
9519 || streq (arg, opt->option + 1)))
9520 {
9521 /* If the option is deprecated, tell the user. */
9522 if (opt->deprecated != NULL)
9523 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
9524 arg ? arg : "", _(opt->deprecated));
9525
9526 if (opt->var != NULL)
9527 *opt->var = opt->value;
9528
9529 return 1;
9530 }
9531 }
9532
9533 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9534 {
9535 /* These options are expected to have an argument. */
9536 if (c == lopt->option[0]
9537 && arg != NULL
9538 && strncmp (arg, lopt->option + 1,
9539 strlen (lopt->option + 1)) == 0)
9540 {
9541 /* If the option is deprecated, tell the user. */
9542 if (lopt->deprecated != NULL)
9543 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
9544 _(lopt->deprecated));
9545
9546 /* Call the sub-option parser. */
9547 return lopt->func (arg + strlen (lopt->option) - 1);
9548 }
9549 }
9550
9551 return 0;
9552 }
9553
9554 return 1;
9555 }
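/* Worked example of the long-option path above (illustrative only): for a
   command line containing "-mcpu=cortex-a53", md_parse_option is called
   with c == 'm' and arg == "cpu=cortex-a53".  The "mcpu=" entry matches
   because arg is compared against the option text minus its first
   character, and the sub-option parser is then invoked on
   arg + strlen ("mcpu=") - 1, i.e. on "cortex-a53".  */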
9556
9557 void
9558 md_show_usage (FILE * fp)
9559 {
9560 struct aarch64_option_table *opt;
9561 struct aarch64_long_option_table *lopt;
9562
9563 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9564
9565 for (opt = aarch64_opts; opt->option != NULL; opt++)
9566 if (opt->help != NULL)
9567 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9568
9569 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9570 if (lopt->help != NULL)
9571 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
9572
9573 #ifdef OPTION_EB
9574 fprintf (fp, _("\
9575 -EB assemble code for a big-endian cpu\n"));
9576 #endif
9577
9578 #ifdef OPTION_EL
9579 fprintf (fp, _("\
9580 -EL assemble code for a little-endian cpu\n"));
9581 #endif
9582 }
9583
9584 /* Parse a .cpu directive. */
9585
9586 static void
9587 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9588 {
9589 const struct aarch64_cpu_option_table *opt;
9590 char saved_char;
9591 char *name;
9592 char *ext;
9593 size_t optlen;
9594
9595 name = input_line_pointer;
9596 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9597 input_line_pointer++;
9598 saved_char = *input_line_pointer;
9599 *input_line_pointer = 0;
9600
9601 ext = strchr (name, '+');
9602
9603 if (ext != NULL)
9604 optlen = ext - name;
9605 else
9606 optlen = strlen (name);
9607
9608 /* Skip the first "all" entry. */
9609 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9610 if (strlen (opt->name) == optlen
9611 && strncmp (name, opt->name, optlen) == 0)
9612 {
9613 mcpu_cpu_opt = &opt->value;
9614 if (ext != NULL)
9615 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9616 return;
9617
9618 cpu_variant = *mcpu_cpu_opt;
9619
9620 *input_line_pointer = saved_char;
9621 demand_empty_rest_of_line ();
9622 return;
9623 }
9624 as_bad (_("unknown cpu `%s'"), name);
9625 *input_line_pointer = saved_char;
9626 ignore_rest_of_line ();
9627 }
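/* Sketch of the directive handled above, assuming "cortex-a72" and
   "crypto" are entries in the aarch64_cpus and aarch64_features tables:

       .cpu cortex-a72
       .cpu cortex-a72+crypto

   On success the selected feature set becomes the active cpu_variant for
   the rest of the assembly, until changed again.  */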
9628
9629
9630 /* Parse a .arch directive. */
9631
9632 static void
9633 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9634 {
9635 const struct aarch64_arch_option_table *opt;
9636 char saved_char;
9637 char *name;
9638 char *ext;
9639 size_t optlen;
9640
9641 name = input_line_pointer;
9642 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9643 input_line_pointer++;
9644 saved_char = *input_line_pointer;
9645 *input_line_pointer = 0;
9646
9647 ext = strchr (name, '+');
9648
9649 if (ext != NULL)
9650 optlen = ext - name;
9651 else
9652 optlen = strlen (name);
9653
9654 /* Skip the first "all" entry. */
9655 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9656 if (strlen (opt->name) == optlen
9657 && strncmp (name, opt->name, optlen) == 0)
9658 {
9659 mcpu_cpu_opt = &opt->value;
9660 if (ext != NULL)
9661 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9662 return;
9663
9664 cpu_variant = *mcpu_cpu_opt;
9665
9666 *input_line_pointer = saved_char;
9667 demand_empty_rest_of_line ();
9668 return;
9669 }
9670
9671 as_bad (_("unknown architecture `%s'"), name);
9672 *input_line_pointer = saved_char;
9673 ignore_rest_of_line ();
9674 }
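/* Likewise for the .arch directive, assuming "armv8.1-a" and "lse" appear
   in the aarch64_archs and aarch64_features tables:

       .arch armv8-a
       .arch armv8.1-a+lse  */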
9675
9676 /* Parse a .arch_extension directive. */
9677
9678 static void
9679 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9680 {
9681 char saved_char;
9682 char *ext = input_line_pointer;
9683
9684 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9685 input_line_pointer++;
9686 saved_char = *input_line_pointer;
9687 *input_line_pointer = 0;
9688
9689 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9690 return;
9691
9692 cpu_variant = *mcpu_cpu_opt;
9693
9694 *input_line_pointer = saved_char;
9695 demand_empty_rest_of_line ();
9696 }
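/* The .arch_extension directive modifies the feature set currently in
   effect rather than selecting a new cpu or architecture.  A sketch,
   assuming "crc" is an entry in the aarch64_features table:

       .arch_extension crc       enable the CRC extension from here on
       .arch_extension nocrc     disable it again  */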
9697
9698 /* Copy symbol information. */
9699
9700 void
9701 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
9702 {
9703 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
9704 }
9705
9706 #ifdef OBJ_ELF
9707 /* Same as elf_copy_symbol_attributes, but without copying st_other.
9708 This is needed so AArch64 specific st_other values can be independently
9709 specified for an IFUNC resolver (that is called by the dynamic linker)
9710 and the symbol it resolves (aliased to the resolver). In particular,
9711 if a function symbol has special st_other value set via directives,
9712 then attaching an IFUNC resolver to that symbol should not override
9713 the st_other setting. Requiring the directive on the IFUNC resolver
9714 symbol would be unexpected and problematic in C code, where the two
9715 symbols appear as two independent function declarations. */
9716
9717 void
9718 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
9719 {
9720 struct elf_obj_sy *srcelf = symbol_get_obj (src);
9721 struct elf_obj_sy *destelf = symbol_get_obj (dest);
9722 if (srcelf->size)
9723 {
9724 if (destelf->size == NULL)
9725 destelf->size = XNEW (expressionS);
9726 *destelf->size = *srcelf->size;
9727 }
9728 else
9729 {
9730 free (destelf->size);
9731 destelf->size = NULL;
9732 }
9733 S_SET_SIZE (dest, S_GET_SIZE (src));
9734 }
9735 #endif
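/* Illustrative scenario for the comment above; the symbol names are
   hypothetical and .variant_pcs is assumed to be the AArch64 directive
   that sets a special st_other flag:

       .variant_pcs    foo
       .type           foo, %gnu_indirect_function
       .set            foo, foo_resolver
       .type           foo_resolver, %function
   foo_resolver:
       ...

   Because st_other is deliberately not copied, aliasing foo to its
   resolver leaves the .variant_pcs marking on foo intact while
   foo_resolver keeps the default calling convention.  */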