]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
[aarch64] GAS doesn't validate the architecture version for any tlbi registers. ...
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2020 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
106
/* Parsed shape/index information for a vector register operand,
   e.g. the ".4s" in "v0.4s" or the "[1]" in "v0.s[1]".  */
struct vector_type_el
{
  /* Element type (NT_b/NT_h/...; NT_zero/NT_merge for SVE predication
     suffixes); NT_invtype when nothing has been parsed.  */
  enum vector_el_type type;
  /* Bitmask of NTA_HASTYPE / NTA_HASINDEX / NTA_HASVARWIDTH saying
     which of the fields carry valid information.  */
  unsigned char defined;
  /* Number of elements, e.g. 4 for ".4s"; 0 for variable-width (SVE)
     registers.  */
  unsigned width;
  /* Element index parsed from "[...]" when NTA_HASINDEX is set.  */
  int64_t index;
};
114
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
116
/* Relocation information attached to the instruction currently being
   assembled, including the GAS internal fixup.  */
struct reloc
{
  /* BFD relocation code to emit; NO_RELOC when none is needed.  */
  bfd_reloc_code_real_type type;
  /* The expression the relocation applies to.  */
  expressionS exp;
  /* Non-zero if the fixup is PC-relative.  */
  int pc_rel;
  /* The operand the relocation is against.  */
  enum aarch64_opnd opnd;
  /* FIXUP_F_* flags, e.g. FIXUP_F_HAS_EXPLICIT_SHIFT.  */
  uint32_t flags;
  /* Set when resolving the fixup needs libopcodes support.  */
  unsigned need_libopcodes_p : 1;
};
126
/* All per-instruction state accumulated while assembling one line.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
  {
    enum aarch64_operand_error_kind kind;
    const char *error;
  } parsing_error;
  /* The condition that appears in the assembly line; COND_ALWAYS when
     no conditional field is present.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};
144
145 typedef struct aarch64_instruction aarch64_instruction;
146
147 static aarch64_instruction inst;
148
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
183 static inline bfd_boolean
184 error_p (void)
185 {
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188
189 static inline const char *
190 get_error_message (void)
191 {
192 return inst.parsing_error.error;
193 }
194
195 static inline enum aarch64_operand_error_kind
196 get_error_kind (void)
197 {
198 return inst.parsing_error.kind;
199 }
200
201 static inline void
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
203 {
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
206 }
207
/* Record ERROR as a recoverable operand error, i.e. one that does not
   rule this template out entirely.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
213
/* Record a syntax error with no explicit message; the DESC field of the
   corresponding aarch64_operand entry will be used to compose the error
   message when it is eventually reported.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
221
/* Record ERROR as a syntax error, unconditionally overwriting any
   previously recorded error.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
/* Record ERROR as a fatal syntax error (AARCH64_OPDE_FATAL_SYNTAX_ERROR
   — presumably one that should stop further template matching; confirm
   against the error-reporting code in libopcodes).  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
240 \f
241 /* Return value for certain parsers when the parsing fails; those parsers
242 return the information of the parsed result, e.g. register number, on
243 success. */
244 #define PARSE_FAIL -1
245
246 /* This is an invalid condition code that means no conditional field is
247 present. */
248 #define COND_ALWAYS 0x10
249
250 typedef struct
251 {
252 const char *template;
253 unsigned long value;
254 } asm_barrier_opt;
255
256 typedef struct
257 {
258 const char *template;
259 uint32_t value;
260 } asm_nzcv;
261
262 struct reloc_entry
263 {
264 char *name;
265 bfd_reloc_code_real_type reloc;
266 };
267
268 /* Macros to define the register types and masks for the purpose
269 of parsing. */
270
271 #undef AARCH64_REG_TYPES
272 #define AARCH64_REG_TYPES \
273 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
274 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
275 BASIC_REG_TYPE(SP_32) /* wsp */ \
276 BASIC_REG_TYPE(SP_64) /* sp */ \
277 BASIC_REG_TYPE(Z_32) /* wzr */ \
278 BASIC_REG_TYPE(Z_64) /* xzr */ \
279 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
280 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
281 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
282 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
283 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
284 BASIC_REG_TYPE(VN) /* v[0-31] */ \
285 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
286 BASIC_REG_TYPE(PN) /* p[0-15] */ \
287 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
288 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
289 /* Typecheck: same, plus SVE registers. */ \
290 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
291 | REG_TYPE(ZN)) \
292 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
293 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Typecheck: same, plus SVE registers. */ \
296 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
298 | REG_TYPE(ZN)) \
299 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
300 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
301 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
302 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
303 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
306 /* Typecheck: any [BHSDQ]P FP. */ \
307 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
308 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
309 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
310 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
311 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
312 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
313 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
314 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
315 be used for SVE instructions, since Zn and Pn are valid symbols \
316 in other contexts. */ \
317 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
318 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
319 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
320 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
321 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
322 | REG_TYPE(ZN) | REG_TYPE(PN)) \
323 /* Any integer register; used for error messages only. */ \
324 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
325 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
326 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
327 /* Pseudo type to mark the end of the enumerator sequence. */ \
328 BASIC_REG_TYPE(MAX)
329
330 #undef BASIC_REG_TYPE
331 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
332 #undef MULTI_REG_TYPE
333 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
334
335 /* Register type enumerators. */
336 typedef enum aarch64_reg_type_
337 {
338 /* A list of REG_TYPE_*. */
339 AARCH64_REG_TYPES
340 } aarch64_reg_type;
341
342 #undef BASIC_REG_TYPE
343 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
344 #undef REG_TYPE
345 #define REG_TYPE(T) (1 << REG_TYPE_##T)
346 #undef MULTI_REG_TYPE
347 #define MULTI_REG_TYPE(T,V) V,
348
/* Structure for a hash table entry for a register.  */
typedef struct
{
  /* Register name as written in assembly, e.g. "x0", "sp", "v31".  */
  const char *name;
  /* Encoded register number.  */
  unsigned char number;
  /* One of the REG_TYPE_* enumerators.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;
  /* Non-zero for registers predefined by the assembler (as opposed to
     user-defined aliases).  */
  unsigned char builtin;
} reg_entry;
357
358 /* Values indexed by aarch64_reg_type to assist the type checking. */
359 static const unsigned reg_type_masks[] =
360 {
361 AARCH64_REG_TYPES
362 };
363
364 #undef BASIC_REG_TYPE
365 #undef REG_TYPE
366 #undef MULTI_REG_TYPE
367 #undef AARCH64_REG_TYPES
368
/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be synchronized with the aarch64_reg_type
   definitions above.

   Return a translatable message describing the register class
   REG_TYPE; aborts via as_fatal for an unknown type.  */
static const char *
get_reg_expected_msg (aarch64_reg_type reg_type)
{
  const char *msg;

  switch (reg_type)
    {
    case REG_TYPE_R_32:
      msg = N_("integer 32-bit register expected");
      break;
    case REG_TYPE_R_64:
      msg = N_("integer 64-bit register expected");
      break;
    case REG_TYPE_R_N:
      msg = N_("integer register expected");
      break;
    case REG_TYPE_R64_SP:
      msg = N_("64-bit integer or SP register expected");
      break;
    case REG_TYPE_SVE_BASE:
      msg = N_("base register expected");
      break;
    case REG_TYPE_R_Z:
      msg = N_("integer or zero register expected");
      break;
    case REG_TYPE_SVE_OFFSET:
      msg = N_("offset register expected");
      break;
    case REG_TYPE_R_SP:
      msg = N_("integer or SP register expected");
      break;
    case REG_TYPE_R_Z_SP:
      msg = N_("integer, zero or SP register expected");
      break;
    case REG_TYPE_FP_B:
      msg = N_("8-bit SIMD scalar register expected");
      break;
    case REG_TYPE_FP_H:
      msg = N_("16-bit SIMD scalar or floating-point half precision "
	       "register expected");
      break;
    case REG_TYPE_FP_S:
      msg = N_("32-bit SIMD scalar or floating-point single precision "
	       "register expected");
      break;
    case REG_TYPE_FP_D:
      msg = N_("64-bit SIMD scalar or floating-point double precision "
	       "register expected");
      break;
    case REG_TYPE_FP_Q:
      msg = N_("128-bit SIMD scalar or floating-point quad precision "
	       "register expected");
      break;
    case REG_TYPE_R_Z_BHSDQ_V:
    case REG_TYPE_R_Z_SP_BHSDQ_VZP:
      msg = N_("register expected");
      break;
    case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
      msg = N_("SIMD scalar or floating-point register expected");
      break;
    case REG_TYPE_VN:		/* any V reg  */
      msg = N_("vector register expected");
      break;
    case REG_TYPE_ZN:
      msg = N_("SVE vector register expected");
      break;
    case REG_TYPE_PN:
      msg = N_("SVE predicate register expected");
      break;
    default:
      /* as_fatal does not return.  */
      as_fatal (_("invalid register type %d"), reg_type);
    }
  return msg;
}
446
447 /* Some well known registers that we refer to directly elsewhere. */
448 #define REG_SP 31
449 #define REG_ZR 31
450
451 /* Instructions take 4 bytes in the object file. */
452 #define INSN_SIZE 4
453
454 static struct hash_control *aarch64_ops_hsh;
455 static struct hash_control *aarch64_cond_hsh;
456 static struct hash_control *aarch64_shift_hsh;
457 static struct hash_control *aarch64_sys_regs_hsh;
458 static struct hash_control *aarch64_pstatefield_hsh;
459 static struct hash_control *aarch64_sys_regs_ic_hsh;
460 static struct hash_control *aarch64_sys_regs_dc_hsh;
461 static struct hash_control *aarch64_sys_regs_at_hsh;
462 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
463 static struct hash_control *aarch64_sys_regs_sr_hsh;
464 static struct hash_control *aarch64_reg_hsh;
465 static struct hash_control *aarch64_barrier_opt_hsh;
466 static struct hash_control *aarch64_nzcv_hsh;
467 static struct hash_control *aarch64_pldop_hsh;
468 static struct hash_control *aarch64_hint_opt_hsh;
469
470 /* Stuff needed to resolve the label ambiguity
471 As:
472 ...
473 label: <insn>
474 may differ from:
475 ...
476 label:
477 <insn> */
478
479 static symbolS *last_label_seen;
480
481 /* Literal pool structure. Held on a per-section
482 and per-sub-section basis. */
483
484 #define MAX_LITERAL_POOL_SIZE 1024
485 typedef struct literal_expression
486 {
487 expressionS exp;
488 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
489 LITTLENUM_TYPE * bignum;
490 } literal_expression;
491
492 typedef struct literal_pool
493 {
494 literal_expression literals[MAX_LITERAL_POOL_SIZE];
495 unsigned int next_free_entry;
496 unsigned int id;
497 symbolS *symbol;
498 segT section;
499 subsegT sub_section;
500 int size;
501 struct literal_pool *next;
502 } literal_pool;
503
504 /* Pointer to a linked list of literal pools. */
505 static literal_pool *list_of_pools = NULL;
506 \f
507 /* Pure syntax. */
508
509 /* This array holds the chars that always start a comment. If the
510 pre-processor is disabled, these aren't very useful. */
511 const char comment_chars[] = "";
512
513 /* This array holds the chars that only start a comment at the beginning of
514 a line. If the line seems to have the form '# 123 filename'
515 .line and .file directives will appear in the pre-processed output. */
516 /* Note that input_file.c hand checks for '#' at the beginning of the
517 first line of the input file. This is because the compiler outputs
518 #NO_APP at the beginning of its output. */
519 /* Also note that comments like this one will always work. */
520 const char line_comment_chars[] = "#";
521
522 const char line_separator_chars[] = ";";
523
524 /* Chars that can be used to separate mant
525 from exp in floating point numbers. */
526 const char EXP_CHARS[] = "eE";
527
528 /* Chars that mean this number is a floating point constant. */
529 /* As in 0f12.456 */
530 /* or 0d1.2345e12 */
531
532 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhH";
533
534 /* Prefix character that indicates the start of an immediate value. */
535 #define is_immediate_prefix(C) ((C) == '#')
536
537 /* Separator character handling. */
538
539 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
540
541 static inline bfd_boolean
542 skip_past_char (char **str, char c)
543 {
544 if (**str == c)
545 {
546 (*str)++;
547 return TRUE;
548 }
549 else
550 return FALSE;
551 }
552
553 #define skip_past_comma(str) skip_past_char (str, ',')
554
555 /* Arithmetic expressions (possibly involving symbols). */
556
557 static bfd_boolean in_my_get_expression_p = FALSE;
558
559 /* Third argument to my_get_expression. */
560 #define GE_NO_PREFIX 0
561 #define GE_OPT_PREFIX 1
562
563 /* Return TRUE if the string pointed by *STR is successfully parsed
564 as an valid expression; *EP will be filled with the information of
565 such an expression. Otherwise return FALSE. */
566
static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  /* Optionally consume a leading '#' immediate prefix, depending on
     PREFIX_MODE.  */
  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* GAS's expression () parses from input_line_pointer, so temporarily
     point it at our string; restored on every exit path below.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Let md_operand know it may flag bad operands as O_illegal.  */
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#' prefix promises an expression follows, so its absence is
	 fatal rather than merely a template mismatch.  */
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
629
630 /* Turn a string in input_line_pointer into a floating point constant
631 of type TYPE, and store the appropriate bytes in *LITP. The number
632 of LITTLENUMS emitted is stored in *SIZEP. An error message is
633 returned, or NULL on OK. */
634
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* If this is a bfloat16 type, then parse it slightly differently -
     as it does not follow the IEEE standard exactly.  */
  if (type == 'b')
    {
      char * t;
      LITTLENUM_TYPE words[MAX_LITTLENUMS];
      FLONUM_TYPE generic_float;

      /* bfloat16 is a truncated IEEE single: 1 sign, 8 exponent and
	 7 (+ hidden) mantissa bits.  */
      t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float);

      if (t)
	input_line_pointer = t;
      else
	return _("invalid floating point number");

      /* Infinities and NaNs need their canonical bfloat16 bit patterns
	 substituted by hand.  */
      switch (generic_float.sign)
	{
	/* Is +Inf.  */
	case 'P':
	  words[0] = 0x7f80;
	  break;

	/* Is -Inf.  */
	case 'N':
	  words[0] = 0xff80;
	  break;

	/* Is NaN.  */
	/* bfloat16 has two types of NaN - quiet and signalling.
	   Quiet NaN has bit[6] == 1 && fraction != 0, whereas
	   signalling NaNs have bit[6] == 0 && fraction != 0.
	   Chose this specific encoding as it is the same form
	   as used by other IEEE 754 encodings in GAS.  */
	case 0:
	  words[0] = 0x7fff;
	  break;

	default:
	  break;
	}

      /* A bfloat16 literal occupies two bytes.  */
      *sizeP = 2;

      md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));

      return NULL;
    }

  /* All other float types follow IEEE exactly; let the generic helper
     handle them.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
688
689 /* We handle all bad expressions here, so that we can report the faulty
690 instruction in the error message. */
691 void
692 md_operand (expressionS * exp)
693 {
694 if (in_my_get_expression_p)
695 exp->X_op = O_illegal;
696 }
697
698 /* Immediate values. */
699
700 /* Errors may be set multiple times during parsing or bit encoding
701 (particularly in the Neon bits), but usually the earliest error which is set
702 will be the most meaningful. Avoid overwriting it with later (cascading)
703 errors by calling this function. */
704
/* Record ERROR as a syntax error unless some earlier (and likely more
   meaningful) error has already been recorded.  */
static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
711
712 /* Similar to first_error, but this function accepts formatted error
713 message. */
/* Similar to first_error, but this function accepts a printf-style
   formatted error message.  The formatted text is kept in a static
   buffer, so it must be consumed (by as_bad) before the next assembly
   line is processed.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The messages formatted here are short; truncation would
	 indicate a programming error.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
736
737 /* Register parsing. */
738
739 /* Generic register parser which is called by other specialized
740 register parsers.
741 CCP points to what should be the beginning of a register name.
742 If it is indeed a valid register name, advance CCP over it and
743 return the reg_entry structure; otherwise return NULL.
744 It does not issue diagnostics. */
745
/* Generic register parser.  CCP points to what should be the beginning
   of a register name.  If it is indeed a valid register name, advance
   *CCP over it and return the reg_entry structure; otherwise return
   NULL without moving *CCP.  Issues no diagnostics.  */
static reg_entry *
parse_reg (char **ccp)
{
  char *start = *ccp;
  char *p;
  reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif

  p = start;
  /* Register names must begin with a letter that is also a valid
     symbol-name starter.  */
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  /* Scan the rest of the candidate name.  */
  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the name up in the register hash table; unknown names are
     simply not registers.  */
  reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}
775
776 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
777 return FALSE. */
778 static bfd_boolean
779 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
780 {
781 return (reg_type_masks[type] & (1 << reg->type)) != 0;
782 }
783
784 /* Try to parse a base or offset register. Allow SVE base and offset
785 registers if REG_TYPE includes SVE registers. Return the register
786 entry on success, setting *QUALIFIER to the register qualifier.
787 Return null otherwise.
788
789 Note that this function does not issue any diagnostics. */
790
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  /* Map the register class to its operand qualifier; reject register
     classes that cannot be base/offset registers.  */
  switch (reg->type)
    {
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* SVE vector base/offset registers are only accepted when the
	 caller allows them, and must carry an explicit ".s" or ".d"
	 element-size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  /* Only commit the consumed input on success.  */
  *ccp = str;

  return reg;
}
841
842 /* Try to parse a base or offset register. Return the register entry
843 on success, setting *QUALIFIER to the register qualifier. Return null
844 otherwise.
845
846 Note that this function does not issue any diagnostics. */
847
848 static const reg_entry *
849 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
850 {
851 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
852 }
853
854 /* Parse the qualifier of a vector register or vector element of type
855 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
856 succeeds; otherwise return FALSE.
857
858 Accept only one occurrence of:
859 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
860 b h s d q */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector and predicate registers never take a numeric element
     count ("z0.s", not "z0.4s"); width 0 marks a variable width.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  /* Only the element counts used by AdvSIMD arrangements are legal.  */
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are only valid for SVE registers or as the
	 single-element form "1q".  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* The count/size pair must name a 64- or 128-bit arrangement, or one
     of the short forms 2h / 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
939
940 /* *STR contains an SVE zero/merge predication suffix. Parse it into
941 *PARSED_TYPE and point *STR at the end of the suffix. */
942
943 static bfd_boolean
944 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
945 {
946 char *ptr = *str;
947
948 /* Skip '/'. */
949 gas_assert (*ptr == '/');
950 ptr++;
951 switch (TOLOWER (*ptr))
952 {
953 case 'z':
954 parsed_type->type = NT_zero;
955 break;
956 case 'm':
957 parsed_type->type = NT_merge;
958 break;
959 default:
960 if (*ptr != '\0' && *ptr != ',')
961 first_error_fmt (_("unexpected character `%c' in predication type"),
962 *ptr);
963 else
964 first_error (_("missing predication type"));
965 return FALSE;
966 }
967 parsed_type->width = 0;
968 *str = ptr + 1;
969 return TRUE;
970 }
971
972 /* Parse a register of the type TYPE.
973
974 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
975 name or the parsed register is not of TYPE.
976
977 Otherwise return the register number, and optionally fill in the actual
978 type of the register in *RTYPE when multiple alternatives were given, and
979 return the register shape and element index information in *TYPEINFO.
980
981 IN_REG_LIST should be set with TRUE if the caller is parsing a register
982 list. */
983
static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start with "nothing parsed" shape information.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the register's actual class.  */
  type = reg->type;

  /* V/Z/P registers may carry a ".<T>" arrangement suffix; P registers
     may alternatively carry a "/z" or "/m" predication suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* Parse the index expression; errors are recorded by
	 my_get_expression and caught via the O_constant check.  */
      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1107
1108 /* Parse register.
1109
1110 Return the register number on success; return PARSE_FAIL otherwise.
1111
1112 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1113 the register (e.g. NEON double or quad reg when either has been requested).
1114
1115 If this is a NEON vector register with additional type information, fill
1116 in the struct pointed to by VECTYPE (if non-NULL).
1117
1118 This parser does not handle register list. */
1119
1120 static int
1121 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1122 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1123 {
1124 struct vector_type_el atype;
1125 char *str = *ccp;
1126 int reg = parse_typed_reg (&str, type, rtype, &atype,
1127 /*in_reg_list= */ FALSE);
1128
1129 if (reg == PARSE_FAIL)
1130 return PARSE_FAIL;
1131
1132 if (vectype)
1133 *vectype = atype;
1134
1135 *ccp = str;
1136
1137 return reg;
1138 }
1139
1140 static inline bfd_boolean
1141 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1142 {
1143 return
1144 e1.type == e2.type
1145 && e1.defined == e2.defined
1146 && e1.width == e2.width && e1.index == e2.index;
1147 }
1148
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |     0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  /* A register list must be wrapped in { ... }.  */
  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  /* TYPEINFO_FIRST records the shape of the first register; each
     subsequent register is required to match it exactly.  */
  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      /* IN_RANGE is set by the loop condition below when the separator
	 was '-' rather than ','; VAL then still holds the register that
	 starts the range.  */
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      /* If any element carries an index, the list as a whole must be
	 followed by a shared [index]; checked after the closing '}'.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  /* Exclude the range start, which was already encoded when it
	     was first parsed; the emit loop below covers
	     [val_range, val].  */
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      /* Pack each register number into 5 bits of RET_VAL, in list
	 order.  A plain register contributes one iteration; a range
	 contributes one per register it covers.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue on ',' or '-'; the comma operator records which one.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  /* Parse the shared [index] that must follow a list of indexed
     registers, e.g. { v0.d, v1.d }[1].  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low 2 bits: register count - 1; remaining bits: packed numbers.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1309
1310 /* Directives: register aliases. */
1311
1312 static reg_entry *
1313 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1314 {
1315 reg_entry *new;
1316 const char *name;
1317
1318 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1319 {
1320 if (new->builtin)
1321 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1322 str);
1323
1324 /* Only warn about a redefinition if it's not defined as the
1325 same register. */
1326 else if (new->number != number || new->type != type)
1327 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1328
1329 return NULL;
1330 }
1331
1332 name = xstrdup (str);
1333 new = XNEW (reg_entry);
1334
1335 new->name = name;
1336 new->number = number;
1337 new->type = type;
1338 new->builtin = FALSE;
1339
1340 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1341 abort ();
1342
1343 return new;
1344 }
1345
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.

   NEWNAME points to the candidate alias name; P points just past it,
   i.e. at what should be the " .req " separator.  */

static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* It looked like a .req; we have consumed and diagnosed it.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the case-folded variants when they actually differ
	 from the name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1425
/* Handler for a ".req" at the start of a line.  Should never be called,
   as .req goes between the alias and the register name, not at the
   beginning of the line; reaching here means the directive was
   malformed, so diagnose it.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1433
1434 /* The .unreq directive deletes an alias which was previously defined
1435 by .req. For example:
1436
1437 my_alias .req r11
1438 .unreq my_alias */
1439
1440 static void
1441 s_unreq (int a ATTRIBUTE_UNUSED)
1442 {
1443 char *name;
1444 char saved_char;
1445
1446 name = input_line_pointer;
1447
1448 while (*input_line_pointer != 0
1449 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1450 ++input_line_pointer;
1451
1452 saved_char = *input_line_pointer;
1453 *input_line_pointer = 0;
1454
1455 if (!*name)
1456 as_bad (_("invalid syntax for .unreq directive"));
1457 else
1458 {
1459 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1460
1461 if (!reg)
1462 as_bad (_("unknown register alias '%s'"), name);
1463 else if (reg->builtin)
1464 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1465 name);
1466 else
1467 {
1468 char *p;
1469 char *nbuf;
1470
1471 hash_delete (aarch64_reg_hsh, name, FALSE);
1472 free ((char *) reg->name);
1473 free (reg);
1474
1475 /* Also locate the all upper case and all lower case versions.
1476 Do not complain if we cannot find one or the other as it
1477 was probably deleted above. */
1478
1479 nbuf = strdup (name);
1480 for (p = nbuf; *p; p++)
1481 *p = TOUPPER (*p);
1482 reg = hash_find (aarch64_reg_hsh, nbuf);
1483 if (reg)
1484 {
1485 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1486 free ((char *) reg->name);
1487 free (reg);
1488 }
1489
1490 for (p = nbuf; *p; p++)
1491 *p = TOLOWER (*p);
1492 reg = hash_find (aarch64_reg_hsh, nbuf);
1493 if (reg)
1494 {
1495 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1496 free ((char *) reg->name);
1497 free (reg);
1498 }
1499
1500 free (nbuf);
1501 }
1502 }
1503
1504 *input_line_pointer = saved_char;
1505 demand_empty_rest_of_line ();
1506 }
1507
1508 /* Directives: Instruction set selection. */
1509
1510 #ifdef OBJ_ELF
1511 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1512 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1513 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1514 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1515
/* Create a new mapping symbol for the transition to STATE, placed at
   offset VALUE within FRAG.  STATE selects the symbol name: "$d" for
   MAP_DATA, "$x" for MAP_INSN (any other state aborts).  The symbol is
   local and untyped, per the AAELF64 mapping-symbol rules.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag must be emitted in address
	 order; an equal address means the old symbol is superseded.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1571
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Insert a "$d" symbol at offset VALUE of FRAG, covering BYTES bytes of
   padding, followed by a STATE symbol at VALUE + BYTES where the real
   contents resume.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* Keep first_map consistent: if the removed symbol was at offset
	 0 it must also have been the frag's first mapping symbol.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1599
1600 static void mapping_state_2 (enum mstate state, int max_chars);
1601
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  Emits the mapping symbol that
   marks the transition, unless the section is already in STATE.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* Bytes before the first instruction were data; mark them so.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1643
1644 /* Same as mapping_state, but MAX_CHARS bytes have already been
1645 allocated. Put the mapping symbol that far back. */
1646
1647 static void
1648 mapping_state_2 (enum mstate state, int max_chars)
1649 {
1650 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1651
1652 if (!SEG_NORMAL (now_seg))
1653 return;
1654
1655 if (mapstate == state)
1656 /* The mapping symbol has already been emitted.
1657 There is nothing else to do. */
1658 return;
1659
1660 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1661 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1662 }
1663 #else
1664 #define mapping_state(x) /* nothing */
1665 #define mapping_state_2(x, y) /* nothing */
1666 #endif
1667
1668 /* Directives: sectioning and alignment. */
1669
/* Implement the .bss directive: switch output to the BSS section and
   record a data mapping-state transition for it.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1679
/* Implement the .even directive: align the current location counter to
   a two-byte boundary (frag_align's first argument is log2 of the
   alignment).  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1691
1692 /* Directives: Literal pools. */
1693
1694 static literal_pool *
1695 find_literal_pool (int size)
1696 {
1697 literal_pool *pool;
1698
1699 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1700 {
1701 if (pool->section == now_seg
1702 && pool->sub_section == now_subseg && pool->size == size)
1703 break;
1704 }
1705
1706 return pool;
1707 }
1708
1709 static literal_pool *
1710 find_or_make_literal_pool (int size)
1711 {
1712 /* Next literal pool ID number. */
1713 static unsigned int latest_pool_num = 1;
1714 literal_pool *pool;
1715
1716 pool = find_literal_pool (size);
1717
1718 if (pool == NULL)
1719 {
1720 /* Create a new pool. */
1721 pool = XNEW (literal_pool);
1722 if (!pool)
1723 return NULL;
1724
1725 /* Currently we always put the literal pool in the current text
1726 section. If we were generating "small" model code where we
1727 knew that all code and initialised data was within 1MB then
1728 we could output literals to mergeable, read-only data
1729 sections. */
1730
1731 pool->next_free_entry = 0;
1732 pool->section = now_seg;
1733 pool->sub_section = now_subseg;
1734 pool->size = size;
1735 pool->next = list_of_pools;
1736 pool->symbol = NULL;
1737
1738 /* Add it to the list. */
1739 list_of_pools = pool;
1740 }
1741
1742 /* New pools, and emptied pools, will have a NULL symbol. */
1743 if (pool->symbol == NULL)
1744 {
1745 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1746 (valueT) 0, &zero_address_frag);
1747 pool->id = latest_pool_num++;
1748 }
1749
1750 /* Done. */
1751 return pool;
1752 }
1753
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.

   On success *EXP is rewritten in place to an O_symbol expression that
   refers to the pool entry (pool symbol + byte offset), which is what
   the caller then assembles against.  */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  Constants and
     symbolic expressions are deduplicated by comparing the fields that
     determine their value.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP to reference the pool entry: symbol of the pool label
     plus the entry's byte offset within the pool.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1813
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Give the previously created SYMBOLP its NAME, SEGMENT, value VALU and
   owning FRAG, then append it to the global symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.	 */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its storage.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1864
1865
/* Implement the .ltorg/.pool directive: dump every non-empty literal
   pool (4-byte, 8-byte and 16-byte entry sizes) for the current
   section/subsection at the current location, then mark each pool as
   emptied so it can be refilled.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Pools exist for entry sizes 4 (align 2), 8 (align 3) and 16
     (align 4) bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 makes the label un-typeable in assembly source.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Give the deferred pool symbol its final location; see
	 symbol_locate above.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1924
1925 #ifdef OBJ_ELF
1926 /* Forward declarations for functions below, in the MD interface
1927 section. */
1928 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1929 static struct reloc_table_entry * find_reloc_table_entry (char **);
1930
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Implement .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8):
   emit a comma-separated list of NBYTES-wide data values.  A
   ":reloc_suffix:" on a symbolic value is recognised but currently
   rejected with a diagnostic.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbolic value may carry a "#:suffix:" relocation
	     specifier; detect it so we can diagnose it.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1986
/* Mark symbol that it follows a variant PCS convention.  Implements the
   .variant_pcs directive: sets STO_AARCH64_VARIANT_PCS in the st_other
   field of the named ELF symbol, creating the symbol if necessary.  */

static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  c = get_symbol_name (&name);
  if (!*name)
    as_bad (_("Missing symbol name in directive"));
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfd_asymbol_bfd (bfdsym), bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
2009 #endif /* OBJ_ELF */
2010
/* Output a 32-bit word, but mark as an instruction.  Implements the
   .inst directive: each comma-separated constant is emitted as a
   4-byte little-endian instruction word (byte-swapped first on
   big-endian targets, since instructions stay little-endian).  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned. In executable section, there is no
     MAP_DATA symbol pending. So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instructions are always little-endian; undo the target's
	 data byte order before emitting.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2063
/* Implement the .cfi_b_key_frame directive: record in the current CFI
   FDE that return addresses in this frame are signed with the B
   pointer-authentication key rather than the default A key.  */

static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2071
2072 #ifdef OBJ_ELF
/* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.
   The directive's expression operand becomes the fixup's symbol.  */

static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the instruction that follows
     immediately after this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2087
/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.
   The directive's expression operand becomes the fixup's symbol.  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2107
/* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.
   The directive's expression operand becomes the fixup's symbol.  */

static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the instruction that follows
     immediately after this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2122 #endif /* OBJ_ELF */
2123
2124 static void s_aarch64_arch (int);
2125 static void s_aarch64_cpu (int);
2126 static void s_aarch64_arch_extension (int);
2127
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* .pool is a synonym for .ltorg.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Architecture / CPU selection.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The integer argument is the data size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* The character argument selects the float_cons format.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2161 \f
2162
2163 /* Check whether STR points to a register name followed by a comma or the
2164 end of line; REG_TYPE indicates which register types are checked
2165 against. Return TRUE if STR is such a register name; otherwise return
2166 FALSE. The function does not intend to produce any diagnostics, but since
2167 the register parser aarch64_reg_parse, which is called by this function,
2168 does produce diagnostics, we call clear_error to clear any diagnostics
2169 that may be generated by aarch64_reg_parse.
2170 Also, the function returns FALSE directly if there is any user error
2171 present at the function entry. This prevents the existing diagnostics
2172 state from being spoiled.
2173 The function currently serves parse_constant_immediate and
2174 parse_big_immediate only. */
2175 static bfd_boolean
2176 reg_name_p (char *str, aarch64_reg_type reg_type)
2177 {
2178 int reg;
2179
2180 /* Prevent the diagnostics state from being spoiled. */
2181 if (error_p ())
2182 return FALSE;
2183
2184 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2185
2186 /* Clear the parsing error that may be set by the reg parser. */
2187 clear_error ();
2188
2189 if (reg == PARSE_FAIL)
2190 return FALSE;
2191
2192 skip_whitespace (str);
2193 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2194 return TRUE;
2195
2196 return FALSE;
2197 }
2198
2199 /* Parser functions used exclusively in instruction operands. */
2200
2201 /* Parse an immediate expression which may not be constant.
2202
2203 To prevent the expression parser from pushing a register name
2204 into the symbol table as an undefined symbol, firstly a check is
2205 done to find out whether STR is a register of type REG_TYPE followed
2206 by a comma or the end of line. Return FALSE if STR is such a string. */
2207
2208 static bfd_boolean
2209 parse_immediate_expression (char **str, expressionS *exp,
2210 aarch64_reg_type reg_type)
2211 {
2212 if (reg_name_p (*str, reg_type))
2213 {
2214 set_recoverable_error (_("immediate operand required"));
2215 return FALSE;
2216 }
2217
2218 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2219
2220 if (exp->X_op == O_absent)
2221 {
2222 set_fatal_syntax_error (_("missing immediate expression"));
2223 return FALSE;
2224 }
2225
2226 return TRUE;
2227 }
2228
2229 /* Constant immediate-value read function for use in insn parsing.
2230 STR points to the beginning of the immediate (with the optional
2231 leading #); *VAL receives the value. REG_TYPE says which register
2232 names should be treated as registers rather than as symbolic immediates.
2233
2234 Return TRUE on success; otherwise return FALSE. */
2235
2236 static bfd_boolean
2237 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2238 {
2239 expressionS exp;
2240
2241 if (! parse_immediate_expression (str, &exp, reg_type))
2242 return FALSE;
2243
2244 if (exp.X_op != O_constant)
2245 {
2246 set_syntax_error (_("constant expression required"));
2247 return FALSE;
2248 }
2249
2250 *val = exp.X_add_number;
2251 return TRUE;
2252 }
2253
/* Extract the 8-bit AArch64 floating-point immediate encoding from the
   single-precision bit pattern IMM: the sign bit b[31] becomes b[7] and
   bits b[25:19] become b[6:0].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t low7 = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;	/* b[31] -> b[7].  */

  return sign | low7;
}
2260
2261 /* Return TRUE if the single-precision floating-point value encoded in IMM
2262 can be expressed in the AArch64 8-bit signed floating-point format with
2263 3-bit exponent and normalized 4 bits of precision; in other words, the
2264 floating-point value must be expressable as
2265 (+/-) n / 16 * power (2, r)
2266 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2267
2268 static bfd_boolean
2269 aarch64_imm_float_p (uint32_t imm)
2270 {
2271 /* If a single-precision floating-point value has the following bit
2272 pattern, it can be expressed in the AArch64 8-bit floating-point
2273 format:
2274
2275 3 32222222 2221111111111
2276 1 09876543 21098765432109876543210
2277 n Eeeeeexx xxxx0000000000000000000
2278
2279 where n, e and each x are either 0 or 1 independently, with
2280 E == ~ e. */
2281
2282 uint32_t pattern;
2283
2284 /* Prepare the pattern for 'Eeeeee'. */
2285 if (((imm >> 30) & 0x1) == 0)
2286 pattern = 0x3e000000;
2287 else
2288 pattern = 0x40000000;
2289
2290 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2291 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2292 }
2293
2294 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2295 as an IEEE float without any loss of precision. Store the value in
2296 *FPWORD if so. */
2297
2298 static bfd_boolean
2299 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2300 {
2301 /* If a double-precision floating-point value has the following bit
2302 pattern, it can be expressed in a float:
2303
2304 6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2305 3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2306 n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2307
2308 -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
2309 if Eeee_eeee != 1111_1111
2310
2311 where n, e, s and S are either 0 or 1 independently and where ~ is the
2312 inverse of E. */
2313
2314 uint32_t pattern;
2315 uint32_t high32 = imm >> 32;
2316 uint32_t low32 = imm;
2317
2318 /* Lower 29 bits need to be 0s. */
2319 if ((imm & 0x1fffffff) != 0)
2320 return FALSE;
2321
2322 /* Prepare the pattern for 'Eeeeeeeee'. */
2323 if (((high32 >> 30) & 0x1) == 0)
2324 pattern = 0x38000000;
2325 else
2326 pattern = 0x40000000;
2327
2328 /* Check E~~~. */
2329 if ((high32 & 0x78000000) != pattern)
2330 return FALSE;
2331
2332 /* Check Eeee_eeee != 1111_1111. */
2333 if ((high32 & 0x7ff00000) == 0x47f00000)
2334 return FALSE;
2335
2336 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */
2337 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */
2338 | (low32 >> 29)); /* 3 S bits. */
2339 return TRUE;
2340 }
2341
2342 /* Return true if we should treat OPERAND as a double-precision
2343 floating-point operand rather than a single-precision one. */
2344 static bfd_boolean
2345 double_precision_operand_p (const aarch64_opnd_info *operand)
2346 {
2347 /* Check for unsuffixed SVE registers, which are allowed
2348 for LDR and STR but not in instructions that require an
2349 immediate. We get better error messages if we arbitrarily
2350 pick one size, parse the immediate normally, and then
2351 report the match failure in the normal way. */
2352 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2353 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2354 }
2355
2356 /* Parse a floating-point immediate. Return TRUE on success and return the
2357 value in *IMMED in the format of IEEE754 single-precision encoding.
2358 *CCP points to the start of the string; DP_P is TRUE when the immediate
2359 is expected to be in double-precision (N.B. this only matters when
2360 hexadecimal representation is involved). REG_TYPE says which register
2361 names should be treated as registers rather than as symbolic immediates.
2362
2363 This routine accepts any IEEE float; it is up to the callers to reject
2364 invalid ones. */
2365
static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  /* The leading '#' is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Narrow the 64-bit encoding to a 32-bit one; fail if any
	     precision would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else if (reg_name_p (str, reg_type))
   {
     /* A register name here means the '#' was omitted; reject it before
	the expression parser can intern it as an undefined symbol.  */
     set_recoverable_error (_("immediate operand required"));
     return FALSE;
   }

  if (! hex_p)
    {
      int i;

      /* Decimal representation: let the generic IEEE parser produce the
	 single-precision littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2431
2432 /* Less-generic immediate-value read function with the possibility of loading
2433 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2434 instructions.
2435
2436 To prevent the expression parser from pushing a register name into the
2437 symbol table as an undefined symbol, a check is firstly done to find
2438 out whether STR is a register of type REG_TYPE followed by a comma or
2439 the end of line. Return FALSE if STR is such a register. */
2440
2441 static bfd_boolean
2442 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2443 {
2444 char *ptr = *str;
2445
2446 if (reg_name_p (ptr, reg_type))
2447 {
2448 set_syntax_error (_("immediate operand required"));
2449 return FALSE;
2450 }
2451
2452 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2453
2454 if (inst.reloc.exp.X_op == O_constant)
2455 *imm = inst.reloc.exp.X_add_number;
2456
2457 *str = ptr;
2458
2459 return TRUE;
2460 }
2461
2462 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2463 if NEED_LIBOPCODES is non-zero, the fixup will need
2464 assistance from the libopcodes. */
2465
2466 static inline void
2467 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2468 const aarch64_opnd_info *operand,
2469 int need_libopcodes_p)
2470 {
2471 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2472 reloc->opnd = operand->type;
2473 if (need_libopcodes_p)
2474 reloc->need_libopcodes_p = 1;
2475 };
2476
2477 /* Return TRUE if the instruction needs to be fixed up later internally by
2478 the GAS; otherwise return FALSE. */
2479
2480 static inline bfd_boolean
2481 aarch64_gas_internal_fixup_p (void)
2482 {
2483 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2484 }
2485
2486 /* Assign the immediate value to the relevant field in *OPERAND if
2487 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2488 needs an internal fixup in a later stage.
2489 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2490 IMM.VALUE that may get assigned with the constant. */
2491 static inline void
2492 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2493 aarch64_opnd_info *operand,
2494 int addr_off_p,
2495 int need_libopcodes_p,
2496 int skip_p)
2497 {
2498 if (reloc->exp.X_op == O_constant)
2499 {
2500 if (addr_off_p)
2501 operand->addr.offset.imm = reloc->exp.X_add_number;
2502 else
2503 operand->imm.value = reloc->exp.X_add_number;
2504 reloc->type = BFD_RELOC_UNUSED;
2505 }
2506 else
2507 {
2508 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2509 /* Tell libopcodes to ignore this operand or not. This is helpful
2510 when one of the operands needs to be fixed up later but we need
2511 libopcodes to check the other operands. */
2512 operand->skip = skip_p;
2513 }
2514 }
2515
2516 /* Relocation modifiers. Each entry in the table contains the textual
2517 name for the relocation which may be placed before a symbol used as
2518 a load/store offset, or add immediate. It must be surrounded by a
2519 leading and trailing colon, for example:
2520
2521 ldr x0, [x1, #:rello:varsym]
2522 add x0, x1, #:rello:varsym */
2523
struct reloc_table_entry
{
  /* Modifier name as written in the source, without the colons.  */
  const char *name;
  /* Non-zero if the relocation is PC-relative.  */
  int pc_rel;
  /* Relocation to use when the modifier appears on an ADR.  */
  bfd_reloc_code_real_type adr_type;
  /* Relocation to use when the modifier appears on an ADRP.  */
  bfd_reloc_code_real_type adrp_type;
  /* Relocation to use on a MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type movw_type;
  /* Relocation to use on an ADD immediate.  */
  bfd_reloc_code_real_type add_type;
  /* Relocation to use on a load/store offset.  */
  bfd_reloc_code_real_type ldst_type;
  /* Relocation to use on an LDR (literal).  */
  bfd_reloc_code_real_type ld_literal_type;
};
2535
/* One row per relocation modifier; a zero in a column means the
   modifier is not allowed on that instruction class.  */
static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3063
3064 /* Given the address of a pointer pointing to the textual name of a
3065 relocation as may appear in assembler source, attempt to find its
3066 details in reloc_table. The pointer will be updated to the character
3067 after the trailing colon. On failure, NULL will be returned;
3068 otherwise return the reloc_table_entry. */
3069
3070 static struct reloc_table_entry *
3071 find_reloc_table_entry (char **str)
3072 {
3073 unsigned int i;
3074 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3075 {
3076 int length = strlen (reloc_table[i].name);
3077
3078 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3079 && (*str)[length] == ':')
3080 {
3081 *str += (length + 1);
3082 return &reloc_table[i];
3083 }
3084 }
3085
3086 return NULL;
3087 }
3088
3089 /* Mode argument to parse_shift and parser_shifter_operand. */
/* Which shift/extension syntaxes a caller of parse_shift accepts.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3103
3104 /* Parse a <shift> operator on an AArch64 data processing instruction.
3105 Return TRUE on success; otherwise return FALSE. */
static bfd_boolean
parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
{
  const struct aarch64_name_value_pair *shift_op;
  enum aarch64_modifier_kind kind;
  expressionS exp;
  int exp_has_prefix;
  char *s = *str;
  char *p = s;

  /* Scan the alphabetic operator name (e.g. "lsl", "uxtw", "mul").  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      set_syntax_error (_("shift expression expected"));
      return FALSE;
    }

  /* Look the name up in the shift-operator hash table.  */
  shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);

  if (shift_op == NULL)
    {
      set_syntax_error (_("shift operator expected"));
      return FALSE;
    }

  kind = aarch64_get_operand_modifier (shift_op);

  /* MSL is only ever valid in the LSL|MSL immediate form.  */
  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
    {
      set_syntax_error (_("invalid use of 'MSL'"));
      return FALSE;
    }

  /* MUL is only ever valid in the MUL and MUL VL forms.  */
  if (kind == AARCH64_MOD_MUL
      && mode != SHIFTED_MUL
      && mode != SHIFTED_MUL_VL)
    {
      set_syntax_error (_("invalid use of 'MUL'"));
      return FALSE;
    }

  /* Check the parsed operator against what the caller's MODE allows.  */
  switch (mode)
    {
    case SHIFTED_LOGIC_IMM:
      if (aarch64_extend_operator_p (kind))
	{
	  set_syntax_error (_("extending shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_ARITH_IMM:
      if (kind == AARCH64_MOD_ROR)
	{
	  set_syntax_error (_("'ROR' shift is not permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL:
      if (kind != AARCH64_MOD_LSL)
	{
	  set_syntax_error (_("only 'LSL' shift is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL:
      if (kind != AARCH64_MOD_MUL)
	{
	  set_syntax_error (_("only 'MUL' is permitted"));
	  return FALSE;
	}
      break;

    case SHIFTED_MUL_VL:
      /* "MUL VL" consists of two separate tokens.  Require the first
	 token to be "MUL" and look for a following "VL".  */
      if (kind == AARCH64_MOD_MUL)
	{
	  skip_whitespace (p);
	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
	    {
	      p += 2;
	      kind = AARCH64_MOD_MUL_VL;
	      break;
	    }
	}
      set_syntax_error (_("only 'MUL VL' is permitted"));
      return FALSE;

    case SHIFTED_REG_OFFSET:
      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
	{
	  set_fatal_syntax_error
	    (_("invalid shift for the register offset addressing mode"));
	  return FALSE;
	}
      break;

    case SHIFTED_LSL_MSL:
      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
	{
	  set_syntax_error (_("invalid shift operator"));
	  return FALSE;
	}
      break;

    default:
      abort ();
    }

  /* Whitespace can appear here if the next thing is a bare digit.  */
  skip_whitespace (p);

  /* Parse shift amount.  */
  exp_has_prefix = 0;
  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
    /* No amount expression: a closing ']' after a register-offset shift,
       or "MUL VL" (whose amount is implicit).  */
    exp.X_op = O_absent;
  else
    {
      if (is_immediate_prefix (*p))
	{
	  p++;
	  exp_has_prefix = 1;
	}
      my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
    }
  if (kind == AARCH64_MOD_MUL_VL)
    /* For consistency, give MUL VL the same shift amount as an implicit
       MUL #1.  */
    operand->shifter.amount = 1;
  else if (exp.X_op == O_absent)
    {
      /* Only extension operators may legitimately omit the amount,
	 and only when no '#' prefix was seen.  */
      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
	{
	  set_syntax_error (_("missing shift amount"));
	  return FALSE;
	}
      operand->shifter.amount = 0;
    }
  else if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant shift amount required"));
      return FALSE;
    }
  /* For parsing purposes, MUL #n has no inherent range.  The range
     depends on the operand and will be checked by operand-specific
     routines.  */
  else if (kind != AARCH64_MOD_MUL
	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
    {
      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
      return FALSE;
    }
  else
    {
      operand->shifter.amount = exp.X_add_number;
      operand->shifter.amount_present = 1;
    }

  operand->shifter.operator_present = 1;
  operand->shifter.kind = kind;

  *str = p;
  return TRUE;
}
3276
3277 /* Parse a <shifter_operand> for a data processing instruction:
3278
3279 #<immediate>
3280 #<immediate>, LSL #imm
3281
3282 Validation of immediate operands is deferred to md_apply_fix.
3283
3284 Return TRUE on success; otherwise return FALSE. */
3285
3286 static bfd_boolean
3287 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3288 enum parse_shift_mode mode)
3289 {
3290 char *p;
3291
3292 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3293 return FALSE;
3294
3295 p = *str;
3296
3297 /* Accept an immediate expression. */
3298 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3299 return FALSE;
3300
3301 /* Accept optional LSL for arithmetic immediate values. */
3302 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3303 if (! parse_shift (&p, operand, SHIFTED_LSL))
3304 return FALSE;
3305
3306 /* Not accept any shifter for logical immediate values. */
3307 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3308 && parse_shift (&p, operand, mode))
3309 {
3310 set_syntax_error (_("unexpected shift operator"));
3311 return FALSE;
3312 }
3313
3314 *str = p;
3315 return TRUE;
3316 }
3317
3318 /* Parse a <shifter_operand> for a data processing instruction:
3319
3320 <Rm>
3321 <Rm>, <shift>
3322 #<immediate>
3323 #<immediate>, LSL #imm
3324
3325 where <shift> is handled by parse_shift above, and the last two
3326 cases are handled by the function above.
3327
3328 Validation of immediate operands is deferred to md_apply_fix.
3329
3330 Return TRUE on success; otherwise return FALSE. */
3331
3332 static bfd_boolean
3333 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3334 enum parse_shift_mode mode)
3335 {
3336 const reg_entry *reg;
3337 aarch64_opnd_qualifier_t qualifier;
3338 enum aarch64_operand_class opd_class
3339 = aarch64_get_operand_class (operand->type);
3340
3341 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3342 if (reg)
3343 {
3344 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3345 {
3346 set_syntax_error (_("unexpected register in the immediate operand"));
3347 return FALSE;
3348 }
3349
3350 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3351 {
3352 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3353 return FALSE;
3354 }
3355
3356 operand->reg.regno = reg->number;
3357 operand->qualifier = qualifier;
3358
3359 /* Accept optional shift operation on register. */
3360 if (! skip_past_comma (str))
3361 return TRUE;
3362
3363 if (! parse_shift (str, operand, mode))
3364 return FALSE;
3365
3366 return TRUE;
3367 }
3368 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3369 {
3370 set_syntax_error
3371 (_("integer register expected in the extended/shifted operand "
3372 "register"));
3373 return FALSE;
3374 }
3375
3376 /* We have a shifted immediate variable. */
3377 return parse_shifter_operand_imm (str, operand, mode);
3378 }
3379
3380 /* Return TRUE on success; return FALSE otherwise. */
3381
static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip the '#' (if any) and the leading ':' of the modifier.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      /* The modifier must have an ADD-immediate variant to be usable
	 on this instruction.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3440
3441 /* Parse all forms of an address expression. Information is written
3442 to *OPERAND and/or inst.reloc.
3443
3444 The A64 instruction set has the following addressing modes:
3445
3446 Offset
3447 [base] // in SIMD ld/st structure
3448 [base{,#0}] // in ld/st exclusive
3449 [base{,#imm}]
3450 [base,Xm{,LSL #imm}]
3451 [base,Xm,SXTX {#imm}]
3452 [base,Wm,(S|U)XTW {#imm}]
3453 Pre-indexed
3454 [base]! // in ldraa/ldrab exclusive
3455 [base,#imm]!
3456 Post-indexed
3457 [base],#imm
3458 [base],Xm // in SIMD ld/st structure
3459 PC-relative (literal)
3460 label
3461 SVE:
3462 [base,#imm,MUL VL]
3463 [base,Zm.D{,LSL #imm}]
3464 [base,Zm.S,(S|U)XTW {#imm}]
3465 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3466 [Zn.S,#imm]
3467 [Zn.D,#imm]
3468 [Zn.S{, Xm}]
3469 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3470 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3471 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3472
3473 (As a convenience, the notation "=immediate" is permitted in conjunction
3474 with the pc-relative literal load instructions to automatically place an
3475 immediate value or symbolic address in a nearby literal pool and generate
3476 a hidden label which references it.)
3477
3478 Upon a successful parsing, the address structure in *OPERAND will be
3479 filled in the following way:
3480
3481 .base_regno = <base>
3482 .offset.is_reg // 1 if the offset is a register
3483 .offset.imm = <imm>
3484 .offset.regno = <Rm>
3485
3486 For different addressing modes defined in the A64 ISA:
3487
3488 Offset
3489 .pcrel=0; .preind=1; .postind=0; .writeback=0
3490 Pre-indexed
3491 .pcrel=0; .preind=1; .postind=0; .writeback=1
3492 Post-indexed
3493 .pcrel=0; .preind=0; .postind=1; .writeback=1
3494 PC-relative (literal)
3495 .pcrel=1; .preind=1; .postind=0; .writeback=0
3496
3497 The shift/extension information, if any, will be stored in .shifter.
3498 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3499 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3500 corresponding register.
3501
3502 BASE_TYPE says which types of base register should be accepted and
3503 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3504 is the type of shifter that is allowed for immediate offsets,
3505 or SHIFTED_NONE if none.
3506
3507 In all other respects, it is the caller's responsibility to check
3508 for addressing modes not supported by the instruction, and to set
3509 inst.reloc.type. */
3510
static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand,
                    aarch64_opnd_qualifier_t *base_qualifier,
                    aarch64_opnd_qualifier_t *offset_qualifier,
                    aarch64_reg_type base_type, aarch64_reg_type offset_type,
                    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  /* Expression results and relocation choices are recorded in the
     file-scope instruction state rather than returned.  */
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': this is the PC-relative form.  */
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
        {
          bfd_reloc_code_real_type ty;
          struct reloc_table_entry *entry;

          /* Try to parse a relocation modifier.  Anything else is
             an error.  */
          entry = find_reloc_table_entry (&p);
          if (! entry)
            {
              set_syntax_error (_("unknown relocation modifier"));
              return FALSE;
            }

          /* Pick the relocation variant appropriate for this operand:
             ADR uses the adr_type slot, everything else the literal-load
             slot.  */
          switch (operand->type)
            {
            case AARCH64_OPND_ADDR_PCREL21:
              /* adr */
              ty = entry->adr_type;
              break;

            default:
              ty = entry->ld_literal_type;
              break;
            }

          if (ty == 0)
            {
              set_syntax_error
                (_("this relocation modifier is not allowed on this "
                   "instruction"));
              return FALSE;
            }

          /* #:<reloc_op>:  */
          if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
            {
              set_syntax_error (_("invalid relocation expression"));
              return FALSE;
            }

          /* #:<reloc_op>:<expr>  */
          /* Record the relocation type.  */
          inst.reloc.type = ty;
          inst.reloc.pc_rel = entry->pc_rel;
        }
      else
        {

          if (skip_past_char (&p, '='))
            /* =immediate; need to generate the literal in the literal pool. */
            inst.gen_lit_pool = 1;

          if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
            {
              set_syntax_error (_("invalid address"));
              return FALSE;
            }
        }

      *str = p;
      return TRUE;
    }

  /* [ */

  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      /* A second register means a register offset; otherwise expect an
         immediate (possibly with a relocation modifier).  */
      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
        {
          if (!aarch64_check_reg_type (reg, offset_type))
            {
              set_syntax_error (_(get_reg_expected_msg (offset_type)));
              return FALSE;
            }

          /* [Xn,Rm */
          operand->addr.offset.regno = reg->number;
          operand->addr.offset.is_reg = 1;
          /* Shifted index.  */
          if (skip_past_comma (&p))
            {
              /* [Xn,Rm, */
              if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
                /* Use the diagnostics set in parse_shift, so not set new
                   error message here.  */
                return FALSE;
            }
          /* We only accept:
             [base,Xm]  # For vector plus scalar SVE2 indexing.
             [base,Xm{,LSL #imm}]
             [base,Xm,SXTX {#imm}]
             [base,Wm,(S|U)XTW {#imm}]  */
          if (operand->shifter.kind == AARCH64_MOD_NONE
              || operand->shifter.kind == AARCH64_MOD_LSL
              || operand->shifter.kind == AARCH64_MOD_SXTX)
            {
              if (*offset_qualifier == AARCH64_OPND_QLF_W)
                {
                  set_syntax_error (_("invalid use of 32-bit register offset"));
                  return FALSE;
                }
              /* Base and offset element sizes must agree, except for the
                 SVE2 vector-plus-scalar [Zn.S, Xm] form.  */
              if (aarch64_get_qualifier_esize (*base_qualifier)
                  != aarch64_get_qualifier_esize (*offset_qualifier)
                  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
                      || *base_qualifier != AARCH64_OPND_QLF_S_S
                      || *offset_qualifier != AARCH64_OPND_QLF_X))
                {
                  set_syntax_error (_("offset has different size from base"));
                  return FALSE;
                }
            }
          else if (*offset_qualifier == AARCH64_OPND_QLF_X)
            {
              set_syntax_error (_("invalid use of 64-bit register offset"));
              return FALSE;
            }
        }
      else
        {
          /* [Xn,#:<reloc_op>:<symbol>  */
          skip_past_char (&p, '#');
          if (skip_past_char (&p, ':'))
            {
              struct reloc_table_entry *entry;

              /* Try to parse a relocation modifier.  Anything else is
                 an error.  */
              if (!(entry = find_reloc_table_entry (&p)))
                {
                  set_syntax_error (_("unknown relocation modifier"));
                  return FALSE;
                }

              if (entry->ldst_type == 0)
                {
                  set_syntax_error
                    (_("this relocation modifier is not allowed on this "
                       "instruction"));
                  return FALSE;
                }

              /* [Xn,#:<reloc_op>:  */
              /* We now have the group relocation table entry corresponding to
                 the name in the assembler source.  Next, we parse the
                 expression.  */
              if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
                {
                  set_syntax_error (_("invalid relocation expression"));
                  return FALSE;
                }

              /* [Xn,#:<reloc_op>:<expr>  */
              /* Record the load/store relocation type.  */
              inst.reloc.type = entry->ldst_type;
              inst.reloc.pc_rel = entry->pc_rel;
            }
          else
            {
              if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
                {
                  set_syntax_error (_("invalid expression in the address"));
                  return FALSE;
                }
              /* [Xn,<expr>  */
              if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
                /* [Xn,<expr>,<shifter>  */
                if (! parse_shift (&p, operand, imm_shift_mode))
                  return FALSE;
            }
        }
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  /* After ']': '!' marks pre-indexed writeback, ',' starts a
     post-indexed offset.  */
  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
        {
          set_syntax_error (_("register offset not allowed in pre-indexed "
                              "addressing mode"));
          return FALSE;
        }
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
        {
          set_syntax_error (_("cannot combine pre- and post-indexing"));
          return FALSE;
        }

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
        {
          /* [Xn],Xm */
          if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
            {
              set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
              return FALSE;
            }

          operand->addr.offset.regno = reg->number;
          operand->addr.offset.is_reg = 1;
        }
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
        {
          /* [Xn],#expr */
          set_syntax_error (_("invalid expression in the address"));
          return FALSE;
        }
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
        {
          if (operand->type == AARCH64_OPND_ADDR_SIMM10)
            {
              /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
              operand->addr.offset.is_reg = 0;
              operand->addr.offset.imm = 0;
              operand->addr.preind = 1;
            }
          else
            {
              /* Reject [Rn]!   */
              set_syntax_error (_("missing offset in the pre-indexed address"));
              return FALSE;
            }
        }
      else
        {
          operand->addr.preind = 1;
          if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
            {
              operand->addr.offset.is_reg = 1;
              operand->addr.offset.regno = REG_ZR;
              *offset_qualifier = AARCH64_OPND_QLF_X;
            }
          else
            {
              /* Treat the bare [Rn] as an explicit zero immediate.  */
              inst.reloc.exp.X_op = O_constant;
              inst.reloc.exp.X_add_number = 0;
            }
        }
    }

  *str = p;
  return TRUE;
}
3811
3812 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3813 on success. */
3814 static bfd_boolean
3815 parse_address (char **str, aarch64_opnd_info *operand)
3816 {
3817 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3818 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3819 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3820 }
3821
3822 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3823 The arguments have the same meaning as for parse_address_main.
3824 Return TRUE on success. */
3825 static bfd_boolean
3826 parse_sve_address (char **str, aarch64_opnd_info *operand,
3827 aarch64_opnd_qualifier_t *base_qualifier,
3828 aarch64_opnd_qualifier_t *offset_qualifier)
3829 {
3830 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3831 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3832 SHIFTED_MUL_VL);
3833 }
3834
3835 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3836 Return TRUE on success; otherwise return FALSE. */
3837 static bfd_boolean
3838 parse_half (char **str, int *internal_fixup_p)
3839 {
3840 char *p = *str;
3841
3842 skip_past_char (&p, '#');
3843
3844 gas_assert (internal_fixup_p);
3845 *internal_fixup_p = 0;
3846
3847 if (*p == ':')
3848 {
3849 struct reloc_table_entry *entry;
3850
3851 /* Try to parse a relocation. Anything else is an error. */
3852 ++p;
3853 if (!(entry = find_reloc_table_entry (&p)))
3854 {
3855 set_syntax_error (_("unknown relocation modifier"));
3856 return FALSE;
3857 }
3858
3859 if (entry->movw_type == 0)
3860 {
3861 set_syntax_error
3862 (_("this relocation modifier is not allowed on this instruction"));
3863 return FALSE;
3864 }
3865
3866 inst.reloc.type = entry->movw_type;
3867 }
3868 else
3869 *internal_fixup_p = 1;
3870
3871 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3872 return FALSE;
3873
3874 *str = p;
3875 return TRUE;
3876 }
3877
3878 /* Parse an operand for an ADRP instruction:
3879 ADRP <Xd>, <label>
3880 Return TRUE on success; otherwise return FALSE. */
3881
3882 static bfd_boolean
3883 parse_adrp (char **str)
3884 {
3885 char *p;
3886
3887 p = *str;
3888 if (*p == ':')
3889 {
3890 struct reloc_table_entry *entry;
3891
3892 /* Try to parse a relocation. Anything else is an error. */
3893 ++p;
3894 if (!(entry = find_reloc_table_entry (&p)))
3895 {
3896 set_syntax_error (_("unknown relocation modifier"));
3897 return FALSE;
3898 }
3899
3900 if (entry->adrp_type == 0)
3901 {
3902 set_syntax_error
3903 (_("this relocation modifier is not allowed on this instruction"));
3904 return FALSE;
3905 }
3906
3907 inst.reloc.type = entry->adrp_type;
3908 }
3909 else
3910 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3911
3912 inst.reloc.pc_rel = 1;
3913
3914 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3915 return FALSE;
3916
3917 *str = p;
3918 return TRUE;
3919 }
3920
3921 /* Miscellaneous. */
3922
3923 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3924 of SIZE tokens in which index I gives the token for field value I,
3925 or is null if field value I is invalid. REG_TYPE says which register
3926 names should be treated as registers rather than as symbolic immediates.
3927
3928 Return true on success, moving *STR past the operand and storing the
3929 field value in *VAL. */
3930
3931 static int
3932 parse_enum_string (char **str, int64_t *val, const char *const *array,
3933 size_t size, aarch64_reg_type reg_type)
3934 {
3935 expressionS exp;
3936 char *p, *q;
3937 size_t i;
3938
3939 /* Match C-like tokens. */
3940 p = q = *str;
3941 while (ISALNUM (*q))
3942 q++;
3943
3944 for (i = 0; i < size; ++i)
3945 if (array[i]
3946 && strncasecmp (array[i], p, q - p) == 0
3947 && array[i][q - p] == 0)
3948 {
3949 *val = i;
3950 *str = q;
3951 return TRUE;
3952 }
3953
3954 if (!parse_immediate_expression (&p, &exp, reg_type))
3955 return FALSE;
3956
3957 if (exp.X_op == O_constant
3958 && (uint64_t) exp.X_add_number < size)
3959 {
3960 *val = exp.X_add_number;
3961 *str = p;
3962 return TRUE;
3963 }
3964
3965 /* Use the default error for this operand. */
3966 return FALSE;
3967 }
3968
3969 /* Parse an option for a preload instruction. Returns the encoding for the
3970 option, or PARSE_FAIL. */
3971
3972 static int
3973 parse_pldop (char **str)
3974 {
3975 char *p, *q;
3976 const struct aarch64_name_value_pair *o;
3977
3978 p = q = *str;
3979 while (ISALNUM (*q))
3980 q++;
3981
3982 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3983 if (!o)
3984 return PARSE_FAIL;
3985
3986 *str = q;
3987 return o->value;
3988 }
3989
3990 /* Parse an option for a barrier instruction. Returns the encoding for the
3991 option, or PARSE_FAIL. */
3992
3993 static int
3994 parse_barrier (char **str)
3995 {
3996 char *p, *q;
3997 const asm_barrier_opt *o;
3998
3999 p = q = *str;
4000 while (ISALPHA (*q))
4001 q++;
4002
4003 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4004 if (!o)
4005 return PARSE_FAIL;
4006
4007 *str = q;
4008 return o->value;
4009 }
4010
4011 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4012 return 0 if successful. Otherwise return PARSE_FAIL. */
4013
4014 static int
4015 parse_barrier_psb (char **str,
4016 const struct aarch64_name_value_pair ** hint_opt)
4017 {
4018 char *p, *q;
4019 const struct aarch64_name_value_pair *o;
4020
4021 p = q = *str;
4022 while (ISALPHA (*q))
4023 q++;
4024
4025 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4026 if (!o)
4027 {
4028 set_fatal_syntax_error
4029 ( _("unknown or missing option to PSB/TSB"));
4030 return PARSE_FAIL;
4031 }
4032
4033 if (o->value != 0x11)
4034 {
4035 /* PSB only accepts option name 'CSYNC'. */
4036 set_syntax_error
4037 (_("the specified option is not accepted for PSB/TSB"));
4038 return PARSE_FAIL;
4039 }
4040
4041 *str = q;
4042 *hint_opt = o;
4043 return 0;
4044 }
4045
4046 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4047 return 0 if successful. Otherwise return PARSE_FAIL. */
4048
4049 static int
4050 parse_bti_operand (char **str,
4051 const struct aarch64_name_value_pair ** hint_opt)
4052 {
4053 char *p, *q;
4054 const struct aarch64_name_value_pair *o;
4055
4056 p = q = *str;
4057 while (ISALPHA (*q))
4058 q++;
4059
4060 o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4061 if (!o)
4062 {
4063 set_fatal_syntax_error
4064 ( _("unknown option to BTI"));
4065 return PARSE_FAIL;
4066 }
4067
4068 switch (o->value)
4069 {
4070 /* Valid BTI operands. */
4071 case HINT_OPD_C:
4072 case HINT_OPD_J:
4073 case HINT_OPD_JC:
4074 break;
4075
4076 default:
4077 set_syntax_error
4078 (_("unknown option to BTI"));
4079 return PARSE_FAIL;
4080 }
4081
4082 *str = q;
4083 *hint_opt = o;
4084 return 0;
4085 }
4086
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-null, *FLAGS receives the matched register's flag bits
   (zero for an implementation-defined S<...> name).  */

static int
parse_sys_reg (char **str, struct hash_control *sys_regs,
               int imple_defined_p, int pstatefield_p,
               uint32_t* flags)
{
  char *p, *q;
  char buf[32];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name at *STR into BUF, without
     moving *STR yet.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + 31)
      *p++ = TOLOWER (*q);
  *p = '\0';
  /* Assert that BUF be large enough.  */
  gas_assert (p - buf == q - *str);

  o = hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
        return PARSE_FAIL;
      else
        {
          /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
          unsigned int op0, op1, cn, cm, op2;

          if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
              != 5)
            return PARSE_FAIL;
          /* Reject field values outside their encodable bit widths
             (op0: 2 bits, op1/op2: 3 bits, CRn/CRm: 4 bits).  */
          if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
            return PARSE_FAIL;
          /* Pack the fields as op0:op1:CRn:CRm:op2, most to least
             significant.  */
          value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
          if (flags)
            *flags = 0;
        }
    }
  else
    {
      /* Known name: diagnose (but still accept) names that the selected
         processor does not support or that are deprecated.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
        as_bad (_("selected processor does not support PSTATE field "
                  "name '%s'"), buf);
      if (!pstatefield_p
          && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->value,
                                               o->flags, o->features))
        as_bad (_("selected processor does not support system register "
                  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
        as_warn (_("system register name '%s' is deprecated and may be "
                   "removed in a future release"), buf);
      value = o->value;
      if (flags)
        *flags = o->flags;
    }

  *str = q;
  return value;
}
4156
4157 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4158 for the option, or NULL. */
4159
4160 static const aarch64_sys_ins_reg *
4161 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
4162 {
4163 char *p, *q;
4164 char buf[32];
4165 const aarch64_sys_ins_reg *o;
4166
4167 p = buf;
4168 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4169 if (p < buf + 31)
4170 *p++ = TOLOWER (*q);
4171 *p = '\0';
4172
4173 o = hash_find (sys_ins_regs, buf);
4174 if (!o)
4175 return NULL;
4176
4177 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o->value, o->flags, 0))
4178 as_bad (_("selected processor does not support system register "
4179 "name '%s'"), buf);
4180 if (aarch64_sys_reg_deprecated_p (o->flags))
4181 as_warn (_("system register name '%s' is deprecated and may be "
4182 "removed in a future release"), buf);
4183
4184 *str = q;
4185 return o;
4186 }
4187 \f
/* Operand-parsing helper macros.  Each consumes text via the local
   variable STR and jumps to the local label "failure" on error; they
   rely on locals (str, val, reg, rtype, qualifier, info, imm_reg_type)
   declared in the function that expands them.  */

/* Require the literal character CHR next in the input.  */
#define po_char_or_fail(chr) do { \
    if (! skip_past_char (&str, chr)) \
      goto failure; \
} while (0)

/* Parse a register of type REGTYPE into VAL/RTYPE.  */
#define po_reg_or_fail(regtype) do { \
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
    if (val == PARSE_FAIL) \
      { \
        set_default_error (); \
        goto failure; \
      } \
  } while (0)

/* Parse a 32/64-bit integer register, storing number and qualifier
   into the current operand INFO.  */
#define po_int_reg_or_fail(reg_type) do { \
    reg = aarch64_reg_parse_32_64 (&str, &qualifier); \
    if (!reg || !aarch64_check_reg_type (reg, reg_type)) \
      { \
        set_default_error (); \
        goto failure; \
      } \
    info->reg.regno = reg->number; \
    info->qualifier = qualifier; \
  } while (0)

/* Parse a constant immediate into VAL with no range check.  */
#define po_imm_nc_or_fail() do { \
    if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
      goto failure; \
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX.  */
#define po_imm_or_fail(min, max) do { \
    if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
      goto failure; \
    if (val < min || val > max) \
      { \
        set_fatal_syntax_error (_("immediate value out of range "\
                                  #min " to "#max)); \
        goto failure; \
      } \
  } while (0)

/* Parse a symbolic name (or constant) from ARRAY into VAL.  */
#define po_enum_or_fail(array) do { \
    if (!parse_enum_string (&str, &val, array, \
                            ARRAY_SIZE (array), imm_reg_type)) \
      goto failure; \
  } while (0)

/* Evaluate EXPR; fail the match if it yields zero/false.  */
#define po_misc_or_fail(expr) do { \
    if (!expr) \
      goto failure; \
  } while (0)
4239 \f
/* Encode the 12-bit imm field of Add/sub immediate.  */
static inline uint32_t
encode_addsub_imm (uint32_t value)
{
  /* imm12 occupies bits [21:10] of the instruction word.  */
  const unsigned int imm12_shift = 10;
  return value << imm12_shift;
}
4246
/* Encode the shift amount field of Add/sub immediate.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t amount)
{
  /* The shift field sits at bit 22 of the instruction word.  */
  const unsigned int shift_field = 22;
  return amount << shift_field;
}
4253
4254
/* Encode the imm field of Adr instruction.  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = (imm & 0x3) << 29;            /* [1:0]  -> [30:29].  */
  uint32_t immhi = (imm & (0x7ffff << 2)) << 3;  /* [20:2] -> [23:5].  */
  return immlo | immhi;
}
4262
/* Encode the immediate field of Move wide immediate.  */
static inline uint32_t
encode_movw_imm (uint32_t value)
{
  /* imm16 occupies bits [20:5] of the instruction word.  */
  const unsigned int imm16_shift = 5;
  return value << imm16_shift;
}
4269
/* Encode the 26-bit offset of unconditional branch.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  /* imm26 occupies bits [25:0]; discard anything above.  */
  const uint32_t imm26_mask = (1u << 26) - 1;
  return ofs & imm26_mask;
}
4276
/* Encode the 19-bit offset of conditional branch and compare & branch.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  /* imm19 occupies bits [23:5]; keep the low 19 bits and shift.  */
  const uint32_t imm19_mask = (1u << 19) - 1;
  return (ofs & imm19_mask) << 5;
}
4283
/* Encode the 19-bit offset of ld literal.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  /* Same field layout as the conditional branch: imm19 in [23:5].  */
  const uint32_t imm19_mask = (1u << 19) - 1;
  return (ofs & imm19_mask) << 5;
}
4290
/* Encode the 14-bit offset of test & branch.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  /* imm14 occupies bits [18:5]; keep the low 14 bits and shift.  */
  const uint32_t imm14_mask = (1u << 14) - 1;
  return (ofs & imm14_mask) << 5;
}
4297
/* Encode the 16-bit imm field of svc/hvc/smc.  */
static inline uint32_t
encode_svc_imm (uint32_t value)
{
  /* imm16 occupies bits [20:5] of the instruction word.  */
  const unsigned int imm16_shift = 5;
  return value << imm16_shift;
}
4304
/* Reencode add(s) to sub(s), or sub(s) to add(s).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  /* Bit 30 selects between the ADD(S) and SUB(S) encodings; flip it.  */
  const uint32_t op_bit = 1u << 30;
  return opcode ^ op_bit;
}
4311
/* Reencode a MOVN/MOVZ-class opcode as MOVZ by setting the opc bit.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode | movz_bit;
}
4317
/* Reencode a MOVN/MOVZ-class opcode as MOVN by clearing the opc bit.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;
  return opcode & ~movz_bit;
}
4323
4324 /* Overall per-instruction processing. */
4325
4326 /* We need to be able to fix up arbitrary expressions in some statements.
4327 This is so that we can handle symbols that are an arbitrary distance from
4328 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4329 which returns part of an address in a form which will be valid for
4330 a data instruction. We do this by pushing the expression into a symbol
4331 in the expr_section, and creating a fix for that. */
4332
4333 static fixS *
4334 fix_new_aarch64 (fragS * frag,
4335 int where,
4336 short int size,
4337 expressionS * exp,
4338 int pc_rel,
4339 int reloc)
4340 {
4341 fixS *new_fix;
4342
4343 switch (exp->X_op)
4344 {
4345 case O_constant:
4346 case O_symbol:
4347 case O_add:
4348 case O_subtract:
4349 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4350 break;
4351
4352 default:
4353 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4354 pc_rel, reloc);
4355 break;
4356 }
4357 return new_fix;
4358 }
4359 \f
4360 /* Diagnostics on operands errors. */
4361
/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.
   Non-zero means verbose diagnostics are enabled.  */
static int verbose_error_p = 1;
4365
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Human-readable names
   indexed by enum aarch64_operand_error_kind value; the order here must
   match the enum declaration.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4381
4382 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4383
4384 When multiple errors of different kinds are found in the same assembly
4385 line, only the error of the highest severity will be picked up for
4386 issuing the diagnostics. */
4387
4388 static inline bfd_boolean
4389 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
4390 enum aarch64_operand_error_kind rhs)
4391 {
4392 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
4393 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
4394 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
4395 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
4396 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
4397 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
4398 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
4399 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
4400 return lhs > rhs;
4401 }
4402
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *end;

  /* Copy a bounded prefix of the line, assuming the full mnemonic
     fits within it.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* The mnemonic ends at the first character that cannot be part of a
     name (white space, '.', or end of string).  */
  end = mnemonic;
  while (is_part_of_name (*end))
    ++end;
  *end = '\0';

  /* If the name filled the whole buffer it was truncated; make that
     visible by ending it with "...".  */
  if (end - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4431
4432 static void
4433 reset_aarch64_instruction (aarch64_instruction *instruction)
4434 {
4435 memset (instruction, '\0', sizeof (aarch64_instruction));
4436 instruction->reloc.type = BFD_RELOC_UNUSED;
4437 }
4438
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error, tied to a single opcode template.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;         /* Template the error applies to.  */
  aarch64_operand_error detail;         /* Kind, operand index, message.  */
  struct operand_error_record *next;    /* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of operand_error_record with head and tail
   pointers (tail enables O(1) splicing onto the free list).  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
4458
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled across assembly lines to avoid repeated
   allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4470
4471 /* Initialize the data structure that stores the operand mismatch
4472 information on assembling one line of the assembly code. */
4473 static void
4474 init_operand_error_report (void)
4475 {
4476 if (operand_error_report.head != NULL)
4477 {
4478 gas_assert (operand_error_report.tail != NULL);
4479 operand_error_report.tail->next = free_opnd_error_record_nodes;
4480 free_opnd_error_record_nodes = operand_error_report.head;
4481 operand_error_report.head = NULL;
4482 operand_error_report.tail = NULL;
4483 return;
4484 }
4485 gas_assert (operand_error_report.tail == NULL);
4486 }
4487
4488 /* Return TRUE if some operand error has been recorded during the
4489 parsing of the current assembly line using the opcode *OPCODE;
4490 otherwise return FALSE. */
4491 static inline bfd_boolean
4492 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4493 {
4494 operand_error_record *record = operand_error_report.head;
4495 return record && record->opcode == opcode;
4496 }
4497
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* RECORD starts as the list head; if the head is not for this opcode a
     fresh node is allocated below and RECORD is repointed at it.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
        {
          record = XNEW (operand_error_record);
        }
      else
        {
          /* Reuse a node from the free list.  */
          record = free_opnd_error_record_nodes;
          free_opnd_error_record_nodes = record->next;
        }
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
        operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
           && record->detail.index <= new_record->detail.index
           && operand_error_higher_severity_p (record->detail.kind,
                                               new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
         single opcode, only record the error of the leftmost operand and
         only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
                   " the existing error %s on operand %d",
                   operand_mismatch_kind_names[new_record->detail.kind],
                   new_record->detail.index,
                   operand_mismatch_kind_names[record->detail.kind],
                   record->detail.index);
      return;
    }

  /* RECORD now points at this opcode's (new or pre-existing) node;
     overwrite its details with the incoming error.  */
  record->detail = new_record->detail;
}
4549
4550 static inline void
4551 record_operand_error_info (const aarch64_opcode *opcode,
4552 aarch64_operand_error *error_info)
4553 {
4554 operand_error_record record;
4555 record.opcode = opcode;
4556 record.detail = *error_info;
4557 add_operand_error_record (&record);
4558 }
4559
4560 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4561 error message *ERROR, for operand IDX (count from 0). */
4562
4563 static void
4564 record_operand_error (const aarch64_opcode *opcode, int idx,
4565 enum aarch64_operand_error_kind kind,
4566 const char* error)
4567 {
4568 aarch64_operand_error info;
4569 memset(&info, 0, sizeof (info));
4570 info.index = idx;
4571 info.kind = kind;
4572 info.error = error;
4573 info.non_fatal = FALSE;
4574 record_operand_error_info (opcode, &info);
4575 }
4576
4577 static void
4578 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4579 enum aarch64_operand_error_kind kind,
4580 const char* error, const int *extra_data)
4581 {
4582 aarch64_operand_error info;
4583 info.index = idx;
4584 info.kind = kind;
4585 info.error = error;
4586 info.data[0] = extra_data[0];
4587 info.data[1] = extra_data[1];
4588 info.data[2] = extra_data[2];
4589 info.non_fatal = FALSE;
4590 record_operand_error_info (opcode, &info);
4591 }
4592
4593 static void
4594 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4595 const char* error, int lower_bound,
4596 int upper_bound)
4597 {
4598 int data[3] = {lower_bound, upper_bound, 0};
4599 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4600 error, data);
4601 }
4602
4603 /* Remove the operand error record for *OPCODE. */
4604 static void ATTRIBUTE_UNUSED
4605 remove_operand_error_record (const aarch64_opcode *opcode)
4606 {
4607 if (opcode_has_operand_error_p (opcode))
4608 {
4609 operand_error_record* record = operand_error_report.head;
4610 gas_assert (record != NULL && operand_error_report.tail != NULL);
4611 operand_error_report.head = record->next;
4612 record->next = free_opnd_error_record_nodes;
4613 free_opnd_error_record_nodes = record;
4614 if (operand_error_report.head == NULL)
4615 {
4616 gas_assert (operand_error_report.tail == record);
4617 operand_error_report.tail = NULL;
4618 }
4619 }
4620 }
4621
4622 /* Given the instruction in *INSTR, return the index of the best matched
4623 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4624
4625 Return -1 if there is no qualifier sequence; return the first match
4626 if there is multiple matches found. */
4627
4628 static int
4629 find_best_match (const aarch64_inst *instr,
4630 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4631 {
4632 int i, num_opnds, max_num_matched, idx;
4633
4634 num_opnds = aarch64_num_of_operands (instr->opcode);
4635 if (num_opnds == 0)
4636 {
4637 DEBUG_TRACE ("no operand");
4638 return -1;
4639 }
4640
4641 max_num_matched = 0;
4642 idx = 0;
4643
4644 /* For each pattern. */
4645 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4646 {
4647 int j, num_matched;
4648 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4649
4650 /* Most opcodes has much fewer patterns in the list. */
4651 if (empty_qualifier_sequence_p (qualifiers))
4652 {
4653 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4654 break;
4655 }
4656
4657 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4658 if (*qualifiers == instr->operands[j].qualifier)
4659 ++num_matched;
4660
4661 if (num_matched > max_num_matched)
4662 {
4663 max_num_matched = num_matched;
4664 idx = i;
4665 }
4666 }
4667
4668 DEBUG_TRACE ("return with %d", idx);
4669 return idx;
4670 }
4671
4672 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4673 corresponding operands in *INSTR. */
4674
4675 static inline void
4676 assign_qualifier_sequence (aarch64_inst *instr,
4677 const aarch64_opnd_qualifier_t *qualifiers)
4678 {
4679 int i = 0;
4680 int num_opnds = aarch64_num_of_operands (instr->opcode);
4681 gas_assert (num_opnds);
4682 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4683 instr->operands[i].qualifier = *qualifiers;
4684 }
4685
4686 /* Print operands for the diagnosis purpose. */
4687
4688 static void
4689 print_operands (char *buf, const aarch64_opcode *opcode,
4690 const aarch64_opnd_info *opnds)
4691 {
4692 int i;
4693
4694 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4695 {
4696 char str[128];
4697
4698 /* We regard the opcode operand info more, however we also look into
4699 the inst->operands to support the disassembling of the optional
4700 operand.
4701 The two operand code should be the same in all cases, apart from
4702 when the operand can be optional. */
4703 if (opcode->operands[i] == AARCH64_OPND_NIL
4704 || opnds[i].type == AARCH64_OPND_NIL)
4705 break;
4706
4707 /* Generate the operand string in STR. */
4708 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4709 NULL);
4710
4711 /* Delimiter. */
4712 if (str[0] != '\0')
4713 strcat (buf, i == 0 ? " " : ", ");
4714
4715 /* Append the operand string. */
4716 strcat (buf, str);
4717 }
4718 }
4719
/* Emit an informational (non-error) message to stderr, prefixed with the
   current input file name and line number when known.  */

static void
output_info (const char *format, ...)
{
  unsigned int lineno;
  const char *filename;
  va_list args;

  filename = as_where (&lineno);
  if (filename != NULL)
    {
      if (lineno != 0)
	fprintf (stderr, "%s:%u: ", filename, lineno);
      else
	fprintf (stderr, "%s: ", filename);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4743
4744 /* Output one operand error record. */
4745
static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX is -1 when the error is not tied to a specific operand; in that
     case there is no operand code to look up.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Non-fatal records are emitted as warnings, everything else as
     errors.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      /* A NIL record should never reach the output stage.  */
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate. Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report. Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail here -- this record exists
	     because the variant is invalid.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_(" did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_(" %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_(" other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_(" %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] carry the inclusive range bounds (see
	 record_operand_out_of_range_error).  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected number of registers.  */
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] holds the required alignment.  */
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4920
4921 /* Process and output the error message about the operand mismatching.
4922
4923 When this function is called, the operand error information had
4924 been collected for an assembly line and there will be multiple
4925 errors in the case of multiple instruction templates; output the
4926 error message that most closely describes the problem.
4927
4928 The errors to be printed can be filtered on printing all errors
4929 or only non-fatal errors. This distinction has to be made because
4930 the error buffer may already be filled with fatal errors we don't want to
4931 print due to the different instruction templates. */
4932
static void
output_operand_error_report (char *str, bfd_boolean non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      /* When printing only non-fatal diagnostics, fatal records must not
	 influence the chosen severity.  */
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5018 \f
/* Write an AARCH64 instruction to buf - always little-endian, regardless
   of host byte order.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  for (i = 0; i < 4; i++)
    p[i] = (unsigned char) (insn >> (8 * i));
}
5029
/* Read a 32-bit AArch64 instruction from BUF, which holds it in
   little-endian byte order.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;

  return ((uint32_t) p[0]
	  | ((uint32_t) p[1] << 8)
	  | ((uint32_t) p[2] << 16)
	  | ((uint32_t) p[3] << 24));
}
5039
/* Emit the assembled instruction in the global `inst' into the current
   frag, creating a relocation fixup when one is pending.  NEW_INST, if
   non-NULL, is attached to the fixup for later use in md_apply_fix.  */
static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve INSN_SIZE (4) bytes in the current frag.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  /* Instructions are always emitted little-endian.  */
  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      /* Internal (GAS-generated) fixups carry the operand and flags so
	 md_apply_fix can re-encode the field itself.  */
      if (aarch64_gas_internal_fixup_p ())
	{
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Record the instruction for DWARF line-number information.  */
  dwarf2_emit_insn (INSN_SIZE);
}
5073
5074 /* Link together opcodes of the same name. */
5075
/* Node in the singly-linked chain of opcode entries sharing one
   mnemonic; chains are stored in the aarch64_ops_hsh hash table.  */
struct templates
{
  /* An opcode entry with this mnemonic.  */
  aarch64_opcode *opcode;
  /* Next entry with the same mnemonic, or NULL.  */
  struct templates *next;
};

typedef struct templates templates;
5083
5084 static templates *
5085 lookup_mnemonic (const char *start, int len)
5086 {
5087 templates *templ = NULL;
5088
5089 templ = hash_find_n (aarch64_ops_hsh, start, len);
5090 return templ;
5091 }
5092
5093 /* Subroutine of md_assemble, responsible for looking up the primary
5094 opcode from the mnemonic the user wrote. STR points to the
5095 beginning of the mnemonic. */
5096
static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  Remember the first '.', which may introduce
     a condition suffix.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Reject an empty mnemonic or one that starts with '.'.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  /* Consume mnemonic and suffix.  */
	  *str = end;
	}
      else
	{
	  /* Unknown suffix: leave *str at the '.' and fail.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* len <= 13 keeps len + 2 within condname[16]; no NUL terminator
	 is needed since lookup_mnemonic takes an explicit length.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5156
5157 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5158 to a corresponding operand qualifier. */
5159
static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the width-dependent offset is
     added to this below.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate qualifiers (/z and /m).  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit vector registers exist.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width
	 by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5231
5232 /* Process an optional operand that is found omitted from the assembly line.
5233 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5234 instruction's opcode entry while IDX is the index of this omitted operand.
5235 */
5236
static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  /* Fill in the opcode's default value in whichever field this operand
     type stores its value.  */
  switch (type)
    {
    /* Plain register operands: default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element (register-lane) operands.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: default is an immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* Scaled SVE pattern: imm plus an implicit "MUL #1" shifter.  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate means no relocation is pending.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* Barrier/hint operands index into their option tables.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5329
5330 /* Process the relocation type for move wide instructions.
5331 Return TRUE on success; otherwise return FALSE. */
5332
static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* The signed (_S), TLS-GD G1 and PREL relocation types imply an
     implicit MOVZ/MOVN choice, which MOVK cannot express.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  /* Map the relocation type to the 16-bit group it selects (G0 = bits
     0-15, G1 = 16-31, G2 = 32-47, G3 = 48-63) and record the implied
     shift on operand 1.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* G2/G3 groups lie beyond bit 31, impossible for a W register.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5431
5432 /* A primitive log calculator. */
5433
/* Return log2 (SIZE) for SIZE in {1, 2, 4, 8, 16}; assert (and return
   (unsigned int) -1) for any other value.  */
static inline unsigned int
get_logsz (unsigned int size)
{
  /* ls[n - 1] is log2 (n) for powers of two; -1 marks invalid sizes.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Guard SIZE == 0 as well as SIZE > 16: without the zero check,
     ls[size - 1] would read ls[-1], which is undefined behavior.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5447
5448 /* Determine and return the real reloc type code for an instruction
5449 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
5450
static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows are indexed by pseudo reloc type (offset from
     BFD_RELOC_AARCH64_LDST_LO12), columns by log2 of the transfer size
     (8, 16, 32, 64, 128 bits).  The TLS variants have no 128-bit form.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1's qualifier is still undetermined, infer it from
     operand 0's.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* TLS variants only exist up to 64-bit transfers.  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5526
5527 /* Check whether a register list REGINFO is valid. The registers must be
5528 numbered in increasing order (modulo 32), in increments of one or two.
5529
5530 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5531 increments of two.
5532
5533 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5534
5535 static bfd_boolean
5536 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5537 {
5538 uint32_t i, nb_regs, prev_regno, incr;
5539
5540 nb_regs = 1 + (reginfo & 0x3);
5541 reginfo >>= 2;
5542 prev_regno = reginfo & 0x1f;
5543 incr = accept_alternate ? 2 : 1;
5544
5545 for (i = 1; i < nb_regs; ++i)
5546 {
5547 uint32_t curr_regno;
5548 reginfo >>= 5;
5549 curr_regno = reginfo & 0x1f;
5550 if (curr_regno != ((prev_regno + incr) & 0x1f))
5551 return FALSE;
5552 prev_regno = curr_regno;
5553 }
5554
5555 return TRUE;
5556 }
5557
5558 /* Generic instruction operand parser. This does no encoding and no
5559 semantic validation; it merely squirrels values away in the inst
5560 structure. Returns TRUE or FALSE depending on whether the
5561 specified grammar matched. */
5562
5563 static bfd_boolean
5564 parse_operands (char *str, const aarch64_opcode *opcode)
5565 {
5566 int i;
5567 char *backtrack_pos = 0;
5568 const enum aarch64_opnd *operands = opcode->operands;
5569 aarch64_reg_type imm_reg_type;
5570
5571 clear_error ();
5572 skip_whitespace (str);
5573
5574 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5575 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5576 else
5577 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5578
5579 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5580 {
5581 int64_t val;
5582 const reg_entry *reg;
5583 int comma_skipped_p = 0;
5584 aarch64_reg_type rtype;
5585 struct vector_type_el vectype;
5586 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5587 aarch64_opnd_info *info = &inst.base.operands[i];
5588 aarch64_reg_type reg_type;
5589
5590 DEBUG_TRACE ("parse operand %d", i);
5591
5592 /* Assign the operand code. */
5593 info->type = operands[i];
5594
5595 if (optional_operand_p (opcode, i))
5596 {
5597 /* Remember where we are in case we need to backtrack. */
5598 gas_assert (!backtrack_pos);
5599 backtrack_pos = str;
5600 }
5601
5602 /* Expect comma between operands; the backtrack mechanism will take
5603 care of cases of omitted optional operand. */
5604 if (i > 0 && ! skip_past_char (&str, ','))
5605 {
5606 set_syntax_error (_("comma expected between operands"));
5607 goto failure;
5608 }
5609 else
5610 comma_skipped_p = 1;
5611
5612 switch (operands[i])
5613 {
5614 case AARCH64_OPND_Rd:
5615 case AARCH64_OPND_Rn:
5616 case AARCH64_OPND_Rm:
5617 case AARCH64_OPND_Rt:
5618 case AARCH64_OPND_Rt2:
5619 case AARCH64_OPND_Rs:
5620 case AARCH64_OPND_Ra:
5621 case AARCH64_OPND_Rt_SYS:
5622 case AARCH64_OPND_PAIRREG:
5623 case AARCH64_OPND_SVE_Rm:
5624 po_int_reg_or_fail (REG_TYPE_R_Z);
5625 break;
5626
5627 case AARCH64_OPND_Rd_SP:
5628 case AARCH64_OPND_Rn_SP:
5629 case AARCH64_OPND_Rt_SP:
5630 case AARCH64_OPND_SVE_Rn_SP:
5631 case AARCH64_OPND_Rm_SP:
5632 po_int_reg_or_fail (REG_TYPE_R_SP);
5633 break;
5634
5635 case AARCH64_OPND_Rm_EXT:
5636 case AARCH64_OPND_Rm_SFT:
5637 po_misc_or_fail (parse_shifter_operand
5638 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5639 ? SHIFTED_ARITH_IMM
5640 : SHIFTED_LOGIC_IMM)));
5641 if (!info->shifter.operator_present)
5642 {
5643 /* Default to LSL if not present. Libopcodes prefers shifter
5644 kind to be explicit. */
5645 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5646 info->shifter.kind = AARCH64_MOD_LSL;
5647 /* For Rm_EXT, libopcodes will carry out further check on whether
5648 or not stack pointer is used in the instruction (Recall that
5649 "the extend operator is not optional unless at least one of
5650 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5651 }
5652 break;
5653
5654 case AARCH64_OPND_Fd:
5655 case AARCH64_OPND_Fn:
5656 case AARCH64_OPND_Fm:
5657 case AARCH64_OPND_Fa:
5658 case AARCH64_OPND_Ft:
5659 case AARCH64_OPND_Ft2:
5660 case AARCH64_OPND_Sd:
5661 case AARCH64_OPND_Sn:
5662 case AARCH64_OPND_Sm:
5663 case AARCH64_OPND_SVE_VZn:
5664 case AARCH64_OPND_SVE_Vd:
5665 case AARCH64_OPND_SVE_Vm:
5666 case AARCH64_OPND_SVE_Vn:
5667 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5668 if (val == PARSE_FAIL)
5669 {
5670 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5671 goto failure;
5672 }
5673 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5674
5675 info->reg.regno = val;
5676 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5677 break;
5678
5679 case AARCH64_OPND_SVE_Pd:
5680 case AARCH64_OPND_SVE_Pg3:
5681 case AARCH64_OPND_SVE_Pg4_5:
5682 case AARCH64_OPND_SVE_Pg4_10:
5683 case AARCH64_OPND_SVE_Pg4_16:
5684 case AARCH64_OPND_SVE_Pm:
5685 case AARCH64_OPND_SVE_Pn:
5686 case AARCH64_OPND_SVE_Pt:
5687 reg_type = REG_TYPE_PN;
5688 goto vector_reg;
5689
5690 case AARCH64_OPND_SVE_Za_5:
5691 case AARCH64_OPND_SVE_Za_16:
5692 case AARCH64_OPND_SVE_Zd:
5693 case AARCH64_OPND_SVE_Zm_5:
5694 case AARCH64_OPND_SVE_Zm_16:
5695 case AARCH64_OPND_SVE_Zn:
5696 case AARCH64_OPND_SVE_Zt:
5697 reg_type = REG_TYPE_ZN;
5698 goto vector_reg;
5699
5700 case AARCH64_OPND_Va:
5701 case AARCH64_OPND_Vd:
5702 case AARCH64_OPND_Vn:
5703 case AARCH64_OPND_Vm:
5704 reg_type = REG_TYPE_VN;
5705 vector_reg:
5706 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5707 if (val == PARSE_FAIL)
5708 {
5709 first_error (_(get_reg_expected_msg (reg_type)));
5710 goto failure;
5711 }
5712 if (vectype.defined & NTA_HASINDEX)
5713 goto failure;
5714
5715 info->reg.regno = val;
5716 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5717 && vectype.type == NT_invtype)
5718 /* Unqualified Pn and Zn registers are allowed in certain
5719 contexts. Rely on F_STRICT qualifier checking to catch
5720 invalid uses. */
5721 info->qualifier = AARCH64_OPND_QLF_NIL;
5722 else
5723 {
5724 info->qualifier = vectype_to_qualifier (&vectype);
5725 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5726 goto failure;
5727 }
5728 break;
5729
5730 case AARCH64_OPND_VdD1:
5731 case AARCH64_OPND_VnD1:
5732 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5733 if (val == PARSE_FAIL)
5734 {
5735 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5736 goto failure;
5737 }
5738 if (vectype.type != NT_d || vectype.index != 1)
5739 {
5740 set_fatal_syntax_error
5741 (_("the top half of a 128-bit FP/SIMD register is expected"));
5742 goto failure;
5743 }
5744 info->reg.regno = val;
5745 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5746 here; it is correct for the purpose of encoding/decoding since
5747 only the register number is explicitly encoded in the related
5748 instructions, although this appears a bit hacky. */
5749 info->qualifier = AARCH64_OPND_QLF_S_D;
5750 break;
5751
5752 case AARCH64_OPND_SVE_Zm3_INDEX:
5753 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5754 case AARCH64_OPND_SVE_Zm3_11_INDEX:
5755 case AARCH64_OPND_SVE_Zm4_11_INDEX:
5756 case AARCH64_OPND_SVE_Zm4_INDEX:
5757 case AARCH64_OPND_SVE_Zn_INDEX:
5758 reg_type = REG_TYPE_ZN;
5759 goto vector_reg_index;
5760
5761 case AARCH64_OPND_Ed:
5762 case AARCH64_OPND_En:
5763 case AARCH64_OPND_Em:
5764 case AARCH64_OPND_Em16:
5765 case AARCH64_OPND_SM3_IMM2:
5766 reg_type = REG_TYPE_VN;
5767 vector_reg_index:
5768 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5769 if (val == PARSE_FAIL)
5770 {
5771 first_error (_(get_reg_expected_msg (reg_type)));
5772 goto failure;
5773 }
5774 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5775 goto failure;
5776
5777 info->reglane.regno = val;
5778 info->reglane.index = vectype.index;
5779 info->qualifier = vectype_to_qualifier (&vectype);
5780 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5781 goto failure;
5782 break;
5783
5784 case AARCH64_OPND_SVE_ZnxN:
5785 case AARCH64_OPND_SVE_ZtxN:
5786 reg_type = REG_TYPE_ZN;
5787 goto vector_reg_list;
5788
5789 case AARCH64_OPND_LVn:
5790 case AARCH64_OPND_LVt:
5791 case AARCH64_OPND_LVt_AL:
5792 case AARCH64_OPND_LEt:
5793 reg_type = REG_TYPE_VN;
5794 vector_reg_list:
5795 if (reg_type == REG_TYPE_ZN
5796 && get_opcode_dependent_value (opcode) == 1
5797 && *str != '{')
5798 {
5799 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5800 if (val == PARSE_FAIL)
5801 {
5802 first_error (_(get_reg_expected_msg (reg_type)));
5803 goto failure;
5804 }
5805 info->reglist.first_regno = val;
5806 info->reglist.num_regs = 1;
5807 }
5808 else
5809 {
5810 val = parse_vector_reg_list (&str, reg_type, &vectype);
5811 if (val == PARSE_FAIL)
5812 goto failure;
5813
5814 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5815 {
5816 set_fatal_syntax_error (_("invalid register list"));
5817 goto failure;
5818 }
5819
5820 if (vectype.width != 0 && *str != ',')
5821 {
5822 set_fatal_syntax_error
5823 (_("expected element type rather than vector type"));
5824 goto failure;
5825 }
5826
5827 info->reglist.first_regno = (val >> 2) & 0x1f;
5828 info->reglist.num_regs = (val & 0x3) + 1;
5829 }
5830 if (operands[i] == AARCH64_OPND_LEt)
5831 {
5832 if (!(vectype.defined & NTA_HASINDEX))
5833 goto failure;
5834 info->reglist.has_index = 1;
5835 info->reglist.index = vectype.index;
5836 }
5837 else
5838 {
5839 if (vectype.defined & NTA_HASINDEX)
5840 goto failure;
5841 if (!(vectype.defined & NTA_HASTYPE))
5842 {
5843 if (reg_type == REG_TYPE_ZN)
5844 set_fatal_syntax_error (_("missing type suffix"));
5845 goto failure;
5846 }
5847 }
5848 info->qualifier = vectype_to_qualifier (&vectype);
5849 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5850 goto failure;
5851 break;
5852
5853 case AARCH64_OPND_CRn:
5854 case AARCH64_OPND_CRm:
5855 {
5856 char prefix = *(str++);
5857 if (prefix != 'c' && prefix != 'C')
5858 goto failure;
5859
5860 po_imm_nc_or_fail ();
5861 if (val > 15)
5862 {
5863 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5864 goto failure;
5865 }
5866 info->qualifier = AARCH64_OPND_QLF_CR;
5867 info->imm.value = val;
5868 break;
5869 }
5870
5871 case AARCH64_OPND_SHLL_IMM:
5872 case AARCH64_OPND_IMM_VLSR:
5873 po_imm_or_fail (1, 64);
5874 info->imm.value = val;
5875 break;
5876
5877 case AARCH64_OPND_CCMP_IMM:
5878 case AARCH64_OPND_SIMM5:
5879 case AARCH64_OPND_FBITS:
5880 case AARCH64_OPND_TME_UIMM16:
5881 case AARCH64_OPND_UIMM4:
5882 case AARCH64_OPND_UIMM4_ADDG:
5883 case AARCH64_OPND_UIMM10:
5884 case AARCH64_OPND_UIMM3_OP1:
5885 case AARCH64_OPND_UIMM3_OP2:
5886 case AARCH64_OPND_IMM_VLSL:
5887 case AARCH64_OPND_IMM:
5888 case AARCH64_OPND_IMM_2:
5889 case AARCH64_OPND_WIDTH:
5890 case AARCH64_OPND_SVE_INV_LIMM:
5891 case AARCH64_OPND_SVE_LIMM:
5892 case AARCH64_OPND_SVE_LIMM_MOV:
5893 case AARCH64_OPND_SVE_SHLIMM_PRED:
5894 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5895 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
5896 case AARCH64_OPND_SVE_SHRIMM_PRED:
5897 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5898 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
5899 case AARCH64_OPND_SVE_SIMM5:
5900 case AARCH64_OPND_SVE_SIMM5B:
5901 case AARCH64_OPND_SVE_SIMM6:
5902 case AARCH64_OPND_SVE_SIMM8:
5903 case AARCH64_OPND_SVE_UIMM3:
5904 case AARCH64_OPND_SVE_UIMM7:
5905 case AARCH64_OPND_SVE_UIMM8:
5906 case AARCH64_OPND_SVE_UIMM8_53:
5907 case AARCH64_OPND_IMM_ROT1:
5908 case AARCH64_OPND_IMM_ROT2:
5909 case AARCH64_OPND_IMM_ROT3:
5910 case AARCH64_OPND_SVE_IMM_ROT1:
5911 case AARCH64_OPND_SVE_IMM_ROT2:
5912 case AARCH64_OPND_SVE_IMM_ROT3:
5913 po_imm_nc_or_fail ();
5914 info->imm.value = val;
5915 break;
5916
5917 case AARCH64_OPND_SVE_AIMM:
5918 case AARCH64_OPND_SVE_ASIMM:
5919 po_imm_nc_or_fail ();
5920 info->imm.value = val;
5921 skip_whitespace (str);
5922 if (skip_past_comma (&str))
5923 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5924 else
5925 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5926 break;
5927
5928 case AARCH64_OPND_SVE_PATTERN:
5929 po_enum_or_fail (aarch64_sve_pattern_array);
5930 info->imm.value = val;
5931 break;
5932
5933 case AARCH64_OPND_SVE_PATTERN_SCALED:
5934 po_enum_or_fail (aarch64_sve_pattern_array);
5935 info->imm.value = val;
5936 if (skip_past_comma (&str)
5937 && !parse_shift (&str, info, SHIFTED_MUL))
5938 goto failure;
5939 if (!info->shifter.operator_present)
5940 {
5941 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5942 info->shifter.kind = AARCH64_MOD_MUL;
5943 info->shifter.amount = 1;
5944 }
5945 break;
5946
5947 case AARCH64_OPND_SVE_PRFOP:
5948 po_enum_or_fail (aarch64_sve_prfop_array);
5949 info->imm.value = val;
5950 break;
5951
5952 case AARCH64_OPND_UIMM7:
5953 po_imm_or_fail (0, 127);
5954 info->imm.value = val;
5955 break;
5956
5957 case AARCH64_OPND_IDX:
5958 case AARCH64_OPND_MASK:
5959 case AARCH64_OPND_BIT_NUM:
5960 case AARCH64_OPND_IMMR:
5961 case AARCH64_OPND_IMMS:
5962 po_imm_or_fail (0, 63);
5963 info->imm.value = val;
5964 break;
5965
5966 case AARCH64_OPND_IMM0:
5967 po_imm_nc_or_fail ();
5968 if (val != 0)
5969 {
5970 set_fatal_syntax_error (_("immediate zero expected"));
5971 goto failure;
5972 }
5973 info->imm.value = 0;
5974 break;
5975
5976 case AARCH64_OPND_FPIMM0:
5977 {
5978 int qfloat;
5979 bfd_boolean res1 = FALSE, res2 = FALSE;
5980 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5981 it is probably not worth the effort to support it. */
5982 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5983 imm_reg_type))
5984 && (error_p ()
5985 || !(res2 = parse_constant_immediate (&str, &val,
5986 imm_reg_type))))
5987 goto failure;
5988 if ((res1 && qfloat == 0) || (res2 && val == 0))
5989 {
5990 info->imm.value = 0;
5991 info->imm.is_fp = 1;
5992 break;
5993 }
5994 set_fatal_syntax_error (_("immediate zero expected"));
5995 goto failure;
5996 }
5997
5998 case AARCH64_OPND_IMM_MOV:
5999 {
6000 char *saved = str;
6001 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6002 reg_name_p (str, REG_TYPE_VN))
6003 goto failure;
6004 str = saved;
6005 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6006 GE_OPT_PREFIX, 1));
6007 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6008 later. fix_mov_imm_insn will try to determine a machine
6009 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6010 message if the immediate cannot be moved by a single
6011 instruction. */
6012 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6013 inst.base.operands[i].skip = 1;
6014 }
6015 break;
6016
6017 case AARCH64_OPND_SIMD_IMM:
6018 case AARCH64_OPND_SIMD_IMM_SFT:
6019 if (! parse_big_immediate (&str, &val, imm_reg_type))
6020 goto failure;
6021 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6022 /* addr_off_p */ 0,
6023 /* need_libopcodes_p */ 1,
6024 /* skip_p */ 1);
6025 /* Parse shift.
6026 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6027 shift, we don't check it here; we leave the checking to
6028 the libopcodes (operand_general_constraint_met_p). By
6029 doing this, we achieve better diagnostics. */
6030 if (skip_past_comma (&str)
6031 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6032 goto failure;
6033 if (!info->shifter.operator_present
6034 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6035 {
6036 /* Default to LSL if not present. Libopcodes prefers shifter
6037 kind to be explicit. */
6038 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6039 info->shifter.kind = AARCH64_MOD_LSL;
6040 }
6041 break;
6042
6043 case AARCH64_OPND_FPIMM:
6044 case AARCH64_OPND_SIMD_FPIMM:
6045 case AARCH64_OPND_SVE_FPIMM8:
6046 {
6047 int qfloat;
6048 bfd_boolean dp_p;
6049
6050 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6051 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6052 || !aarch64_imm_float_p (qfloat))
6053 {
6054 if (!error_p ())
6055 set_fatal_syntax_error (_("invalid floating-point"
6056 " constant"));
6057 goto failure;
6058 }
6059 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6060 inst.base.operands[i].imm.is_fp = 1;
6061 }
6062 break;
6063
6064 case AARCH64_OPND_SVE_I1_HALF_ONE:
6065 case AARCH64_OPND_SVE_I1_HALF_TWO:
6066 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6067 {
6068 int qfloat;
6069 bfd_boolean dp_p;
6070
6071 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6072 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6073 {
6074 if (!error_p ())
6075 set_fatal_syntax_error (_("invalid floating-point"
6076 " constant"));
6077 goto failure;
6078 }
6079 inst.base.operands[i].imm.value = qfloat;
6080 inst.base.operands[i].imm.is_fp = 1;
6081 }
6082 break;
6083
6084 case AARCH64_OPND_LIMM:
6085 po_misc_or_fail (parse_shifter_operand (&str, info,
6086 SHIFTED_LOGIC_IMM));
6087 if (info->shifter.operator_present)
6088 {
6089 set_fatal_syntax_error
6090 (_("shift not allowed for bitmask immediate"));
6091 goto failure;
6092 }
6093 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6094 /* addr_off_p */ 0,
6095 /* need_libopcodes_p */ 1,
6096 /* skip_p */ 1);
6097 break;
6098
6099 case AARCH64_OPND_AIMM:
6100 if (opcode->op == OP_ADD)
6101 /* ADD may have relocation types. */
6102 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6103 SHIFTED_ARITH_IMM));
6104 else
6105 po_misc_or_fail (parse_shifter_operand (&str, info,
6106 SHIFTED_ARITH_IMM));
6107 switch (inst.reloc.type)
6108 {
6109 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6110 info->shifter.amount = 12;
6111 break;
6112 case BFD_RELOC_UNUSED:
6113 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6114 if (info->shifter.kind != AARCH64_MOD_NONE)
6115 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6116 inst.reloc.pc_rel = 0;
6117 break;
6118 default:
6119 break;
6120 }
6121 info->imm.value = 0;
6122 if (!info->shifter.operator_present)
6123 {
6124 /* Default to LSL if not present. Libopcodes prefers shifter
6125 kind to be explicit. */
6126 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6127 info->shifter.kind = AARCH64_MOD_LSL;
6128 }
6129 break;
6130
6131 case AARCH64_OPND_HALF:
6132 {
6133 /* #<imm16> or relocation. */
6134 int internal_fixup_p;
6135 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6136 if (internal_fixup_p)
6137 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6138 skip_whitespace (str);
6139 if (skip_past_comma (&str))
6140 {
6141 /* {, LSL #<shift>} */
6142 if (! aarch64_gas_internal_fixup_p ())
6143 {
6144 set_fatal_syntax_error (_("can't mix relocation modifier "
6145 "with explicit shift"));
6146 goto failure;
6147 }
6148 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6149 }
6150 else
6151 inst.base.operands[i].shifter.amount = 0;
6152 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6153 inst.base.operands[i].imm.value = 0;
6154 if (! process_movw_reloc_info ())
6155 goto failure;
6156 }
6157 break;
6158
6159 case AARCH64_OPND_EXCEPTION:
6160 case AARCH64_OPND_UNDEFINED:
6161 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6162 imm_reg_type));
6163 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6164 /* addr_off_p */ 0,
6165 /* need_libopcodes_p */ 0,
6166 /* skip_p */ 1);
6167 break;
6168
6169 case AARCH64_OPND_NZCV:
6170 {
6171 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
6172 if (nzcv != NULL)
6173 {
6174 str += 4;
6175 info->imm.value = nzcv->value;
6176 break;
6177 }
6178 po_imm_or_fail (0, 15);
6179 info->imm.value = val;
6180 }
6181 break;
6182
6183 case AARCH64_OPND_COND:
6184 case AARCH64_OPND_COND1:
6185 {
6186 char *start = str;
6187 do
6188 str++;
6189 while (ISALPHA (*str));
6190 info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
6191 if (info->cond == NULL)
6192 {
6193 set_syntax_error (_("invalid condition"));
6194 goto failure;
6195 }
6196 else if (operands[i] == AARCH64_OPND_COND1
6197 && (info->cond->value & 0xe) == 0xe)
6198 {
6199 /* Do not allow AL or NV. */
6200 set_default_error ();
6201 goto failure;
6202 }
6203 }
6204 break;
6205
6206 case AARCH64_OPND_ADDR_ADRP:
6207 po_misc_or_fail (parse_adrp (&str));
6208 /* Clear the value as operand needs to be relocated. */
6209 info->imm.value = 0;
6210 break;
6211
6212 case AARCH64_OPND_ADDR_PCREL14:
6213 case AARCH64_OPND_ADDR_PCREL19:
6214 case AARCH64_OPND_ADDR_PCREL21:
6215 case AARCH64_OPND_ADDR_PCREL26:
6216 po_misc_or_fail (parse_address (&str, info));
6217 if (!info->addr.pcrel)
6218 {
6219 set_syntax_error (_("invalid pc-relative address"));
6220 goto failure;
6221 }
6222 if (inst.gen_lit_pool
6223 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6224 {
6225 /* Only permit "=value" in the literal load instructions.
6226 The literal will be generated by programmer_friendly_fixup. */
6227 set_syntax_error (_("invalid use of \"=immediate\""));
6228 goto failure;
6229 }
6230 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6231 {
6232 set_syntax_error (_("unrecognized relocation suffix"));
6233 goto failure;
6234 }
6235 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6236 {
6237 info->imm.value = inst.reloc.exp.X_add_number;
6238 inst.reloc.type = BFD_RELOC_UNUSED;
6239 }
6240 else
6241 {
6242 info->imm.value = 0;
6243 if (inst.reloc.type == BFD_RELOC_UNUSED)
6244 switch (opcode->iclass)
6245 {
6246 case compbranch:
6247 case condbranch:
6248 /* e.g. CBZ or B.COND */
6249 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6250 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6251 break;
6252 case testbranch:
6253 /* e.g. TBZ */
6254 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6255 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6256 break;
6257 case branch_imm:
6258 /* e.g. B or BL */
6259 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6260 inst.reloc.type =
6261 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6262 : BFD_RELOC_AARCH64_JUMP26;
6263 break;
6264 case loadlit:
6265 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6266 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6267 break;
6268 case pcreladdr:
6269 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6270 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6271 break;
6272 default:
6273 gas_assert (0);
6274 abort ();
6275 }
6276 inst.reloc.pc_rel = 1;
6277 }
6278 break;
6279
6280 case AARCH64_OPND_ADDR_SIMPLE:
6281 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6282 {
6283 /* [<Xn|SP>{, #<simm>}] */
6284 char *start = str;
6285 /* First use the normal address-parsing routines, to get
6286 the usual syntax errors. */
6287 po_misc_or_fail (parse_address (&str, info));
6288 if (info->addr.pcrel || info->addr.offset.is_reg
6289 || !info->addr.preind || info->addr.postind
6290 || info->addr.writeback)
6291 {
6292 set_syntax_error (_("invalid addressing mode"));
6293 goto failure;
6294 }
6295
6296 /* Then retry, matching the specific syntax of these addresses. */
6297 str = start;
6298 po_char_or_fail ('[');
6299 po_reg_or_fail (REG_TYPE_R64_SP);
6300 /* Accept optional ", #0". */
6301 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6302 && skip_past_char (&str, ','))
6303 {
6304 skip_past_char (&str, '#');
6305 if (! skip_past_char (&str, '0'))
6306 {
6307 set_fatal_syntax_error
6308 (_("the optional immediate offset can only be 0"));
6309 goto failure;
6310 }
6311 }
6312 po_char_or_fail (']');
6313 break;
6314 }
6315
6316 case AARCH64_OPND_ADDR_REGOFF:
6317 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6318 po_misc_or_fail (parse_address (&str, info));
6319 regoff_addr:
6320 if (info->addr.pcrel || !info->addr.offset.is_reg
6321 || !info->addr.preind || info->addr.postind
6322 || info->addr.writeback)
6323 {
6324 set_syntax_error (_("invalid addressing mode"));
6325 goto failure;
6326 }
6327 if (!info->shifter.operator_present)
6328 {
6329 /* Default to LSL if not present. Libopcodes prefers shifter
6330 kind to be explicit. */
6331 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6332 info->shifter.kind = AARCH64_MOD_LSL;
6333 }
6334 /* Qualifier to be deduced by libopcodes. */
6335 break;
6336
6337 case AARCH64_OPND_ADDR_SIMM7:
6338 po_misc_or_fail (parse_address (&str, info));
6339 if (info->addr.pcrel || info->addr.offset.is_reg
6340 || (!info->addr.preind && !info->addr.postind))
6341 {
6342 set_syntax_error (_("invalid addressing mode"));
6343 goto failure;
6344 }
6345 if (inst.reloc.type != BFD_RELOC_UNUSED)
6346 {
6347 set_syntax_error (_("relocation not allowed"));
6348 goto failure;
6349 }
6350 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6351 /* addr_off_p */ 1,
6352 /* need_libopcodes_p */ 1,
6353 /* skip_p */ 0);
6354 break;
6355
6356 case AARCH64_OPND_ADDR_SIMM9:
6357 case AARCH64_OPND_ADDR_SIMM9_2:
6358 case AARCH64_OPND_ADDR_SIMM11:
6359 case AARCH64_OPND_ADDR_SIMM13:
6360 po_misc_or_fail (parse_address (&str, info));
6361 if (info->addr.pcrel || info->addr.offset.is_reg
6362 || (!info->addr.preind && !info->addr.postind)
6363 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6364 && info->addr.writeback))
6365 {
6366 set_syntax_error (_("invalid addressing mode"));
6367 goto failure;
6368 }
6369 if (inst.reloc.type != BFD_RELOC_UNUSED)
6370 {
6371 set_syntax_error (_("relocation not allowed"));
6372 goto failure;
6373 }
6374 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6375 /* addr_off_p */ 1,
6376 /* need_libopcodes_p */ 1,
6377 /* skip_p */ 0);
6378 break;
6379
6380 case AARCH64_OPND_ADDR_SIMM10:
6381 case AARCH64_OPND_ADDR_OFFSET:
6382 po_misc_or_fail (parse_address (&str, info));
6383 if (info->addr.pcrel || info->addr.offset.is_reg
6384 || !info->addr.preind || info->addr.postind)
6385 {
6386 set_syntax_error (_("invalid addressing mode"));
6387 goto failure;
6388 }
6389 if (inst.reloc.type != BFD_RELOC_UNUSED)
6390 {
6391 set_syntax_error (_("relocation not allowed"));
6392 goto failure;
6393 }
6394 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6395 /* addr_off_p */ 1,
6396 /* need_libopcodes_p */ 1,
6397 /* skip_p */ 0);
6398 break;
6399
6400 case AARCH64_OPND_ADDR_UIMM12:
6401 po_misc_or_fail (parse_address (&str, info));
6402 if (info->addr.pcrel || info->addr.offset.is_reg
6403 || !info->addr.preind || info->addr.writeback)
6404 {
6405 set_syntax_error (_("invalid addressing mode"));
6406 goto failure;
6407 }
6408 if (inst.reloc.type == BFD_RELOC_UNUSED)
6409 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6410 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6411 || (inst.reloc.type
6412 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6413 || (inst.reloc.type
6414 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6415 || (inst.reloc.type
6416 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6417 || (inst.reloc.type
6418 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6419 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6420 /* Leave qualifier to be determined by libopcodes. */
6421 break;
6422
6423 case AARCH64_OPND_SIMD_ADDR_POST:
6424 /* [<Xn|SP>], <Xm|#<amount>> */
6425 po_misc_or_fail (parse_address (&str, info));
6426 if (!info->addr.postind || !info->addr.writeback)
6427 {
6428 set_syntax_error (_("invalid addressing mode"));
6429 goto failure;
6430 }
6431 if (!info->addr.offset.is_reg)
6432 {
6433 if (inst.reloc.exp.X_op == O_constant)
6434 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6435 else
6436 {
6437 set_fatal_syntax_error
6438 (_("writeback value must be an immediate constant"));
6439 goto failure;
6440 }
6441 }
6442 /* No qualifier. */
6443 break;
6444
6445 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6446 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
6447 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6448 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6449 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6450 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6451 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6452 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6453 case AARCH64_OPND_SVE_ADDR_RI_U6:
6454 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6455 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6456 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6457 /* [X<n>{, #imm, MUL VL}]
6458 [X<n>{, #imm}]
6459 but recognizing SVE registers. */
6460 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6461 &offset_qualifier));
6462 if (base_qualifier != AARCH64_OPND_QLF_X)
6463 {
6464 set_syntax_error (_("invalid addressing mode"));
6465 goto failure;
6466 }
6467 sve_regimm:
6468 if (info->addr.pcrel || info->addr.offset.is_reg
6469 || !info->addr.preind || info->addr.writeback)
6470 {
6471 set_syntax_error (_("invalid addressing mode"));
6472 goto failure;
6473 }
6474 if (inst.reloc.type != BFD_RELOC_UNUSED
6475 || inst.reloc.exp.X_op != O_constant)
6476 {
6477 /* Make sure this has priority over
6478 "invalid addressing mode". */
6479 set_fatal_syntax_error (_("constant offset required"));
6480 goto failure;
6481 }
6482 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6483 break;
6484
6485 case AARCH64_OPND_SVE_ADDR_R:
6486 /* [<Xn|SP>{, <R><m>}]
6487 but recognizing SVE registers. */
6488 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6489 &offset_qualifier));
6490 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6491 {
6492 offset_qualifier = AARCH64_OPND_QLF_X;
6493 info->addr.offset.is_reg = 1;
6494 info->addr.offset.regno = 31;
6495 }
6496 else if (base_qualifier != AARCH64_OPND_QLF_X
6497 || offset_qualifier != AARCH64_OPND_QLF_X)
6498 {
6499 set_syntax_error (_("invalid addressing mode"));
6500 goto failure;
6501 }
6502 goto regoff_addr;
6503
6504 case AARCH64_OPND_SVE_ADDR_RR:
6505 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6506 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6507 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6508 case AARCH64_OPND_SVE_ADDR_RX:
6509 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6510 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6511 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6512 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6513 but recognizing SVE registers. */
6514 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6515 &offset_qualifier));
6516 if (base_qualifier != AARCH64_OPND_QLF_X
6517 || offset_qualifier != AARCH64_OPND_QLF_X)
6518 {
6519 set_syntax_error (_("invalid addressing mode"));
6520 goto failure;
6521 }
6522 goto regoff_addr;
6523
6524 case AARCH64_OPND_SVE_ADDR_RZ:
6525 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6526 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6527 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6528 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6529 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6530 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6531 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6532 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6533 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6534 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6535 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6536 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6537 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6538 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6539 &offset_qualifier));
6540 if (base_qualifier != AARCH64_OPND_QLF_X
6541 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6542 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6543 {
6544 set_syntax_error (_("invalid addressing mode"));
6545 goto failure;
6546 }
6547 info->qualifier = offset_qualifier;
6548 goto regoff_addr;
6549
6550 case AARCH64_OPND_SVE_ADDR_ZX:
6551 /* [Zn.<T>{, <Xm>}]. */
6552 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6553 &offset_qualifier));
6554 /* Things to check:
6555 base_qualifier either S_S or S_D
6556 offset_qualifier must be X
6557 */
6558 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6559 && base_qualifier != AARCH64_OPND_QLF_S_D)
6560 || offset_qualifier != AARCH64_OPND_QLF_X)
6561 {
6562 set_syntax_error (_("invalid addressing mode"));
6563 goto failure;
6564 }
6565 info->qualifier = base_qualifier;
6566 if (!info->addr.offset.is_reg || info->addr.pcrel
6567 || !info->addr.preind || info->addr.writeback
6568 || info->shifter.operator_present != 0)
6569 {
6570 set_syntax_error (_("invalid addressing mode"));
6571 goto failure;
6572 }
6573 info->shifter.kind = AARCH64_MOD_LSL;
6574 break;
6575
6576
6577 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6578 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6579 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6580 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6581 /* [Z<n>.<T>{, #imm}] */
6582 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6583 &offset_qualifier));
6584 if (base_qualifier != AARCH64_OPND_QLF_S_S
6585 && base_qualifier != AARCH64_OPND_QLF_S_D)
6586 {
6587 set_syntax_error (_("invalid addressing mode"));
6588 goto failure;
6589 }
6590 info->qualifier = base_qualifier;
6591 goto sve_regimm;
6592
6593 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6594 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6595 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6596 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6597 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6598
6599 We don't reject:
6600
6601 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6602
6603 here since we get better error messages by leaving it to
6604 the qualifier checking routines. */
6605 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6606 &offset_qualifier));
6607 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6608 && base_qualifier != AARCH64_OPND_QLF_S_D)
6609 || offset_qualifier != base_qualifier)
6610 {
6611 set_syntax_error (_("invalid addressing mode"));
6612 goto failure;
6613 }
6614 info->qualifier = base_qualifier;
6615 goto regoff_addr;
6616
6617 case AARCH64_OPND_SYSREG:
6618 {
6619 uint32_t sysreg_flags;
6620 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6621 &sysreg_flags)) == PARSE_FAIL)
6622 {
6623 set_syntax_error (_("unknown or missing system register name"));
6624 goto failure;
6625 }
6626 inst.base.operands[i].sysreg.value = val;
6627 inst.base.operands[i].sysreg.flags = sysreg_flags;
6628 break;
6629 }
6630
6631 case AARCH64_OPND_PSTATEFIELD:
6632 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6633 == PARSE_FAIL)
6634 {
6635 set_syntax_error (_("unknown or missing PSTATE field name"));
6636 goto failure;
6637 }
6638 inst.base.operands[i].pstatefield = val;
6639 break;
6640
6641 case AARCH64_OPND_SYSREG_IC:
6642 inst.base.operands[i].sysins_op =
6643 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6644 goto sys_reg_ins;
6645
6646 case AARCH64_OPND_SYSREG_DC:
6647 inst.base.operands[i].sysins_op =
6648 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6649 goto sys_reg_ins;
6650
6651 case AARCH64_OPND_SYSREG_AT:
6652 inst.base.operands[i].sysins_op =
6653 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6654 goto sys_reg_ins;
6655
6656 case AARCH64_OPND_SYSREG_SR:
6657 inst.base.operands[i].sysins_op =
6658 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6659 goto sys_reg_ins;
6660
6661 case AARCH64_OPND_SYSREG_TLBI:
6662 inst.base.operands[i].sysins_op =
6663 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6664 sys_reg_ins:
6665 if (inst.base.operands[i].sysins_op == NULL)
6666 {
6667 set_fatal_syntax_error ( _("unknown or missing operation name"));
6668 goto failure;
6669 }
6670 break;
6671
6672 case AARCH64_OPND_BARRIER:
6673 case AARCH64_OPND_BARRIER_ISB:
6674 val = parse_barrier (&str);
6675 if (val != PARSE_FAIL
6676 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6677 {
6678 /* ISB only accepts options name 'sy'. */
6679 set_syntax_error
6680 (_("the specified option is not accepted in ISB"));
6681 /* Turn off backtrack as this optional operand is present. */
6682 backtrack_pos = 0;
6683 goto failure;
6684 }
6685 /* This is an extension to accept a 0..15 immediate. */
6686 if (val == PARSE_FAIL)
6687 po_imm_or_fail (0, 15);
6688 info->barrier = aarch64_barrier_options + val;
6689 break;
6690
6691 case AARCH64_OPND_PRFOP:
6692 val = parse_pldop (&str);
6693 /* This is an extension to accept a 0..31 immediate. */
6694 if (val == PARSE_FAIL)
6695 po_imm_or_fail (0, 31);
6696 inst.base.operands[i].prfop = aarch64_prfops + val;
6697 break;
6698
6699 case AARCH64_OPND_BARRIER_PSB:
6700 val = parse_barrier_psb (&str, &(info->hint_option));
6701 if (val == PARSE_FAIL)
6702 goto failure;
6703 break;
6704
6705 case AARCH64_OPND_BTI_TARGET:
6706 val = parse_bti_operand (&str, &(info->hint_option));
6707 if (val == PARSE_FAIL)
6708 goto failure;
6709 break;
6710
6711 default:
6712 as_fatal (_("unhandled operand code %d"), operands[i]);
6713 }
6714
6715 /* If we get here, this operand was successfully parsed. */
6716 inst.base.operands[i].present = 1;
6717 continue;
6718
6719 failure:
6720 /* The parse routine should already have set the error, but in case
6721 not, set a default one here. */
6722 if (! error_p ())
6723 set_default_error ();
6724
6725 if (! backtrack_pos)
6726 goto parse_operands_return;
6727
6728 {
6729 /* We reach here because this operand is marked as optional, and
6730 either no operand was supplied or the operand was supplied but it
6731 was syntactically incorrect. In the latter case we report an
6732 error. In the former case we perform a few more checks before
6733 dropping through to the code to insert the default operand. */
6734
6735 char *tmp = backtrack_pos;
6736 char endchar = END_OF_INSN;
6737
6738 if (i != (aarch64_num_of_operands (opcode) - 1))
6739 endchar = ',';
6740 skip_past_char (&tmp, ',');
6741
6742 if (*tmp != endchar)
6743 /* The user has supplied an operand in the wrong format. */
6744 goto parse_operands_return;
6745
6746 /* Make sure there is not a comma before the optional operand.
6747 For example the fifth operand of 'sys' is optional:
6748
6749 sys #0,c0,c0,#0, <--- wrong
6750 sys #0,c0,c0,#0 <--- correct. */
6751 if (comma_skipped_p && i && endchar == END_OF_INSN)
6752 {
6753 set_fatal_syntax_error
6754 (_("unexpected comma before the omitted optional operand"));
6755 goto parse_operands_return;
6756 }
6757 }
6758
6759 /* Reaching here means we are dealing with an optional operand that is
6760 omitted from the assembly line. */
6761 gas_assert (optional_operand_p (opcode, i));
6762 info->present = 0;
6763 process_omitted_operand (operands[i], opcode, i, info);
6764
6765 /* Try again, skipping the optional operand at backtrack_pos. */
6766 str = backtrack_pos;
6767 backtrack_pos = 0;
6768
6769 /* Clear any error record after the omitted optional operand has been
6770 successfully handled. */
6771 clear_error ();
6772 }
6773
6774 /* Check if we have parsed all the operands. */
6775 if (*str != '\0' && ! error_p ())
6776 {
6777 /* Set I to the index of the last present operand; this is
6778 for the purpose of diagnostics. */
6779 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6780 ;
6781 set_fatal_syntax_error
6782 (_("unexpected characters following instruction"));
6783 }
6784
6785 parse_operands_return:
6786
6787 if (error_p ())
6788 {
6789 DEBUG_TRACE ("parsing FAIL: %s - %s",
6790 operand_mismatch_kind_names[get_error_kind ()],
6791 get_error_message ());
6792 /* Record the operand error properly; this is useful when there
6793 are multiple instruction templates for a mnemonic name, so that
6794 later on, we can select the error that most closely describes
6795 the problem. */
6796 record_operand_error (opcode, i, get_error_kind (),
6797 get_error_message ());
6798 return FALSE;
6799 }
6800 else
6801 {
6802 DEBUG_TRACE ("parsing SUCCESS");
6803 return TRUE;
6804 }
6805 }
6806
6807 /* It does some fix-up to provide some programmer friendly feature while
6808 keeping the libopcodes happy, i.e. libopcodes only accepts
6809 the preferred architectural syntax.
6810 Return FALSE if there is any failure; otherwise return TRUE. */
6811
static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  /* Only a few instruction classes need rewriting; everything else is
     already in the form libopcodes accepts.  */
  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A W register constrains the bit number to [0, 31].  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* libopcodes expects the X form; the encoding is identical.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW loads a 32-bit literal even though Xt is 64-bit.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6914
6915 /* Check for loads and stores that will cause unpredictable behavior. */
6916
static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 For pairs the address is operand 2 and either transfer register
	 may clash with the base.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers; bit 22 set marks
	 a load in these pair encodings.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      /* It is unpredictable if the destination and status registers are the
	 same.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[1].reg.regno
	      || opnds[0].reg.regno == opnds[2].reg.regno))
	as_warn (_("unpredictable: identical transfer and status registers"
		   " --`%s'"),
		 str);

      break;

    default:
      break;
    }
}
6979
6980 static void
6981 force_automatic_sequence_close (void)
6982 {
6983 if (now_instr_sequence.instr)
6984 {
6985 as_warn (_("previous `%s' sequence has not been closed"),
6986 now_instr_sequence.instr->opcode->name);
6987 init_insn_sequence (NULL, &now_instr_sequence);
6988 }
6989 }
6990
6991 /* A wrapper function to interface with libopcodes on encoding and
6992 record the error message if there is any.
6993
6994 Return TRUE on success; otherwise return FALSE. */
6995
6996 static bfd_boolean
6997 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6998 aarch64_insn *code)
6999 {
7000 aarch64_operand_error error_info;
7001 memset (&error_info, '\0', sizeof (error_info));
7002 error_info.kind = AARCH64_OPDE_NIL;
7003 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7004 && !error_info.non_fatal)
7005 return TRUE;
7006
7007 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7008 record_operand_error_info (opcode, &error_info);
7009 return error_info.non_fatal;
7010 }
7011
7012 #ifdef DEBUG_AARCH64
7013 static inline void
7014 dump_opcode_operands (const aarch64_opcode *opcode)
7015 {
7016 int i = 0;
7017 while (opcode->operands[i] != AARCH64_OPND_NIL)
7018 {
7019 aarch64_verbose ("\t\t opnd%d: %s", i,
7020 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7021 ? aarch64_get_operand_name (opcode->operands[i])
7022 : aarch64_get_operand_desc (opcode->operands[i]));
7023 ++i;
7024 }
7025 }
7026 #endif /* DEBUG_AARCH64 */
7027
7028 /* This is the guts of the machine-dependent assembler. STR points to a
7029 machine dependent instruction. This function is supposed to emit
7030 the frags/bytes it assembles to. */
7031
void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Resolve the mnemonic to the list of candidate opcode templates;
     P is advanced past the mnemonic on success.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed from the mnemonic across the reset of
     the instruction scratch state.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; if any step fails, fall through and
	 try the next candidate template.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, TRUE);
	  return;
	}

      /* This template did not match; reset state before the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, FALSE);
}
7167
7168 /* Various frobbings of labels and their addresses. */
7169
void
aarch64_start_line_hook (void)
{
  /* A new line is starting: forget any label from the previous line so
     md_assemble does not re-align it.  */
  last_label_seen = NULL;
}
7175
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember the label so md_assemble can re-anchor it to the (possibly
     re-aligned) instruction address.  */
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
7183
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
7190
7191 int
7192 aarch64_data_in_code (void)
7193 {
7194 if (!strncmp (input_line_pointer + 1, "data:", 5))
7195 {
7196 *input_line_pointer = '/';
7197 input_line_pointer += 5;
7198 *input_line_pointer = 0;
7199 return 1;
7200 }
7201
7202 return 0;
7203 }
7204
/* Strip a trailing "/data" marker (see aarch64_data_in_code) from NAME,
   restoring the canonical symbol name.  NAME is modified in place and
   returned.  A bare "/data" (nothing before the suffix) is left alone.

   Note: the length is kept in a size_t; the previous int narrowed the
   result of strlen and triggered conversion warnings.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  /* Require at least one character before the 5-character suffix.  */
  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
7215 \f
7216 /* Table of all register names defined by default. The user can
7217 define additional names with .req. Note that all register names
7218 should appear in both upper and lowercase variants. Some registers
7219 also have mixed-case names. */
7220
/* REGDEF creates a canonical entry (last field TRUE); REGDEF_ALIAS a
   secondary spelling for the same register number (last field FALSE).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* REGSET16 defines registers 0-15, REGSET31 registers 0-30, REGSET all of
   0-31.  REGSET31 exists because integer register 31 is SP or ZR and gets
   dedicated entries instead of x31/w31.  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  /* ip0/ip1, fp and lr are alternative spellings for x16, x17, x29 and
     x30; they are marked as aliases rather than canonical names.  */
  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  /* Register number 31 as a stack pointer...  */
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  /* ... and as a zero register; which one applies depends on context.  */
  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers; only p0-p15 exist.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7285
/* One-letter helpers for building the NZCV flag table below: an
   upper-case letter means the corresponding flag bit is 1, lower-case
   means 0.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the four flags into a 4-bit value: N is bit 3, Z bit 2, C bit 1,
   V bit 0.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All 16 case-significant spellings of "nzcv", each mapped to the 4-bit
   immediate whose set bits match the capitalised flags.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

/* The single-letter macros would wreak havoc on the rest of the file;
   drop them immediately.  */
#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7323 \f
7324 /* MD interface: bits in the object file. */
7325
7326 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7327 for use in the a.out file, and stores them in the array pointed to by buf.
7328 This knows about the endian-ness of the target machine and does
7329 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7330 2 (short) and 4 (long) Floating numbers are put out as a series of
7331 LITTLENUMS (shorts, here at least). */
7332
7333 void
7334 md_number_to_chars (char *buf, valueT val, int n)
7335 {
7336 if (target_big_endian)
7337 number_to_chars_bigendian (buf, val, n);
7338 else
7339 number_to_chars_littleendian (buf, val, n);
7340 }
7341
7342 /* MD interface: Sections. */
7343
7344 /* Estimate the size of a frag before relaxing. Assume everything fits in
7345 4 bytes. */
7346
7347 int
7348 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
7349 {
7350 fragp->fr_var = 4;
7351 return 4;
7352 }
7353
7354 /* Round up a section size to the appropriate boundary. */
7355
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No target-specific rounding is required; return SIZE unchanged.  */
  return size;
}
7361
7362 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7363 of an rs_align_code fragment.
7364
7365 Here we fill the frag with the appropriate info for padding the
7366 output stream. The resulting frag will consist of a fixed (fr_fix)
7367 and of a repeating (fr_var) part.
7368
7369 The fixed content is always emitted before the repeating content and
7370 these two parts are used as follows in constructing the output:
7371 - the fixed part will be used to align to a valid instruction word
7372 boundary, in case that we start at a misaligned address; as no
7373 executable instruction can live at the misaligned location, we
7374 simply fill with zeros;
7375 - the variable part will be used to cover the remaining padding and
7376 we fill using the AArch64 NOP instruction.
7377
7378 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7379 enough storage space for up to 3 bytes for padding the back to a valid
7380 instruction alignment and exactly 4 bytes to store the NOP pattern. */
7381
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total number of padding bytes between the end of this frag's fixed
     part and the start of the next frag.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* FIX is the number of bytes needed to reach instruction alignment;
     no instruction can live there, so fill with zeros (and, for ELF,
     mark the span with a data mapping symbol).  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The variable part repeats the NOP pattern over the remainder.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
7419
7420 /* Perform target specific initialisation of a frag.
7421 Note - despite the name this initialisation is not done when the frag
7422 is created, but only when its type is assigned. A frag can be created
7423 and used a long time before its type is set, so beware of assuming that
7424 this initialisation is performed first. */
7425
7426 #ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Mapping symbols are only maintained for ELF output (see the OBJ_ELF
     variant below); nothing to do for other object formats.  */
}
7432
7433 #else /* OBJ_ELF is defined. */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state according to what the frag will contain.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
7465 \f
7466 /* Initialize the DWARF-2 unwind information for this procedure. */
7467
void
tc_aarch64_frame_initial_instructions (void)
{
  /* On function entry the CFA is the stack pointer with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
7473 #endif /* OBJ_ELF */
7474
7475 /* Convert REGNAME to a DWARF-2 register number. */
7476
7477 int
7478 tc_aarch64_regname_to_dw2regnum (char *regname)
7479 {
7480 const reg_entry *reg = parse_reg (&regname);
7481 if (reg == NULL)
7482 return -1;
7483
7484 switch (reg->type)
7485 {
7486 case REG_TYPE_SP_32:
7487 case REG_TYPE_SP_64:
7488 case REG_TYPE_R_32:
7489 case REG_TYPE_R_64:
7490 return reg->number;
7491
7492 case REG_TYPE_FP_B:
7493 case REG_TYPE_FP_H:
7494 case REG_TYPE_FP_S:
7495 case REG_TYPE_FP_D:
7496 case REG_TYPE_FP_Q:
7497 return reg->number + 64;
7498
7499 default:
7500 break;
7501 }
7502 return -1;
7503 }
7504
7505 /* Implement DWARF2_ADDR_SIZE. */
7506
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses even though the ISA is 64-bit.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7516
7517 /* MD interface: Symbol and relocation handling. */
7518
7519 /* Return the address within the segment that a PC-relative fixup is
7520 relative to. For AArch64 PC-relative fixups applied to instructions
7521 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7522
7523 long
7524 md_pcrel_from_section (fixS * fixP, segT seg)
7525 {
7526 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7527
7528 /* If this is pc-relative and we are going to emit a relocation
7529 then we just want to put out any pipeline compensation that the linker
7530 will need. Otherwise we want to use the calculated base. */
7531 if (fixP->fx_pcrel
7532 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7533 || aarch64_force_relocation (fixP)))
7534 base = 0;
7535
7536 /* AArch64 should be consistent for all pc-relative relocations. */
7537 return base + AARCH64_PCREL_OFFSET;
7538 }
7539
7540 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
7541 Otherwise we have no need to default values of symbols. */
7542
symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character filter before the full comparison against
     "_GLOBAL_OFFSET_TABLE_".  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      /* Create the GOT symbol lazily, at most once.  */
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
7565
7566 /* Return non-zero if the indicated VALUE has overflowed the maximum
7567 range expressible by a unsigned number with the indicated number of
7568 BITS. */
7569
7570 static bfd_boolean
7571 unsigned_overflow (valueT value, unsigned bits)
7572 {
7573 valueT lim;
7574 if (bits >= sizeof (valueT) * 8)
7575 return FALSE;
7576 lim = (valueT) 1 << bits;
7577 return (value >= lim);
7578 }
7579
7580
7581 /* Return non-zero if the indicated VALUE has overflowed the maximum
7582 range expressible by an signed number with the indicated number of
7583 BITS. */
7584
7585 static bfd_boolean
7586 signed_overflow (offsetT value, unsigned bits)
7587 {
7588 offsetT lim;
7589 if (bits >= sizeof (offsetT) * 8)
7590 return FALSE;
7591 lim = (offsetT) 1 << (bits - 1);
7592 return (value < -lim || value >= lim);
7593 }
7594
7595 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7596 unsigned immediate offset load/store instruction, try to encode it as
7597 an unscaled, 9-bit, signed immediate offset load/store instruction.
7598 Return TRUE if it is successful; otherwise return FALSE.
7599
7600 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7601 in response to the standard LDR/STR mnemonics when the immediate offset is
7602 unambiguous, i.e. when it is negative or unaligned. */
7603
static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode onto its unscaled (LDUR/STUR family)
     counterpart; OP_NIL marks opcodes that have none.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure means the offset does
     not fit the 9-bit signed unscaled form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return FALSE;

  return TRUE;
}
7657
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to
   load a 32-bit/64-bit immediate value into a general register.  An
   assembler error shall result if the immediate cannot be created by a
   single one of these instructions.  If there is a choice, then to ensure
   reversibility an assembler must prefer a MOVZ to MOVN, and MOVZ or
   MOVN to ORR.  */
7667 static void
7668 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
7669 {
7670 const aarch64_opcode *opcode;
7671
7672 /* Need to check if the destination is SP/ZR. The check has to be done
7673 before any aarch64_replace_opcode. */
7674 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
7675 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
7676
7677 instr->operands[1].imm.value = value;
7678 instr->operands[1].skip = 0;
7679
7680 if (try_mov_wide_p)
7681 {
7682 /* Try the MOVZ alias. */
7683 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
7684 aarch64_replace_opcode (instr, opcode);
7685 if (aarch64_opcode_encode (instr->opcode, instr,
7686 &instr->value, NULL, NULL, insn_sequence))
7687 {
7688 put_aarch64_insn (buf, instr->value);
7689 return;
7690 }
7691 /* Try the MOVK alias. */
7692 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
7693 aarch64_replace_opcode (instr, opcode);
7694 if (aarch64_opcode_encode (instr->opcode, instr,
7695 &instr->value, NULL, NULL, insn_sequence))
7696 {
7697 put_aarch64_insn (buf, instr->value);
7698 return;
7699 }
7700 }
7701
7702 if (try_mov_bitmask_p)
7703 {
7704 /* Try the ORR alias. */
7705 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
7706 aarch64_replace_opcode (instr, opcode);
7707 if (aarch64_opcode_encode (instr->opcode, instr,
7708 &instr->value, NULL, NULL, insn_sequence))
7709 {
7710 put_aarch64_insn (buf, instr->value);
7711 return;
7712 }
7713 }
7714
7715 as_bad_where (fixP->fx_file, fixP->fx_line,
7716 _("immediate cannot be moved by a single instruction"));
7717 }
7718
7719 /* An instruction operand which is immediate related may have symbol used
7720 in the assembly, e.g.
7721
7722 mov w0, u32
7723 .set u32, 0x00ffff00
7724
7725 At the time when the assembly instruction is parsed, a referenced symbol,
7726 like 'u32' in the above example may not have been seen; a fixS is created
7727 in such a case and is handled here after symbols have been resolved.
7728 Instruction is fixed up with VALUE using the information in *FIXP plus
7729 extra information in FLAGS.
7730
7731 This function is called by md_apply_fix to fix up instructions that need
7732 a fix-up described above but does not involve any linker-time relocation. */
7733
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Address of the instruction bytes inside the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  /* Dispatch on the operand type recorded when the fix was created.  */
  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate of SVC/HVC/SMC etc.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
	 3 322|2222|2 2 2 21111 111111
	 1 098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
	 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
	 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
	 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
	 ->
	 3 322|2222|2 2 221111111111
	 1 098|7654|3 2 109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12 Rn Rd ADD
	 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
	 51000000 sf 101|0001|shift imm12 Rn Rd SUB
	 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  A negative immediate is handled by flipping the
	     ADD/SUB opcode bit and negating the value.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only legal when
	     the low 12 bits are zero and the shifted value fits in 12
	     bits (i.e. the original fits in 24).  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-run the full encoder, which validates
	 that VALUE is a legal logical/SIMD immediate.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to load a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  If the scaled-offset encoding fails, fall back to
	 the equivalent unscaled (LDUR/STUR) form where one exists.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      /* Any other operand type reaching here is a GAS bug.  */
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7897
7898 /* Apply a fixup (fixP) to segment data, once it has been determined
7899 by our caller that we have all the info we need to fix it up.
7900
7901 Parameter valP is the pointer to the value of the bits. */
7902
void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* Address of the instruction/data being fixed, inside the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* FIXUP_F_* flags stashed at fix-creation time.  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      /* Patch the resolved immediate into the instruction.  */
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* LDR (literal): 19-bit signed, word-scaled offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* ADR: 21-bit signed byte offset.  */
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* B.cond: 19-bit signed, word-scaled offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* TBZ/TBNZ: 14-bit signed, word-scaled offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  /* B/BL: 26-bit signed, word-scaled offset.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* The MOVW family all share the same insertion logic below
       (movw_common); only the 16-bit group selected by SCALE differs.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction.  */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo reloc: resolved to the 32- or 64-bit form per the ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Pseudo reloc: resolved to the 32- or 64-bit form per the ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      /* Marker relocations; nothing to patch here.  */
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually use
     this field, so the impact on the performance should be minimal.  */
  free (fixP->tc_fix_data.inst);

  return;
}
8284
8285 /* Translate internal representation of relocation info to BFD target
8286 format. */
8287
8288 arelent *
8289 tc_gen_reloc (asection * section, fixS * fixp)
8290 {
8291 arelent *reloc;
8292 bfd_reloc_code_real_type code;
8293
8294 reloc = XNEW (arelent);
8295
8296 reloc->sym_ptr_ptr = XNEW (asymbol *);
8297 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8298 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8299
8300 if (fixp->fx_pcrel)
8301 {
8302 if (section->use_rela_p)
8303 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8304 else
8305 fixp->fx_offset = reloc->address;
8306 }
8307 reloc->addend = fixp->fx_offset;
8308
8309 code = fixp->fx_r_type;
8310 switch (code)
8311 {
8312 case BFD_RELOC_16:
8313 if (fixp->fx_pcrel)
8314 code = BFD_RELOC_16_PCREL;
8315 break;
8316
8317 case BFD_RELOC_32:
8318 if (fixp->fx_pcrel)
8319 code = BFD_RELOC_32_PCREL;
8320 break;
8321
8322 case BFD_RELOC_64:
8323 if (fixp->fx_pcrel)
8324 code = BFD_RELOC_64_PCREL;
8325 break;
8326
8327 default:
8328 break;
8329 }
8330
8331 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8332 if (reloc->howto == NULL)
8333 {
8334 as_bad_where (fixp->fx_file, fixp->fx_line,
8335 _
8336 ("cannot represent %s relocation in this object file format"),
8337 bfd_get_reloc_code_name (code));
8338 return NULL;
8339 }
8340
8341 return reloc;
8342 }
8343
8344 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8345
8346 void
8347 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8348 {
8349 bfd_reloc_code_real_type type;
8350 int pcrel = 0;
8351
8352 /* Pick a reloc.
8353 FIXME: @@ Should look at CPU word size. */
8354 switch (size)
8355 {
8356 case 1:
8357 type = BFD_RELOC_8;
8358 break;
8359 case 2:
8360 type = BFD_RELOC_16;
8361 break;
8362 case 4:
8363 type = BFD_RELOC_32;
8364 break;
8365 case 8:
8366 type = BFD_RELOC_64;
8367 break;
8368 default:
8369 as_bad (_("cannot do %u-byte relocation"), size);
8370 type = BFD_RELOC_UNUSED;
8371 break;
8372 }
8373
8374 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8375 }
8376
/* Decide whether a fix-up must be kept as a relocation for the linker
   (return 1), may be resolved by the assembler (return 0), or should be
   decided by the generic code.  */

int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  return generic_force_reloc (fixp);
}
8472
8473 #ifdef OBJ_ELF
8474
8475 /* Implement md_after_parse_args. This is the earliest time we need to decide
8476 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8477
8478 void
8479 aarch64_after_parse_args (void)
8480 {
8481 if (aarch64_abi != AARCH64_ABI_NONE)
8482 return;
8483
8484 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8485 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8486 aarch64_abi = AARCH64_ABI_ILP32;
8487 else
8488 aarch64_abi = AARCH64_ABI_LP64;
8489 }
8490
8491 const char *
8492 elf64_aarch64_target_format (void)
8493 {
8494 #ifdef TE_CLOUDABI
8495 /* FIXME: What to do for ilp32_p ? */
8496 if (target_big_endian)
8497 return "elf64-bigaarch64-cloudabi";
8498 else
8499 return "elf64-littleaarch64-cloudabi";
8500 #else
8501 if (target_big_endian)
8502 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8503 else
8504 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8505 #endif
8506 }
8507
/* Hook run for each symbol before writing the symbol table; simply
   defers to the generic ELF symbol frobbing.  */

void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8513 #endif
8514
8515 /* MD interface: Finalization. */
8516
8517 /* A good place to do this, although this was probably not intended
8518 for this kind of use. We need to dump the literal pool before
8519 references are made to a null symbol pointer. */
8520
8521 void
8522 aarch64_cleanup (void)
8523 {
8524 literal_pool *pool;
8525
8526 for (pool = list_of_pools; pool; pool = pool->next)
8527 {
8528 /* Put it at the end of the relevant section. */
8529 subseg_set (pool->section, pool->sub_section);
8530 s_ltorg (0);
8531 }
8532 }
8533
8534 #ifdef OBJ_ELF
8535 /* Remove any excess mapping symbols generated for alignment frags in
8536 SEC. We may have created a mapping symbol before a zero byte
8537 alignment; remove it if there's a mapping symbol after the
8538 alignment. */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections without frag chains.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag in the section; FRAGP's last mapping symbol is
     redundant if it sits exactly at the frag's end and a later mapping
     symbol (or the end of the section) makes it unnecessary.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward over empty frags to decide whether SYM is needed.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8598 #endif
8599
8600 /* Adjust the symbol table. */
8601
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8612
/* Insert KEY/VALUE into TABLE.  A failure here means a duplicate or
   broken opcode/register table, i.e. a GAS bug, so abort assembly with
   as_fatal rather than printing to stdout and silently continuing with
   an incomplete hash table.  */

static void
checked_hash_insert (struct hash_control *table, const char *key, void *value)
{
  const char *hash_err;

  hash_err = hash_insert (table, key, value);
  if (hash_err)
    as_fatal (_("Internal Error: Can't hash %s\n"), key);
}
8622
8623 static void
8624 fill_instruction_hash_table (void)
8625 {
8626 aarch64_opcode *opcode = aarch64_opcode_table;
8627
8628 while (opcode->name != NULL)
8629 {
8630 templates *templ, *new_templ;
8631 templ = hash_find (aarch64_ops_hsh, opcode->name);
8632
8633 new_templ = XNEW (templates);
8634 new_templ->opcode = opcode;
8635 new_templ->next = NULL;
8636
8637 if (!templ)
8638 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8639 else
8640 {
8641 new_templ->next = templ->next;
8642 templ->next = new_templ;
8643 }
8644 ++opcode;
8645 }
8646 }
8647
8648 static inline void
8649 convert_to_upper (char *dst, const char *src, size_t num)
8650 {
8651 unsigned int i;
8652 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8653 *dst = TOUPPER (*src);
8654 *dst = '\0';
8655 }
8656
8657 /* Assume STR point to a lower-case string, allocate, convert and return
8658 the corresponding upper-case string. */
8659 static inline const char*
8660 get_upper_str (const char *str)
8661 {
8662 char *ret;
8663 size_t len = strlen (str);
8664 ret = XNEWVEC (char, len + 1);
8665 convert_to_upper (ret, str, len);
8666 return ret;
8667 }
8668
8669 /* MD interface: Initialization. */
8670
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create every operand-lookup hash table up front; running out of
     memory here is unrecoverable.  */
  if ((aarch64_ops_hsh = hash_new ()) == NULL
      || (aarch64_cond_hsh = hash_new ()) == NULL
      || (aarch64_shift_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_hsh = hash_new ()) == NULL
      || (aarch64_pstatefield_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
      || (aarch64_sys_regs_sr_hsh = hash_new ()) == NULL
      || (aarch64_reg_hsh = hash_new ()) == NULL
      || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
      || (aarch64_nzcv_hsh = hash_new ()) == NULL
      || (aarch64_pldop_hsh = hash_new ()) == NULL
      || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  /* Mnemonic -> opcode template chains.  */
  fill_instruction_hash_table ();

  /* System registers and PSTATE fields (tables come from opcodes/).  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			 (void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    checked_hash_insert (aarch64_pstatefield_hsh,
			 aarch64_pstatefields[i].name,
			 (void *) (aarch64_pstatefields + i));

  /* Operands of the IC, DC, AT, TLBI and prediction-restriction (SR)
     system instructions.  */
  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_ic_hsh,
			 aarch64_sys_regs_ic[i].name,
			 (void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_dc_hsh,
			 aarch64_sys_regs_dc[i].name,
			 (void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_at_hsh,
			 aarch64_sys_regs_at[i].name,
			 (void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
			 aarch64_sys_regs_tlbi[i].name,
			 (void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    checked_hash_insert (aarch64_sys_regs_sr_hsh,
			 aarch64_sys_regs_sr[i].name,
			 (void *) (aarch64_sys_regs_sr + i));

  /* Register names and NZCV flag-set names for CCMP-style operands.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend modifiers; both spellings are accepted, so each name
     is entered in lower and upper case.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  /* DMB/DSB/ISB barrier option names, indexed by encoding value.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  /* PRFM prefetch operation names.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  /* HINT instruction operand names; some are already upper-case, so
     only add the upper-case alias when it differs.  */
  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.
     Precedence: -mcpu, then -march, then the built-in default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
8820
8821 /* Command line processing. */
8822
/* Short options: only "-m<...>" takes an argument.  */
const char *md_shortopts = "m:";

/* -EB/-EL exist for bi-endian configurations; otherwise only the
   option matching the configured default endianness is defined.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8847
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Simple on/off options handled generically by md_parse_option.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8870
struct aarch64_cpu_option_table
{
  const char *name;		/* CPU name accepted by -mcpu=/.cpu.  */
  const aarch64_feature_set value; /* Features implied by this CPU.  */
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Each entry is the base architecture plus the
   optional extensions the part implements.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A65AE"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
			    "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				 "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
			      "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
				   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
				   "Neoverse N1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
			       "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
			       "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				"Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
			      "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
8971
struct aarch64_arch_option_table
{
  const char *name;		/* Architecture name for -march=/.arch.  */
  const aarch64_feature_set value; /* Features implied by it.  */
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {NULL, AARCH64_ARCH_NONE}
};
8991
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;		/* Extension name ("+name"/"+noname").  */
  const aarch64_feature_set value; /* Feature bits the extension adds.  */
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* Extension table; the REQUIRE field drives the transitive enable /
   disable closures computed by aarch64_feature_{enable,disable}_set.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_ARCH_NONE},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_ARCH_NONE},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
9080
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
9088
9089 /* Transitive closure of features depending on set. */
9090 static aarch64_feature_set
9091 aarch64_feature_disable_set (aarch64_feature_set set)
9092 {
9093 const struct aarch64_option_cpu_value_table *opt;
9094 aarch64_feature_set prev = 0;
9095
9096 while (prev != set) {
9097 prev = set;
9098 for (opt = aarch64_features; opt->name != NULL; opt++)
9099 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9100 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9101 }
9102 return set;
9103 }
9104
9105 /* Transitive closure of dependencies of set. */
9106 static aarch64_feature_set
9107 aarch64_feature_enable_set (aarch64_feature_set set)
9108 {
9109 const struct aarch64_option_cpu_value_table *opt;
9110 aarch64_feature_set prev = 0;
9111
9112 while (prev != set) {
9113 prev = set;
9114 for (opt = aarch64_features; opt->name != NULL; opt++)
9115 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9116 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9117 }
9118 return set;
9119 }
9120
9121 static int
9122 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9123 bfd_boolean ext_only)
9124 {
9125 /* We insist on extensions being added before being removed. We achieve
9126 this by using the ADDING_VALUE variable to indicate whether we are
9127 adding an extension (1) or removing it (0) and only allowing it to
9128 change in the order -1 -> 1 -> 0. */
9129 int adding_value = -1;
9130 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9131
9132 /* Copy the feature set, so that we can modify it. */
9133 *ext_set = **opt_p;
9134 *opt_p = ext_set;
9135
9136 while (str != NULL && *str != 0)
9137 {
9138 const struct aarch64_option_cpu_value_table *opt;
9139 const char *ext = NULL;
9140 int optlen;
9141
9142 if (!ext_only)
9143 {
9144 if (*str != '+')
9145 {
9146 as_bad (_("invalid architectural extension"));
9147 return 0;
9148 }
9149
9150 ext = strchr (++str, '+');
9151 }
9152
9153 if (ext != NULL)
9154 optlen = ext - str;
9155 else
9156 optlen = strlen (str);
9157
9158 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
9159 {
9160 if (adding_value != 0)
9161 adding_value = 0;
9162 optlen -= 2;
9163 str += 2;
9164 }
9165 else if (optlen > 0)
9166 {
9167 if (adding_value == -1)
9168 adding_value = 1;
9169 else if (adding_value != 1)
9170 {
9171 as_bad (_("must specify extensions to add before specifying "
9172 "those to remove"));
9173 return FALSE;
9174 }
9175 }
9176
9177 if (optlen == 0)
9178 {
9179 as_bad (_("missing architectural extension"));
9180 return 0;
9181 }
9182
9183 gas_assert (adding_value != -1);
9184
9185 for (opt = aarch64_features; opt->name != NULL; opt++)
9186 if (strncmp (opt->name, str, optlen) == 0)
9187 {
9188 aarch64_feature_set set;
9189
9190 /* Add or remove the extension. */
9191 if (adding_value)
9192 {
9193 set = aarch64_feature_enable_set (opt->value);
9194 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9195 }
9196 else
9197 {
9198 set = aarch64_feature_disable_set (opt->value);
9199 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9200 }
9201 break;
9202 }
9203
9204 if (opt->name == NULL)
9205 {
9206 as_bad (_("unknown architectural extension `%s'"), str);
9207 return 0;
9208 }
9209
9210 str = ext;
9211 };
9212
9213 return 1;
9214 }
9215
9216 static int
9217 aarch64_parse_cpu (const char *str)
9218 {
9219 const struct aarch64_cpu_option_table *opt;
9220 const char *ext = strchr (str, '+');
9221 size_t optlen;
9222
9223 if (ext != NULL)
9224 optlen = ext - str;
9225 else
9226 optlen = strlen (str);
9227
9228 if (optlen == 0)
9229 {
9230 as_bad (_("missing cpu name `%s'"), str);
9231 return 0;
9232 }
9233
9234 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9235 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9236 {
9237 mcpu_cpu_opt = &opt->value;
9238 if (ext != NULL)
9239 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
9240
9241 return 1;
9242 }
9243
9244 as_bad (_("unknown cpu `%s'"), str);
9245 return 0;
9246 }
9247
9248 static int
9249 aarch64_parse_arch (const char *str)
9250 {
9251 const struct aarch64_arch_option_table *opt;
9252 const char *ext = strchr (str, '+');
9253 size_t optlen;
9254
9255 if (ext != NULL)
9256 optlen = ext - str;
9257 else
9258 optlen = strlen (str);
9259
9260 if (optlen == 0)
9261 {
9262 as_bad (_("missing architecture name `%s'"), str);
9263 return 0;
9264 }
9265
9266 for (opt = aarch64_archs; opt->name != NULL; opt++)
9267 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9268 {
9269 march_cpu_opt = &opt->value;
9270 if (ext != NULL)
9271 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
9272
9273 return 1;
9274 }
9275
9276 as_bad (_("unknown architecture `%s'\n"), str);
9277 return 0;
9278 }
9279
/* ABIs.  */
struct aarch64_option_abi_value_table
{
  const char *name;		/* ABI name accepted by -mabi=.  */
  enum aarch64_abi_type value;
};

/* Recognized -mabi= values (ELF only).  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
9291
9292 static int
9293 aarch64_parse_abi (const char *str)
9294 {
9295 unsigned int i;
9296
9297 if (str[0] == '\0')
9298 {
9299 as_bad (_("missing abi name `%s'"), str);
9300 return 0;
9301 }
9302
9303 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9304 if (strcmp (str, aarch64_abis[i].name) == 0)
9305 {
9306 aarch64_abi = aarch64_abis[i].value;
9307 return 1;
9308 }
9309
9310 as_bad (_("unknown abi `%s'\n"), str);
9311 return 0;
9312 }
9313
/* Options taking a sub-argument; matched by prefix in md_parse_option
   and decoded by the per-option FUNC.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
9325
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple on/off options: the short-option char must
	 match the option's first character and ARG its remainder.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then the prefix-matched options that carry a sub-argument.  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sup-option parser.  ARG lacks the leading
		 option character (already consumed as C), hence the
		 "- 1" when skipping past the matched prefix.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
9393
9394 void
9395 md_show_usage (FILE * fp)
9396 {
9397 struct aarch64_option_table *opt;
9398 struct aarch64_long_option_table *lopt;
9399
9400 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9401
9402 for (opt = aarch64_opts; opt->option != NULL; opt++)
9403 if (opt->help != NULL)
9404 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9405
9406 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9407 if (lopt->help != NULL)
9408 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
9409
9410 #ifdef OPTION_EB
9411 fprintf (fp, _("\
9412 -EB assemble code for a big-endian cpu\n"));
9413 #endif
9414
9415 #ifdef OPTION_EL
9416 fprintf (fp, _("\
9417 -EL assemble code for a little-endian cpu\n"));
9418 #endif
9419 }
9420
9421 /* Parse a .cpu directive. */
9422
9423 static void
9424 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9425 {
9426 const struct aarch64_cpu_option_table *opt;
9427 char saved_char;
9428 char *name;
9429 char *ext;
9430 size_t optlen;
9431
9432 name = input_line_pointer;
9433 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9434 input_line_pointer++;
9435 saved_char = *input_line_pointer;
9436 *input_line_pointer = 0;
9437
9438 ext = strchr (name, '+');
9439
9440 if (ext != NULL)
9441 optlen = ext - name;
9442 else
9443 optlen = strlen (name);
9444
9445 /* Skip the first "all" entry. */
9446 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9447 if (strlen (opt->name) == optlen
9448 && strncmp (name, opt->name, optlen) == 0)
9449 {
9450 mcpu_cpu_opt = &opt->value;
9451 if (ext != NULL)
9452 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9453 return;
9454
9455 cpu_variant = *mcpu_cpu_opt;
9456
9457 *input_line_pointer = saved_char;
9458 demand_empty_rest_of_line ();
9459 return;
9460 }
9461 as_bad (_("unknown cpu `%s'"), name);
9462 *input_line_pointer = saved_char;
9463 ignore_rest_of_line ();
9464 }
9465
9466
9467 /* Parse a .arch directive. */
9468
9469 static void
9470 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9471 {
9472 const struct aarch64_arch_option_table *opt;
9473 char saved_char;
9474 char *name;
9475 char *ext;
9476 size_t optlen;
9477
9478 name = input_line_pointer;
9479 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9480 input_line_pointer++;
9481 saved_char = *input_line_pointer;
9482 *input_line_pointer = 0;
9483
9484 ext = strchr (name, '+');
9485
9486 if (ext != NULL)
9487 optlen = ext - name;
9488 else
9489 optlen = strlen (name);
9490
9491 /* Skip the first "all" entry. */
9492 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9493 if (strlen (opt->name) == optlen
9494 && strncmp (name, opt->name, optlen) == 0)
9495 {
9496 mcpu_cpu_opt = &opt->value;
9497 if (ext != NULL)
9498 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9499 return;
9500
9501 cpu_variant = *mcpu_cpu_opt;
9502
9503 *input_line_pointer = saved_char;
9504 demand_empty_rest_of_line ();
9505 return;
9506 }
9507
9508 as_bad (_("unknown architecture `%s'\n"), name);
9509 *input_line_pointer = saved_char;
9510 ignore_rest_of_line ();
9511 }
9512
9513 /* Parse a .arch_extension directive. */
9514
9515 static void
9516 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9517 {
9518 char saved_char;
9519 char *ext = input_line_pointer;;
9520
9521 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9522 input_line_pointer++;
9523 saved_char = *input_line_pointer;
9524 *input_line_pointer = 0;
9525
9526 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9527 return;
9528
9529 cpu_variant = *mcpu_cpu_opt;
9530
9531 *input_line_pointer = saved_char;
9532 demand_empty_rest_of_line ();
9533 }
9534
9535 /* Copy symbol information. */
9536
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  /* Propagate the AArch64-private per-symbol flag word from SRC to
     DEST.  */
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
9542
9543 #ifdef OBJ_ELF
9544 /* Same as elf_copy_symbol_attributes, but without copying st_other.
9545 This is needed so AArch64 specific st_other values can be independently
9546 specified for an IFUNC resolver (that is called by the dynamic linker)
9547 and the symbol it resolves (aliased to the resolver). In particular,
9548 if a function symbol has special st_other value set via directives,
9549 then attaching an IFUNC resolver to that symbol should not override
9550 the st_other setting. Requiring the directive on the IFUNC resolver
9551 symbol would be unexpected and problematic in C code, where the two
9552 symbols appear as two independent function declarations. */
9553
9554 void
9555 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
9556 {
9557 struct elf_obj_sy *srcelf = symbol_get_obj (src);
9558 struct elf_obj_sy *destelf = symbol_get_obj (dest);
9559 if (srcelf->size)
9560 {
9561 if (destelf->size == NULL)
9562 destelf->size = XNEW (expressionS);
9563 *destelf->size = *srcelf->size;
9564 }
9565 else
9566 {
9567 free (destelf->size);
9568 destelf->size = NULL;
9569 }
9570 S_SET_SIZE (dest, S_GET_SIZE (src));
9571 }
9572 #endif