]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
Remove "memory exhausted" messages
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2020 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
106
107 struct vector_type_el
108 {
109 enum vector_el_type type;
110 unsigned char defined;
111 unsigned width;
112 int64_t index;
113 };
114
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
116
117 struct reloc
118 {
119 bfd_reloc_code_real_type type;
120 expressionS exp;
121 int pc_rel;
122 enum aarch64_opnd opnd;
123 uint32_t flags;
124 unsigned need_libopcodes_p : 1;
125 };
126
127 struct aarch64_instruction
128 {
129 /* libopcodes structure for instruction intermediate representation. */
130 aarch64_inst base;
131 /* Record assembly errors found during the parsing. */
132 struct
133 {
134 enum aarch64_operand_error_kind kind;
135 const char *error;
136 } parsing_error;
137 /* The condition that appears in the assembly line. */
138 int cond;
139 /* Relocation information (including the GAS internal fixup). */
140 struct reloc reloc;
141 /* Need to generate an immediate in the literal pool. */
142 unsigned gen_lit_pool : 1;
143 };
144
145 typedef struct aarch64_instruction aarch64_instruction;
146
147 static aarch64_instruction inst;
148
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
183 static inline bfd_boolean
184 error_p (void)
185 {
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188
/* Return the message text of the recorded diagnostic; NULL when no
   detailed message was supplied (or no error is recorded).  */
static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}
194
/* Return the kind of the recorded diagnostic; AARCH64_OPDE_NIL when
   none has been recorded.  */
static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}
200
201 static inline void
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
203 {
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
206 }
207
/* Record ERROR as a recoverable operand diagnostic
   (AARCH64_OPDE_RECOVERABLE).  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
213
214 /* Use the DESC field of the corresponding aarch64_operand entry to compose
215 the error message. */
static inline void
set_default_error (void)
{
  /* A NULL message makes the reporting code fall back to the DESC
     field of the corresponding aarch64_operand entry.  */
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
221
/* Record ERROR as a syntax error, overwriting any earlier diagnostic.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
/* Record ERROR as a fatal syntax error, overwriting any earlier
   diagnostic.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
240 \f
241 /* Return value for certain parsers when the parsing fails; those parsers
242 return the information of the parsed result, e.g. register number, on
243 success. */
244 #define PARSE_FAIL -1
245
246 /* This is an invalid condition code that means no conditional field is
247 present. */
248 #define COND_ALWAYS 0x10
249
250 typedef struct
251 {
252 const char *template;
253 unsigned long value;
254 } asm_barrier_opt;
255
256 typedef struct
257 {
258 const char *template;
259 uint32_t value;
260 } asm_nzcv;
261
262 struct reloc_entry
263 {
264 char *name;
265 bfd_reloc_code_real_type reloc;
266 };
267
268 /* Macros to define the register types and masks for the purpose
269 of parsing. */
270
271 #undef AARCH64_REG_TYPES
272 #define AARCH64_REG_TYPES \
273 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
274 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
275 BASIC_REG_TYPE(SP_32) /* wsp */ \
276 BASIC_REG_TYPE(SP_64) /* sp */ \
277 BASIC_REG_TYPE(Z_32) /* wzr */ \
278 BASIC_REG_TYPE(Z_64) /* xzr */ \
279 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
280 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
281 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
282 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
283 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
284 BASIC_REG_TYPE(VN) /* v[0-31] */ \
285 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
286 BASIC_REG_TYPE(PN) /* p[0-15] */ \
287 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
288 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
289 /* Typecheck: same, plus SVE registers. */ \
290 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
291 | REG_TYPE(ZN)) \
292 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
293 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Typecheck: same, plus SVE registers. */ \
296 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
298 | REG_TYPE(ZN)) \
299 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
300 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
301 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
302 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
303 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
306 /* Typecheck: any [BHSDQ]P FP. */ \
307 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
308 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
309 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
310 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
311 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
312 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
313 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
314 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
315 be used for SVE instructions, since Zn and Pn are valid symbols \
316 in other contexts. */ \
317 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
318 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
319 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
320 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
321 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
322 | REG_TYPE(ZN) | REG_TYPE(PN)) \
323 /* Any integer register; used for error messages only. */ \
324 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
325 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
326 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
327 /* Pseudo type to mark the end of the enumerator sequence. */ \
328 BASIC_REG_TYPE(MAX)
329
330 #undef BASIC_REG_TYPE
331 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
332 #undef MULTI_REG_TYPE
333 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
334
335 /* Register type enumerators. */
336 typedef enum aarch64_reg_type_
337 {
338 /* A list of REG_TYPE_*. */
339 AARCH64_REG_TYPES
340 } aarch64_reg_type;
341
342 #undef BASIC_REG_TYPE
343 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
344 #undef REG_TYPE
345 #define REG_TYPE(T) (1 << REG_TYPE_##T)
346 #undef MULTI_REG_TYPE
347 #define MULTI_REG_TYPE(T,V) V,
348
349 /* Structure for a hash table entry for a register. */
350 typedef struct
351 {
352 const char *name;
353 unsigned char number;
354 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
355 unsigned char builtin;
356 } reg_entry;
357
358 /* Values indexed by aarch64_reg_type to assist the type checking. */
359 static const unsigned reg_type_masks[] =
360 {
361 AARCH64_REG_TYPES
362 };
363
364 #undef BASIC_REG_TYPE
365 #undef REG_TYPE
366 #undef MULTI_REG_TYPE
367 #undef AARCH64_REG_TYPES
368
369 /* Diagnostics used when we don't get a register of the expected type.
370 Note: this has to synchronized with aarch64_reg_type definitions
371 above. */
static const char *
get_reg_expected_msg (aarch64_reg_type reg_type)
{
  const char *msg;

  /* Map each register class to a translatable "... expected" string.  */
  switch (reg_type)
    {
    case REG_TYPE_R_32:
      msg = N_("integer 32-bit register expected");
      break;
    case REG_TYPE_R_64:
      msg = N_("integer 64-bit register expected");
      break;
    case REG_TYPE_R_N:
      msg = N_("integer register expected");
      break;
    case REG_TYPE_R64_SP:
      msg = N_("64-bit integer or SP register expected");
      break;
    case REG_TYPE_SVE_BASE:
      msg = N_("base register expected");
      break;
    case REG_TYPE_R_Z:
      msg = N_("integer or zero register expected");
      break;
    case REG_TYPE_SVE_OFFSET:
      msg = N_("offset register expected");
      break;
    case REG_TYPE_R_SP:
      msg = N_("integer or SP register expected");
      break;
    case REG_TYPE_R_Z_SP:
      msg = N_("integer, zero or SP register expected");
      break;
    case REG_TYPE_FP_B:
      msg = N_("8-bit SIMD scalar register expected");
      break;
    case REG_TYPE_FP_H:
      msg = N_("16-bit SIMD scalar or floating-point half precision "
	       "register expected");
      break;
    case REG_TYPE_FP_S:
      msg = N_("32-bit SIMD scalar or floating-point single precision "
	       "register expected");
      break;
    case REG_TYPE_FP_D:
      msg = N_("64-bit SIMD scalar or floating-point double precision "
	       "register expected");
      break;
    case REG_TYPE_FP_Q:
      msg = N_("128-bit SIMD scalar or floating-point quad precision "
	       "register expected");
      break;
    case REG_TYPE_R_Z_BHSDQ_V:
    case REG_TYPE_R_Z_SP_BHSDQ_VZP:
      msg = N_("register expected");
      break;
    case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
      msg = N_("SIMD scalar or floating-point register expected");
      break;
    case REG_TYPE_VN:		/* any V reg  */
      msg = N_("vector register expected");
      break;
    case REG_TYPE_ZN:
      msg = N_("SVE vector register expected");
      break;
    case REG_TYPE_PN:
      msg = N_("SVE predicate register expected");
      break;
    default:
      /* as_fatal does not return, so MSG cannot be read uninitialized.  */
      as_fatal (_("invalid register type %d"), reg_type);
    }
  return msg;
}
446
447 /* Some well known registers that we refer to directly elsewhere. */
448 #define REG_SP 31
449 #define REG_ZR 31
450
451 /* Instructions take 4 bytes in the object file. */
452 #define INSN_SIZE 4
453
454 static htab_t aarch64_ops_hsh;
455 static htab_t aarch64_cond_hsh;
456 static htab_t aarch64_shift_hsh;
457 static htab_t aarch64_sys_regs_hsh;
458 static htab_t aarch64_pstatefield_hsh;
459 static htab_t aarch64_sys_regs_ic_hsh;
460 static htab_t aarch64_sys_regs_dc_hsh;
461 static htab_t aarch64_sys_regs_at_hsh;
462 static htab_t aarch64_sys_regs_tlbi_hsh;
463 static htab_t aarch64_sys_regs_sr_hsh;
464 static htab_t aarch64_reg_hsh;
465 static htab_t aarch64_barrier_opt_hsh;
466 static htab_t aarch64_nzcv_hsh;
467 static htab_t aarch64_pldop_hsh;
468 static htab_t aarch64_hint_opt_hsh;
469
470 /* Stuff needed to resolve the label ambiguity
471 As:
472 ...
473 label: <insn>
474 may differ from:
475 ...
476 label:
477 <insn> */
478
479 static symbolS *last_label_seen;
480
481 /* Literal pool structure. Held on a per-section
482 and per-sub-section basis. */
483
484 #define MAX_LITERAL_POOL_SIZE 1024
485 typedef struct literal_expression
486 {
487 expressionS exp;
488 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
489 LITTLENUM_TYPE * bignum;
490 } literal_expression;
491
492 typedef struct literal_pool
493 {
494 literal_expression literals[MAX_LITERAL_POOL_SIZE];
495 unsigned int next_free_entry;
496 unsigned int id;
497 symbolS *symbol;
498 segT section;
499 subsegT sub_section;
500 int size;
501 struct literal_pool *next;
502 } literal_pool;
503
504 /* Pointer to a linked list of literal pools. */
505 static literal_pool *list_of_pools = NULL;
506 \f
507 /* Pure syntax. */
508
509 /* This array holds the chars that always start a comment. If the
510 pre-processor is disabled, these aren't very useful. */
511 const char comment_chars[] = "";
512
513 /* This array holds the chars that only start a comment at the beginning of
514 a line. If the line seems to have the form '# 123 filename'
515 .line and .file directives will appear in the pre-processed output. */
516 /* Note that input_file.c hand checks for '#' at the beginning of the
517 first line of the input file. This is because the compiler outputs
518 #NO_APP at the beginning of its output. */
519 /* Also note that comments like this one will always work. */
520 const char line_comment_chars[] = "#";
521
522 const char line_separator_chars[] = ";";
523
524 /* Chars that can be used to separate mant
525 from exp in floating point numbers. */
526 const char EXP_CHARS[] = "eE";
527
528 /* Chars that mean this number is a floating point constant. */
529 /* As in 0f12.456 */
530 /* or 0d1.2345e12 */
531
532 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhH";
533
534 /* Prefix character that indicates the start of an immediate value. */
535 #define is_immediate_prefix(C) ((C) == '#')
536
537 /* Separator character handling. */
538
539 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
540
541 static inline bfd_boolean
542 skip_past_char (char **str, char c)
543 {
544 if (**str == c)
545 {
546 (*str)++;
547 return TRUE;
548 }
549 else
550 return FALSE;
551 }
552
553 #define skip_past_comma(str) skip_past_char (str, ',')
554
555 /* Arithmetic expressions (possibly involving symbols). */
556
557 static bfd_boolean in_my_get_expression_p = FALSE;
558
559 /* Third argument to my_get_expression. */
560 #define GE_NO_PREFIX 0
561 #define GE_OPT_PREFIX 1
562
563 /* Return TRUE if the string pointed by *STR is successfully parsed
564 as an valid expression; *EP will be filled with the information of
565 such an expression. Otherwise return FALSE. */
566
static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      /* Consume an optional leading '#'.  */
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Temporarily redirect the global input pointer so the generic
     expression parser works on *STR; IN_MY_GET_EXPRESSION_P tells
     md_operand to flag failures with O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* With an explicit '#' prefix this must have been an immediate,
	 so the error is fatal rather than a template mismatch.  */
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  /* Success: advance *STR past the expression and restore the global
     input pointer.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
629
630 /* Turn a string in input_line_pointer into a floating point constant
631 of type TYPE, and store the appropriate bytes in *LITP. The number
632 of LITTLENUMS emitted is stored in *SIZEP. An error message is
633 returned, or NULL on OK. */
634
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* If this is a bfloat16 type, then parse it slightly differently -
     as it does not follow the IEEE standard exactly.  */
  if (type == 'b')
    {
      char * t;
      LITTLENUM_TYPE words[MAX_LITTLENUMS];
      FLONUM_TYPE generic_float;

      /* bfloat16: 1 sign bit, 8 exponent bits, 8 mantissa bits
	 (including the hidden bit).  */
      t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float);

      if (t)
	input_line_pointer = t;
      else
	return _("invalid floating point number");

      /* Patch up the special values, which atof_ieee_detail reports
	 through generic_float.sign.  */
      switch (generic_float.sign)
	{
	/* Is +Inf.  */
	case 'P':
	  words[0] = 0x7f80;
	  break;

	/* Is -Inf.  */
	case 'N':
	  words[0] = 0xff80;
	  break;

	/* Is NaN.  */
	/* bfloat16 has two types of NaN - quiet and signalling.
	   Quiet NaN has bit[6] == 1 && fraction != 0, whereas
	   signalling NaNs have bit[6] == 0 && fraction != 0.
	   Chose this specific encoding as it is the same form
	   as used by other IEEE 754 encodings in GAS.  */
	case 0:
	  words[0] = 0x7fff;
	  break;

	default:
	  break;
	}

      /* A bfloat16 constant occupies a single two-byte littlenum.  */
      *sizeP = 2;

      md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));

      return NULL;
    }

  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
688
689 /* We handle all bad expressions here, so that we can report the faulty
690 instruction in the error message. */
void
md_operand (expressionS * exp)
{
  /* Only flag the failure when the expression came through
     my_get_expression; the caller then reports the diagnostic in the
     context of the whole instruction.  */
  if (in_my_get_expression_p)
    exp->X_op = O_illegal;
}
697
698 /* Immediate values. */
699
700 /* Errors may be set multiple times during parsing or bit encoding
701 (particularly in the Neon bits), but usually the earliest error which is set
702 will be the most meaningful. Avoid overwriting it with later (cascading)
703 errors by calling this function. */
704
static void
first_error (const char *error)
{
  /* Keep the earliest diagnostic; later cascading errors are usually
     less meaningful.  */
  if (! error_p ())
    set_syntax_error (error);
}
711
712 /* Similar to first_error, but this function accepts formatted error
713 message. */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message must fit the buffer; truncation would
	 mean SIZE needs enlarging.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
736
737 /* Register parsing. */
738
739 /* Generic register parser which is called by other specialized
740 register parsers.
741 CCP points to what should be the beginning of a register name.
742 If it is indeed a valid register name, advance CCP over it and
743 return the reg_entry structure; otherwise return NULL.
744 It does not issue diagnostics. */
745
746 static reg_entry *
747 parse_reg (char **ccp)
748 {
749 char *start = *ccp;
750 char *p;
751 reg_entry *reg;
752
753 #ifdef REGISTER_PREFIX
754 if (*start != REGISTER_PREFIX)
755 return NULL;
756 start++;
757 #endif
758
759 p = start;
760 if (!ISALPHA (*p) || !is_name_beginner (*p))
761 return NULL;
762
763 do
764 p++;
765 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
766
767 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
768
769 if (!reg)
770 return NULL;
771
772 *ccp = p;
773 return reg;
774 }
775
776 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
777 return FALSE. */
778 static bfd_boolean
779 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
780 {
781 return (reg_type_masks[type] & (1 << reg->type)) != 0;
782 }
783
784 /* Try to parse a base or offset register. Allow SVE base and offset
785 registers if REG_TYPE includes SVE registers. Return the register
786 entry on success, setting *QUALIFIER to the register qualifier.
787 Return null otherwise.
788
789 Note that this function does not issue any diagnostics. */
790
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit GPR, WSP or WZR.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit GPR, SP or XZR.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    /* SVE Z register: only allowed when REG_TYPE includes SVE
       registers, and only with an explicit ".s" or ".d" suffix.  */
    case REG_TYPE_ZN:
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Step over the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
841
842 /* Try to parse a base or offset register. Return the register entry
843 on success, setting *QUALIFIER to the register qualifier. Return null
844 otherwise.
845
846 Note that this function does not issue any diagnostics. */
847
static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept any 32/64-bit integer register, {W}SP or [WX]ZR.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
853
854 /* Parse the qualifier of a vector register or vector element of type
855 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
856 succeeds; otherwise return FALSE.
857
858 Accept only one occurrence of:
859 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
860 b h s d q */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE Z/P registers, and qualifiers with no leading digit, have no
     element count; WIDTH == 0 encodes "variable/unspecified".  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  /* Parse the element-size letter.  */
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' is only accepted for SVE registers or the scalar "1q".  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* A counted vector must total 64 or 128 bits, except for the short
     2h and 4b forms.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
939
940 /* *STR contains an SVE zero/merge predication suffix. Parse it into
941 *PARSED_TYPE and point *STR at the end of the suffix. */
942
943 static bfd_boolean
944 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
945 {
946 char *ptr = *str;
947
948 /* Skip '/'. */
949 gas_assert (*ptr == '/');
950 ptr++;
951 switch (TOLOWER (*ptr))
952 {
953 case 'z':
954 parsed_type->type = NT_zero;
955 break;
956 case 'm':
957 parsed_type->type = NT_merge;
958 break;
959 default:
960 if (*ptr != '\0' && *ptr != ',')
961 first_error_fmt (_("unexpected character `%c' in predication type"),
962 *ptr);
963 else
964 first_error (_("missing predication type"));
965 return FALSE;
966 }
967 parsed_type->width = 0;
968 *str = ptr + 1;
969 return TRUE;
970 }
971
972 /* Parse a register of the type TYPE.
973
974 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
975 name or the parsed register is not of TYPE.
976
977 Otherwise return the register number, and optionally fill in the actual
978 type of the register in *RTYPE when multiple alternatives were given, and
979 return the register shape and element index information in *TYPEINFO.
980
981 IN_REG_LIST should be set with TRUE if the caller is parsing a register
982 list. */
983
static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start with an "untyped, unindexed" description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the register's actual (basic) type.  */
  type = reg->type;

  /* Parse an optional ".<shape>" suffix (V/Z/P regs) or "/[zm]"
     predication suffix (P regs only).  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* Parse an optional "[<index>]" element index.  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1107
1108 /* Parse register.
1109
1110 Return the register number on success; return PARSE_FAIL otherwise.
1111
1112 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1113 the register (e.g. NEON double or quad reg when either has been requested).
1114
1115 If this is a NEON vector register with additional type information, fill
1116 in the struct pointed to by VECTYPE (if non-NULL).
1117
1118 This parser does not handle register list. */
1119
1120 static int
1121 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1122 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1123 {
1124 struct vector_type_el atype;
1125 char *str = *ccp;
1126 int reg = parse_typed_reg (&str, type, rtype, &atype,
1127 /*in_reg_list= */ FALSE);
1128
1129 if (reg == PARSE_FAIL)
1130 return PARSE_FAIL;
1131
1132 if (vectype)
1133 *vectype = atype;
1134
1135 *ccp = str;
1136
1137 return reg;
1138 }
1139
1140 static inline bfd_boolean
1141 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1142 {
1143 return
1144 e1.type == e2.type
1145 && e1.defined == e2.defined
1146 && e1.width == e2.width && e1.index == e2.index;
1147 }
1148
1149 /* This function parses a list of vector registers of type TYPE.
1150 On success, it returns the parsed register list information in the
1151 following encoded format:
1152
1153 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1154 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1155
1156 The information of the register shape and/or index is returned in
1157 *VECTYPE.
1158
1159 It returns PARSE_FAIL if the register list is invalid.
1160
1161 The list contains one to four registers.
1162 Each register can be one of:
1163 <Vt>.<T>[<index>]
1164 <Vt>.<T>
1165 All <T> should be identical.
1166 All <index> should be identical.
1167 There are restrictions on <Vt> numbers which are checked later
1168 (by reg_list_valid_p). */
1169
static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  /* Lists are always brace-delimited, e.g. {v0.4s - v3.4s}.  */
  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;			/* Register number of the last parsed entry.  */
  val_range = -1;		/* Start of the current Vn - Vm range.  */
  in_range = 0;			/* Non-zero while handling the "- Vm" half.  */
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;	/* Previous register opens the range.  */
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      /* On error, keep scanning entries so that multiple problems in
	 the same list produce only the first diagnostic but the parse
	 still resynchronizes at the closing brace.  */
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      /* An index on any element means the whole list must carry an
	 index after the closing brace, e.g. {v0.b, v1.b}[3].  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  /* Skip the range opener itself; it was already accumulated
	     as an ordinary list element on the previous iteration.  */
	  val_range++;
	}
      else
	{
	  val_range = val;
	  /* The first element fixes the shape every later element
	     must match.  */
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      /* Pack each register number into the next 5-bit slot of the
	 encoded result (see the format comment above).  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Note the comma operator: seeing '-' both continues the loop and
     flags that the next entry closes a range.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  /* Parse the trailing [index] that applies to the whole list.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  /* Hardware list instructions take at most four registers.  */
  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low 2 bits: register count - 1; remaining bits: packed numbers.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1309
1310 /* Directives: register aliases. */
1311
/* Record STR as an alias for register NUMBER of type TYPE in the
   register hash table.  Return the newly created entry, or NULL if STR
   already names a register (in which case a warning may be emitted and
   the table is left unchanged).  */

static reg_entry *
insert_reg_alias (char *str, int number, aarch64_reg_type type)
{
  reg_entry *new;
  const char *name;

  if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
    {
      if (new->builtin)
	as_warn (_("ignoring attempt to redefine built-in register '%s'"),
		 str);

      /* Only warn about a redefinition if it's not defined as the
	 same register.  */
      else if (new->number != number || new->type != type)
	as_warn (_("ignoring redefinition of register alias '%s'"), str);

      return NULL;
    }

  /* The hash table keeps pointers to NAME and the entry, so both must
     be heap-allocated and are owned by the table until .unreq frees
     them (see s_unreq).  */
  name = xstrdup (str);
  new = XNEW (reg_entry);

  new->name = name;
  new->number = number;
  new->type = type;
  new->builtin = FALSE;

  str_hash_insert (aarch64_reg_hsh, name, new, 0);

  return new;
}
1344
1345 /* Look for the .req directive. This is of the form:
1346
1347 new_register_name .req existing_register_name
1348
1349 If we find one, or if it looks sufficiently like one that we want to
1350 handle any error here, return TRUE. Otherwise return FALSE. */
1351
static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Return TRUE: the line looked like a .req, so the caller should
	 not try to process it any further.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the uppercase variant if it actually differs from
	 the name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	     foo .req r0
	     Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  /* NBUF was only a scratch copy: insert_reg_alias made its own copy
     of the name, so it is safe to free on every path.  */
  free (nbuf);
  return TRUE;
}
1424
1425 /* Should never be called, as .req goes between the alias and the
1426 register name, not at the beginning of the line. */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* Reaching here means .req appeared at the start of a statement,
     which is never valid (see the comment above).  */
  as_bad (_("invalid syntax for .req directive"));
}
1432
1433 /* The .unreq directive deletes an alias which was previously defined
1434 by .req. For example:
1435
1436 my_alias .req r11
1437 .unreq my_alias */
1438
1439 static void
1440 s_unreq (int a ATTRIBUTE_UNUSED)
1441 {
1442 char *name;
1443 char saved_char;
1444
1445 name = input_line_pointer;
1446
1447 while (*input_line_pointer != 0
1448 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1449 ++input_line_pointer;
1450
1451 saved_char = *input_line_pointer;
1452 *input_line_pointer = 0;
1453
1454 if (!*name)
1455 as_bad (_("invalid syntax for .unreq directive"));
1456 else
1457 {
1458 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1459
1460 if (!reg)
1461 as_bad (_("unknown register alias '%s'"), name);
1462 else if (reg->builtin)
1463 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1464 name);
1465 else
1466 {
1467 char *p;
1468 char *nbuf;
1469
1470 str_hash_delete (aarch64_reg_hsh, name);
1471 free ((char *) reg->name);
1472 free (reg);
1473
1474 /* Also locate the all upper case and all lower case versions.
1475 Do not complain if we cannot find one or the other as it
1476 was probably deleted above. */
1477
1478 nbuf = strdup (name);
1479 for (p = nbuf; *p; p++)
1480 *p = TOUPPER (*p);
1481 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1482 if (reg)
1483 {
1484 str_hash_delete (aarch64_reg_hsh, nbuf);
1485 free ((char *) reg->name);
1486 free (reg);
1487 }
1488
1489 for (p = nbuf; *p; p++)
1490 *p = TOLOWER (*p);
1491 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1492 if (reg)
1493 {
1494 str_hash_delete (aarch64_reg_hsh, nbuf);
1495 free ((char *) reg->name);
1496 free (reg);
1497 }
1498
1499 free (nbuf);
1500 }
1501 }
1502
1503 *input_line_pointer = saved_char;
1504 demand_empty_rest_of_line ();
1505 }
1506
1507 /* Directives: Instruction set selection. */
1508
1509 #ifdef OBJ_ELF
1510 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1511 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1512 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1513 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1514
1515 /* Create a new mapping symbol for the transition to STATE. */
1516
static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Pick the AAELF64 mapping symbol name for the new state:
     $d marks data, $x marks A64 instructions.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Discard the superseded symbol from the global symbol chain.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Two symbols at one offset: the newer one wins.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1570
1571 /* We must sometimes convert a region marked as code to data during
1572 code alignment, if an odd number of bytes have to be padded. The
1573 code mapping symbol is pushed to an aligned address. */
1574
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* The symbol being dropped may also be the frag's first map;
	 keep both cached pointers consistent.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding as data, then resume the original state just
     past it (BYTES later).  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1598
1599 static void mapping_state_2 (enum mstate state, int max_chars);
1600
1601 /* Set the mapping state to STATE. Only call this when about to
1602 emit some STATE bytes to the file. */
1603
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* Anything already emitted before the first instruction must
	 retroactively be marked as data.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Emit the actual mapping symbol for STATE at the current position.  */
  mapping_state_2 (state, 0);
}
1642
1643 /* Same as mapping_state, but MAX_CHARS bytes have already been
1644 allocated. Put the mapping symbol that far back. */
1645
1646 static void
1647 mapping_state_2 (enum mstate state, int max_chars)
1648 {
1649 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1650
1651 if (!SEG_NORMAL (now_seg))
1652 return;
1653
1654 if (mapstate == state)
1655 /* The mapping symbol has already been emitted.
1656 There is nothing else to do. */
1657 return;
1658
1659 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1660 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1661 }
1662 #else
1663 #define mapping_state(x) /* nothing */
1664 #define mapping_state_2(x, y) /* nothing */
1665 #endif
1666
1667 /* Directives: sectioning and alignment. */
1668
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  /* BSS contents are data, never instructions.  */
  mapping_state (MAP_DATA);
}
1678
/* Handle the .even directive: align the current position to a 2-byte
   boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1690
1691 /* Directives: Literal pools. */
1692
1693 static literal_pool *
1694 find_literal_pool (int size)
1695 {
1696 literal_pool *pool;
1697
1698 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1699 {
1700 if (pool->section == now_seg
1701 && pool->sub_section == now_subseg && pool->size == size)
1702 break;
1703 }
1704
1705 return pool;
1706 }
1707
1708 static literal_pool *
1709 find_or_make_literal_pool (int size)
1710 {
1711 /* Next literal pool ID number. */
1712 static unsigned int latest_pool_num = 1;
1713 literal_pool *pool;
1714
1715 pool = find_literal_pool (size);
1716
1717 if (pool == NULL)
1718 {
1719 /* Create a new pool. */
1720 pool = XNEW (literal_pool);
1721 if (!pool)
1722 return NULL;
1723
1724 /* Currently we always put the literal pool in the current text
1725 section. If we were generating "small" model code where we
1726 knew that all code and initialised data was within 1MB then
1727 we could output literals to mergeable, read-only data
1728 sections. */
1729
1730 pool->next_free_entry = 0;
1731 pool->section = now_seg;
1732 pool->sub_section = now_subseg;
1733 pool->size = size;
1734 pool->next = list_of_pools;
1735 pool->symbol = NULL;
1736
1737 /* Add it to the list. */
1738 list_of_pools = pool;
1739 }
1740
1741 /* New pools, and emptied pools, will have a NULL symbol. */
1742 if (pool->symbol == NULL)
1743 {
1744 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1745 &zero_address_frag, 0);
1746 pool->id = latest_pool_num++;
1747 }
1748
1749 /* Done. */
1750 return pool;
1751 }
1752
1753 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1754 Return TRUE on success, otherwise return FALSE. */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  /* find_or_make_literal_pool aborts on allocation failure, so POOL is
     never NULL here.  */
  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  Constants
     match on value and signedness; symbolic expressions match on the
     full (symbol, op_symbol, addend) triple.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite the caller's expression to reference the pool entry:
     pool_symbol + (entry index * entry size).  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1812
1813 /* Can't use symbol_new here, so have to create a symbol and then at
1814 a later date assign it a value. That's what these functions do. */
1815
static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy the name onto the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt the output.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1863
1864
/* Handle the .ltorg / .pool directive: dump every pending literal pool
   for the current (sub)section at the current position.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Pools exist for 4-byte (align 2) and 8-byte (align 3) literals;
     align 4 covers 16-byte entries.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* Literal data must carry a $d mapping symbol.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* \002 makes the name unspellable in source, avoiding clashes
	 with user symbols.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Pin the pool's anchor symbol (created undefined by
	 find_or_make_literal_pool) to this spot.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword */

	  /* The saved bignum copy is no longer needed once emitted.  */
	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1923
1924 #ifdef OBJ_ELF
1925 /* Forward declarations for functions below, in the MD interface
1926 section. */
1927 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1928 static struct reloc_table_entry * find_reloc_table_entry (char **);
1929
1930 /* Directives: Data. */
1931 /* N.B. the support for relocation suffix in this directive needs to be
1932 implemented properly. */
1933
/* Implement .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8)
   for ELF: emit a comma-separated list of NBYTES-wide data values,
   rejecting (for now) any :reloc: suffix on symbolic operands.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* Everything emitted here is data, not instructions.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:reloc_name:" suffix; relocation
	     suffixes on data are recognized but not yet supported.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1985
1986 /* Mark symbol that it follows a variant PCS convention. */
1987
1988 static void
1989 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
1990 {
1991 char *name;
1992 char c;
1993 symbolS *sym;
1994 asymbol *bfdsym;
1995 elf_symbol_type *elfsym;
1996
1997 c = get_symbol_name (&name);
1998 if (!*name)
1999 as_bad (_("Missing symbol name in directive"));
2000 sym = symbol_find_or_make (name);
2001 restore_line_pointer (c);
2002 demand_empty_rest_of_line ();
2003 bfdsym = symbol_get_bfdsym (sym);
2004 elfsym = elf_symbol_from (bfd_asymbol_bfd (bfdsym), bfdsym);
2005 gas_assert (elfsym);
2006 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2007 }
2008 #endif /* OBJ_ELF */
2009
2010 /* Output a 32-bit word, but mark as an instruction. */
2011
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instructions are always emitted little-endian; on a big-endian
	 target byte-swap the value so emit_expr produces the right
	 encoding.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2062
2063 static void
2064 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2065 {
2066 demand_empty_rest_of_line ();
2067 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2068 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2069 }
2070
2071 #ifdef OBJ_ELF
2072 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2073
static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix attaches to the instruction that must
     immediately follow this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2086
2087 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2088
static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2106
2107 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2108
static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix attaches to the LDR that must immediately
     follow this directive (same trick as s_tlsdesccall).  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2121 #endif /* OBJ_ELF */
2122
2123 static void s_aarch64_arch (int);
2124 static void s_aarch64_cpu (int);
2125 static void s_aarch64_arch_extension (int);
2126
2127 /* This table describes all the machine specific pseudo-ops the assembler
2128 has to support. The fields are:
2129 pseudo-op name without dot
2130 function to call to execute this pseudo-op
2131 Integer arg to pass to the function. */
2132
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  /* Section and alignment directives.  */
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* Literal pool directives (.pool is a synonym for .ltorg).  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  /* Target selection directives.  */
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  /* TLS descriptor relaxation markers.  */
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* Data directives; the argument is the element size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* Half-precision and brain floating-point constants.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2160 \f
2161
2162 /* Check whether STR points to a register name followed by a comma or the
2163 end of line; REG_TYPE indicates which register types are checked
2164 against. Return TRUE if STR is such a register name; otherwise return
2165 FALSE. The function does not intend to produce any diagnostics, but since
2166 the register parser aarch64_reg_parse, which is called by this function,
2167 does produce diagnostics, we call clear_error to clear any diagnostics
2168 that may be generated by aarch64_reg_parse.
2169 Also, the function returns FALSE directly if there is any user error
2170 present at the function entry. This prevents the existing diagnostics
2171 state from being spoiled.
2172 The function currently serves parse_constant_immediate and
2173 parse_big_immediate only. */
2174 static bfd_boolean
2175 reg_name_p (char *str, aarch64_reg_type reg_type)
2176 {
2177 int reg;
2178
2179 /* Prevent the diagnostics state from being spoiled. */
2180 if (error_p ())
2181 return FALSE;
2182
2183 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2184
2185 /* Clear the parsing error that may be set by the reg parser. */
2186 clear_error ();
2187
2188 if (reg == PARSE_FAIL)
2189 return FALSE;
2190
2191 skip_whitespace (str);
2192 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2193 return TRUE;
2194
2195 return FALSE;
2196 }
2197
2198 /* Parser functions used exclusively in instruction operands. */
2199
2200 /* Parse an immediate expression which may not be constant.
2201
2202 To prevent the expression parser from pushing a register name
2203 into the symbol table as an undefined symbol, firstly a check is
2204 done to find out whether STR is a register of type REG_TYPE followed
2205 by a comma or the end of line. Return FALSE if STR is such a string. */
2206
static bfd_boolean
parse_immediate_expression (char **str, expressionS *exp,
			    aarch64_reg_type reg_type)
{
  /* Reject a bare register name before the expression parser can
     intern it as an undefined symbol (see the comment above).  */
  if (reg_name_p (*str, reg_type))
    {
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  /* Advances *STR past the expression; the '#' prefix is optional.  */
  my_get_expression (exp, str, GE_OPT_PREFIX, 1);

  if (exp->X_op == O_absent)
    {
      set_fatal_syntax_error (_("missing immediate expression"));
      return FALSE;
    }

  return TRUE;
}
2227
2228 /* Constant immediate-value read function for use in insn parsing.
2229 STR points to the beginning of the immediate (with the optional
2230 leading #); *VAL receives the value. REG_TYPE says which register
2231 names should be treated as registers rather than as symbolic immediates.
2232
2233 Return TRUE on success; otherwise return FALSE. */
2234
static bfd_boolean
parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
{
  expressionS exp;

  /* Parse a general immediate first (this also rejects bare register
     names); *STR is advanced on success.  */
  if (! parse_immediate_expression (str, &exp, reg_type))
    return FALSE;

  /* Unlike parse_immediate_expression, this helper additionally
     requires the expression to fold to a constant.  */
  if (exp.X_op != O_constant)
    {
      set_syntax_error (_("constant expression required"));
      return FALSE;
    }

  *val = exp.X_add_number;
  return TRUE;
}
2252
/* Compress a single-precision bit pattern IMM into the 8-bit FMOV
   immediate encoding: the sign bit b[31] becomes b[7] and the exponent/
   mantissa bits b[25:19] become b[6:0].  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t sign_bit = (imm >> 31) & 0x1;	/* b[31] */
  uint32_t low_bits = (imm >> 19) & 0x7f;	/* b[25:19] */

  return (sign_bit << 7) | low_bits;
}
2259
2260 /* Return TRUE if the single-precision floating-point value encoded in IMM
2261 can be expressed in the AArch64 8-bit signed floating-point format with
2262 3-bit exponent and normalized 4 bits of precision; in other words, the
2263 floating-point value must be expressable as
2264 (+/-) n / 16 * power (2, r)
2265 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2266
2267 static bfd_boolean
2268 aarch64_imm_float_p (uint32_t imm)
2269 {
2270 /* If a single-precision floating-point value has the following bit
2271 pattern, it can be expressed in the AArch64 8-bit floating-point
2272 format:
2273
2274 3 32222222 2221111111111
2275 1 09876543 21098765432109876543210
2276 n Eeeeeexx xxxx0000000000000000000
2277
2278 where n, e and each x are either 0 or 1 independently, with
2279 E == ~ e. */
2280
2281 uint32_t pattern;
2282
2283 /* Prepare the pattern for 'Eeeeee'. */
2284 if (((imm >> 30) & 0x1) == 0)
2285 pattern = 0x3e000000;
2286 else
2287 pattern = 0x40000000;
2288
2289 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2290 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2291 }
2292
2293 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2294 as an IEEE float without any loss of precision. Store the value in
2295 *FPWORD if so. */
2296
2297 static bfd_boolean
2298 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2299 {
2300 /* If a double-precision floating-point value has the following bit
2301 pattern, it can be expressed in a float:
2302
2303 6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2304 3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2305 n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2306
2307 -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
2308 if Eeee_eeee != 1111_1111
2309
2310 where n, e, s and S are either 0 or 1 independently and where ~ is the
2311 inverse of E. */
2312
2313 uint32_t pattern;
2314 uint32_t high32 = imm >> 32;
2315 uint32_t low32 = imm;
2316
2317 /* Lower 29 bits need to be 0s. */
2318 if ((imm & 0x1fffffff) != 0)
2319 return FALSE;
2320
2321 /* Prepare the pattern for 'Eeeeeeeee'. */
2322 if (((high32 >> 30) & 0x1) == 0)
2323 pattern = 0x38000000;
2324 else
2325 pattern = 0x40000000;
2326
2327 /* Check E~~~. */
2328 if ((high32 & 0x78000000) != pattern)
2329 return FALSE;
2330
2331 /* Check Eeee_eeee != 1111_1111. */
2332 if ((high32 & 0x7ff00000) == 0x47f00000)
2333 return FALSE;
2334
2335 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */
2336 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */
2337 | (low32 >> 29)); /* 3 S bits. */
2338 return TRUE;
2339 }
2340
2341 /* Return true if we should treat OPERAND as a double-precision
2342 floating-point operand rather than a single-precision one. */
2343 static bfd_boolean
2344 double_precision_operand_p (const aarch64_opnd_info *operand)
2345 {
2346 /* Check for unsuffixed SVE registers, which are allowed
2347 for LDR and STR but not in instructions that require an
2348 immediate. We get better error messages if we arbitrarily
2349 pick one size, parse the immediate normally, and then
2350 report the match failure in the normal way. */
2351 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2352 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2353 }
2354
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  /* The immediate prefix is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* Fold the 64-bit pattern down to 32 bits; fail if any
	     precision would be lost.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* A single-precision pattern must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* Reject a register name so that it is not entered into the
	 symbol table as an undefined symbol.  */
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal representation: let the generic IEEE parser produce
	 the single-precision encoding.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2430
2431 /* Less-generic immediate-value read function with the possibility of loading
2432 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2433 instructions.
2434
2435 To prevent the expression parser from pushing a register name into the
2436 symbol table as an undefined symbol, a check is firstly done to find
2437 out whether STR is a register of type REG_TYPE followed by a comma or
2438 the end of line. Return FALSE if STR is such a register. */
2439
2440 static bfd_boolean
2441 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2442 {
2443 char *ptr = *str;
2444
2445 if (reg_name_p (ptr, reg_type))
2446 {
2447 set_syntax_error (_("immediate operand required"));
2448 return FALSE;
2449 }
2450
2451 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2452
2453 if (inst.reloc.exp.X_op == O_constant)
2454 *imm = inst.reloc.exp.X_add_number;
2455
2456 *str = ptr;
2457
2458 return TRUE;
2459 }
2460
2461 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2462 if NEED_LIBOPCODES is non-zero, the fixup will need
2463 assistance from the libopcodes. */
2464
2465 static inline void
2466 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2467 const aarch64_opnd_info *operand,
2468 int need_libopcodes_p)
2469 {
2470 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2471 reloc->opnd = operand->type;
2472 if (need_libopcodes_p)
2473 reloc->need_libopcodes_p = 1;
2474 };
2475
2476 /* Return TRUE if the instruction needs to be fixed up later internally by
2477 the GAS; otherwise return FALSE. */
2478
2479 static inline bfd_boolean
2480 aarch64_gas_internal_fixup_p (void)
2481 {
2482 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2483 }
2484
2485 /* Assign the immediate value to the relevant field in *OPERAND if
2486 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2487 needs an internal fixup in a later stage.
2488 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2489 IMM.VALUE that may get assigned with the constant. */
2490 static inline void
2491 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2492 aarch64_opnd_info *operand,
2493 int addr_off_p,
2494 int need_libopcodes_p,
2495 int skip_p)
2496 {
2497 if (reloc->exp.X_op == O_constant)
2498 {
2499 if (addr_off_p)
2500 operand->addr.offset.imm = reloc->exp.X_add_number;
2501 else
2502 operand->imm.value = reloc->exp.X_add_number;
2503 reloc->type = BFD_RELOC_UNUSED;
2504 }
2505 else
2506 {
2507 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2508 /* Tell libopcodes to ignore this operand or not. This is helpful
2509 when one of the operands needs to be fixed up later but we need
2510 libopcodes to check the other operands. */
2511 operand->skip = skip_p;
2512 }
2513 }
2514
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;			/* Modifier name, without the colons.  */
  int pc_rel;				/* Non-zero if PC-relative.  */
  bfd_reloc_code_real_type adr_type;	/* Reloc to use on ADR.  */
  bfd_reloc_code_real_type adrp_type;	/* Reloc to use on ADRP.  */
  bfd_reloc_code_real_type movw_type;	/* Reloc to use on MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;	/* Reloc to use on ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;	/* Reloc to use on load/store offset.  */
  bfd_reloc_code_real_type ld_literal_type; /* Reloc to use on LDR (literal).  */
};
2534
/* Table of all recognized :rello: modifiers.  A zero in a field means the
   modifier is not valid for that instruction class; field order matches
   struct reloc_table_entry.  */
static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3062
3063 /* Given the address of a pointer pointing to the textual name of a
3064 relocation as may appear in assembler source, attempt to find its
3065 details in reloc_table. The pointer will be updated to the character
3066 after the trailing colon. On failure, NULL will be returned;
3067 otherwise return the reloc_table_entry. */
3068
3069 static struct reloc_table_entry *
3070 find_reloc_table_entry (char **str)
3071 {
3072 unsigned int i;
3073 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3074 {
3075 int length = strlen (reloc_table[i].name);
3076
3077 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3078 && (*str)[length] == ':')
3079 {
3080 *str += (length + 1);
3081 return &reloc_table[i];
3082 }
3083 }
3084
3085 return NULL;
3086 }
3087
/* Mode argument to parse_shift and parse_shifter_operand.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3102
3103 /* Parse a <shift> operator on an AArch64 data processing instruction.
3104 Return TRUE on success; otherwise return FALSE. */
3105 static bfd_boolean
3106 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3107 {
3108 const struct aarch64_name_value_pair *shift_op;
3109 enum aarch64_modifier_kind kind;
3110 expressionS exp;
3111 int exp_has_prefix;
3112 char *s = *str;
3113 char *p = s;
3114
3115 for (p = *str; ISALPHA (*p); p++)
3116 ;
3117
3118 if (p == *str)
3119 {
3120 set_syntax_error (_("shift expression expected"));
3121 return FALSE;
3122 }
3123
3124 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3125
3126 if (shift_op == NULL)
3127 {
3128 set_syntax_error (_("shift operator expected"));
3129 return FALSE;
3130 }
3131
3132 kind = aarch64_get_operand_modifier (shift_op);
3133
3134 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3135 {
3136 set_syntax_error (_("invalid use of 'MSL'"));
3137 return FALSE;
3138 }
3139
3140 if (kind == AARCH64_MOD_MUL
3141 && mode != SHIFTED_MUL
3142 && mode != SHIFTED_MUL_VL)
3143 {
3144 set_syntax_error (_("invalid use of 'MUL'"));
3145 return FALSE;
3146 }
3147
3148 switch (mode)
3149 {
3150 case SHIFTED_LOGIC_IMM:
3151 if (aarch64_extend_operator_p (kind))
3152 {
3153 set_syntax_error (_("extending shift is not permitted"));
3154 return FALSE;
3155 }
3156 break;
3157
3158 case SHIFTED_ARITH_IMM:
3159 if (kind == AARCH64_MOD_ROR)
3160 {
3161 set_syntax_error (_("'ROR' shift is not permitted"));
3162 return FALSE;
3163 }
3164 break;
3165
3166 case SHIFTED_LSL:
3167 if (kind != AARCH64_MOD_LSL)
3168 {
3169 set_syntax_error (_("only 'LSL' shift is permitted"));
3170 return FALSE;
3171 }
3172 break;
3173
3174 case SHIFTED_MUL:
3175 if (kind != AARCH64_MOD_MUL)
3176 {
3177 set_syntax_error (_("only 'MUL' is permitted"));
3178 return FALSE;
3179 }
3180 break;
3181
3182 case SHIFTED_MUL_VL:
3183 /* "MUL VL" consists of two separate tokens. Require the first
3184 token to be "MUL" and look for a following "VL". */
3185 if (kind == AARCH64_MOD_MUL)
3186 {
3187 skip_whitespace (p);
3188 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3189 {
3190 p += 2;
3191 kind = AARCH64_MOD_MUL_VL;
3192 break;
3193 }
3194 }
3195 set_syntax_error (_("only 'MUL VL' is permitted"));
3196 return FALSE;
3197
3198 case SHIFTED_REG_OFFSET:
3199 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3200 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3201 {
3202 set_fatal_syntax_error
3203 (_("invalid shift for the register offset addressing mode"));
3204 return FALSE;
3205 }
3206 break;
3207
3208 case SHIFTED_LSL_MSL:
3209 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3210 {
3211 set_syntax_error (_("invalid shift operator"));
3212 return FALSE;
3213 }
3214 break;
3215
3216 default:
3217 abort ();
3218 }
3219
3220 /* Whitespace can appear here if the next thing is a bare digit. */
3221 skip_whitespace (p);
3222
3223 /* Parse shift amount. */
3224 exp_has_prefix = 0;
3225 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3226 exp.X_op = O_absent;
3227 else
3228 {
3229 if (is_immediate_prefix (*p))
3230 {
3231 p++;
3232 exp_has_prefix = 1;
3233 }
3234 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3235 }
3236 if (kind == AARCH64_MOD_MUL_VL)
3237 /* For consistency, give MUL VL the same shift amount as an implicit
3238 MUL #1. */
3239 operand->shifter.amount = 1;
3240 else if (exp.X_op == O_absent)
3241 {
3242 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3243 {
3244 set_syntax_error (_("missing shift amount"));
3245 return FALSE;
3246 }
3247 operand->shifter.amount = 0;
3248 }
3249 else if (exp.X_op != O_constant)
3250 {
3251 set_syntax_error (_("constant shift amount required"));
3252 return FALSE;
3253 }
3254 /* For parsing purposes, MUL #n has no inherent range. The range
3255 depends on the operand and will be checked by operand-specific
3256 routines. */
3257 else if (kind != AARCH64_MOD_MUL
3258 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3259 {
3260 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3261 return FALSE;
3262 }
3263 else
3264 {
3265 operand->shifter.amount = exp.X_add_number;
3266 operand->shifter.amount_present = 1;
3267 }
3268
3269 operand->shifter.operator_present = 1;
3270 operand->shifter.kind = kind;
3271
3272 *str = p;
3273 return TRUE;
3274 }
3275
3276 /* Parse a <shifter_operand> for a data processing instruction:
3277
3278 #<immediate>
3279 #<immediate>, LSL #imm
3280
3281 Validation of immediate operands is deferred to md_apply_fix.
3282
3283 Return TRUE on success; otherwise return FALSE. */
3284
3285 static bfd_boolean
3286 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3287 enum parse_shift_mode mode)
3288 {
3289 char *p;
3290
3291 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3292 return FALSE;
3293
3294 p = *str;
3295
3296 /* Accept an immediate expression. */
3297 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3298 return FALSE;
3299
3300 /* Accept optional LSL for arithmetic immediate values. */
3301 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3302 if (! parse_shift (&p, operand, SHIFTED_LSL))
3303 return FALSE;
3304
3305 /* Not accept any shifter for logical immediate values. */
3306 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3307 && parse_shift (&p, operand, mode))
3308 {
3309 set_syntax_error (_("unexpected shift operator"));
3310 return FALSE;
3311 }
3312
3313 *str = p;
3314 return TRUE;
3315 }
3316
3317 /* Parse a <shifter_operand> for a data processing instruction:
3318
3319 <Rm>
3320 <Rm>, <shift>
3321 #<immediate>
3322 #<immediate>, LSL #imm
3323
3324 where <shift> is handled by parse_shift above, and the last two
3325 cases are handled by the function above.
3326
3327 Validation of immediate operands is deferred to md_apply_fix.
3328
3329 Return TRUE on success; otherwise return FALSE. */
3330
3331 static bfd_boolean
3332 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3333 enum parse_shift_mode mode)
3334 {
3335 const reg_entry *reg;
3336 aarch64_opnd_qualifier_t qualifier;
3337 enum aarch64_operand_class opd_class
3338 = aarch64_get_operand_class (operand->type);
3339
3340 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3341 if (reg)
3342 {
3343 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3344 {
3345 set_syntax_error (_("unexpected register in the immediate operand"));
3346 return FALSE;
3347 }
3348
3349 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3350 {
3351 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3352 return FALSE;
3353 }
3354
3355 operand->reg.regno = reg->number;
3356 operand->qualifier = qualifier;
3357
3358 /* Accept optional shift operation on register. */
3359 if (! skip_past_comma (str))
3360 return TRUE;
3361
3362 if (! parse_shift (str, operand, mode))
3363 return FALSE;
3364
3365 return TRUE;
3366 }
3367 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3368 {
3369 set_syntax_error
3370 (_("integer register expected in the extended/shifted operand "
3371 "register"));
3372 return FALSE;
3373 }
3374
3375 /* We have a shifted immediate variable. */
3376 return parse_shifter_operand_imm (str, operand, mode);
3377 }
3378
/* Parse a <shifter_operand> that may be preceded by a relocation
   modifier such as "#:lo12:" or ":lo12:".  If a modifier is present,
   record its ADD-class reloc in inst.reloc; otherwise hand the whole
   operand to parse_shifter_operand.
   Return TRUE on success; return FALSE otherwise.  */

static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Step over "#:" or ":" so *str points at the modifier name.  */
      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      /* This context needs the ADD-class reloc; a modifier without one
	 is not usable here.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3439
3440 /* Parse all forms of an address expression. Information is written
3441 to *OPERAND and/or inst.reloc.
3442
3443 The A64 instruction set has the following addressing modes:
3444
3445 Offset
3446 [base] // in SIMD ld/st structure
3447 [base{,#0}] // in ld/st exclusive
3448 [base{,#imm}]
3449 [base,Xm{,LSL #imm}]
3450 [base,Xm,SXTX {#imm}]
3451 [base,Wm,(S|U)XTW {#imm}]
3452 Pre-indexed
3453 [base]! // in ldraa/ldrab exclusive
3454 [base,#imm]!
3455 Post-indexed
3456 [base],#imm
3457 [base],Xm // in SIMD ld/st structure
3458 PC-relative (literal)
3459 label
3460 SVE:
3461 [base,#imm,MUL VL]
3462 [base,Zm.D{,LSL #imm}]
3463 [base,Zm.S,(S|U)XTW {#imm}]
3464 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3465 [Zn.S,#imm]
3466 [Zn.D,#imm]
3467 [Zn.S{, Xm}]
3468 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3469 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3470 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3471
3472 (As a convenience, the notation "=immediate" is permitted in conjunction
3473 with the pc-relative literal load instructions to automatically place an
3474 immediate value or symbolic address in a nearby literal pool and generate
3475 a hidden label which references it.)
3476
3477 Upon a successful parsing, the address structure in *OPERAND will be
3478 filled in the following way:
3479
3480 .base_regno = <base>
3481 .offset.is_reg // 1 if the offset is a register
3482 .offset.imm = <imm>
3483 .offset.regno = <Rm>
3484
3485 For different addressing modes defined in the A64 ISA:
3486
3487 Offset
3488 .pcrel=0; .preind=1; .postind=0; .writeback=0
3489 Pre-indexed
3490 .pcrel=0; .preind=1; .postind=0; .writeback=1
3491 Post-indexed
3492 .pcrel=0; .preind=0; .postind=1; .writeback=1
3493 PC-relative (literal)
3494 .pcrel=1; .preind=1; .postind=0; .writeback=0
3495
3496 The shift/extension information, if any, will be stored in .shifter.
3497 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3498 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3499 corresponding register.
3500
3501 BASE_TYPE says which types of base register should be accepted and
3502 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3503 is the type of shifter that is allowed for immediate offsets,
3504 or SHIFTED_NONE if none.
3505
3506 In all other respects, it is the caller's responsibility to check
3507 for addressing modes not supported by the instruction, and to set
3508 inst.reloc.type. */
3509
static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;

  /* No leading '[' means one of the PC-relative forms: a bare label,
     an =immediate literal-pool request, or #:<reloc_op>:<symbol>.  */
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  /* ADR takes the adr_type variant; everything else here is a
	     PC-relative literal load.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool. */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  /* A '[' was consumed: parse the mandatory base register first.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return FALSE;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}] */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* LSL/SXTX (and no shifter) require a 64-bit offset.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	      /* Base and offset elements must match in size, except for
		 the SVE2 vector-plus-scalar [Zn.S, Xm] form.  */
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return FALSE;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW demands a 32-bit offset register.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      /* Plain immediate offset, optionally followed by a shifter
		 such as "MUL VL" when IMM_SHIFT_MODE allows it.  */
	      if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return FALSE;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return FALSE;
	    }
	}
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      /* Post-index operand: either a 64-bit register or an immediate.  */
      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return FALSE;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return FALSE;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      /* [Zn.<T>] is [Zn.<T>, xzr].  */
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return TRUE;
}
3810
3811 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3812 on success. */
3813 static bfd_boolean
3814 parse_address (char **str, aarch64_opnd_info *operand)
3815 {
3816 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3817 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3818 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3819 }
3820
3821 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3822 The arguments have the same meaning as for parse_address_main.
3823 Return TRUE on success. */
3824 static bfd_boolean
3825 parse_sve_address (char **str, aarch64_opnd_info *operand,
3826 aarch64_opnd_qualifier_t *base_qualifier,
3827 aarch64_opnd_qualifier_t *offset_qualifier)
3828 {
3829 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3830 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3831 SHIFTED_MUL_VL);
3832 }
3833
3834 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3835 Return TRUE on success; otherwise return FALSE. */
3836 static bfd_boolean
3837 parse_half (char **str, int *internal_fixup_p)
3838 {
3839 char *p = *str;
3840
3841 skip_past_char (&p, '#');
3842
3843 gas_assert (internal_fixup_p);
3844 *internal_fixup_p = 0;
3845
3846 if (*p == ':')
3847 {
3848 struct reloc_table_entry *entry;
3849
3850 /* Try to parse a relocation. Anything else is an error. */
3851 ++p;
3852 if (!(entry = find_reloc_table_entry (&p)))
3853 {
3854 set_syntax_error (_("unknown relocation modifier"));
3855 return FALSE;
3856 }
3857
3858 if (entry->movw_type == 0)
3859 {
3860 set_syntax_error
3861 (_("this relocation modifier is not allowed on this instruction"));
3862 return FALSE;
3863 }
3864
3865 inst.reloc.type = entry->movw_type;
3866 }
3867 else
3868 *internal_fixup_p = 1;
3869
3870 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3871 return FALSE;
3872
3873 *str = p;
3874 return TRUE;
3875 }
3876
3877 /* Parse an operand for an ADRP instruction:
3878 ADRP <Xd>, <label>
3879 Return TRUE on success; otherwise return FALSE. */
3880
3881 static bfd_boolean
3882 parse_adrp (char **str)
3883 {
3884 char *p;
3885
3886 p = *str;
3887 if (*p == ':')
3888 {
3889 struct reloc_table_entry *entry;
3890
3891 /* Try to parse a relocation. Anything else is an error. */
3892 ++p;
3893 if (!(entry = find_reloc_table_entry (&p)))
3894 {
3895 set_syntax_error (_("unknown relocation modifier"));
3896 return FALSE;
3897 }
3898
3899 if (entry->adrp_type == 0)
3900 {
3901 set_syntax_error
3902 (_("this relocation modifier is not allowed on this instruction"));
3903 return FALSE;
3904 }
3905
3906 inst.reloc.type = entry->adrp_type;
3907 }
3908 else
3909 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3910
3911 inst.reloc.pc_rel = 1;
3912
3913 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3914 return FALSE;
3915
3916 *str = p;
3917 return TRUE;
3918 }
3919
3920 /* Miscellaneous. */
3921
3922 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3923 of SIZE tokens in which index I gives the token for field value I,
3924 or is null if field value I is invalid. REG_TYPE says which register
3925 names should be treated as registers rather than as symbolic immediates.
3926
3927 Return true on success, moving *STR past the operand and storing the
3928 field value in *VAL. */
3929
3930 static int
3931 parse_enum_string (char **str, int64_t *val, const char *const *array,
3932 size_t size, aarch64_reg_type reg_type)
3933 {
3934 expressionS exp;
3935 char *p, *q;
3936 size_t i;
3937
3938 /* Match C-like tokens. */
3939 p = q = *str;
3940 while (ISALNUM (*q))
3941 q++;
3942
3943 for (i = 0; i < size; ++i)
3944 if (array[i]
3945 && strncasecmp (array[i], p, q - p) == 0
3946 && array[i][q - p] == 0)
3947 {
3948 *val = i;
3949 *str = q;
3950 return TRUE;
3951 }
3952
3953 if (!parse_immediate_expression (&p, &exp, reg_type))
3954 return FALSE;
3955
3956 if (exp.X_op == O_constant
3957 && (uint64_t) exp.X_add_number < size)
3958 {
3959 *val = exp.X_add_number;
3960 *str = p;
3961 return TRUE;
3962 }
3963
3964 /* Use the default error for this operand. */
3965 return FALSE;
3966 }
3967
3968 /* Parse an option for a preload instruction. Returns the encoding for the
3969 option, or PARSE_FAIL. */
3970
3971 static int
3972 parse_pldop (char **str)
3973 {
3974 char *p, *q;
3975 const struct aarch64_name_value_pair *o;
3976
3977 p = q = *str;
3978 while (ISALNUM (*q))
3979 q++;
3980
3981 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
3982 if (!o)
3983 return PARSE_FAIL;
3984
3985 *str = q;
3986 return o->value;
3987 }
3988
3989 /* Parse an option for a barrier instruction. Returns the encoding for the
3990 option, or PARSE_FAIL. */
3991
3992 static int
3993 parse_barrier (char **str)
3994 {
3995 char *p, *q;
3996 const asm_barrier_opt *o;
3997
3998 p = q = *str;
3999 while (ISALPHA (*q))
4000 q++;
4001
4002 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4003 if (!o)
4004 return PARSE_FAIL;
4005
4006 *str = q;
4007 return o->value;
4008 }
4009
4010 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4011 return 0 if successful. Otherwise return PARSE_FAIL. */
4012
4013 static int
4014 parse_barrier_psb (char **str,
4015 const struct aarch64_name_value_pair ** hint_opt)
4016 {
4017 char *p, *q;
4018 const struct aarch64_name_value_pair *o;
4019
4020 p = q = *str;
4021 while (ISALPHA (*q))
4022 q++;
4023
4024 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4025 if (!o)
4026 {
4027 set_fatal_syntax_error
4028 ( _("unknown or missing option to PSB/TSB"));
4029 return PARSE_FAIL;
4030 }
4031
4032 if (o->value != 0x11)
4033 {
4034 /* PSB only accepts option name 'CSYNC'. */
4035 set_syntax_error
4036 (_("the specified option is not accepted for PSB/TSB"));
4037 return PARSE_FAIL;
4038 }
4039
4040 *str = q;
4041 *hint_opt = o;
4042 return 0;
4043 }
4044
4045 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4046 return 0 if successful. Otherwise return PARSE_FAIL. */
4047
4048 static int
4049 parse_bti_operand (char **str,
4050 const struct aarch64_name_value_pair ** hint_opt)
4051 {
4052 char *p, *q;
4053 const struct aarch64_name_value_pair *o;
4054
4055 p = q = *str;
4056 while (ISALPHA (*q))
4057 q++;
4058
4059 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4060 if (!o)
4061 {
4062 set_fatal_syntax_error
4063 ( _("unknown option to BTI"));
4064 return PARSE_FAIL;
4065 }
4066
4067 switch (o->value)
4068 {
4069 /* Valid BTI operands. */
4070 case HINT_OPD_C:
4071 case HINT_OPD_J:
4072 case HINT_OPD_JC:
4073 break;
4074
4075 default:
4076 set_syntax_error
4077 (_("unknown option to BTI"));
4078 return PARSE_FAIL;
4079 }
4080
4081 *str = q;
4082 *hint_opt = o;
4083 return 0;
4084 }
4085
4086 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4087 Returns the encoding for the option, or PARSE_FAIL.
4088
4089 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4090 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4091
4092 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4093 field, otherwise as a system register.
4094 */
4095
static int
parse_sys_reg (char **str, htab_t sys_regs,
	       int imple_defined_p, int pstatefield_p,
	       uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased, length-limited version of the name into BUF.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
	return PARSE_FAIL;
      else
	{
	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
	  unsigned int op0, op1, cn, cm, op2;

	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
	      != 5)
	    return PARSE_FAIL;
	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
	    return PARSE_FAIL;
	  /* Pack the fields op0:op1:Cn:Cm:op2, most to least significant.  */
	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
	  if (flags)
	    *flags = 0;
	}
    }
  else
    {
      /* Known name: diagnose unsupported or deprecated uses, but still
	 return the register's encoding.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
	as_bad (_("selected processor does not support PSTATE field "
		  "name '%s'"), buf);
      if (!pstatefield_p
	  && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->value,
					       o->flags, o->features))
	as_bad (_("selected processor does not support system register "
		  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
	as_warn (_("system register name '%s' is deprecated and may be "
		   "removed in a future release"), buf);
      value = o->value;
      if (flags)
	*flags = o->flags;
    }

  *str = q;
  return value;
}
4159
4160 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4161 for the option, or NULL. */
4162
4163 static const aarch64_sys_ins_reg *
4164 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4165 {
4166 char *p, *q;
4167 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4168 const aarch64_sys_ins_reg *o;
4169
4170 p = buf;
4171 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4172 if (p < buf + (sizeof (buf) - 1))
4173 *p++ = TOLOWER (*q);
4174 *p = '\0';
4175
4176 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4177 valid system register. This is enforced by construction of the hash
4178 table. */
4179 if (p - buf != q - *str)
4180 return NULL;
4181
4182 o = str_hash_find (sys_ins_regs, buf);
4183 if (!o)
4184 return NULL;
4185
4186 if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o->value, o->flags, 0))
4187 as_bad (_("selected processor does not support system register "
4188 "name '%s'"), buf);
4189 if (aarch64_sys_reg_deprecated_p (o->flags))
4190 as_warn (_("system register name '%s' is deprecated and may be "
4191 "removed in a future release"), buf);
4192
4193 *str = q;
4194 return o;
4195 }
4196 \f
/* Operand-parsing helper macros.  They all assume a local variable STR
   holding the current parse position and a local "failure" label to
   branch to on error.  */

/* Consume the character CHR at *STR or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
  } while (0)

/* Parse a register of type REGTYPE into VAL (with its type in RTYPE),
   or fail with the default error.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register into INFO (number and qualifier),
   or fail with the default error.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or fail.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic-enumeration operand into VAL, or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR; branch to "failure" if it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4248 \f
/* Encode the 12-bit immediate field of an add/sub immediate instruction;
   the value is placed at bit 10 upwards.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  uint32_t field = imm;

  field <<= 10;
  return field;
}
4255
/* Encode the shift-amount field of an add/sub immediate instruction;
   the count is placed at bit 22 upwards.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  uint32_t field = cnt;

  field <<= 22;
  return field;
}
4262
4263
/* Encode the immediate field of an ADR instruction: the low two bits of
   IMM go to bits [30:29], bits [20:2] go to bits [23:5].  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t lo = imm & 0x3;		/* [1:0]  -> [30:29] */
  uint32_t hi = (imm >> 2) & 0x7ffff;	/* [20:2] -> [23:5]  */

  return (lo << 29) | (hi << 5);
}
4271
/* Encode the immediate field of a move-wide-immediate instruction;
   the value is placed at bit 5 upwards.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  uint32_t field = imm;

  field <<= 5;
  return field;
}
4278
/* Encode the 26-bit offset of an unconditional branch: keep the low
   26 bits of OFS.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t mask = (1u << 26) - 1;

  return ofs & mask;
}
4285
/* Encode the 19-bit offset of a conditional branch / compare & branch:
   mask to 19 bits and place at bit 5 upwards.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t mask = (1u << 19) - 1;

  return (ofs & mask) << 5;
}
4292
/* Encode the 19-bit offset of a load-literal instruction: mask to 19
   bits and place at bit 5 upwards.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  uint32_t field = ofs & ((1u << 19) - 1);

  return field << 5;
}
4299
/* Encode the 14-bit offset of a test & branch instruction: mask to 14
   bits and place at bit 5 upwards.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t mask = (1u << 14) - 1;

  return (ofs & mask) << 5;
}
4306
/* Encode the 16-bit immediate field of svc/hvc/smc; the value is placed
   at bit 5 upwards.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  uint32_t field = imm;

  field <<= 5;
  return field;
}
4313
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling the
   op bit (bit 30) of the opcode.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t op_bit = 1u << 30;

  return opcode ^ op_bit;
}
4320
/* Force a MOVZ/MOVN-class opcode to its MOVZ form by setting bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;

  return opcode | movz_bit;
}
4326
/* Force a MOVZ/MOVN-class opcode to its MOVN form by clearing bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t movz_bit = 1u << 30;

  return opcode & ~movz_bit;
}
4332
4333 /* Overall per-instruction processing. */
4334
4335 /* We need to be able to fix up arbitrary expressions in some statements.
4336 This is so that we can handle symbols that are an arbitrary distance from
4337 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4338 which returns part of an address in a form which will be valid for
4339 a data instruction. We do this by pushing the expression into a symbol
4340 in the expr_section, and creating a fix for that. */
4341
4342 static fixS *
4343 fix_new_aarch64 (fragS * frag,
4344 int where,
4345 short int size,
4346 expressionS * exp,
4347 int pc_rel,
4348 int reloc)
4349 {
4350 fixS *new_fix;
4351
4352 switch (exp->X_op)
4353 {
4354 case O_constant:
4355 case O_symbol:
4356 case O_add:
4357 case O_subtract:
4358 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4359 break;
4360
4361 default:
4362 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4363 pc_rel, reloc);
4364 break;
4365 }
4366 return new_fix;
4367 }
4368 \f
4369 /* Diagnostics on operands errors. */
4370
/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  Human-readable names
   for enum aarch64_operand_error_kind values, indexed by that enum.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4390
4391 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4392
4393 When multiple errors of different kinds are found in the same assembly
4394 line, only the error of the highest severity will be picked up for
4395 issuing the diagnostics. */
4396
static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The plain '>' comparison below relies on the enum values being
     declared in ascending order of severity; assert that ordering.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4411
4412 /* Helper routine to get the mnemonic name from the assembly instruction
4413 line; should only be called for the diagnosis purpose, as there is
4414 string copy operation involved, which may affect the runtime
4415 performance if used in elsewhere. */
4416
static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.
     NB: the buffer is static, so the returned pointer is only valid
     until the next call.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4440
4441 static void
4442 reset_aarch64_instruction (aarch64_instruction *instruction)
4443 {
4444 memset (instruction, '\0', sizeof (aarch64_instruction));
4445 instruction->reloc.type = BFD_RELOC_UNUSED;
4446 }
4447
4448 /* Data structures storing one user error in the assembly code related to
4449 operands. */
4450
struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode template this error is for.  */
  aarch64_operand_error detail;		/* The recorded error itself.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

struct operand_errors
{
  operand_error_record *head;	/* Most recently inserted record.  */
  operand_error_record *tail;	/* Oldest record.  */
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled from records of previous lines.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4479
4480 /* Initialize the data structure that stores the operand mismatch
4481 information on assembling one line of the assembly code. */
4482 static void
4483 init_operand_error_report (void)
4484 {
4485 if (operand_error_report.head != NULL)
4486 {
4487 gas_assert (operand_error_report.tail != NULL);
4488 operand_error_report.tail->next = free_opnd_error_record_nodes;
4489 free_opnd_error_record_nodes = operand_error_report.head;
4490 operand_error_report.head = NULL;
4491 operand_error_report.tail = NULL;
4492 return;
4493 }
4494 gas_assert (operand_error_report.tail == NULL);
4495 }
4496
4497 /* Return TRUE if some operand error has been recorded during the
4498 parsing of the current assembly line using the opcode *OPCODE;
4499 otherwise return FALSE. */
4500 static inline bfd_boolean
4501 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4502 {
4503 operand_error_record *record = operand_error_report.head;
4504 return record && record->opcode == opcode;
4505 }
4506
4507 /* Add the error record *NEW_RECORD to operand_error_report. The record's
4508 OPCODE field is initialized with OPCODE.
4509 N.B. only one record for each opcode, i.e. the maximum of one error is
4510 recorded for each instruction template. */
4511
static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* NB: RECORD aliases the current list head; if no record exists yet
     for OPCODE it is re-pointed at a freshly inserted head below.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Store (or overwrite) the detail of the head record.  */
  record->detail = new_record->detail;
}
4558
4559 static inline void
4560 record_operand_error_info (const aarch64_opcode *opcode,
4561 aarch64_operand_error *error_info)
4562 {
4563 operand_error_record record;
4564 record.opcode = opcode;
4565 record.detail = *error_info;
4566 add_operand_error_record (&record);
4567 }
4568
4569 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4570 error message *ERROR, for operand IDX (count from 0). */
4571
4572 static void
4573 record_operand_error (const aarch64_opcode *opcode, int idx,
4574 enum aarch64_operand_error_kind kind,
4575 const char* error)
4576 {
4577 aarch64_operand_error info;
4578 memset(&info, 0, sizeof (info));
4579 info.index = idx;
4580 info.kind = kind;
4581 info.error = error;
4582 info.non_fatal = FALSE;
4583 record_operand_error_info (opcode, &info);
4584 }
4585
4586 static void
4587 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4588 enum aarch64_operand_error_kind kind,
4589 const char* error, const int *extra_data)
4590 {
4591 aarch64_operand_error info;
4592 info.index = idx;
4593 info.kind = kind;
4594 info.error = error;
4595 info.data[0] = extra_data[0];
4596 info.data[1] = extra_data[1];
4597 info.data[2] = extra_data[2];
4598 info.non_fatal = FALSE;
4599 record_operand_error_info (opcode, &info);
4600 }
4601
4602 static void
4603 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4604 const char* error, int lower_bound,
4605 int upper_bound)
4606 {
4607 int data[3] = {lower_bound, upper_bound, 0};
4608 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4609 error, data);
4610 }
4611
4612 /* Remove the operand error record for *OPCODE. */
4613 static void ATTRIBUTE_UNUSED
4614 remove_operand_error_record (const aarch64_opcode *opcode)
4615 {
4616 if (opcode_has_operand_error_p (opcode))
4617 {
4618 operand_error_record* record = operand_error_report.head;
4619 gas_assert (record != NULL && operand_error_report.tail != NULL);
4620 operand_error_report.head = record->next;
4621 record->next = free_opnd_error_record_nodes;
4622 free_opnd_error_record_nodes = record;
4623 if (operand_error_report.head == NULL)
4624 {
4625 gas_assert (operand_error_report.tail == record);
4626 operand_error_report.tail = NULL;
4627 }
4628 }
4629 }
4630
4631 /* Given the instruction in *INSTR, return the index of the best matched
4632 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4633
4634 Return -1 if there is no qualifier sequence; return the first match
4635 if there is multiple matches found. */
4636
4637 static int
4638 find_best_match (const aarch64_inst *instr,
4639 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4640 {
4641 int i, num_opnds, max_num_matched, idx;
4642
4643 num_opnds = aarch64_num_of_operands (instr->opcode);
4644 if (num_opnds == 0)
4645 {
4646 DEBUG_TRACE ("no operand");
4647 return -1;
4648 }
4649
4650 max_num_matched = 0;
4651 idx = 0;
4652
4653 /* For each pattern. */
4654 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4655 {
4656 int j, num_matched;
4657 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4658
4659 /* Most opcodes has much fewer patterns in the list. */
4660 if (empty_qualifier_sequence_p (qualifiers))
4661 {
4662 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4663 break;
4664 }
4665
4666 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4667 if (*qualifiers == instr->operands[j].qualifier)
4668 ++num_matched;
4669
4670 if (num_matched > max_num_matched)
4671 {
4672 max_num_matched = num_matched;
4673 idx = i;
4674 }
4675 }
4676
4677 DEBUG_TRACE ("return with %d", idx);
4678 return idx;
4679 }
4680
4681 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4682 corresponding operands in *INSTR. */
4683
4684 static inline void
4685 assign_qualifier_sequence (aarch64_inst *instr,
4686 const aarch64_opnd_qualifier_t *qualifiers)
4687 {
4688 int i = 0;
4689 int num_opnds = aarch64_num_of_operands (instr->opcode);
4690 gas_assert (num_opnds);
4691 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4692 instr->operands[i].qualifier = *qualifiers;
4693 }
4694
4695 /* Print operands for the diagnosis purpose. */
4696
static void
print_operands (char *buf, const aarch64_opcode *opcode,
		const aarch64_opnd_info *opnds)
{
  int i;

  /* NOTE(review): BUF is appended to with unchecked strcat; callers are
     expected to supply a buffer large enough for the full operand list
     (the callers in this file use a 2048-byte buffer) -- confirm if new
     call sites are added.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      char str[128];

      /* We regard the opcode operand info more, however we also look into
	 the inst->operands to support the disassembling of the optional
	 operand.
	 The two operand code should be the same in all cases, apart from
	 when the operand can be optional.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL
	  || opnds[i].type == AARCH64_OPND_NIL)
	break;

      /* Generate the operand string in STR.  */
      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
			     NULL);

      /* Delimiter.  Only emitted when this operand actually printed
	 something; the first printed operand gets a leading space, the
	 rest ", ".  */
      if (str[0] != '\0')
	strcat (buf, i == 0 ? " " : ", ");

      /* Append the operand string.  strcat of an empty STR is a no-op,
	 so an operand that printed nothing contributes nothing.  */
      strcat (buf, str);
    }
}
4728
4729 /* Send to stderr a string as information. */
4730
/* Emit an informational diagnostic on stderr, prefixed with the current
   file name and (when known) line number, in printf style.  */

static void
output_info (const char *format, ...)
{
  unsigned int lineno;
  const char *filename;
  va_list ap;

  filename = as_where (&lineno);
  if (filename != NULL)
    {
      if (lineno == 0)
	fprintf (stderr, "%s: ", filename);
      else
	fprintf (stderr, "%s:%u: ", filename, lineno);
    }

  fprintf (stderr, _("Info: "));
  va_start (ap, format);
  vfprintf (stderr, format, ap);
  va_end (ap);
  (void) putc ('\n', stderr);
}
4752
4753 /* Output one operand error record. */
4754
static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Non-fatal records are issued as warnings, fatal ones as errors.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate. Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report. Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  /* Encoding is expected to fail on this path -- the record exists
	     because the variant was invalid -- but the IR is now populated
	     enough to print suggestions from.  */
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the inclusive bounds set by
	 record_operand_out_of_range_error; equal bounds mean a single
	 legal value.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4929
4930 /* Process and output the error message about the operand mismatching.
4931
4932 When this function is called, the operand error information had
4933 been collected for an assembly line and there will be multiple
4934 errors in the case of multiple instruction templates; output the
4935 error message that most closely describes the problem.
4936
4937 The errors to be printed can be filtered on printing all errors
4938 or only non-fatal errors. This distinction has to be made because
4939 the error buffer may already be filled with fatal errors we don't want to
4940 print due to the different instruction templates. */
4941
static void
output_operand_error_report (char *str, bfd_boolean non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      /* When filtering to non-fatal errors, only non-fatal records may
	 contribute to the chosen severity.  */
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5027 \f
5028 /* Write an AARCH64 instruction to buf - always little-endian. */
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int byte;

  /* Emit the four bytes least-significant first.  */
  for (byte = 0; byte < 4; byte++)
    p[byte] = (insn >> (8 * byte)) & 0xff;
}
5038
/* Read a little-endian AARCH64 instruction from buf.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;
  uint32_t insn = 0;
  int byte;

  /* Fold the bytes in most-significant first, so each shift makes room
     for the next lower byte.  */
  for (byte = 3; byte >= 0; byte--)
    insn = (insn << 8) | p[byte];
  return insn;
}
5048
/* Emit the just-encoded instruction in INST into the output frag, and
   create a fixup for it when a relocation is pending.  NEW_INST, when
   non-NULL, is attached to the fixup for later use by the fixup code.  */
static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve INSN_SIZE bytes in the current frag.  */
  to = frag_more (INSN_SIZE);

  /* Mark this frag as containing AArch64 instructions.  */
  frag_now->tc_frag_data.recorded = 1;

  /* Instructions are always emitted little-endian.  */
  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand and flags so md_apply_fix
	     can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Keep the DWARF line-number information in step with the emitted
     instruction.  */
  dwarf2_emit_insn (INSN_SIZE);
}
5082
5083 /* Link together opcodes of the same name. */
5084
struct templates
{
  /* One opcode entry sharing this mnemonic.  */
  aarch64_opcode *opcode;
  /* Next opcode with the same mnemonic, or NULL at the end of the chain.  */
  struct templates *next;
};

typedef struct templates templates;
5092
5093 static templates *
5094 lookup_mnemonic (const char *start, int len)
5095 {
5096 templates *templ = NULL;
5097
5098 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5099 return templ;
5100 }
5101
5102 /* Subroutine of md_assemble, responsible for looking up the primary
5103 opcode from the mnemonic the user wrote. STR points to the
5104 beginning of the mnemonic. */
5105
static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  Remember only the FIRST '.', as a possible
     condition-suffix separator.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Empty mnemonic, or one that starts with '.', is not valid.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown condition name: leave *STR pointing at the '.' so the
	     caller can diagnose the bad suffix.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* A 13-byte mnemonic plus the 2-byte ".c" suffix fits in the 16-byte
	 CONDNAME buffer; no NUL is added since the lookup is
	 length-bounded.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5165
5166 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5167 to a corresponding operand qualifier. */
5168
static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the final qualifier is computed
     as an offset from this base (see below).  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate suffixes /z and /m map directly.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit vector register sizes exist.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      /* This relies on the qualifier enum laying out each element type's
	 variants contiguously after its ele_base entry.  */
      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5240
5241 /* Process an optional operand that is found omitted from the assembly line.
5242 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5243 instruction's opcode entry while IDX is the index of this omitted operand.
5244 */
5245
static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Register-lane operands: the default is the lane's register number.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is an immediate value.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      /* An omitted scaled pattern defaults to "<pattern>, MUL #1".  */
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      /* An omitted exception immediate needs no relocation.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* DEFAULT_VALUE indexes into the barrier options table.  */
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      /* DEFAULT_VALUE indexes into the hint options table.  */
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5338
5339 /* Process the relocation type for move wide instructions.
5340 Return TRUE on success; otherwise return FALSE. */
5341
static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* These relocation types assume the other 16-bit chunks are rewritten
     too (signed / PC-relative / TLS group relocations), which conflicts
     with MOVK's keep-other-bits semantics.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  /* Map the G0/G1/G2/G3 relocation group to the LSL amount selecting
     which 16-bit chunk of the value the instruction writes.  */
  switch (inst.reloc.type)
    {
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* G2/G3 address bits do not exist in a 32-bit register.  */
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5440
5441 /* A primitive log calculator. */
5442
/* A primitive log calculator: return log2 of SIZE for SIZE in
   {1, 2, 4, 8, 16}; assert (and return -1) for any other value.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* log2 lookup for sizes 1..16; (unsigned char) -1 marks values that are
     not powers of two.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as SIZE > 16: the previous guard only tested
     SIZE > 16, so SIZE == 0 made LS[SIZE - 1] read far out of bounds
     (the unsigned subtraction wraps to UINT_MAX).  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5456
5457 /* Determine and return the real reloc type code for an instruction
5458 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
5459
static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: pseudo reloc type (plain / DTPREL / DTPREL_NC / TPREL /
     TPREL_NC); columns: log2 of the access size (8/16/32/64/128 bits).
     AARCH64_NONE marks combinations with no defined relocation.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* An omitted address qualifier is inferred from the register operand.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* The TLS rows have no 128-bit column (see AARCH64_NONE above).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5535
5536 /* Check whether a register list REGINFO is valid. The registers must be
5537 numbered in increasing order (modulo 32), in increments of one or two.
5538
5539 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5540 increments of two.
5541
5542 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5543
5544 static bfd_boolean
5545 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5546 {
5547 uint32_t i, nb_regs, prev_regno, incr;
5548
5549 nb_regs = 1 + (reginfo & 0x3);
5550 reginfo >>= 2;
5551 prev_regno = reginfo & 0x1f;
5552 incr = accept_alternate ? 2 : 1;
5553
5554 for (i = 1; i < nb_regs; ++i)
5555 {
5556 uint32_t curr_regno;
5557 reginfo >>= 5;
5558 curr_regno = reginfo & 0x1f;
5559 if (curr_regno != ((prev_regno + incr) & 0x1f))
5560 return FALSE;
5561 prev_regno = curr_regno;
5562 }
5563
5564 return TRUE;
5565 }
5566
5567 /* Generic instruction operand parser. This does no encoding and no
5568 semantic validation; it merely squirrels values away in the inst
5569 structure. Returns TRUE or FALSE depending on whether the
5570 specified grammar matched. */
5571
5572 static bfd_boolean
5573 parse_operands (char *str, const aarch64_opcode *opcode)
5574 {
5575 int i;
5576 char *backtrack_pos = 0;
5577 const enum aarch64_opnd *operands = opcode->operands;
5578 aarch64_reg_type imm_reg_type;
5579
5580 clear_error ();
5581 skip_whitespace (str);
5582
5583 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5584 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5585 else
5586 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5587
5588 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5589 {
5590 int64_t val;
5591 const reg_entry *reg;
5592 int comma_skipped_p = 0;
5593 aarch64_reg_type rtype;
5594 struct vector_type_el vectype;
5595 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5596 aarch64_opnd_info *info = &inst.base.operands[i];
5597 aarch64_reg_type reg_type;
5598
5599 DEBUG_TRACE ("parse operand %d", i);
5600
5601 /* Assign the operand code. */
5602 info->type = operands[i];
5603
5604 if (optional_operand_p (opcode, i))
5605 {
5606 /* Remember where we are in case we need to backtrack. */
5607 gas_assert (!backtrack_pos);
5608 backtrack_pos = str;
5609 }
5610
5611 /* Expect comma between operands; the backtrack mechanism will take
5612 care of cases of omitted optional operand. */
5613 if (i > 0 && ! skip_past_char (&str, ','))
5614 {
5615 set_syntax_error (_("comma expected between operands"));
5616 goto failure;
5617 }
5618 else
5619 comma_skipped_p = 1;
5620
5621 switch (operands[i])
5622 {
5623 case AARCH64_OPND_Rd:
5624 case AARCH64_OPND_Rn:
5625 case AARCH64_OPND_Rm:
5626 case AARCH64_OPND_Rt:
5627 case AARCH64_OPND_Rt2:
5628 case AARCH64_OPND_Rs:
5629 case AARCH64_OPND_Ra:
5630 case AARCH64_OPND_Rt_SYS:
5631 case AARCH64_OPND_PAIRREG:
5632 case AARCH64_OPND_SVE_Rm:
5633 po_int_reg_or_fail (REG_TYPE_R_Z);
5634 break;
5635
5636 case AARCH64_OPND_Rd_SP:
5637 case AARCH64_OPND_Rn_SP:
5638 case AARCH64_OPND_Rt_SP:
5639 case AARCH64_OPND_SVE_Rn_SP:
5640 case AARCH64_OPND_Rm_SP:
5641 po_int_reg_or_fail (REG_TYPE_R_SP);
5642 break;
5643
5644 case AARCH64_OPND_Rm_EXT:
5645 case AARCH64_OPND_Rm_SFT:
5646 po_misc_or_fail (parse_shifter_operand
5647 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5648 ? SHIFTED_ARITH_IMM
5649 : SHIFTED_LOGIC_IMM)));
5650 if (!info->shifter.operator_present)
5651 {
5652 /* Default to LSL if not present. Libopcodes prefers shifter
5653 kind to be explicit. */
5654 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5655 info->shifter.kind = AARCH64_MOD_LSL;
5656 /* For Rm_EXT, libopcodes will carry out further check on whether
5657 or not stack pointer is used in the instruction (Recall that
5658 "the extend operator is not optional unless at least one of
5659 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5660 }
5661 break;
5662
5663 case AARCH64_OPND_Fd:
5664 case AARCH64_OPND_Fn:
5665 case AARCH64_OPND_Fm:
5666 case AARCH64_OPND_Fa:
5667 case AARCH64_OPND_Ft:
5668 case AARCH64_OPND_Ft2:
5669 case AARCH64_OPND_Sd:
5670 case AARCH64_OPND_Sn:
5671 case AARCH64_OPND_Sm:
5672 case AARCH64_OPND_SVE_VZn:
5673 case AARCH64_OPND_SVE_Vd:
5674 case AARCH64_OPND_SVE_Vm:
5675 case AARCH64_OPND_SVE_Vn:
5676 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5677 if (val == PARSE_FAIL)
5678 {
5679 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5680 goto failure;
5681 }
5682 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5683
5684 info->reg.regno = val;
5685 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5686 break;
5687
5688 case AARCH64_OPND_SVE_Pd:
5689 case AARCH64_OPND_SVE_Pg3:
5690 case AARCH64_OPND_SVE_Pg4_5:
5691 case AARCH64_OPND_SVE_Pg4_10:
5692 case AARCH64_OPND_SVE_Pg4_16:
5693 case AARCH64_OPND_SVE_Pm:
5694 case AARCH64_OPND_SVE_Pn:
5695 case AARCH64_OPND_SVE_Pt:
5696 reg_type = REG_TYPE_PN;
5697 goto vector_reg;
5698
5699 case AARCH64_OPND_SVE_Za_5:
5700 case AARCH64_OPND_SVE_Za_16:
5701 case AARCH64_OPND_SVE_Zd:
5702 case AARCH64_OPND_SVE_Zm_5:
5703 case AARCH64_OPND_SVE_Zm_16:
5704 case AARCH64_OPND_SVE_Zn:
5705 case AARCH64_OPND_SVE_Zt:
5706 reg_type = REG_TYPE_ZN;
5707 goto vector_reg;
5708
5709 case AARCH64_OPND_Va:
5710 case AARCH64_OPND_Vd:
5711 case AARCH64_OPND_Vn:
5712 case AARCH64_OPND_Vm:
5713 reg_type = REG_TYPE_VN;
5714 vector_reg:
5715 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5716 if (val == PARSE_FAIL)
5717 {
5718 first_error (_(get_reg_expected_msg (reg_type)));
5719 goto failure;
5720 }
5721 if (vectype.defined & NTA_HASINDEX)
5722 goto failure;
5723
5724 info->reg.regno = val;
5725 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5726 && vectype.type == NT_invtype)
5727 /* Unqualified Pn and Zn registers are allowed in certain
5728 contexts. Rely on F_STRICT qualifier checking to catch
5729 invalid uses. */
5730 info->qualifier = AARCH64_OPND_QLF_NIL;
5731 else
5732 {
5733 info->qualifier = vectype_to_qualifier (&vectype);
5734 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5735 goto failure;
5736 }
5737 break;
5738
5739 case AARCH64_OPND_VdD1:
5740 case AARCH64_OPND_VnD1:
5741 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5742 if (val == PARSE_FAIL)
5743 {
5744 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5745 goto failure;
5746 }
5747 if (vectype.type != NT_d || vectype.index != 1)
5748 {
5749 set_fatal_syntax_error
5750 (_("the top half of a 128-bit FP/SIMD register is expected"));
5751 goto failure;
5752 }
5753 info->reg.regno = val;
5754 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5755 here; it is correct for the purpose of encoding/decoding since
5756 only the register number is explicitly encoded in the related
5757 instructions, although this appears a bit hacky. */
5758 info->qualifier = AARCH64_OPND_QLF_S_D;
5759 break;
5760
5761 case AARCH64_OPND_SVE_Zm3_INDEX:
5762 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5763 case AARCH64_OPND_SVE_Zm3_11_INDEX:
5764 case AARCH64_OPND_SVE_Zm4_11_INDEX:
5765 case AARCH64_OPND_SVE_Zm4_INDEX:
5766 case AARCH64_OPND_SVE_Zn_INDEX:
5767 reg_type = REG_TYPE_ZN;
5768 goto vector_reg_index;
5769
5770 case AARCH64_OPND_Ed:
5771 case AARCH64_OPND_En:
5772 case AARCH64_OPND_Em:
5773 case AARCH64_OPND_Em16:
5774 case AARCH64_OPND_SM3_IMM2:
5775 reg_type = REG_TYPE_VN;
5776 vector_reg_index:
5777 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5778 if (val == PARSE_FAIL)
5779 {
5780 first_error (_(get_reg_expected_msg (reg_type)));
5781 goto failure;
5782 }
5783 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5784 goto failure;
5785
5786 info->reglane.regno = val;
5787 info->reglane.index = vectype.index;
5788 info->qualifier = vectype_to_qualifier (&vectype);
5789 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5790 goto failure;
5791 break;
5792
5793 case AARCH64_OPND_SVE_ZnxN:
5794 case AARCH64_OPND_SVE_ZtxN:
5795 reg_type = REG_TYPE_ZN;
5796 goto vector_reg_list;
5797
5798 case AARCH64_OPND_LVn:
5799 case AARCH64_OPND_LVt:
5800 case AARCH64_OPND_LVt_AL:
5801 case AARCH64_OPND_LEt:
5802 reg_type = REG_TYPE_VN;
5803 vector_reg_list:
5804 if (reg_type == REG_TYPE_ZN
5805 && get_opcode_dependent_value (opcode) == 1
5806 && *str != '{')
5807 {
5808 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5809 if (val == PARSE_FAIL)
5810 {
5811 first_error (_(get_reg_expected_msg (reg_type)));
5812 goto failure;
5813 }
5814 info->reglist.first_regno = val;
5815 info->reglist.num_regs = 1;
5816 }
5817 else
5818 {
5819 val = parse_vector_reg_list (&str, reg_type, &vectype);
5820 if (val == PARSE_FAIL)
5821 goto failure;
5822
5823 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5824 {
5825 set_fatal_syntax_error (_("invalid register list"));
5826 goto failure;
5827 }
5828
5829 if (vectype.width != 0 && *str != ',')
5830 {
5831 set_fatal_syntax_error
5832 (_("expected element type rather than vector type"));
5833 goto failure;
5834 }
5835
5836 info->reglist.first_regno = (val >> 2) & 0x1f;
5837 info->reglist.num_regs = (val & 0x3) + 1;
5838 }
5839 if (operands[i] == AARCH64_OPND_LEt)
5840 {
5841 if (!(vectype.defined & NTA_HASINDEX))
5842 goto failure;
5843 info->reglist.has_index = 1;
5844 info->reglist.index = vectype.index;
5845 }
5846 else
5847 {
5848 if (vectype.defined & NTA_HASINDEX)
5849 goto failure;
5850 if (!(vectype.defined & NTA_HASTYPE))
5851 {
5852 if (reg_type == REG_TYPE_ZN)
5853 set_fatal_syntax_error (_("missing type suffix"));
5854 goto failure;
5855 }
5856 }
5857 info->qualifier = vectype_to_qualifier (&vectype);
5858 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5859 goto failure;
5860 break;
5861
5862 case AARCH64_OPND_CRn:
5863 case AARCH64_OPND_CRm:
5864 {
5865 char prefix = *(str++);
5866 if (prefix != 'c' && prefix != 'C')
5867 goto failure;
5868
5869 po_imm_nc_or_fail ();
5870 if (val > 15)
5871 {
5872 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5873 goto failure;
5874 }
5875 info->qualifier = AARCH64_OPND_QLF_CR;
5876 info->imm.value = val;
5877 break;
5878 }
5879
5880 case AARCH64_OPND_SHLL_IMM:
5881 case AARCH64_OPND_IMM_VLSR:
5882 po_imm_or_fail (1, 64);
5883 info->imm.value = val;
5884 break;
5885
5886 case AARCH64_OPND_CCMP_IMM:
5887 case AARCH64_OPND_SIMM5:
5888 case AARCH64_OPND_FBITS:
5889 case AARCH64_OPND_TME_UIMM16:
5890 case AARCH64_OPND_UIMM4:
5891 case AARCH64_OPND_UIMM4_ADDG:
5892 case AARCH64_OPND_UIMM10:
5893 case AARCH64_OPND_UIMM3_OP1:
5894 case AARCH64_OPND_UIMM3_OP2:
5895 case AARCH64_OPND_IMM_VLSL:
5896 case AARCH64_OPND_IMM:
5897 case AARCH64_OPND_IMM_2:
5898 case AARCH64_OPND_WIDTH:
5899 case AARCH64_OPND_SVE_INV_LIMM:
5900 case AARCH64_OPND_SVE_LIMM:
5901 case AARCH64_OPND_SVE_LIMM_MOV:
5902 case AARCH64_OPND_SVE_SHLIMM_PRED:
5903 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5904 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
5905 case AARCH64_OPND_SVE_SHRIMM_PRED:
5906 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5907 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
5908 case AARCH64_OPND_SVE_SIMM5:
5909 case AARCH64_OPND_SVE_SIMM5B:
5910 case AARCH64_OPND_SVE_SIMM6:
5911 case AARCH64_OPND_SVE_SIMM8:
5912 case AARCH64_OPND_SVE_UIMM3:
5913 case AARCH64_OPND_SVE_UIMM7:
5914 case AARCH64_OPND_SVE_UIMM8:
5915 case AARCH64_OPND_SVE_UIMM8_53:
5916 case AARCH64_OPND_IMM_ROT1:
5917 case AARCH64_OPND_IMM_ROT2:
5918 case AARCH64_OPND_IMM_ROT3:
5919 case AARCH64_OPND_SVE_IMM_ROT1:
5920 case AARCH64_OPND_SVE_IMM_ROT2:
5921 case AARCH64_OPND_SVE_IMM_ROT3:
5922 po_imm_nc_or_fail ();
5923 info->imm.value = val;
5924 break;
5925
5926 case AARCH64_OPND_SVE_AIMM:
5927 case AARCH64_OPND_SVE_ASIMM:
5928 po_imm_nc_or_fail ();
5929 info->imm.value = val;
5930 skip_whitespace (str);
5931 if (skip_past_comma (&str))
5932 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5933 else
5934 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5935 break;
5936
5937 case AARCH64_OPND_SVE_PATTERN:
5938 po_enum_or_fail (aarch64_sve_pattern_array);
5939 info->imm.value = val;
5940 break;
5941
5942 case AARCH64_OPND_SVE_PATTERN_SCALED:
5943 po_enum_or_fail (aarch64_sve_pattern_array);
5944 info->imm.value = val;
5945 if (skip_past_comma (&str)
5946 && !parse_shift (&str, info, SHIFTED_MUL))
5947 goto failure;
5948 if (!info->shifter.operator_present)
5949 {
5950 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5951 info->shifter.kind = AARCH64_MOD_MUL;
5952 info->shifter.amount = 1;
5953 }
5954 break;
5955
5956 case AARCH64_OPND_SVE_PRFOP:
5957 po_enum_or_fail (aarch64_sve_prfop_array);
5958 info->imm.value = val;
5959 break;
5960
5961 case AARCH64_OPND_UIMM7:
5962 po_imm_or_fail (0, 127);
5963 info->imm.value = val;
5964 break;
5965
5966 case AARCH64_OPND_IDX:
5967 case AARCH64_OPND_MASK:
5968 case AARCH64_OPND_BIT_NUM:
5969 case AARCH64_OPND_IMMR:
5970 case AARCH64_OPND_IMMS:
5971 po_imm_or_fail (0, 63);
5972 info->imm.value = val;
5973 break;
5974
5975 case AARCH64_OPND_IMM0:
5976 po_imm_nc_or_fail ();
5977 if (val != 0)
5978 {
5979 set_fatal_syntax_error (_("immediate zero expected"));
5980 goto failure;
5981 }
5982 info->imm.value = 0;
5983 break;
5984
5985 case AARCH64_OPND_FPIMM0:
5986 {
5987 int qfloat;
5988 bfd_boolean res1 = FALSE, res2 = FALSE;
5989 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5990 it is probably not worth the effort to support it. */
5991 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5992 imm_reg_type))
5993 && (error_p ()
5994 || !(res2 = parse_constant_immediate (&str, &val,
5995 imm_reg_type))))
5996 goto failure;
5997 if ((res1 && qfloat == 0) || (res2 && val == 0))
5998 {
5999 info->imm.value = 0;
6000 info->imm.is_fp = 1;
6001 break;
6002 }
6003 set_fatal_syntax_error (_("immediate zero expected"));
6004 goto failure;
6005 }
6006
6007 case AARCH64_OPND_IMM_MOV:
6008 {
6009 char *saved = str;
6010 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6011 reg_name_p (str, REG_TYPE_VN))
6012 goto failure;
6013 str = saved;
6014 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6015 GE_OPT_PREFIX, 1));
6016 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6017 later. fix_mov_imm_insn will try to determine a machine
6018 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6019 message if the immediate cannot be moved by a single
6020 instruction. */
6021 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6022 inst.base.operands[i].skip = 1;
6023 }
6024 break;
6025
6026 case AARCH64_OPND_SIMD_IMM:
6027 case AARCH64_OPND_SIMD_IMM_SFT:
6028 if (! parse_big_immediate (&str, &val, imm_reg_type))
6029 goto failure;
6030 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6031 /* addr_off_p */ 0,
6032 /* need_libopcodes_p */ 1,
6033 /* skip_p */ 1);
6034 /* Parse shift.
6035 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6036 shift, we don't check it here; we leave the checking to
6037 the libopcodes (operand_general_constraint_met_p). By
6038 doing this, we achieve better diagnostics. */
6039 if (skip_past_comma (&str)
6040 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6041 goto failure;
6042 if (!info->shifter.operator_present
6043 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6044 {
6045 /* Default to LSL if not present. Libopcodes prefers shifter
6046 kind to be explicit. */
6047 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6048 info->shifter.kind = AARCH64_MOD_LSL;
6049 }
6050 break;
6051
6052 case AARCH64_OPND_FPIMM:
6053 case AARCH64_OPND_SIMD_FPIMM:
6054 case AARCH64_OPND_SVE_FPIMM8:
6055 {
6056 int qfloat;
6057 bfd_boolean dp_p;
6058
6059 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6060 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6061 || !aarch64_imm_float_p (qfloat))
6062 {
6063 if (!error_p ())
6064 set_fatal_syntax_error (_("invalid floating-point"
6065 " constant"));
6066 goto failure;
6067 }
6068 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6069 inst.base.operands[i].imm.is_fp = 1;
6070 }
6071 break;
6072
6073 case AARCH64_OPND_SVE_I1_HALF_ONE:
6074 case AARCH64_OPND_SVE_I1_HALF_TWO:
6075 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6076 {
6077 int qfloat;
6078 bfd_boolean dp_p;
6079
6080 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6081 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6082 {
6083 if (!error_p ())
6084 set_fatal_syntax_error (_("invalid floating-point"
6085 " constant"));
6086 goto failure;
6087 }
6088 inst.base.operands[i].imm.value = qfloat;
6089 inst.base.operands[i].imm.is_fp = 1;
6090 }
6091 break;
6092
6093 case AARCH64_OPND_LIMM:
6094 po_misc_or_fail (parse_shifter_operand (&str, info,
6095 SHIFTED_LOGIC_IMM));
6096 if (info->shifter.operator_present)
6097 {
6098 set_fatal_syntax_error
6099 (_("shift not allowed for bitmask immediate"));
6100 goto failure;
6101 }
6102 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6103 /* addr_off_p */ 0,
6104 /* need_libopcodes_p */ 1,
6105 /* skip_p */ 1);
6106 break;
6107
6108 case AARCH64_OPND_AIMM:
6109 if (opcode->op == OP_ADD)
6110 /* ADD may have relocation types. */
6111 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6112 SHIFTED_ARITH_IMM));
6113 else
6114 po_misc_or_fail (parse_shifter_operand (&str, info,
6115 SHIFTED_ARITH_IMM));
6116 switch (inst.reloc.type)
6117 {
6118 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6119 info->shifter.amount = 12;
6120 break;
6121 case BFD_RELOC_UNUSED:
6122 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6123 if (info->shifter.kind != AARCH64_MOD_NONE)
6124 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6125 inst.reloc.pc_rel = 0;
6126 break;
6127 default:
6128 break;
6129 }
6130 info->imm.value = 0;
6131 if (!info->shifter.operator_present)
6132 {
6133 /* Default to LSL if not present. Libopcodes prefers shifter
6134 kind to be explicit. */
6135 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6136 info->shifter.kind = AARCH64_MOD_LSL;
6137 }
6138 break;
6139
6140 case AARCH64_OPND_HALF:
6141 {
6142 /* #<imm16> or relocation. */
6143 int internal_fixup_p;
6144 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6145 if (internal_fixup_p)
6146 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6147 skip_whitespace (str);
6148 if (skip_past_comma (&str))
6149 {
6150 /* {, LSL #<shift>} */
6151 if (! aarch64_gas_internal_fixup_p ())
6152 {
6153 set_fatal_syntax_error (_("can't mix relocation modifier "
6154 "with explicit shift"));
6155 goto failure;
6156 }
6157 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6158 }
6159 else
6160 inst.base.operands[i].shifter.amount = 0;
6161 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6162 inst.base.operands[i].imm.value = 0;
6163 if (! process_movw_reloc_info ())
6164 goto failure;
6165 }
6166 break;
6167
6168 case AARCH64_OPND_EXCEPTION:
6169 case AARCH64_OPND_UNDEFINED:
6170 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6171 imm_reg_type));
6172 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6173 /* addr_off_p */ 0,
6174 /* need_libopcodes_p */ 0,
6175 /* skip_p */ 1);
6176 break;
6177
6178 case AARCH64_OPND_NZCV:
6179 {
6180 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6181 if (nzcv != NULL)
6182 {
6183 str += 4;
6184 info->imm.value = nzcv->value;
6185 break;
6186 }
6187 po_imm_or_fail (0, 15);
6188 info->imm.value = val;
6189 }
6190 break;
6191
6192 case AARCH64_OPND_COND:
6193 case AARCH64_OPND_COND1:
6194 {
6195 char *start = str;
6196 do
6197 str++;
6198 while (ISALPHA (*str));
6199 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6200 if (info->cond == NULL)
6201 {
6202 set_syntax_error (_("invalid condition"));
6203 goto failure;
6204 }
6205 else if (operands[i] == AARCH64_OPND_COND1
6206 && (info->cond->value & 0xe) == 0xe)
6207 {
6208 /* Do not allow AL or NV. */
6209 set_default_error ();
6210 goto failure;
6211 }
6212 }
6213 break;
6214
6215 case AARCH64_OPND_ADDR_ADRP:
6216 po_misc_or_fail (parse_adrp (&str));
6217 /* Clear the value as operand needs to be relocated. */
6218 info->imm.value = 0;
6219 break;
6220
6221 case AARCH64_OPND_ADDR_PCREL14:
6222 case AARCH64_OPND_ADDR_PCREL19:
6223 case AARCH64_OPND_ADDR_PCREL21:
6224 case AARCH64_OPND_ADDR_PCREL26:
6225 po_misc_or_fail (parse_address (&str, info));
6226 if (!info->addr.pcrel)
6227 {
6228 set_syntax_error (_("invalid pc-relative address"));
6229 goto failure;
6230 }
6231 if (inst.gen_lit_pool
6232 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6233 {
6234 /* Only permit "=value" in the literal load instructions.
6235 The literal will be generated by programmer_friendly_fixup. */
6236 set_syntax_error (_("invalid use of \"=immediate\""));
6237 goto failure;
6238 }
6239 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6240 {
6241 set_syntax_error (_("unrecognized relocation suffix"));
6242 goto failure;
6243 }
6244 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6245 {
6246 info->imm.value = inst.reloc.exp.X_add_number;
6247 inst.reloc.type = BFD_RELOC_UNUSED;
6248 }
6249 else
6250 {
6251 info->imm.value = 0;
6252 if (inst.reloc.type == BFD_RELOC_UNUSED)
6253 switch (opcode->iclass)
6254 {
6255 case compbranch:
6256 case condbranch:
6257 /* e.g. CBZ or B.COND */
6258 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6259 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6260 break;
6261 case testbranch:
6262 /* e.g. TBZ */
6263 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6264 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6265 break;
6266 case branch_imm:
6267 /* e.g. B or BL */
6268 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6269 inst.reloc.type =
6270 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6271 : BFD_RELOC_AARCH64_JUMP26;
6272 break;
6273 case loadlit:
6274 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6275 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6276 break;
6277 case pcreladdr:
6278 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6279 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6280 break;
6281 default:
6282 gas_assert (0);
6283 abort ();
6284 }
6285 inst.reloc.pc_rel = 1;
6286 }
6287 break;
6288
6289 case AARCH64_OPND_ADDR_SIMPLE:
6290 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6291 {
6292 /* [<Xn|SP>{, #<simm>}] */
6293 char *start = str;
6294 /* First use the normal address-parsing routines, to get
6295 the usual syntax errors. */
6296 po_misc_or_fail (parse_address (&str, info));
6297 if (info->addr.pcrel || info->addr.offset.is_reg
6298 || !info->addr.preind || info->addr.postind
6299 || info->addr.writeback)
6300 {
6301 set_syntax_error (_("invalid addressing mode"));
6302 goto failure;
6303 }
6304
6305 /* Then retry, matching the specific syntax of these addresses. */
6306 str = start;
6307 po_char_or_fail ('[');
6308 po_reg_or_fail (REG_TYPE_R64_SP);
6309 /* Accept optional ", #0". */
6310 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6311 && skip_past_char (&str, ','))
6312 {
6313 skip_past_char (&str, '#');
6314 if (! skip_past_char (&str, '0'))
6315 {
6316 set_fatal_syntax_error
6317 (_("the optional immediate offset can only be 0"));
6318 goto failure;
6319 }
6320 }
6321 po_char_or_fail (']');
6322 break;
6323 }
6324
6325 case AARCH64_OPND_ADDR_REGOFF:
6326 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6327 po_misc_or_fail (parse_address (&str, info));
6328 regoff_addr:
6329 if (info->addr.pcrel || !info->addr.offset.is_reg
6330 || !info->addr.preind || info->addr.postind
6331 || info->addr.writeback)
6332 {
6333 set_syntax_error (_("invalid addressing mode"));
6334 goto failure;
6335 }
6336 if (!info->shifter.operator_present)
6337 {
6338 /* Default to LSL if not present. Libopcodes prefers shifter
6339 kind to be explicit. */
6340 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6341 info->shifter.kind = AARCH64_MOD_LSL;
6342 }
6343 /* Qualifier to be deduced by libopcodes. */
6344 break;
6345
6346 case AARCH64_OPND_ADDR_SIMM7:
6347 po_misc_or_fail (parse_address (&str, info));
6348 if (info->addr.pcrel || info->addr.offset.is_reg
6349 || (!info->addr.preind && !info->addr.postind))
6350 {
6351 set_syntax_error (_("invalid addressing mode"));
6352 goto failure;
6353 }
6354 if (inst.reloc.type != BFD_RELOC_UNUSED)
6355 {
6356 set_syntax_error (_("relocation not allowed"));
6357 goto failure;
6358 }
6359 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6360 /* addr_off_p */ 1,
6361 /* need_libopcodes_p */ 1,
6362 /* skip_p */ 0);
6363 break;
6364
6365 case AARCH64_OPND_ADDR_SIMM9:
6366 case AARCH64_OPND_ADDR_SIMM9_2:
6367 case AARCH64_OPND_ADDR_SIMM11:
6368 case AARCH64_OPND_ADDR_SIMM13:
6369 po_misc_or_fail (parse_address (&str, info));
6370 if (info->addr.pcrel || info->addr.offset.is_reg
6371 || (!info->addr.preind && !info->addr.postind)
6372 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6373 && info->addr.writeback))
6374 {
6375 set_syntax_error (_("invalid addressing mode"));
6376 goto failure;
6377 }
6378 if (inst.reloc.type != BFD_RELOC_UNUSED)
6379 {
6380 set_syntax_error (_("relocation not allowed"));
6381 goto failure;
6382 }
6383 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6384 /* addr_off_p */ 1,
6385 /* need_libopcodes_p */ 1,
6386 /* skip_p */ 0);
6387 break;
6388
6389 case AARCH64_OPND_ADDR_SIMM10:
6390 case AARCH64_OPND_ADDR_OFFSET:
6391 po_misc_or_fail (parse_address (&str, info));
6392 if (info->addr.pcrel || info->addr.offset.is_reg
6393 || !info->addr.preind || info->addr.postind)
6394 {
6395 set_syntax_error (_("invalid addressing mode"));
6396 goto failure;
6397 }
6398 if (inst.reloc.type != BFD_RELOC_UNUSED)
6399 {
6400 set_syntax_error (_("relocation not allowed"));
6401 goto failure;
6402 }
6403 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6404 /* addr_off_p */ 1,
6405 /* need_libopcodes_p */ 1,
6406 /* skip_p */ 0);
6407 break;
6408
6409 case AARCH64_OPND_ADDR_UIMM12:
6410 po_misc_or_fail (parse_address (&str, info));
6411 if (info->addr.pcrel || info->addr.offset.is_reg
6412 || !info->addr.preind || info->addr.writeback)
6413 {
6414 set_syntax_error (_("invalid addressing mode"));
6415 goto failure;
6416 }
6417 if (inst.reloc.type == BFD_RELOC_UNUSED)
6418 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6419 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6420 || (inst.reloc.type
6421 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6422 || (inst.reloc.type
6423 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6424 || (inst.reloc.type
6425 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6426 || (inst.reloc.type
6427 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6428 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6429 /* Leave qualifier to be determined by libopcodes. */
6430 break;
6431
6432 case AARCH64_OPND_SIMD_ADDR_POST:
6433 /* [<Xn|SP>], <Xm|#<amount>> */
6434 po_misc_or_fail (parse_address (&str, info));
6435 if (!info->addr.postind || !info->addr.writeback)
6436 {
6437 set_syntax_error (_("invalid addressing mode"));
6438 goto failure;
6439 }
6440 if (!info->addr.offset.is_reg)
6441 {
6442 if (inst.reloc.exp.X_op == O_constant)
6443 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6444 else
6445 {
6446 set_fatal_syntax_error
6447 (_("writeback value must be an immediate constant"));
6448 goto failure;
6449 }
6450 }
6451 /* No qualifier. */
6452 break;
6453
6454 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6455 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
6456 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6457 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6458 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6459 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6460 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6461 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6462 case AARCH64_OPND_SVE_ADDR_RI_U6:
6463 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6464 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6465 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6466 /* [X<n>{, #imm, MUL VL}]
6467 [X<n>{, #imm}]
6468 but recognizing SVE registers. */
6469 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6470 &offset_qualifier));
6471 if (base_qualifier != AARCH64_OPND_QLF_X)
6472 {
6473 set_syntax_error (_("invalid addressing mode"));
6474 goto failure;
6475 }
6476 sve_regimm:
6477 if (info->addr.pcrel || info->addr.offset.is_reg
6478 || !info->addr.preind || info->addr.writeback)
6479 {
6480 set_syntax_error (_("invalid addressing mode"));
6481 goto failure;
6482 }
6483 if (inst.reloc.type != BFD_RELOC_UNUSED
6484 || inst.reloc.exp.X_op != O_constant)
6485 {
6486 /* Make sure this has priority over
6487 "invalid addressing mode". */
6488 set_fatal_syntax_error (_("constant offset required"));
6489 goto failure;
6490 }
6491 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6492 break;
6493
6494 case AARCH64_OPND_SVE_ADDR_R:
6495 /* [<Xn|SP>{, <R><m>}]
6496 but recognizing SVE registers. */
6497 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6498 &offset_qualifier));
6499 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6500 {
6501 offset_qualifier = AARCH64_OPND_QLF_X;
6502 info->addr.offset.is_reg = 1;
6503 info->addr.offset.regno = 31;
6504 }
6505 else if (base_qualifier != AARCH64_OPND_QLF_X
6506 || offset_qualifier != AARCH64_OPND_QLF_X)
6507 {
6508 set_syntax_error (_("invalid addressing mode"));
6509 goto failure;
6510 }
6511 goto regoff_addr;
6512
6513 case AARCH64_OPND_SVE_ADDR_RR:
6514 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6515 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6516 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6517 case AARCH64_OPND_SVE_ADDR_RX:
6518 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6519 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6520 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6521 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6522 but recognizing SVE registers. */
6523 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6524 &offset_qualifier));
6525 if (base_qualifier != AARCH64_OPND_QLF_X
6526 || offset_qualifier != AARCH64_OPND_QLF_X)
6527 {
6528 set_syntax_error (_("invalid addressing mode"));
6529 goto failure;
6530 }
6531 goto regoff_addr;
6532
6533 case AARCH64_OPND_SVE_ADDR_RZ:
6534 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6535 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6536 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6537 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6538 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6539 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6540 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6541 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6542 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6543 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6544 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6545 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6546 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6547 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6548 &offset_qualifier));
6549 if (base_qualifier != AARCH64_OPND_QLF_X
6550 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6551 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6552 {
6553 set_syntax_error (_("invalid addressing mode"));
6554 goto failure;
6555 }
6556 info->qualifier = offset_qualifier;
6557 goto regoff_addr;
6558
6559 case AARCH64_OPND_SVE_ADDR_ZX:
6560 /* [Zn.<T>{, <Xm>}]. */
6561 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6562 &offset_qualifier));
6563 /* Things to check:
6564 base_qualifier either S_S or S_D
6565 offset_qualifier must be X
6566 */
6567 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6568 && base_qualifier != AARCH64_OPND_QLF_S_D)
6569 || offset_qualifier != AARCH64_OPND_QLF_X)
6570 {
6571 set_syntax_error (_("invalid addressing mode"));
6572 goto failure;
6573 }
6574 info->qualifier = base_qualifier;
6575 if (!info->addr.offset.is_reg || info->addr.pcrel
6576 || !info->addr.preind || info->addr.writeback
6577 || info->shifter.operator_present != 0)
6578 {
6579 set_syntax_error (_("invalid addressing mode"));
6580 goto failure;
6581 }
6582 info->shifter.kind = AARCH64_MOD_LSL;
6583 break;
6584
6585
6586 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6587 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6588 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6589 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6590 /* [Z<n>.<T>{, #imm}] */
6591 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6592 &offset_qualifier));
6593 if (base_qualifier != AARCH64_OPND_QLF_S_S
6594 && base_qualifier != AARCH64_OPND_QLF_S_D)
6595 {
6596 set_syntax_error (_("invalid addressing mode"));
6597 goto failure;
6598 }
6599 info->qualifier = base_qualifier;
6600 goto sve_regimm;
6601
6602 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6603 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6604 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6605 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6606 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6607
6608 We don't reject:
6609
6610 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6611
6612 here since we get better error messages by leaving it to
6613 the qualifier checking routines. */
6614 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6615 &offset_qualifier));
6616 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6617 && base_qualifier != AARCH64_OPND_QLF_S_D)
6618 || offset_qualifier != base_qualifier)
6619 {
6620 set_syntax_error (_("invalid addressing mode"));
6621 goto failure;
6622 }
6623 info->qualifier = base_qualifier;
6624 goto regoff_addr;
6625
6626 case AARCH64_OPND_SYSREG:
6627 {
6628 uint32_t sysreg_flags;
6629 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6630 &sysreg_flags)) == PARSE_FAIL)
6631 {
6632 set_syntax_error (_("unknown or missing system register name"));
6633 goto failure;
6634 }
6635 inst.base.operands[i].sysreg.value = val;
6636 inst.base.operands[i].sysreg.flags = sysreg_flags;
6637 break;
6638 }
6639
6640 case AARCH64_OPND_PSTATEFIELD:
6641 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6642 == PARSE_FAIL)
6643 {
6644 set_syntax_error (_("unknown or missing PSTATE field name"));
6645 goto failure;
6646 }
6647 inst.base.operands[i].pstatefield = val;
6648 break;
6649
6650 case AARCH64_OPND_SYSREG_IC:
6651 inst.base.operands[i].sysins_op =
6652 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6653 goto sys_reg_ins;
6654
6655 case AARCH64_OPND_SYSREG_DC:
6656 inst.base.operands[i].sysins_op =
6657 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6658 goto sys_reg_ins;
6659
6660 case AARCH64_OPND_SYSREG_AT:
6661 inst.base.operands[i].sysins_op =
6662 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6663 goto sys_reg_ins;
6664
6665 case AARCH64_OPND_SYSREG_SR:
6666 inst.base.operands[i].sysins_op =
6667 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6668 goto sys_reg_ins;
6669
6670 case AARCH64_OPND_SYSREG_TLBI:
6671 inst.base.operands[i].sysins_op =
6672 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6673 sys_reg_ins:
6674 if (inst.base.operands[i].sysins_op == NULL)
6675 {
6676 set_fatal_syntax_error ( _("unknown or missing operation name"));
6677 goto failure;
6678 }
6679 break;
6680
6681 case AARCH64_OPND_BARRIER:
6682 case AARCH64_OPND_BARRIER_ISB:
6683 val = parse_barrier (&str);
6684 if (val != PARSE_FAIL
6685 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6686 {
6687 /* ISB only accepts options name 'sy'. */
6688 set_syntax_error
6689 (_("the specified option is not accepted in ISB"));
6690 /* Turn off backtrack as this optional operand is present. */
6691 backtrack_pos = 0;
6692 goto failure;
6693 }
6694 /* This is an extension to accept a 0..15 immediate. */
6695 if (val == PARSE_FAIL)
6696 po_imm_or_fail (0, 15);
6697 info->barrier = aarch64_barrier_options + val;
6698 break;
6699
6700 case AARCH64_OPND_PRFOP:
6701 val = parse_pldop (&str);
6702 /* This is an extension to accept a 0..31 immediate. */
6703 if (val == PARSE_FAIL)
6704 po_imm_or_fail (0, 31);
6705 inst.base.operands[i].prfop = aarch64_prfops + val;
6706 break;
6707
6708 case AARCH64_OPND_BARRIER_PSB:
6709 val = parse_barrier_psb (&str, &(info->hint_option));
6710 if (val == PARSE_FAIL)
6711 goto failure;
6712 break;
6713
6714 case AARCH64_OPND_BTI_TARGET:
6715 val = parse_bti_operand (&str, &(info->hint_option));
6716 if (val == PARSE_FAIL)
6717 goto failure;
6718 break;
6719
6720 default:
6721 as_fatal (_("unhandled operand code %d"), operands[i]);
6722 }
6723
6724 /* If we get here, this operand was successfully parsed. */
6725 inst.base.operands[i].present = 1;
6726 continue;
6727
6728 failure:
6729 /* The parse routine should already have set the error, but in case
6730 not, set a default one here. */
6731 if (! error_p ())
6732 set_default_error ();
6733
6734 if (! backtrack_pos)
6735 goto parse_operands_return;
6736
6737 {
6738 /* We reach here because this operand is marked as optional, and
6739 either no operand was supplied or the operand was supplied but it
6740 was syntactically incorrect. In the latter case we report an
6741 error. In the former case we perform a few more checks before
6742 dropping through to the code to insert the default operand. */
6743
6744 char *tmp = backtrack_pos;
6745 char endchar = END_OF_INSN;
6746
6747 if (i != (aarch64_num_of_operands (opcode) - 1))
6748 endchar = ',';
6749 skip_past_char (&tmp, ',');
6750
6751 if (*tmp != endchar)
6752 /* The user has supplied an operand in the wrong format. */
6753 goto parse_operands_return;
6754
6755 /* Make sure there is not a comma before the optional operand.
6756 For example the fifth operand of 'sys' is optional:
6757
6758 sys #0,c0,c0,#0, <--- wrong
6759 sys #0,c0,c0,#0 <--- correct. */
6760 if (comma_skipped_p && i && endchar == END_OF_INSN)
6761 {
6762 set_fatal_syntax_error
6763 (_("unexpected comma before the omitted optional operand"));
6764 goto parse_operands_return;
6765 }
6766 }
6767
6768 /* Reaching here means we are dealing with an optional operand that is
6769 omitted from the assembly line. */
6770 gas_assert (optional_operand_p (opcode, i));
6771 info->present = 0;
6772 process_omitted_operand (operands[i], opcode, i, info);
6773
6774 /* Try again, skipping the optional operand at backtrack_pos. */
6775 str = backtrack_pos;
6776 backtrack_pos = 0;
6777
6778 /* Clear any error record after the omitted optional operand has been
6779 successfully handled. */
6780 clear_error ();
6781 }
6782
6783 /* Check if we have parsed all the operands. */
6784 if (*str != '\0' && ! error_p ())
6785 {
6786 /* Set I to the index of the last present operand; this is
6787 for the purpose of diagnostics. */
6788 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6789 ;
6790 set_fatal_syntax_error
6791 (_("unexpected characters following instruction"));
6792 }
6793
6794 parse_operands_return:
6795
6796 if (error_p ())
6797 {
6798 DEBUG_TRACE ("parsing FAIL: %s - %s",
6799 operand_mismatch_kind_names[get_error_kind ()],
6800 get_error_message ());
6801 /* Record the operand error properly; this is useful when there
6802 are multiple instruction templates for a mnemonic name, so that
6803 later on, we can select the error that most closely describes
6804 the problem. */
6805 record_operand_error (opcode, i, get_error_kind (),
6806 get_error_message ());
6807 return FALSE;
6808 }
6809 else
6810 {
6811 DEBUG_TRACE ("parsing SUCCESS");
6812 return TRUE;
6813 }
6814 }
6815
/* It does some fix-up to provide some programmer friendly feature while
   keeping the libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.

   On failure a suitable operand error has already been recorded via
   record_operand_error / record_operand_out_of_range_error.  */

static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* With a W register the bit number must fit in 0..31.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* Architectural form uses the X-register name; rewrite so that
	     libopcodes accepts it.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6923
/* Check for loads and stores that will cause unpredictable behavior.
   INSTR is the fully parsed instruction; STR is the original assembly
   text, used only for the warning messages.  Only warnings are issued;
   assembly always proceeds.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.
	 Here operand 0 is the transfer register and operand 1 the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.
	 For pair forms, operands 0/1 are the transfer registers and
	 operand 2 is the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.
	 NOTE(review): bit 22 of the encoding is presumably the load/store
	 selector for these pair forms -- confirm against the opcode table.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      /* It is unpredictable if the destination and status registers are the
	 same.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[1].reg.regno
	      || opnds[0].reg.regno == opnds[2].reg.regno))
	as_warn (_("unpredictable: identical transfer and status registers"
		   " --`%s'"),
		 str);

      break;

    default:
      break;
    }
}
6988
6989 static void
6990 force_automatic_sequence_close (void)
6991 {
6992 if (now_instr_sequence.instr)
6993 {
6994 as_warn (_("previous `%s' sequence has not been closed"),
6995 now_instr_sequence.instr->opcode->name);
6996 init_insn_sequence (NULL, &now_instr_sequence);
6997 }
6998 }
6999
7000 /* A wrapper function to interface with libopcodes on encoding and
7001 record the error message if there is any.
7002
7003 Return TRUE on success; otherwise return FALSE. */
7004
7005 static bfd_boolean
7006 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7007 aarch64_insn *code)
7008 {
7009 aarch64_operand_error error_info;
7010 memset (&error_info, '\0', sizeof (error_info));
7011 error_info.kind = AARCH64_OPDE_NIL;
7012 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7013 && !error_info.non_fatal)
7014 return TRUE;
7015
7016 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7017 record_operand_error_info (opcode, &error_info);
7018 return error_info.non_fatal;
7019 }
7020
#ifdef DEBUG_AARCH64
/* Dump each operand of OPCODE to the verbose debug log, using the
   operand's name when it has one and its description otherwise.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  int i;

  for (i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *text = aarch64_get_operand_name (opcode->operands[i]);

      if (text[0] == '\0')
	text = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, text);
    }
}
#endif /* DEBUG_AARCH64 */
7036
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.

   All opcode templates sharing the parsed mnemonic are tried in turn
   until one parses and encodes successfully; if none does, the recorded
   operand errors are reported at the end.  */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* Look up the mnemonic; P is advanced past it on success.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed from the mnemonic across the reset of
     the global instruction state.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, TRUE);
	  return;
	}

      /* This template failed; reset state and try the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, FALSE);
}
7176
7177 /* Various frobbings of labels and their addresses. */
7178
7179 void
7180 aarch64_start_line_hook (void)
7181 {
7182 last_label_seen = NULL;
7183 }
7184
7185 void
7186 aarch64_frob_label (symbolS * sym)
7187 {
7188 last_label_seen = sym;
7189
7190 dwarf2_emit_label (sym);
7191 }
7192
7193 void
7194 aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
7195 {
7196 /* Check to see if we have a block to close. */
7197 force_automatic_sequence_close ();
7198 }
7199
7200 int
7201 aarch64_data_in_code (void)
7202 {
7203 if (!strncmp (input_line_pointer + 1, "data:", 5))
7204 {
7205 *input_line_pointer = '/';
7206 input_line_pointer += 5;
7207 *input_line_pointer = 0;
7208 return 1;
7209 }
7210
7211 return 0;
7212 }
7213
/* Canonicalize NAME in place by stripping a trailing "/data" marker
   (the counterpart of aarch64_data_in_code).  Returns NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t n = strlen (name);

  if (n > 5 && strcmp (name + n - 5, "/data") == 0)
    name[n - 5] = '\0';

  return name;
}
7224 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* Each entry is { name string, register number, register type, flag }.
   REGDEF sets the final flag TRUE and REGDEF_ALIAS sets it FALSE --
   presumably distinguishing canonical names from convenience aliases;
   confirm against the reg_entry declaration.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
/* REGNUM pastes prefix and number: REGNUM(x,0,R_64) -> entry for "x0".  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Registers 0..15 (e.g. the 16 SVE predicate registers).  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* Registers 0..30 (integer registers, where 31 is SP/ZR, not "x31").  */
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
/* Registers 0..31 (FP/SIMD/SVE vector registers).  */
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  /* Procedure-call-standard aliases for x16/x17/x29/x30.  */
  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  /* Register number 31 as stack pointer...  */
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  /* ...and register number 31 as the zero register.  */
  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7294
/* Helper macros for building the NZCV condition-flag table below: an
   uppercase letter means the corresponding flag bit is set, lowercase
   means clear.  B packs the four flags into a 4-bit value N:Z:C:V
   (N is the most significant bit).  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All 16 spellings of the nzcv flag-set operand, mapped to their
   4-bit immediate values.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

/* The single-letter macros above would pollute the rest of the file.  */
#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7332 \f
7333 /* MD interface: bits in the object file. */
7334
7335 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7336 for use in the a.out file, and stores them in the array pointed to by buf.
7337 This knows about the endian-ness of the target machine and does
7338 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7339 2 (short) and 4 (long) Floating numbers are put out as a series of
7340 LITTLENUMS (shorts, here at least). */
7341
7342 void
7343 md_number_to_chars (char *buf, valueT val, int n)
7344 {
7345 if (target_big_endian)
7346 number_to_chars_bigendian (buf, val, n);
7347 else
7348 number_to_chars_littleendian (buf, val, n);
7349 }
7350
7351 /* MD interface: Sections. */
7352
7353 /* Estimate the size of a frag before relaxing. Assume everything fits in
7354 4 bytes. */
7355
7356 int
7357 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
7358 {
7359 fragp->fr_var = 4;
7360 return 4;
7361 }
7362
7363 /* Round up a section size to the appropriate boundary. */
7364
7365 valueT
7366 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
7367 {
7368 return size;
7369 }
7370
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.

   Here we fill the frag with the appropriate info for padding the
   output stream.  The resulting frag will consist of a fixed (fr_fix)
   and of a repeating (fr_var) part.

   The fixed content is always emitted before the repeating content and
   these two parts are used as follows in constructing the output:
   - the fixed part will be used to align to a valid instruction word
     boundary, in case that we start at a misaligned address; as no
     executable instruction can live at the misaligned location, we
     simply fill with zeros;
   - the variable part will be used to cover the remaining padding and
     we fill using the AArch64 NOP instruction.

   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
   enough storage space for up to 3 bytes for padding the back to a valid
   instruction alignment and exactly 4 bytes to store the NOP pattern.  */

void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed and where the padding bytes begin.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach a 4-byte instruction boundary; filled with
     zeros and accounted for in the fixed part of the frag.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating part of the frag is a single NOP instruction.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
7428
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag mapping state; nothing to do.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state ($x/$d style) appropriate for the kind of
     frag being initialised.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}

/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  /* The initial CFA is the stack pointer with offset 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
#endif /* OBJ_ELF */
7483
7484 /* Convert REGNAME to a DWARF-2 register number. */
7485
7486 int
7487 tc_aarch64_regname_to_dw2regnum (char *regname)
7488 {
7489 const reg_entry *reg = parse_reg (&regname);
7490 if (reg == NULL)
7491 return -1;
7492
7493 switch (reg->type)
7494 {
7495 case REG_TYPE_SP_32:
7496 case REG_TYPE_SP_64:
7497 case REG_TYPE_R_32:
7498 case REG_TYPE_R_64:
7499 return reg->number;
7500
7501 case REG_TYPE_FP_B:
7502 case REG_TYPE_FP_H:
7503 case REG_TYPE_FP_S:
7504 case REG_TYPE_FP_D:
7505 case REG_TYPE_FP_Q:
7506 return reg->number + 64;
7507
7508 default:
7509 break;
7510 }
7511 return -1;
7512 }
7513
/* Implement DWARF2_ADDR_SIZE.  Returns the byte width of an address in
   DWARF sections.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* The ILP32 ABI uses 32-bit addresses.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7525
7526 /* MD interface: Symbol and relocation handling. */
7527
7528 /* Return the address within the segment that a PC-relative fixup is
7529 relative to. For AArch64 PC-relative fixups applied to instructions
7530 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7531
7532 long
7533 md_pcrel_from_section (fixS * fixP, segT seg)
7534 {
7535 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7536
7537 /* If this is pc-relative and we are going to emit a relocation
7538 then we just want to put out any pipeline compensation that the linker
7539 will need. Otherwise we want to use the calculated base. */
7540 if (fixP->fx_pcrel
7541 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7542 || aarch64_force_relocation (fixP)))
7543 base = 0;
7544
7545 /* AArch64 should be consistent for all pc-relative relocations. */
7546 return base + AARCH64_PCREL_OFFSET;
7547 }
7548
7549 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
7550 Otherwise we have no need to default values of symbols. */
7551
7552 symbolS *
7553 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
7554 {
7555 #ifdef OBJ_ELF
7556 if (name[0] == '_' && name[1] == 'G'
7557 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
7558 {
7559 if (!GOT_symbol)
7560 {
7561 if (symbol_find (name))
7562 as_bad (_("GOT already in the symbol table"));
7563
7564 GOT_symbol = symbol_new (name, undefined_section,
7565 &zero_address_frag, 0);
7566 }
7567
7568 return GOT_symbol;
7569 }
7570 #endif
7571
7572 return 0;
7573 }
7574
7575 /* Return non-zero if the indicated VALUE has overflowed the maximum
7576 range expressible by a unsigned number with the indicated number of
7577 BITS. */
7578
7579 static bfd_boolean
7580 unsigned_overflow (valueT value, unsigned bits)
7581 {
7582 valueT lim;
7583 if (bits >= sizeof (valueT) * 8)
7584 return FALSE;
7585 lim = (valueT) 1 << bits;
7586 return (value >= lim);
7587 }
7588
7589
7590 /* Return non-zero if the indicated VALUE has overflowed the maximum
7591 range expressible by an signed number with the indicated number of
7592 BITS. */
7593
7594 static bfd_boolean
7595 signed_overflow (offsetT value, unsigned bits)
7596 {
7597 offsetT lim;
7598 if (bits >= sizeof (offsetT) * 8)
7599 return FALSE;
7600 lim = (offsetT) 1 << (bits - 1);
7601 return (value < -lim || value >= lim);
7602 }
7603
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; OP_NIL
     marks opcodes with no such counterpart.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS: new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure here means the
     offset does not fit the unscaled 9-bit form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return FALSE;

  return TRUE;
}
7666
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to
   load a 32-bit/64-bit immediate value into general register.  An
   assembler error shall result if the immediate cannot be created by a
   single one of these instructions.  If there is a choice, then to ensure
   reversibility an assembler must prefer a MOVZ to MOVN, and MOVZ or
   MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  MOV to SP is only encodable via
     the ORR alias, and MOV to ZR only via the MOVZ/MOVN aliases.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  /* The immediate is now resolved; unhide the operand for encoding.  */
  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias first, to keep disassembly reversible.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  (OP_MOV_IMM_WIDEN is the MOVN-based MOV;
	 the previous comment here said "MOVK", which was wrong.)  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias (bitmask immediate), the last resort.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
7727
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate, OR-ed into the instruction word.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with an add/sub shifted reg encoding
		  3  322|2222|2 2  2 21111 111111
		  1  098|7654|3 2  1 09876 543210 98765 43210
	 sf 000|1011|shift 0 Rm imm6 Rn Rd	ADD
	 sf 010|1011|shift 0 Rm imm6 Rn Rd	ADDS
	 sf 100|1011|shift 0 Rm imm6 Rn Rd	SUB
	 sf 110|1011|shift 0 Rm imm6 Rn Rd	SUBS
	 ->
		  3  322|2222|2 2  221111111111
		  1  098|7654|3 2  109876543210 98765 43210
	 sf 001|0001|shift imm12 Rn Rd		ADD
	 sf 011|0001|shift imm12 Rn Rd		ADDS
	 sf 101|0001|shift imm12 Rn Rd		SUB
	 sf 111|0001|shift imm12 Rn Rd		SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub: a negative immediate is encoded via the
	     opposite operation with the absolute value.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only possible
	     when the low 12 bits are zero and it fits in 24 bits.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate: re-encode via the full encoder since the
	 validity check (logical-immediate encodability) is non-trivial.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate (MOVZ/MOVN/MOVK-style field).  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to load a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot
	 be created by a single one of these instructions.  If there is a
	 choice, then to ensure reversibility an assembler must prefer a
	 MOVZ to MOVN, and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up; fall back to the unscaled LDUR/STUR form when the
	 scaled encoding rejects the offset (negative or unaligned).  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7906
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* PC-relative literal load: 19-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* ADR: 21-bit signed byte offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* Conditional branch: 19-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* TBZ/TBNZ: 14-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* B/BL: 26-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVW family: SCALE selects which 16-bit group of the value goes
       into the move-wide immediate field.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo reloc: resolved to the 32- or 64-bit variant per ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Pseudo reloc: resolved to the 32- or 64-bit variant per ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      /* Marker relocations for TLS descriptor sequences; nothing to
	 patch in the instruction itself.  */
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated struct aarch64_inst, if any.
     N.B. currently there are very limited number of fix-up types actually
     use this field, so the impact on the performance should be minimal.  */
  free (fixP->tc_fix_data.inst);

  return;
}
8293
8294 /* Translate internal representation of relocation info to BFD target
8295 format. */
8296
8297 arelent *
8298 tc_gen_reloc (asection * section, fixS * fixp)
8299 {
8300 arelent *reloc;
8301 bfd_reloc_code_real_type code;
8302
8303 reloc = XNEW (arelent);
8304
8305 reloc->sym_ptr_ptr = XNEW (asymbol *);
8306 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8307 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8308
8309 if (fixp->fx_pcrel)
8310 {
8311 if (section->use_rela_p)
8312 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8313 else
8314 fixp->fx_offset = reloc->address;
8315 }
8316 reloc->addend = fixp->fx_offset;
8317
8318 code = fixp->fx_r_type;
8319 switch (code)
8320 {
8321 case BFD_RELOC_16:
8322 if (fixp->fx_pcrel)
8323 code = BFD_RELOC_16_PCREL;
8324 break;
8325
8326 case BFD_RELOC_32:
8327 if (fixp->fx_pcrel)
8328 code = BFD_RELOC_32_PCREL;
8329 break;
8330
8331 case BFD_RELOC_64:
8332 if (fixp->fx_pcrel)
8333 code = BFD_RELOC_64_PCREL;
8334 break;
8335
8336 default:
8337 break;
8338 }
8339
8340 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8341 if (reloc->howto == NULL)
8342 {
8343 as_bad_where (fixp->fx_file, fixp->fx_line,
8344 _
8345 ("cannot represent %s relocation in this object file format"),
8346 bfd_get_reloc_code_name (code));
8347 return NULL;
8348 }
8349
8350 return reloc;
8351 }
8352
8353 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8354
8355 void
8356 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8357 {
8358 bfd_reloc_code_real_type type;
8359 int pcrel = 0;
8360
8361 /* Pick a reloc.
8362 FIXME: @@ Should look at CPU word size. */
8363 switch (size)
8364 {
8365 case 1:
8366 type = BFD_RELOC_8;
8367 break;
8368 case 2:
8369 type = BFD_RELOC_16;
8370 break;
8371 case 4:
8372 type = BFD_RELOC_32;
8373 break;
8374 case 8:
8375 type = BFD_RELOC_64;
8376 break;
8377 default:
8378 as_bad (_("cannot do %u-byte relocation"), size);
8379 type = BFD_RELOC_UNUSED;
8380 break;
8381 }
8382
8383 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8384 }
8385
/* Decide whether the fixup FIXP must be kept as a relocation in the
   object file (return 1) or may be resolved by the assembler (return 0,
   or defer to generic_force_reloc).  */

int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  return generic_force_reloc (fixp);
}
8481
8482 #ifdef OBJ_ELF
8483
8484 /* Implement md_after_parse_args. This is the earliest time we need to decide
8485 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8486
8487 void
8488 aarch64_after_parse_args (void)
8489 {
8490 if (aarch64_abi != AARCH64_ABI_NONE)
8491 return;
8492
8493 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8494 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8495 aarch64_abi = AARCH64_ABI_ILP32;
8496 else
8497 aarch64_abi = AARCH64_ABI_LP64;
8498 }
8499
8500 const char *
8501 elf64_aarch64_target_format (void)
8502 {
8503 #ifdef TE_CLOUDABI
8504 /* FIXME: What to do for ilp32_p ? */
8505 if (target_big_endian)
8506 return "elf64-bigaarch64-cloudabi";
8507 else
8508 return "elf64-littleaarch64-cloudabi";
8509 #else
8510 if (target_big_endian)
8511 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8512 else
8513 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8514 #endif
8515 }
8516
/* Target hook for symbol frobbing at write-out time; simply defers to
   the generic ELF implementation.  */

void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8522 #endif
8523
8524 /* MD interface: Finalization. */
8525
8526 /* A good place to do this, although this was probably not intended
8527 for this kind of use. We need to dump the literal pool before
8528 references are made to a null symbol pointer. */
8529
8530 void
8531 aarch64_cleanup (void)
8532 {
8533 literal_pool *pool;
8534
8535 for (pool = list_of_pools; pool; pool = pool->next)
8536 {
8537 /* Put it at the end of the relevant section. */
8538 subseg_set (pool->section, pool->sub_section);
8539 s_ltorg (0);
8540 }
8541 }
8542
8543 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward over empty frags to decide whether SYM is
	 redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8607 #endif
8608
/* Adjust the symbol table.  Called late in assembly, before writing
   the object file.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8621
/* Insert VALUE into TABLE under KEY.  The trailing 0 is the REPLACE
   argument of str_hash_insert; the name is historical (earlier versions
   performed additional checking on insertion).  */

static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
8627
/* Like checked_hash_insert, but for system-register names: assert that
   KEY is shorter than AARCH64_MAX_SYSREG_NAME_LEN before inserting.
   (Presumably fixed-size parsing buffers elsewhere rely on this bound
   — TODO confirm against the operand parser.)  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
8634
8635 static void
8636 fill_instruction_hash_table (void)
8637 {
8638 aarch64_opcode *opcode = aarch64_opcode_table;
8639
8640 while (opcode->name != NULL)
8641 {
8642 templates *templ, *new_templ;
8643 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
8644
8645 new_templ = XNEW (templates);
8646 new_templ->opcode = opcode;
8647 new_templ->next = NULL;
8648
8649 if (!templ)
8650 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8651 else
8652 {
8653 new_templ->next = templ->next;
8654 templ->next = new_templ;
8655 }
8656 ++opcode;
8657 }
8658 }
8659
/* Copy at most NUM characters from SRC to DST, converting each to upper
   case, and NUL-terminate DST.  Copying also stops at the end of SRC.
   DST must have room for NUM + 1 characters.  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t i;

  /* Use size_t for the index so the size_t bound NUM is never compared
     against a narrower (unsigned int) counter.  */
  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
    *dst = TOUPPER (*src);
  *dst = '\0';
}
8668
8669 /* Assume STR point to a lower-case string, allocate, convert and return
8670 the corresponding upper-case string. */
8671 static inline const char*
8672 get_upper_str (const char *str)
8673 {
8674 char *ret;
8675 size_t len = strlen (str);
8676 ret = XNEWVEC (char, len + 1);
8677 convert_to_upper (ret, str, len);
8678 return ret;
8679 }
8680
/* MD interface: Initialization.  */

/* One-time assembler start-up: build every mnemonic/operand lookup
   table, resolve the command-line cpu/arch selection into CPU_VARIANT,
   and record the target machine with BFD.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables for mnemonics and all named operands.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System-register style operand tables; all are NULL-terminated.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* Register names and NZCV flag-set names.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend operand modifiers, hashed in both cases.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options:
     -mcpu wins over -march, and the build default is the fallback.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
8831
/* Command line processing.  */

/* Short options: only -m<...> is handled by this backend.  */
const char *md_shortopts = "m:";

/* Numeric codes for the long-only -EB/-EL endianness switches.  Only
   the variants this build's target can actually produce are defined.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8858
/* Description of a simple flag-style -m option: matching it stores
   VALUE into *VAR.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Flag options recognized by md_parse_option.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8881
/* Description of a -mcpu selectable processor: NAME maps to the
   feature set VALUE.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A65AE"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
8982
/* Description of a -march selectable architecture: NAME maps to the
   feature set VALUE.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {NULL, AARCH64_ARCH_NONE}
};
9002
/* ISA extensions.  */
/* Description of a "+<ext>" architectural extension: enabling NAME
   turns on VALUE, and REQUIRE lists the features it depends on (used
   by the transitive-closure helpers below).  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* NULL-terminated table of all extensions accepted after '+' in
   -march/-mcpu and in .arch/.arch_extension directives.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_ARCH_NONE},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_ARCH_NONE},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
9091
/* Description of a long -m option that carries an argument; matching
   OPTION as a prefix hands the remainder of the argument to FUNC.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
9099
9100 /* Transitive closure of features depending on set. */
9101 static aarch64_feature_set
9102 aarch64_feature_disable_set (aarch64_feature_set set)
9103 {
9104 const struct aarch64_option_cpu_value_table *opt;
9105 aarch64_feature_set prev = 0;
9106
9107 while (prev != set) {
9108 prev = set;
9109 for (opt = aarch64_features; opt->name != NULL; opt++)
9110 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9111 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9112 }
9113 return set;
9114 }
9115
9116 /* Transitive closure of dependencies of set. */
9117 static aarch64_feature_set
9118 aarch64_feature_enable_set (aarch64_feature_set set)
9119 {
9120 const struct aarch64_option_cpu_value_table *opt;
9121 aarch64_feature_set prev = 0;
9122
9123 while (prev != set) {
9124 prev = set;
9125 for (opt = aarch64_features; opt->name != NULL; opt++)
9126 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9127 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9128 }
9129 return set;
9130 }
9131
9132 static int
9133 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9134 bfd_boolean ext_only)
9135 {
9136 /* We insist on extensions being added before being removed. We achieve
9137 this by using the ADDING_VALUE variable to indicate whether we are
9138 adding an extension (1) or removing it (0) and only allowing it to
9139 change in the order -1 -> 1 -> 0. */
9140 int adding_value = -1;
9141 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9142
9143 /* Copy the feature set, so that we can modify it. */
9144 *ext_set = **opt_p;
9145 *opt_p = ext_set;
9146
9147 while (str != NULL && *str != 0)
9148 {
9149 const struct aarch64_option_cpu_value_table *opt;
9150 const char *ext = NULL;
9151 int optlen;
9152
9153 if (!ext_only)
9154 {
9155 if (*str != '+')
9156 {
9157 as_bad (_("invalid architectural extension"));
9158 return 0;
9159 }
9160
9161 ext = strchr (++str, '+');
9162 }
9163
9164 if (ext != NULL)
9165 optlen = ext - str;
9166 else
9167 optlen = strlen (str);
9168
9169 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
9170 {
9171 if (adding_value != 0)
9172 adding_value = 0;
9173 optlen -= 2;
9174 str += 2;
9175 }
9176 else if (optlen > 0)
9177 {
9178 if (adding_value == -1)
9179 adding_value = 1;
9180 else if (adding_value != 1)
9181 {
9182 as_bad (_("must specify extensions to add before specifying "
9183 "those to remove"));
9184 return FALSE;
9185 }
9186 }
9187
9188 if (optlen == 0)
9189 {
9190 as_bad (_("missing architectural extension"));
9191 return 0;
9192 }
9193
9194 gas_assert (adding_value != -1);
9195
9196 for (opt = aarch64_features; opt->name != NULL; opt++)
9197 if (strncmp (opt->name, str, optlen) == 0)
9198 {
9199 aarch64_feature_set set;
9200
9201 /* Add or remove the extension. */
9202 if (adding_value)
9203 {
9204 set = aarch64_feature_enable_set (opt->value);
9205 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9206 }
9207 else
9208 {
9209 set = aarch64_feature_disable_set (opt->value);
9210 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9211 }
9212 break;
9213 }
9214
9215 if (opt->name == NULL)
9216 {
9217 as_bad (_("unknown architectural extension `%s'"), str);
9218 return 0;
9219 }
9220
9221 str = ext;
9222 };
9223
9224 return 1;
9225 }
9226
9227 static int
9228 aarch64_parse_cpu (const char *str)
9229 {
9230 const struct aarch64_cpu_option_table *opt;
9231 const char *ext = strchr (str, '+');
9232 size_t optlen;
9233
9234 if (ext != NULL)
9235 optlen = ext - str;
9236 else
9237 optlen = strlen (str);
9238
9239 if (optlen == 0)
9240 {
9241 as_bad (_("missing cpu name `%s'"), str);
9242 return 0;
9243 }
9244
9245 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9246 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9247 {
9248 mcpu_cpu_opt = &opt->value;
9249 if (ext != NULL)
9250 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
9251
9252 return 1;
9253 }
9254
9255 as_bad (_("unknown cpu `%s'"), str);
9256 return 0;
9257 }
9258
9259 static int
9260 aarch64_parse_arch (const char *str)
9261 {
9262 const struct aarch64_arch_option_table *opt;
9263 const char *ext = strchr (str, '+');
9264 size_t optlen;
9265
9266 if (ext != NULL)
9267 optlen = ext - str;
9268 else
9269 optlen = strlen (str);
9270
9271 if (optlen == 0)
9272 {
9273 as_bad (_("missing architecture name `%s'"), str);
9274 return 0;
9275 }
9276
9277 for (opt = aarch64_archs; opt->name != NULL; opt++)
9278 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9279 {
9280 march_cpu_opt = &opt->value;
9281 if (ext != NULL)
9282 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
9283
9284 return 1;
9285 }
9286
9287 as_bad (_("unknown architecture `%s'\n"), str);
9288 return 0;
9289 }
9290
/* ABIs.  */
/* Mapping from a -mabi argument string to the data-model enum.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* Note: unlike the cpu/arch tables, this one is not NULL-terminated;
   it is scanned with ARRAY_SIZE.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
9302
9303 static int
9304 aarch64_parse_abi (const char *str)
9305 {
9306 unsigned int i;
9307
9308 if (str[0] == '\0')
9309 {
9310 as_bad (_("missing abi name `%s'"), str);
9311 return 0;
9312 }
9313
9314 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9315 if (strcmp (str, aarch64_abis[i].name) == 0)
9316 {
9317 aarch64_abi = aarch64_abis[i].value;
9318 return 1;
9319 }
9320
9321 as_bad (_("unknown abi `%s'\n"), str);
9322 return 0;
9323 }
9324
/* Argument-carrying -m options; each hands its sub-argument to the
   parser function above.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
9336
/* Handle the target-specific part of command-line option C with
   argument ARG.  Return 1 if the option was consumed, 0 to let the
   generic option code treat it as unknown.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* First try the simple flag options (e.g. -mbig-endian).  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Then try the argument-carrying long options (e.g. -mcpu=...).  */
      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text after the
		 matched prefix; ARG excludes the option character C,
		 hence the "- 1".  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
9404
9405 void
9406 md_show_usage (FILE * fp)
9407 {
9408 struct aarch64_option_table *opt;
9409 struct aarch64_long_option_table *lopt;
9410
9411 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9412
9413 for (opt = aarch64_opts; opt->option != NULL; opt++)
9414 if (opt->help != NULL)
9415 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9416
9417 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9418 if (lopt->help != NULL)
9419 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
9420
9421 #ifdef OPTION_EB
9422 fprintf (fp, _("\
9423 -EB assemble code for a big-endian cpu\n"));
9424 #endif
9425
9426 #ifdef OPTION_EL
9427 fprintf (fp, _("\
9428 -EL assemble code for a little-endian cpu\n"));
9429 #endif
9430 }
9431
9432 /* Parse a .cpu directive. */
9433
9434 static void
9435 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9436 {
9437 const struct aarch64_cpu_option_table *opt;
9438 char saved_char;
9439 char *name;
9440 char *ext;
9441 size_t optlen;
9442
9443 name = input_line_pointer;
9444 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9445 input_line_pointer++;
9446 saved_char = *input_line_pointer;
9447 *input_line_pointer = 0;
9448
9449 ext = strchr (name, '+');
9450
9451 if (ext != NULL)
9452 optlen = ext - name;
9453 else
9454 optlen = strlen (name);
9455
9456 /* Skip the first "all" entry. */
9457 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9458 if (strlen (opt->name) == optlen
9459 && strncmp (name, opt->name, optlen) == 0)
9460 {
9461 mcpu_cpu_opt = &opt->value;
9462 if (ext != NULL)
9463 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9464 return;
9465
9466 cpu_variant = *mcpu_cpu_opt;
9467
9468 *input_line_pointer = saved_char;
9469 demand_empty_rest_of_line ();
9470 return;
9471 }
9472 as_bad (_("unknown cpu `%s'"), name);
9473 *input_line_pointer = saved_char;
9474 ignore_rest_of_line ();
9475 }
9476
9477
9478 /* Parse a .arch directive. */
9479
9480 static void
9481 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9482 {
9483 const struct aarch64_arch_option_table *opt;
9484 char saved_char;
9485 char *name;
9486 char *ext;
9487 size_t optlen;
9488
9489 name = input_line_pointer;
9490 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9491 input_line_pointer++;
9492 saved_char = *input_line_pointer;
9493 *input_line_pointer = 0;
9494
9495 ext = strchr (name, '+');
9496
9497 if (ext != NULL)
9498 optlen = ext - name;
9499 else
9500 optlen = strlen (name);
9501
9502 /* Skip the first "all" entry. */
9503 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9504 if (strlen (opt->name) == optlen
9505 && strncmp (name, opt->name, optlen) == 0)
9506 {
9507 mcpu_cpu_opt = &opt->value;
9508 if (ext != NULL)
9509 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9510 return;
9511
9512 cpu_variant = *mcpu_cpu_opt;
9513
9514 *input_line_pointer = saved_char;
9515 demand_empty_rest_of_line ();
9516 return;
9517 }
9518
9519 as_bad (_("unknown architecture `%s'\n"), name);
9520 *input_line_pointer = saved_char;
9521 ignore_rest_of_line ();
9522 }
9523
9524 /* Parse a .arch_extension directive. */
9525
9526 static void
9527 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9528 {
9529 char saved_char;
9530 char *ext = input_line_pointer;;
9531
9532 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9533 input_line_pointer++;
9534 saved_char = *input_line_pointer;
9535 *input_line_pointer = 0;
9536
9537 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9538 return;
9539
9540 cpu_variant = *mcpu_cpu_opt;
9541
9542 *input_line_pointer = saved_char;
9543 demand_empty_rest_of_line ();
9544 }
9545
/* Copy symbol information.  */

/* Copy the AArch64-specific symbol flag word from SRC to DEST.  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
9553
9554 #ifdef OBJ_ELF
9555 /* Same as elf_copy_symbol_attributes, but without copying st_other.
9556 This is needed so AArch64 specific st_other values can be independently
9557 specified for an IFUNC resolver (that is called by the dynamic linker)
9558 and the symbol it resolves (aliased to the resolver). In particular,
9559 if a function symbol has special st_other value set via directives,
9560 then attaching an IFUNC resolver to that symbol should not override
9561 the st_other setting. Requiring the directive on the IFUNC resolver
9562 symbol would be unexpected and problematic in C code, where the two
9563 symbols appear as two independent function declarations. */
9564
9565 void
9566 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
9567 {
9568 struct elf_obj_sy *srcelf = symbol_get_obj (src);
9569 struct elf_obj_sy *destelf = symbol_get_obj (dest);
9570 if (srcelf->size)
9571 {
9572 if (destelf->size == NULL)
9573 destelf->size = XNEW (expressionS);
9574 *destelf->size = *srcelf->size;
9575 }
9576 else
9577 {
9578 free (destelf->size);
9579 destelf->size = NULL;
9580 }
9581 S_SET_SIZE (dest, S_GET_SIZE (src));
9582 }
9583 #endif