1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2020 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
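/* Note: which ABI is in use is normally selected with the -mabi=lp64 or
   -mabi=ilp32 command-line options; the option parsing that sets aarch64_abi
   is handled elsewhere in this file.  */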
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
106
107 struct vector_type_el
108 {
109 enum vector_el_type type;
110 unsigned char defined;
111 unsigned width;
112 int64_t index;
113 };
114
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
116
117 struct reloc
118 {
119 bfd_reloc_code_real_type type;
120 expressionS exp;
121 int pc_rel;
122 enum aarch64_opnd opnd;
123 uint32_t flags;
124 unsigned need_libopcodes_p : 1;
125 };
126
127 struct aarch64_instruction
128 {
129 /* libopcodes structure for instruction intermediate representation. */
130 aarch64_inst base;
131 /* Record assembly errors found during the parsing. */
132 struct
133 {
134 enum aarch64_operand_error_kind kind;
135 const char *error;
136 } parsing_error;
137 /* The condition that appears in the assembly line. */
138 int cond;
139 /* Relocation information (including the GAS internal fixup). */
140 struct reloc reloc;
141 /* Need to generate an immediate in the literal pool. */
142 unsigned gen_lit_pool : 1;
143 };
144
145 typedef struct aarch64_instruction aarch64_instruction;
146
147 static aarch64_instruction inst;
148
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s); in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
183 static inline bfd_boolean
184 error_p (void)
185 {
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188
189 static inline const char *
190 get_error_message (void)
191 {
192 return inst.parsing_error.error;
193 }
194
195 static inline enum aarch64_operand_error_kind
196 get_error_kind (void)
197 {
198 return inst.parsing_error.kind;
199 }
200
201 static inline void
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
203 {
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
206 }
207
208 static inline void
209 set_recoverable_error (const char *error)
210 {
211 set_error (AARCH64_OPDE_RECOVERABLE, error);
212 }
213
214 /* Use the DESC field of the corresponding aarch64_operand entry to compose
215 the error message. */
216 static inline void
217 set_default_error (void)
218 {
219 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
220 }
221
222 static inline void
223 set_syntax_error (const char *error)
224 {
225 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
226 }
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
235 static inline void
236 set_fatal_syntax_error (const char *error)
237 {
238 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
239 }
240 \f
241 /* Return value for certain parsers when the parsing fails; those parsers
242 return the information of the parsed result, e.g. register number, on
243 success. */
244 #define PARSE_FAIL -1
245
246 /* This is an invalid condition code that means no conditional field is
247 present. */
248 #define COND_ALWAYS 0x10
249
250 typedef struct
251 {
252 const char *template;
253 unsigned long value;
254 } asm_barrier_opt;
255
256 typedef struct
257 {
258 const char *template;
259 uint32_t value;
260 } asm_nzcv;
261
262 struct reloc_entry
263 {
264 char *name;
265 bfd_reloc_code_real_type reloc;
266 };
267
268 /* Macros to define the register types and masks for the purpose
269 of parsing. */
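/* The AARCH64_REG_TYPES list below is an X-macro: it is expanded twice, once
   with BASIC_REG_TYPE(T) defined as REG_TYPE_##T to build the aarch64_reg_type
   enumeration, and once with BASIC_REG_TYPE(T) defined as 1 << REG_TYPE_##T
   (and MULTI_REG_TYPE(T,V) as V) to build the parallel reg_type_masks array.
   For example, BASIC_REG_TYPE(R_32) contributes REG_TYPE_R_32 to the enum and
   the bit (1 << REG_TYPE_R_32) to the corresponding mask entry.  */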
270
271 #undef AARCH64_REG_TYPES
272 #define AARCH64_REG_TYPES \
273 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
274 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
275 BASIC_REG_TYPE(SP_32) /* wsp */ \
276 BASIC_REG_TYPE(SP_64) /* sp */ \
277 BASIC_REG_TYPE(Z_32) /* wzr */ \
278 BASIC_REG_TYPE(Z_64) /* xzr */ \
279 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
280 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
281 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
282 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
283 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
284 BASIC_REG_TYPE(VN) /* v[0-31] */ \
285 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
286 BASIC_REG_TYPE(PN) /* p[0-15] */ \
287 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
288 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
289 /* Typecheck: same, plus SVE registers. */ \
290 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
291 | REG_TYPE(ZN)) \
292 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
293 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
294 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
295 /* Typecheck: same, plus SVE registers. */ \
296 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
298 | REG_TYPE(ZN)) \
299 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
300 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
301 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
302 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
303 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
304 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
306 /* Typecheck: any [BHSDQ]P FP. */ \
307 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
308 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
309 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
310 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
311 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
312 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
313 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
314 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
315 be used for SVE instructions, since Zn and Pn are valid symbols \
316 in other contexts. */ \
317 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
318 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
319 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
320 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
321 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
322 | REG_TYPE(ZN) | REG_TYPE(PN)) \
323 /* Any integer register; used for error messages only. */ \
324 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
325 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
326 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
327 /* Pseudo type to mark the end of the enumerator sequence. */ \
328 BASIC_REG_TYPE(MAX)
329
330 #undef BASIC_REG_TYPE
331 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
332 #undef MULTI_REG_TYPE
333 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
334
335 /* Register type enumerators. */
336 typedef enum aarch64_reg_type_
337 {
338 /* A list of REG_TYPE_*. */
339 AARCH64_REG_TYPES
340 } aarch64_reg_type;
341
342 #undef BASIC_REG_TYPE
343 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
344 #undef REG_TYPE
345 #define REG_TYPE(T) (1 << REG_TYPE_##T)
346 #undef MULTI_REG_TYPE
347 #define MULTI_REG_TYPE(T,V) V,
348
349 /* Structure for a hash table entry for a register. */
350 typedef struct
351 {
352 const char *name;
353 unsigned char number;
354 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
355 unsigned char builtin;
356 } reg_entry;
357
358 /* Values indexed by aarch64_reg_type to assist the type checking. */
359 static const unsigned reg_type_masks[] =
360 {
361 AARCH64_REG_TYPES
362 };
363
364 #undef BASIC_REG_TYPE
365 #undef REG_TYPE
366 #undef MULTI_REG_TYPE
367 #undef AARCH64_REG_TYPES
368
369 /* Diagnostics used when we don't get a register of the expected type.
370 Note: this has to be synchronized with the aarch64_reg_type definitions
371 above. */
372 static const char *
373 get_reg_expected_msg (aarch64_reg_type reg_type)
374 {
375 const char *msg;
376
377 switch (reg_type)
378 {
379 case REG_TYPE_R_32:
380 msg = N_("integer 32-bit register expected");
381 break;
382 case REG_TYPE_R_64:
383 msg = N_("integer 64-bit register expected");
384 break;
385 case REG_TYPE_R_N:
386 msg = N_("integer register expected");
387 break;
388 case REG_TYPE_R64_SP:
389 msg = N_("64-bit integer or SP register expected");
390 break;
391 case REG_TYPE_SVE_BASE:
392 msg = N_("base register expected");
393 break;
394 case REG_TYPE_R_Z:
395 msg = N_("integer or zero register expected");
396 break;
397 case REG_TYPE_SVE_OFFSET:
398 msg = N_("offset register expected");
399 break;
400 case REG_TYPE_R_SP:
401 msg = N_("integer or SP register expected");
402 break;
403 case REG_TYPE_R_Z_SP:
404 msg = N_("integer, zero or SP register expected");
405 break;
406 case REG_TYPE_FP_B:
407 msg = N_("8-bit SIMD scalar register expected");
408 break;
409 case REG_TYPE_FP_H:
410 msg = N_("16-bit SIMD scalar or floating-point half precision "
411 "register expected");
412 break;
413 case REG_TYPE_FP_S:
414 msg = N_("32-bit SIMD scalar or floating-point single precision "
415 "register expected");
416 break;
417 case REG_TYPE_FP_D:
418 msg = N_("64-bit SIMD scalar or floating-point double precision "
419 "register expected");
420 break;
421 case REG_TYPE_FP_Q:
422 msg = N_("128-bit SIMD scalar or floating-point quad precision "
423 "register expected");
424 break;
425 case REG_TYPE_R_Z_BHSDQ_V:
426 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
427 msg = N_("register expected");
428 break;
429 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
430 msg = N_("SIMD scalar or floating-point register expected");
431 break;
432 case REG_TYPE_VN: /* any V reg */
433 msg = N_("vector register expected");
434 break;
435 case REG_TYPE_ZN:
436 msg = N_("SVE vector register expected");
437 break;
438 case REG_TYPE_PN:
439 msg = N_("SVE predicate register expected");
440 break;
441 default:
442 as_fatal (_("invalid register type %d"), reg_type);
443 }
444 return msg;
445 }
446
447 /* Some well known registers that we refer to directly elsewhere. */
448 #define REG_SP 31
449 #define REG_ZR 31
450
451 /* Instructions take 4 bytes in the object file. */
452 #define INSN_SIZE 4
453
454 static htab_t aarch64_ops_hsh;
455 static htab_t aarch64_cond_hsh;
456 static htab_t aarch64_shift_hsh;
457 static htab_t aarch64_sys_regs_hsh;
458 static htab_t aarch64_pstatefield_hsh;
459 static htab_t aarch64_sys_regs_ic_hsh;
460 static htab_t aarch64_sys_regs_dc_hsh;
461 static htab_t aarch64_sys_regs_at_hsh;
462 static htab_t aarch64_sys_regs_tlbi_hsh;
463 static htab_t aarch64_sys_regs_sr_hsh;
464 static htab_t aarch64_reg_hsh;
465 static htab_t aarch64_barrier_opt_hsh;
466 static htab_t aarch64_nzcv_hsh;
467 static htab_t aarch64_pldop_hsh;
468 static htab_t aarch64_hint_opt_hsh;
469
470 /* Stuff needed to resolve the label ambiguity
471 As:
472 ...
473 label: <insn>
474 may differ from:
475 ...
476 label:
477 <insn> */
478
479 static symbolS *last_label_seen;
480
481 /* Literal pool structure. Held on a per-section
482 and per-sub-section basis. */
483
484 #define MAX_LITERAL_POOL_SIZE 1024
485 typedef struct literal_expression
486 {
487 expressionS exp;
488 /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value. */
489 LITTLENUM_TYPE * bignum;
490 } literal_expression;
491
492 typedef struct literal_pool
493 {
494 literal_expression literals[MAX_LITERAL_POOL_SIZE];
495 unsigned int next_free_entry;
496 unsigned int id;
497 symbolS *symbol;
498 segT section;
499 subsegT sub_section;
500 int size;
501 struct literal_pool *next;
502 } literal_pool;
503
504 /* Pointer to a linked list of literal pools. */
505 static literal_pool *list_of_pools = NULL;
506 \f
507 /* Pure syntax. */
508
509 /* This array holds the chars that always start a comment. If the
510 pre-processor is disabled, these aren't very useful. */
511 const char comment_chars[] = "";
512
513 /* This array holds the chars that only start a comment at the beginning of
514 a line. If the line seems to have the form '# 123 filename'
515 .line and .file directives will appear in the pre-processed output. */
516 /* Note that input_file.c hand checks for '#' at the beginning of the
517 first line of the input file. This is because the compiler outputs
518 #NO_APP at the beginning of its output. */
519 /* Also note that comments like this one will always work. */
520 const char line_comment_chars[] = "#";
521
522 const char line_separator_chars[] = ";";
523
524 /* Chars that can be used to separate the mantissa
525 from the exponent in floating point numbers. */
526 const char EXP_CHARS[] = "eE";
527
528 /* Chars that mean this number is a floating point constant. */
529 /* As in 0f12.456 */
530 /* or 0d1.2345e12 */
531
532 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhH";
533
534 /* Prefix character that indicates the start of an immediate value. */
535 #define is_immediate_prefix(C) ((C) == '#')
536
537 /* Separator character handling. */
538
539 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
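/* Note: skip_whitespace deliberately skips at most one space; the input
   scrubber is expected to have collapsed runs of whitespace to single spaces
   (see the note in create_register_alias below).  */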
540
541 static inline bfd_boolean
542 skip_past_char (char **str, char c)
543 {
544 if (**str == c)
545 {
546 (*str)++;
547 return TRUE;
548 }
549 else
550 return FALSE;
551 }
552
553 #define skip_past_comma(str) skip_past_char (str, ',')
554
555 /* Arithmetic expressions (possibly involving symbols). */
556
557 static bfd_boolean in_my_get_expression_p = FALSE;
558
559 /* Third argument to my_get_expression. */
560 #define GE_NO_PREFIX 0
561 #define GE_OPT_PREFIX 1
562
563 /* Return TRUE if the string pointed to by *STR is successfully parsed
564 as a valid expression; *EP will be filled with the information of
565 such an expression. Otherwise return FALSE. */
566
567 static bfd_boolean
568 my_get_expression (expressionS * ep, char **str, int prefix_mode,
569 int reject_absent)
570 {
571 char *save_in;
572 segT seg;
573 int prefix_present_p = 0;
574
575 switch (prefix_mode)
576 {
577 case GE_NO_PREFIX:
578 break;
579 case GE_OPT_PREFIX:
580 if (is_immediate_prefix (**str))
581 {
582 (*str)++;
583 prefix_present_p = 1;
584 }
585 break;
586 default:
587 abort ();
588 }
589
590 memset (ep, 0, sizeof (expressionS));
591
592 save_in = input_line_pointer;
593 input_line_pointer = *str;
594 in_my_get_expression_p = TRUE;
595 seg = expression (ep);
596 in_my_get_expression_p = FALSE;
597
598 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
599 {
600 /* We found a bad expression in md_operand(). */
601 *str = input_line_pointer;
602 input_line_pointer = save_in;
603 if (prefix_present_p && ! error_p ())
604 set_fatal_syntax_error (_("bad expression"));
605 else
606 set_first_syntax_error (_("bad expression"));
607 return FALSE;
608 }
609
610 #ifdef OBJ_AOUT
611 if (seg != absolute_section
612 && seg != text_section
613 && seg != data_section
614 && seg != bss_section && seg != undefined_section)
615 {
616 set_syntax_error (_("bad segment"));
617 *str = input_line_pointer;
618 input_line_pointer = save_in;
619 return FALSE;
620 }
621 #else
622 (void) seg;
623 #endif
624
625 *str = input_line_pointer;
626 input_line_pointer = save_in;
627 return TRUE;
628 }
629
630 /* Turn a string in input_line_pointer into a floating point constant
631 of type TYPE, and store the appropriate bytes in *LITP. The number
632 of LITTLENUMS emitted is stored in *SIZEP. An error message is
633 returned, or NULL on OK. */
634
635 const char *
636 md_atof (int type, char *litP, int *sizeP)
637 {
638 /* If this is a bfloat16 type, then parse it slightly differently -
639 as it does not follow the IEEE standard exactly. */
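  /* bfloat16 is laid out like the top half of an IEEE single: 1 sign bit,
     8 exponent bits and a 7-bit fraction, which is why +Inf and -Inf are
     encoded below as 0x7f80 and 0xff80 respectively.  */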
640 if (type == 'b')
641 {
642 char * t;
643 LITTLENUM_TYPE words[MAX_LITTLENUMS];
644 FLONUM_TYPE generic_float;
645
646 t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float);
647
648 if (t)
649 input_line_pointer = t;
650 else
651 return _("invalid floating point number");
652
653 switch (generic_float.sign)
654 {
655 /* Is +Inf. */
656 case 'P':
657 words[0] = 0x7f80;
658 break;
659
660 /* Is -Inf. */
661 case 'N':
662 words[0] = 0xff80;
663 break;
664
665 /* Is NaN. */
666 /* bfloat16 has two types of NaN - quiet and signalling.
667 A quiet NaN has bit[6] == 1 && fraction != 0, whereas
668 signalling NaNs have bit[6] == 0 && fraction != 0.
669 We chose this specific encoding as it is the same form
670 as used by other IEEE 754 encodings in GAS. */
671 case 0:
672 words[0] = 0x7fff;
673 break;
674
675 default:
676 break;
677 }
678
679 *sizeP = 2;
680
681 md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));
682
683 return NULL;
684 }
685
686 return ieee_md_atof (type, litP, sizeP, target_big_endian);
687 }
688
689 /* We handle all bad expressions here, so that we can report the faulty
690 instruction in the error message. */
691 void
692 md_operand (expressionS * exp)
693 {
694 if (in_my_get_expression_p)
695 exp->X_op = O_illegal;
696 }
697
698 /* Immediate values. */
699
700 /* Errors may be set multiple times during parsing or bit encoding
701 (particularly in the Neon bits), but usually the earliest error which is set
702 will be the most meaningful. Avoid overwriting it with later (cascading)
703 errors by calling this function. */
704
705 static void
706 first_error (const char *error)
707 {
708 if (! error_p ())
709 set_syntax_error (error);
710 }
711
712 /* Similar to first_error, but this function accepts a formatted error
713 message. */
714 static void
715 first_error_fmt (const char *format, ...)
716 {
717 va_list args;
718 enum
719 { size = 100 };
720 /* N.B. this single buffer will not cause error messages for different
721 instructions to pollute each other; this is because at the end of
722 processing of each assembly line, the error message, if any, will be
723 collected by as_bad. */
724 static char buffer[size];
725
726 if (! error_p ())
727 {
728 int ret ATTRIBUTE_UNUSED;
729 va_start (args, format);
730 ret = vsnprintf (buffer, size, format, args);
731 know (ret <= size - 1 && ret >= 0);
732 va_end (args);
733 set_syntax_error (buffer);
734 }
735 }
736
737 /* Register parsing. */
738
739 /* Generic register parser which is called by other specialized
740 register parsers.
741 CCP points to what should be the beginning of a register name.
742 If it is indeed a valid register name, advance CCP over it and
743 return the reg_entry structure; otherwise return NULL.
744 It does not issue diagnostics. */
745
746 static reg_entry *
747 parse_reg (char **ccp)
748 {
749 char *start = *ccp;
750 char *p;
751 reg_entry *reg;
752
753 #ifdef REGISTER_PREFIX
754 if (*start != REGISTER_PREFIX)
755 return NULL;
756 start++;
757 #endif
758
759 p = start;
760 if (!ISALPHA (*p) || !is_name_beginner (*p))
761 return NULL;
762
763 do
764 p++;
765 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
766
767 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
768
769 if (!reg)
770 return NULL;
771
772 *ccp = p;
773 return reg;
774 }
775
776 /* Return TRUE if REG->TYPE is a valid instance of the required type TYPE;
777 otherwise return FALSE. */
778 static bfd_boolean
779 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
780 {
781 return (reg_type_masks[type] & (1 << reg->type)) != 0;
782 }
783
784 /* Try to parse a base or offset register. Allow SVE base and offset
785 registers if REG_TYPE includes SVE registers. Return the register
786 entry on success, setting *QUALIFIER to the register qualifier.
787 Return null otherwise.
788
789 Note that this function does not issue any diagnostics. */
790
791 static const reg_entry *
792 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
793 aarch64_opnd_qualifier_t *qualifier)
794 {
795 char *str = *ccp;
796 const reg_entry *reg = parse_reg (&str);
797
798 if (reg == NULL)
799 return NULL;
800
801 switch (reg->type)
802 {
803 case REG_TYPE_R_32:
804 case REG_TYPE_SP_32:
805 case REG_TYPE_Z_32:
806 *qualifier = AARCH64_OPND_QLF_W;
807 break;
808
809 case REG_TYPE_R_64:
810 case REG_TYPE_SP_64:
811 case REG_TYPE_Z_64:
812 *qualifier = AARCH64_OPND_QLF_X;
813 break;
814
815 case REG_TYPE_ZN:
816 if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
817 || str[0] != '.')
818 return NULL;
819 switch (TOLOWER (str[1]))
820 {
821 case 's':
822 *qualifier = AARCH64_OPND_QLF_S_S;
823 break;
824 case 'd':
825 *qualifier = AARCH64_OPND_QLF_S_D;
826 break;
827 default:
828 return NULL;
829 }
830 str += 2;
831 break;
832
833 default:
834 return NULL;
835 }
836
837 *ccp = str;
838
839 return reg;
840 }
841
842 /* Try to parse a base or offset register. Return the register entry
843 on success, setting *QUALIFIER to the register qualifier. Return null
844 otherwise.
845
846 Note that this function does not issue any diagnostics. */
847
848 static const reg_entry *
849 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
850 {
851 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
852 }
853
854 /* Parse the qualifier of a vector register or vector element of type
855 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
856 succeeds; otherwise return FALSE.
857
858 Accept only one occurrence of:
859 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
860 b h s d q */
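/* For example, the ".4s" in "v0.4s" parses to type NT_s with width 4 (an
   element size of 32 bits), while a bare ".s" on an SVE register such as
   "z0.s" parses with width 0, meaning the number of elements is variable.  */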
861 static bfd_boolean
862 parse_vector_type_for_operand (aarch64_reg_type reg_type,
863 struct vector_type_el *parsed_type, char **str)
864 {
865 char *ptr = *str;
866 unsigned width;
867 unsigned element_size;
868 enum vector_el_type type;
869
870 /* skip '.' */
871 gas_assert (*ptr == '.');
872 ptr++;
873
874 if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
875 {
876 width = 0;
877 goto elt_size;
878 }
879 width = strtoul (ptr, &ptr, 10);
880 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
881 {
882 first_error_fmt (_("bad size %d in vector width specifier"), width);
883 return FALSE;
884 }
885
886 elt_size:
887 switch (TOLOWER (*ptr))
888 {
889 case 'b':
890 type = NT_b;
891 element_size = 8;
892 break;
893 case 'h':
894 type = NT_h;
895 element_size = 16;
896 break;
897 case 's':
898 type = NT_s;
899 element_size = 32;
900 break;
901 case 'd':
902 type = NT_d;
903 element_size = 64;
904 break;
905 case 'q':
906 if (reg_type == REG_TYPE_ZN || width == 1)
907 {
908 type = NT_q;
909 element_size = 128;
910 break;
911 }
912 /* fall through. */
913 default:
914 if (*ptr != '\0')
915 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
916 else
917 first_error (_("missing element size"));
918 return FALSE;
919 }
920 if (width != 0 && width * element_size != 64
921 && width * element_size != 128
922 && !(width == 2 && element_size == 16)
923 && !(width == 4 && element_size == 8))
924 {
925 first_error_fmt (_
926 ("invalid element size %d and vector size combination %c"),
927 width, *ptr);
928 return FALSE;
929 }
930 ptr++;
931
932 parsed_type->type = type;
933 parsed_type->width = width;
934
935 *str = ptr;
936
937 return TRUE;
938 }
939
940 /* *STR contains an SVE zero/merge predication suffix. Parse it into
941 *PARSED_TYPE and point *STR at the end of the suffix. */
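/* For example, the "/z" in "p0/z" selects zeroing predication (NT_zero) and
   the "/m" in "p0/m" selects merging predication (NT_merge).  */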
942
943 static bfd_boolean
944 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
945 {
946 char *ptr = *str;
947
948 /* Skip '/'. */
949 gas_assert (*ptr == '/');
950 ptr++;
951 switch (TOLOWER (*ptr))
952 {
953 case 'z':
954 parsed_type->type = NT_zero;
955 break;
956 case 'm':
957 parsed_type->type = NT_merge;
958 break;
959 default:
960 if (*ptr != '\0' && *ptr != ',')
961 first_error_fmt (_("unexpected character `%c' in predication type"),
962 *ptr);
963 else
964 first_error (_("missing predication type"));
965 return FALSE;
966 }
967 parsed_type->width = 0;
968 *str = ptr + 1;
969 return TRUE;
970 }
971
972 /* Parse a register of the type TYPE.
973
974 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
975 name or the parsed register is not of TYPE.
976
977 Otherwise return the register number, and optionally fill in the actual
978 type of the register in *RTYPE when multiple alternatives were given, and
979 return the register shape and element index information in *TYPEINFO.
980
981 IN_REG_LIST should be set with TRUE if the caller is parsing a register
982 list. */
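/* For example, parsing "v3.4s[1]" returns register number 3, sets *RTYPE to
   REG_TYPE_VN and fills *TYPEINFO with type NT_s, width 4 and index 1.  */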
983
984 static int
985 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
986 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
987 {
988 char *str = *ccp;
989 const reg_entry *reg = parse_reg (&str);
990 struct vector_type_el atype;
991 struct vector_type_el parsetype;
992 bfd_boolean is_typed_vecreg = FALSE;
993
994 atype.defined = 0;
995 atype.type = NT_invtype;
996 atype.width = -1;
997 atype.index = 0;
998
999 if (reg == NULL)
1000 {
1001 if (typeinfo)
1002 *typeinfo = atype;
1003 set_default_error ();
1004 return PARSE_FAIL;
1005 }
1006
1007 if (! aarch64_check_reg_type (reg, type))
1008 {
1009 DEBUG_TRACE ("reg type check failed");
1010 set_default_error ();
1011 return PARSE_FAIL;
1012 }
1013 type = reg->type;
1014
1015 if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
1016 && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
1017 {
1018 if (*str == '.')
1019 {
1020 if (!parse_vector_type_for_operand (type, &parsetype, &str))
1021 return PARSE_FAIL;
1022 }
1023 else
1024 {
1025 if (!parse_predication_for_operand (&parsetype, &str))
1026 return PARSE_FAIL;
1027 }
1028
1029 /* Register is of the form Vn.[bhsdq]. */
1030 is_typed_vecreg = TRUE;
1031
1032 if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
1033 {
1034 /* The width is always variable; we don't allow an integer width
1035 to be specified. */
1036 gas_assert (parsetype.width == 0);
1037 atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
1038 }
1039 else if (parsetype.width == 0)
1040 /* Expect index. In the new scheme we cannot have
1041 Vn.[bhsdq] represent a scalar. Therefore any
1042 Vn.[bhsdq] should have an index following it.
1043 Except in reglists of course. */
1044 atype.defined |= NTA_HASINDEX;
1045 else
1046 atype.defined |= NTA_HASTYPE;
1047
1048 atype.type = parsetype.type;
1049 atype.width = parsetype.width;
1050 }
1051
1052 if (skip_past_char (&str, '['))
1053 {
1054 expressionS exp;
1055
1056 /* Reject Sn[index] syntax. */
1057 if (!is_typed_vecreg)
1058 {
1059 first_error (_("this type of register can't be indexed"));
1060 return PARSE_FAIL;
1061 }
1062
1063 if (in_reg_list)
1064 {
1065 first_error (_("index not allowed inside register list"));
1066 return PARSE_FAIL;
1067 }
1068
1069 atype.defined |= NTA_HASINDEX;
1070
1071 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1072
1073 if (exp.X_op != O_constant)
1074 {
1075 first_error (_("constant expression required"));
1076 return PARSE_FAIL;
1077 }
1078
1079 if (! skip_past_char (&str, ']'))
1080 return PARSE_FAIL;
1081
1082 atype.index = exp.X_add_number;
1083 }
1084 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
1085 {
1086 /* Indexed vector register expected. */
1087 first_error (_("indexed vector register expected"));
1088 return PARSE_FAIL;
1089 }
1090
1091 /* A vector reg Vn should be typed or indexed. */
1092 if (type == REG_TYPE_VN && atype.defined == 0)
1093 {
1094 first_error (_("invalid use of vector register"));
1095 }
1096
1097 if (typeinfo)
1098 *typeinfo = atype;
1099
1100 if (rtype)
1101 *rtype = type;
1102
1103 *ccp = str;
1104
1105 return reg->number;
1106 }
1107
1108 /* Parse register.
1109
1110 Return the register number on success; return PARSE_FAIL otherwise.
1111
1112 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1113 the register (e.g. NEON double or quad reg when either has been requested).
1114
1115 If this is a NEON vector register with additional type information, fill
1116 in the struct pointed to by VECTYPE (if non-NULL).
1117
1118 This parser does not handle register lists. */
1119
1120 static int
1121 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1122 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1123 {
1124 struct vector_type_el atype;
1125 char *str = *ccp;
1126 int reg = parse_typed_reg (&str, type, rtype, &atype,
1127 /*in_reg_list= */ FALSE);
1128
1129 if (reg == PARSE_FAIL)
1130 return PARSE_FAIL;
1131
1132 if (vectype)
1133 *vectype = atype;
1134
1135 *ccp = str;
1136
1137 return reg;
1138 }
1139
1140 static inline bfd_boolean
1141 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1142 {
1143 return
1144 e1.type == e2.type
1145 && e1.defined == e2.defined
1146 && e1.width == e2.width && e1.index == e2.index;
1147 }
1148
1149 /* This function parses a list of vector registers of type TYPE.
1150 On success, it returns the parsed register list information in the
1151 following encoded format:
1152
1153 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1154 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1155
1156 The information of the register shape and/or index is returned in
1157 *VECTYPE.
1158
1159 It returns PARSE_FAIL if the register list is invalid.
1160
1161 The list contains one to four registers.
1162 Each register can be one of:
1163 <Vt>.<T>[<index>]
1164 <Vt>.<T>
1165 All <T> should be identical.
1166 All <index> should be identical.
1167 There are restrictions on <Vt> numbers which are checked later
1168 (by reg_list_valid_p). */
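/* As a worked example of the encoding described above, parsing
   "{v2.4s, v3.4s}" yields nb_regs == 2 with register numbers 2 and 3, so the
   function returns ((2 | (3 << 5)) << 2) | (2 - 1), i.e. 0x189.  */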
1169
1170 static int
1171 parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1172 struct vector_type_el *vectype)
1173 {
1174 char *str = *ccp;
1175 int nb_regs;
1176 struct vector_type_el typeinfo, typeinfo_first;
1177 int val, val_range;
1178 int in_range;
1179 int ret_val;
1180 int i;
1181 bfd_boolean error = FALSE;
1182 bfd_boolean expect_index = FALSE;
1183
1184 if (*str != '{')
1185 {
1186 set_syntax_error (_("expecting {"));
1187 return PARSE_FAIL;
1188 }
1189 str++;
1190
1191 nb_regs = 0;
1192 typeinfo_first.defined = 0;
1193 typeinfo_first.type = NT_invtype;
1194 typeinfo_first.width = -1;
1195 typeinfo_first.index = 0;
1196 ret_val = 0;
1197 val = -1;
1198 val_range = -1;
1199 in_range = 0;
1200 do
1201 {
1202 if (in_range)
1203 {
1204 str++; /* skip over '-' */
1205 val_range = val;
1206 }
1207 val = parse_typed_reg (&str, type, NULL, &typeinfo,
1208 /*in_reg_list= */ TRUE);
1209 if (val == PARSE_FAIL)
1210 {
1211 set_first_syntax_error (_("invalid vector register in list"));
1212 error = TRUE;
1213 continue;
1214 }
1215 /* reject [bhsd]n */
1216 if (type == REG_TYPE_VN && typeinfo.defined == 0)
1217 {
1218 set_first_syntax_error (_("invalid scalar register in list"));
1219 error = TRUE;
1220 continue;
1221 }
1222
1223 if (typeinfo.defined & NTA_HASINDEX)
1224 expect_index = TRUE;
1225
1226 if (in_range)
1227 {
1228 if (val < val_range)
1229 {
1230 set_first_syntax_error
1231 (_("invalid range in vector register list"));
1232 error = TRUE;
1233 }
1234 val_range++;
1235 }
1236 else
1237 {
1238 val_range = val;
1239 if (nb_regs == 0)
1240 typeinfo_first = typeinfo;
1241 else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1242 {
1243 set_first_syntax_error
1244 (_("type mismatch in vector register list"));
1245 error = TRUE;
1246 }
1247 }
1248 if (! error)
1249 for (i = val_range; i <= val; i++)
1250 {
1251 ret_val |= i << (5 * nb_regs);
1252 nb_regs++;
1253 }
1254 in_range = 0;
1255 }
1256 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1257
1258 skip_whitespace (str);
1259 if (*str != '}')
1260 {
1261 set_first_syntax_error (_("end of vector register list not found"));
1262 error = TRUE;
1263 }
1264 str++;
1265
1266 skip_whitespace (str);
1267
1268 if (expect_index)
1269 {
1270 if (skip_past_char (&str, '['))
1271 {
1272 expressionS exp;
1273
1274 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1275 if (exp.X_op != O_constant)
1276 {
1277 set_first_syntax_error (_("constant expression required."));
1278 error = TRUE;
1279 }
1280 if (! skip_past_char (&str, ']'))
1281 error = TRUE;
1282 else
1283 typeinfo_first.index = exp.X_add_number;
1284 }
1285 else
1286 {
1287 set_first_syntax_error (_("expected index"));
1288 error = TRUE;
1289 }
1290 }
1291
1292 if (nb_regs > 4)
1293 {
1294 set_first_syntax_error (_("too many registers in vector register list"));
1295 error = TRUE;
1296 }
1297 else if (nb_regs == 0)
1298 {
1299 set_first_syntax_error (_("empty vector register list"));
1300 error = TRUE;
1301 }
1302
1303 *ccp = str;
1304 if (! error)
1305 *vectype = typeinfo_first;
1306
1307 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1308 }
1309
1310 /* Directives: register aliases. */
1311
1312 static reg_entry *
1313 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1314 {
1315 reg_entry *new;
1316 const char *name;
1317
1318 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1319 {
1320 if (new->builtin)
1321 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1322 str);
1323
1324 /* Only warn about a redefinition if it's not defined as the
1325 same register. */
1326 else if (new->number != number || new->type != type)
1327 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1328
1329 return NULL;
1330 }
1331
1332 name = xstrdup (str);
1333 new = XNEW (reg_entry);
1334
1335 new->name = name;
1336 new->number = number;
1337 new->type = type;
1338 new->builtin = FALSE;
1339
1340 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1341
1342 return new;
1343 }
1344
1345 /* Look for the .req directive. This is of the form:
1346
1347 new_register_name .req existing_register_name
1348
1349 If we find one, or if it looks sufficiently like one that we want to
1350 handle any error here, return TRUE. Otherwise return FALSE. */
1351
1352 static bfd_boolean
1353 create_register_alias (char *newname, char *p)
1354 {
1355 const reg_entry *old;
1356 char *oldname, *nbuf;
1357 size_t nlen;
1358
1359 /* The input scrubber ensures that whitespace after the mnemonic is
1360 collapsed to single spaces. */
1361 oldname = p;
1362 if (strncmp (oldname, " .req ", 6) != 0)
1363 return FALSE;
1364
1365 oldname += 6;
1366 if (*oldname == '\0')
1367 return FALSE;
1368
1369 old = str_hash_find (aarch64_reg_hsh, oldname);
1370 if (!old)
1371 {
1372 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1373 return TRUE;
1374 }
1375
1376 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1377 the desired alias name, and p points to its end. If not, then
1378 the desired alias name is in the global original_case_string. */
1379 #ifdef TC_CASE_SENSITIVE
1380 nlen = p - newname;
1381 #else
1382 newname = original_case_string;
1383 nlen = strlen (newname);
1384 #endif
1385
1386 nbuf = xmemdup0 (newname, nlen);
1387
1388 /* Create aliases under the new name as stated; an all-lowercase
1389 version of the new name; and an all-uppercase version of the new
1390 name. */
1391 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1392 {
1393 for (p = nbuf; *p; p++)
1394 *p = TOUPPER (*p);
1395
1396 if (strncmp (nbuf, newname, nlen))
1397 {
1398 /* If this attempt to create an additional alias fails, do not bother
1399 trying to create the all-lower case alias. We will fail and issue
1400 a second, duplicate error message. This situation arises when the
1401 programmer does something like:
1402 foo .req r0
1403 Foo .req r1
1404 The second .req creates the "Foo" alias but then fails to create
1405 the artificial FOO alias because it has already been created by the
1406 first .req. */
1407 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1408 {
1409 free (nbuf);
1410 return TRUE;
1411 }
1412 }
1413
1414 for (p = nbuf; *p; p++)
1415 *p = TOLOWER (*p);
1416
1417 if (strncmp (nbuf, newname, nlen))
1418 insert_reg_alias (nbuf, old->number, old->type);
1419 }
1420
1421 free (nbuf);
1422 return TRUE;
1423 }
1424
1425 /* Should never be called, as .req goes between the alias and the
1426 register name, not at the beginning of the line. */
1427 static void
1428 s_req (int a ATTRIBUTE_UNUSED)
1429 {
1430 as_bad (_("invalid syntax for .req directive"));
1431 }
1432
1433 /* The .unreq directive deletes an alias which was previously defined
1434 by .req. For example:
1435
1436 my_alias .req r11
1437 .unreq my_alias */
1438
1439 static void
1440 s_unreq (int a ATTRIBUTE_UNUSED)
1441 {
1442 char *name;
1443 char saved_char;
1444
1445 name = input_line_pointer;
1446
1447 while (*input_line_pointer != 0
1448 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1449 ++input_line_pointer;
1450
1451 saved_char = *input_line_pointer;
1452 *input_line_pointer = 0;
1453
1454 if (!*name)
1455 as_bad (_("invalid syntax for .unreq directive"));
1456 else
1457 {
1458 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1459
1460 if (!reg)
1461 as_bad (_("unknown register alias '%s'"), name);
1462 else if (reg->builtin)
1463 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1464 name);
1465 else
1466 {
1467 char *p;
1468 char *nbuf;
1469
1470 str_hash_delete (aarch64_reg_hsh, name);
1471 free ((char *) reg->name);
1472 free (reg);
1473
1474 /* Also locate the all upper case and all lower case versions.
1475 Do not complain if we cannot find one or the other as it
1476 was probably deleted above. */
1477
1478 nbuf = strdup (name);
1479 for (p = nbuf; *p; p++)
1480 *p = TOUPPER (*p);
1481 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1482 if (reg)
1483 {
1484 str_hash_delete (aarch64_reg_hsh, nbuf);
1485 free ((char *) reg->name);
1486 free (reg);
1487 }
1488
1489 for (p = nbuf; *p; p++)
1490 *p = TOLOWER (*p);
1491 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1492 if (reg)
1493 {
1494 str_hash_delete (aarch64_reg_hsh, nbuf);
1495 free ((char *) reg->name);
1496 free (reg);
1497 }
1498
1499 free (nbuf);
1500 }
1501 }
1502
1503 *input_line_pointer = saved_char;
1504 demand_empty_rest_of_line ();
1505 }
1506
1507 /* Directives: Instruction set selection. */
1508
1509 #ifdef OBJ_ELF
1510 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1511 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1512 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1513 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
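/* In practice this means a section that interleaves code and data (for
   instance code, then a literal pool, then more code) gets a $x mapping
   symbol at the start of each code region and a $d symbol at the start of
   each data region, so that tools such as disassemblers can tell the two
   apart.  */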
1514
1515 /* Create a new mapping symbol for the transition to STATE. */
1516
1517 static void
1518 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1519 {
1520 symbolS *symbolP;
1521 const char *symname;
1522 int type;
1523
1524 switch (state)
1525 {
1526 case MAP_DATA:
1527 symname = "$d";
1528 type = BSF_NO_FLAGS;
1529 break;
1530 case MAP_INSN:
1531 symname = "$x";
1532 type = BSF_NO_FLAGS;
1533 break;
1534 default:
1535 abort ();
1536 }
1537
1538 symbolP = symbol_new (symname, now_seg, frag, value);
1539 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1540
1541 /* Save the mapping symbols for future reference. Also check that
1542 we do not place two mapping symbols at the same offset within a
1543 frag. We'll handle overlap between frags in
1544 check_mapping_symbols.
1545
1546 If .fill or other data filling directive generates zero sized data,
1547 the mapping symbol for the following code will have the same value
1548 as the one generated for the data filling directive. In this case,
1549 we replace the old symbol with the new one at the same address. */
1550 if (value == 0)
1551 {
1552 if (frag->tc_frag_data.first_map != NULL)
1553 {
1554 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1555 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1556 &symbol_lastP);
1557 }
1558 frag->tc_frag_data.first_map = symbolP;
1559 }
1560 if (frag->tc_frag_data.last_map != NULL)
1561 {
1562 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1563 S_GET_VALUE (symbolP));
1564 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1565 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1566 &symbol_lastP);
1567 }
1568 frag->tc_frag_data.last_map = symbolP;
1569 }
1570
1571 /* We must sometimes convert a region marked as code to data during
1572 code alignment, if an odd number of bytes have to be padded. The
1573 code mapping symbol is pushed to an aligned address. */
1574
1575 static void
1576 insert_data_mapping_symbol (enum mstate state,
1577 valueT value, fragS * frag, offsetT bytes)
1578 {
1579 /* If there was already a mapping symbol, remove it. */
1580 if (frag->tc_frag_data.last_map != NULL
1581 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1582 frag->fr_address + value)
1583 {
1584 symbolS *symp = frag->tc_frag_data.last_map;
1585
1586 if (value == 0)
1587 {
1588 know (frag->tc_frag_data.first_map == symp);
1589 frag->tc_frag_data.first_map = NULL;
1590 }
1591 frag->tc_frag_data.last_map = NULL;
1592 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1593 }
1594
1595 make_mapping_symbol (MAP_DATA, value, frag);
1596 make_mapping_symbol (state, value + bytes, frag);
1597 }
1598
1599 static void mapping_state_2 (enum mstate state, int max_chars);
1600
1601 /* Set the mapping state to STATE. Only call this when about to
1602 emit some STATE bytes to the file. */
1603
1604 void
1605 mapping_state (enum mstate state)
1606 {
1607 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1608
1609 if (state == MAP_INSN)
1610 /* AArch64 instructions require 4-byte alignment. When emitting
1611 instructions into any section, record the appropriate section
1612 alignment. */
1613 record_alignment (now_seg, 2);
1614
1615 if (mapstate == state)
1616 /* The mapping symbol has already been emitted.
1617 There is nothing else to do. */
1618 return;
1619
1620 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1621 if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1622 /* Emit MAP_DATA within an executable section in order. Otherwise, it will
1623 be evaluated later in the next else. */
1624 return;
1625 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1626 {
1627 /* Only add the symbol if the offset is > 0:
1628 if we're at the first frag, check it's size > 0;
1629 if we're not at the first frag, then for sure
1630 the offset is > 0. */
1631 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1632 const int add_symbol = (frag_now != frag_first)
1633 || (frag_now_fix () > 0);
1634
1635 if (add_symbol)
1636 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1637 }
1638 #undef TRANSITION
1639
1640 mapping_state_2 (state, 0);
1641 }
1642
1643 /* Same as mapping_state, but MAX_CHARS bytes have already been
1644 allocated. Put the mapping symbol that far back. */
1645
1646 static void
1647 mapping_state_2 (enum mstate state, int max_chars)
1648 {
1649 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1650
1651 if (!SEG_NORMAL (now_seg))
1652 return;
1653
1654 if (mapstate == state)
1655 /* The mapping symbol has already been emitted.
1656 There is nothing else to do. */
1657 return;
1658
1659 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1660 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1661 }
1662 #else
1663 #define mapping_state(x) /* nothing */
1664 #define mapping_state_2(x, y) /* nothing */
1665 #endif
1666
1667 /* Directives: sectioning and alignment. */
1668
1669 static void
1670 s_bss (int ignore ATTRIBUTE_UNUSED)
1671 {
1672 /* We don't support putting frags in the BSS segment; we fake it by
1673 marking in_bss, then looking at s_skip for clues. */
1674 subseg_set (bss_section, 0);
1675 demand_empty_rest_of_line ();
1676 mapping_state (MAP_DATA);
1677 }
1678
1679 static void
1680 s_even (int ignore ATTRIBUTE_UNUSED)
1681 {
1682 /* Never make frag if expect extra pass. */
1683 if (!need_pass_2)
1684 frag_align (1, 0, 0);
1685
1686 record_alignment (now_seg, 1);
1687
1688 demand_empty_rest_of_line ();
1689 }
1690
1691 /* Directives: Literal pools. */
1692
1693 static literal_pool *
1694 find_literal_pool (int size)
1695 {
1696 literal_pool *pool;
1697
1698 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1699 {
1700 if (pool->section == now_seg
1701 && pool->sub_section == now_subseg && pool->size == size)
1702 break;
1703 }
1704
1705 return pool;
1706 }
1707
1708 static literal_pool *
1709 find_or_make_literal_pool (int size)
1710 {
1711 /* Next literal pool ID number. */
1712 static unsigned int latest_pool_num = 1;
1713 literal_pool *pool;
1714
1715 pool = find_literal_pool (size);
1716
1717 if (pool == NULL)
1718 {
1719 /* Create a new pool. */
1720 pool = XNEW (literal_pool);
1721 if (!pool)
1722 return NULL;
1723
1724 /* Currently we always put the literal pool in the current text
1725 section. If we were generating "small" model code where we
1726 knew that all code and initialised data was within 1MB then
1727 we could output literals to mergeable, read-only data
1728 sections. */
1729
1730 pool->next_free_entry = 0;
1731 pool->section = now_seg;
1732 pool->sub_section = now_subseg;
1733 pool->size = size;
1734 pool->next = list_of_pools;
1735 pool->symbol = NULL;
1736
1737 /* Add it to the list. */
1738 list_of_pools = pool;
1739 }
1740
1741 /* New pools, and emptied pools, will have a NULL symbol. */
1742 if (pool->symbol == NULL)
1743 {
1744 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1745 &zero_address_frag, 0);
1746 pool->id = latest_pool_num++;
1747 }
1748
1749 /* Done. */
1750 return pool;
1751 }
1752
1753 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1754 Return TRUE on success, otherwise return FALSE. */
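/* This is how pseudo-loads such as "ldr x0, =0x123456789" end up being
   assembled: the constant is added here (or found, if an identical entry is
   already present) to the pool of the matching size, *EXP is rewritten as
   pool-symbol + offset, and the pool contents are emitted later by a
   .ltorg/.pool directive.  The rewriting of the LDR itself into a
   PC-relative literal load happens elsewhere in this file.  */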
1755 static bfd_boolean
1756 add_to_lit_pool (expressionS *exp, int size)
1757 {
1758 literal_pool *pool;
1759 unsigned int entry;
1760
1761 pool = find_or_make_literal_pool (size);
1762
1763 /* Check if this literal value is already in the pool. */
1764 for (entry = 0; entry < pool->next_free_entry; entry++)
1765 {
1766 expressionS * litexp = & pool->literals[entry].exp;
1767
1768 if ((litexp->X_op == exp->X_op)
1769 && (exp->X_op == O_constant)
1770 && (litexp->X_add_number == exp->X_add_number)
1771 && (litexp->X_unsigned == exp->X_unsigned))
1772 break;
1773
1774 if ((litexp->X_op == exp->X_op)
1775 && (exp->X_op == O_symbol)
1776 && (litexp->X_add_number == exp->X_add_number)
1777 && (litexp->X_add_symbol == exp->X_add_symbol)
1778 && (litexp->X_op_symbol == exp->X_op_symbol))
1779 break;
1780 }
1781
1782 /* Do we need to create a new entry? */
1783 if (entry == pool->next_free_entry)
1784 {
1785 if (entry >= MAX_LITERAL_POOL_SIZE)
1786 {
1787 set_syntax_error (_("literal pool overflow"));
1788 return FALSE;
1789 }
1790
1791 pool->literals[entry].exp = *exp;
1792 pool->next_free_entry += 1;
1793 if (exp->X_op == O_big)
1794 {
1795 /* PR 16688: Bignums are held in a single global array. We must
1796 copy and preserve that value now, before it is overwritten. */
1797 pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
1798 exp->X_add_number);
1799 memcpy (pool->literals[entry].bignum, generic_bignum,
1800 CHARS_PER_LITTLENUM * exp->X_add_number);
1801 }
1802 else
1803 pool->literals[entry].bignum = NULL;
1804 }
1805
1806 exp->X_op = O_symbol;
1807 exp->X_add_number = ((int) entry) * size;
1808 exp->X_add_symbol = pool->symbol;
1809
1810 return TRUE;
1811 }
1812
1813 /* Can't use symbol_new here, so have to create a symbol and then at
1814 a later date assign it a value. That's what these functions do. */
1815
1816 static void
1817 symbol_locate (symbolS * symbolP,
1818 const char *name,/* It is copied, the caller can modify. */
1819 segT segment, /* Segment identifier (SEG_<something>). */
1820 valueT valu, /* Symbol value. */
1821 fragS * frag) /* Associated fragment. */
1822 {
1823 size_t name_length;
1824 char *preserved_copy_of_name;
1825
1826 name_length = strlen (name) + 1; /* +1 for \0. */
1827 obstack_grow (&notes, name, name_length);
1828 preserved_copy_of_name = obstack_finish (&notes);
1829
1830 #ifdef tc_canonicalize_symbol_name
1831 preserved_copy_of_name =
1832 tc_canonicalize_symbol_name (preserved_copy_of_name);
1833 #endif
1834
1835 S_SET_NAME (symbolP, preserved_copy_of_name);
1836
1837 S_SET_SEGMENT (symbolP, segment);
1838 S_SET_VALUE (symbolP, valu);
1839 symbol_clear_list_pointers (symbolP);
1840
1841 symbol_set_frag (symbolP, frag);
1842
1843 /* Link to end of symbol chain. */
1844 {
1845 extern int symbol_table_frozen;
1846
1847 if (symbol_table_frozen)
1848 abort ();
1849 }
1850
1851 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1852
1853 obj_symbol_new_hook (symbolP);
1854
1855 #ifdef tc_symbol_new_hook
1856 tc_symbol_new_hook (symbolP);
1857 #endif
1858
1859 #ifdef DEBUG_SYMS
1860 verify_symbol_chain (symbol_rootP, symbol_lastP);
1861 #endif /* DEBUG_SYMS */
1862 }
1863
1864
1865 static void
1866 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1867 {
1868 unsigned int entry;
1869 literal_pool *pool;
1870 char sym_name[20];
1871 int align;
1872
1873 for (align = 2; align <= 4; align++)
1874 {
1875 int size = 1 << align;
1876
1877 pool = find_literal_pool (size);
1878 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1879 continue;
1880
1881 /* Align pool as you have word accesses.
1882 Only make a frag if we have to. */
1883 if (!need_pass_2)
1884 frag_align (align, 0, 0);
1885
1886 mapping_state (MAP_DATA);
1887
1888 record_alignment (now_seg, align);
1889
1890 sprintf (sym_name, "$$lit_\002%x", pool->id);
1891
1892 symbol_locate (pool->symbol, sym_name, now_seg,
1893 (valueT) frag_now_fix (), frag_now);
1894 symbol_table_insert (pool->symbol);
1895
1896 for (entry = 0; entry < pool->next_free_entry; entry++)
1897 {
1898 expressionS * exp = & pool->literals[entry].exp;
1899
1900 if (exp->X_op == O_big)
1901 {
1902 /* PR 16688: Restore the global bignum value. */
1903 gas_assert (pool->literals[entry].bignum != NULL);
1904 memcpy (generic_bignum, pool->literals[entry].bignum,
1905 CHARS_PER_LITTLENUM * exp->X_add_number);
1906 }
1907
1908 /* First output the expression in the instruction to the pool. */
1909 emit_expr (exp, size); /* .word|.xword */
1910
1911 if (exp->X_op == O_big)
1912 {
1913 free (pool->literals[entry].bignum);
1914 pool->literals[entry].bignum = NULL;
1915 }
1916 }
1917
1918 /* Mark the pool as empty. */
1919 pool->next_free_entry = 0;
1920 pool->symbol = NULL;
1921 }
1922 }
1923
1924 #ifdef OBJ_ELF
1925 /* Forward declarations for functions below, in the MD interface
1926 section. */
1927 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1928 static struct reloc_table_entry * find_reloc_table_entry (char **);
1929
1930 /* Directives: Data. */
1931 /* N.B. the support for relocation suffix in this directive needs to be
1932 implemented properly. */
1933
1934 static void
1935 s_aarch64_elf_cons (int nbytes)
1936 {
1937 expressionS exp;
1938
1939 #ifdef md_flush_pending_output
1940 md_flush_pending_output ();
1941 #endif
1942
1943 if (is_it_end_of_statement ())
1944 {
1945 demand_empty_rest_of_line ();
1946 return;
1947 }
1948
1949 #ifdef md_cons_align
1950 md_cons_align (nbytes);
1951 #endif
1952
1953 mapping_state (MAP_DATA);
1954 do
1955 {
1956 struct reloc_table_entry *reloc;
1957
1958 expression (&exp);
1959
1960 if (exp.X_op != O_symbol)
1961 emit_expr (&exp, (unsigned int) nbytes);
1962 else
1963 {
1964 skip_past_char (&input_line_pointer, '#');
1965 if (skip_past_char (&input_line_pointer, ':'))
1966 {
1967 reloc = find_reloc_table_entry (&input_line_pointer);
1968 if (reloc == NULL)
1969 as_bad (_("unrecognized relocation suffix"));
1970 else
1971 as_bad (_("unimplemented relocation suffix"));
1972 ignore_rest_of_line ();
1973 return;
1974 }
1975 else
1976 emit_expr (&exp, (unsigned int) nbytes);
1977 }
1978 }
1979 while (*input_line_pointer++ == ',');
1980
1981 /* Put terminator back into stream. */
1982 input_line_pointer--;
1983 demand_empty_rest_of_line ();
1984 }
1985
1986 /* Mark a symbol as following the variant PCS convention. */
1987
1988 static void
1989 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
1990 {
1991 char *name;
1992 char c;
1993 symbolS *sym;
1994 asymbol *bfdsym;
1995 elf_symbol_type *elfsym;
1996
1997 c = get_symbol_name (&name);
1998 if (!*name)
1999 as_bad (_("Missing symbol name in directive"));
2000 sym = symbol_find_or_make (name);
2001 restore_line_pointer (c);
2002 demand_empty_rest_of_line ();
2003 bfdsym = symbol_get_bfdsym (sym);
2004 elfsym = elf_symbol_from (bfdsym);
2005 gas_assert (elfsym);
2006 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2007 }
2008 #endif /* OBJ_ELF */
2009
2010 /* Output a 32-bit word, but mark as an instruction. */
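/* For example, ".inst 0xd503201f" emits the four-byte encoding of a NOP and,
   on ELF targets, records a $x mapping symbol so the bytes are treated as an
   instruction rather than data.  */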
2011
2012 static void
2013 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
2014 {
2015 expressionS exp;
2016
2017 #ifdef md_flush_pending_output
2018 md_flush_pending_output ();
2019 #endif
2020
2021 if (is_it_end_of_statement ())
2022 {
2023 demand_empty_rest_of_line ();
2024 return;
2025 }
2026
2027 /* Sections are assumed to start aligned. In an executable section, there is
2028 no MAP_DATA symbol pending. So we only align the address during a
2029 MAP_DATA --> MAP_INSN transition.
2030 For other sections, this is not guaranteed. */
2031 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2032 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
2033 frag_align_code (2, 0);
2034
2035 #ifdef OBJ_ELF
2036 mapping_state (MAP_INSN);
2037 #endif
2038
2039 do
2040 {
2041 expression (&exp);
2042 if (exp.X_op != O_constant)
2043 {
2044 as_bad (_("constant expression required"));
2045 ignore_rest_of_line ();
2046 return;
2047 }
2048
2049 if (target_big_endian)
2050 {
2051 unsigned int val = exp.X_add_number;
2052 exp.X_add_number = SWAP_32 (val);
2053 }
2054 emit_expr (&exp, 4);
2055 }
2056 while (*input_line_pointer++ == ',');
2057
2058 /* Put terminator back into stream. */
2059 input_line_pointer--;
2060 demand_empty_rest_of_line ();
2061 }
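/* Illustrative usage, not part of the original source: .inst emits each
   constant expression as a raw 32-bit instruction word (byte-swapped on
   big-endian targets as above).  For example, 0xd503201f is the A64
   encoding of NOP:

       .inst 0xd503201f
 */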
2062
2063 static void
2064 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2065 {
2066 demand_empty_rest_of_line ();
2067 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2068 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2069 }
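/* Illustrative usage, not part of the original source: a function whose
   return address is signed with the B key reports that to the unwinder
   by placing the directive inside its CFI region:

       .cfi_startproc
       .cfi_b_key_frame
       pacibsp
 */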
2070
2071 #ifdef OBJ_ELF
2072 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2073
2074 static void
2075 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2076 {
2077 expressionS exp;
2078
2079 expression (&exp);
2080 frag_grow (4);
2081 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2082 BFD_RELOC_AARCH64_TLSDESC_ADD);
2083
2084 demand_empty_rest_of_line ();
2085 }
2086
2087 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2088
2089 static void
2090 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2091 {
2092 expressionS exp;
2093
2094 /* Since we're just labelling the code, there's no need to define a
2095 mapping symbol. */
2096 expression (&exp);
2097 /* Make sure there is enough room in this frag for the following
2098 blr. This trick only works if the blr follows immediately after
2099 the .tlsdesc directive. */
2100 frag_grow (4);
2101 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2102 BFD_RELOC_AARCH64_TLSDESC_CALL);
2103
2104 demand_empty_rest_of_line ();
2105 }
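/* Illustrative usage, not part of the original source: a typical TLS
   descriptor sequence, in which .tlsdesccall labels the BLR so that the
   linker can relax the access ("var" is a hypothetical TLS symbol):

       adrp  x0, :tlsdesc:var
       ldr   x1, [x0, #:tlsdesc_lo12:var]
       add   x0, x0, #:tlsdesc_lo12:var
       .tlsdesccall var
       blr   x1
 */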
2106
2107 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2108
2109 static void
2110 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2111 {
2112 expressionS exp;
2113
2114 expression (&exp);
2115 frag_grow (4);
2116 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2117 BFD_RELOC_AARCH64_TLSDESC_LDR);
2118
2119 demand_empty_rest_of_line ();
2120 }
2121 #endif /* OBJ_ELF */
2122
2123 static void s_aarch64_arch (int);
2124 static void s_aarch64_cpu (int);
2125 static void s_aarch64_arch_extension (int);
2126
2127 /* This table describes all the machine specific pseudo-ops the assembler
2128 has to support. The fields are:
2129 pseudo-op name without dot
2130 function to call to execute this pseudo-op
2131 Integer arg to pass to the function. */
2132
2133 const pseudo_typeS md_pseudo_table[] = {
2134 /* Never called because '.req' does not start a line. */
2135 {"req", s_req, 0},
2136 {"unreq", s_unreq, 0},
2137 {"bss", s_bss, 0},
2138 {"even", s_even, 0},
2139 {"ltorg", s_ltorg, 0},
2140 {"pool", s_ltorg, 0},
2141 {"cpu", s_aarch64_cpu, 0},
2142 {"arch", s_aarch64_arch, 0},
2143 {"arch_extension", s_aarch64_arch_extension, 0},
2144 {"inst", s_aarch64_inst, 0},
2145 {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
2146 #ifdef OBJ_ELF
2147 {"tlsdescadd", s_tlsdescadd, 0},
2148 {"tlsdesccall", s_tlsdesccall, 0},
2149 {"tlsdescldr", s_tlsdescldr, 0},
2150 {"word", s_aarch64_elf_cons, 4},
2151 {"long", s_aarch64_elf_cons, 4},
2152 {"xword", s_aarch64_elf_cons, 8},
2153 {"dword", s_aarch64_elf_cons, 8},
2154 {"variant_pcs", s_variant_pcs, 0},
2155 #endif
2156 {"float16", float_cons, 'h'},
2157 {"bfloat16", float_cons, 'b'},
2158 {0, 0, 0}
2159 };
2160 \f
2161
2162 /* Check whether STR points to a register name followed by a comma or the
2163 end of line; REG_TYPE indicates which register types are checked
2164 against. Return TRUE if STR is such a register name; otherwise return
2165 FALSE. The function does not intend to produce any diagnostics, but since
2166 the register parser aarch64_reg_parse, which is called by this function,
2167 does produce diagnostics, we call clear_error to clear any diagnostics
2168 that may be generated by aarch64_reg_parse.
2169 Also, the function returns FALSE directly if there is any user error
2170 present at the function entry. This prevents the existing diagnostics
2171 state from being spoiled.
2172 The function currently serves parse_constant_immediate and
2173 parse_big_immediate only. */
2174 static bfd_boolean
2175 reg_name_p (char *str, aarch64_reg_type reg_type)
2176 {
2177 int reg;
2178
2179 /* Prevent the diagnostics state from being spoiled. */
2180 if (error_p ())
2181 return FALSE;
2182
2183 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2184
2185 /* Clear the parsing error that may be set by the reg parser. */
2186 clear_error ();
2187
2188 if (reg == PARSE_FAIL)
2189 return FALSE;
2190
2191 skip_whitespace (str);
2192 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2193 return TRUE;
2194
2195 return FALSE;
2196 }
2197
2198 /* Parser functions used exclusively in instruction operands. */
2199
2200 /* Parse an immediate expression which may not be constant.
2201
2202 To prevent the expression parser from pushing a register name
2203 into the symbol table as an undefined symbol, a check is first
2204 made to find out whether STR is a register of type REG_TYPE followed
2205 by a comma or the end of line. Return FALSE if STR is such a register name. */
2206
2207 static bfd_boolean
2208 parse_immediate_expression (char **str, expressionS *exp,
2209 aarch64_reg_type reg_type)
2210 {
2211 if (reg_name_p (*str, reg_type))
2212 {
2213 set_recoverable_error (_("immediate operand required"));
2214 return FALSE;
2215 }
2216
2217 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2218
2219 if (exp->X_op == O_absent)
2220 {
2221 set_fatal_syntax_error (_("missing immediate expression"));
2222 return FALSE;
2223 }
2224
2225 return TRUE;
2226 }
2227
2228 /* Constant immediate-value read function for use in insn parsing.
2229 STR points to the beginning of the immediate (with the optional
2230 leading #); *VAL receives the value. REG_TYPE says which register
2231 names should be treated as registers rather than as symbolic immediates.
2232
2233 Return TRUE on success; otherwise return FALSE. */
2234
2235 static bfd_boolean
2236 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2237 {
2238 expressionS exp;
2239
2240 if (! parse_immediate_expression (str, &exp, reg_type))
2241 return FALSE;
2242
2243 if (exp.X_op != O_constant)
2244 {
2245 set_syntax_error (_("constant expression required"));
2246 return FALSE;
2247 }
2248
2249 *val = exp.X_add_number;
2250 return TRUE;
2251 }
2252
2253 static uint32_t
2254 encode_imm_float_bits (uint32_t imm)
2255 {
2256 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2257 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2258 }
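/* Illustrative worked example, not part of the original source: for the
   single-precision value 1.0f the IEEE754 word is 0x3f800000; bits
   [25:19] are 0b1110000 and bit [31] is 0, so the function above
   returns 0x70, the 8-bit immediate used by FMOV (immediate).  */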
2259
2260 /* Return TRUE if the single-precision floating-point value encoded in IMM
2261 can be expressed in the AArch64 8-bit signed floating-point format with
2262 a 3-bit exponent and 4 bits of normalized precision; in other words, the
2263 floating-point value must be expressible as
2264 (+/-) n / 16 * power (2, r)
2265 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2266
2267 static bfd_boolean
2268 aarch64_imm_float_p (uint32_t imm)
2269 {
2270 /* If a single-precision floating-point value has the following bit
2271 pattern, it can be expressed in the AArch64 8-bit floating-point
2272 format:
2273
2274 3 32222222 2221111111111
2275 1 09876543 21098765432109876543210
2276 n Eeeeeexx xxxx0000000000000000000
2277
2278 where n, e and each x are either 0 or 1 independently, with
2279 E == ~ e. */
2280
2281 uint32_t pattern;
2282
2283 /* Prepare the pattern for 'Eeeeee'. */
2284 if (((imm >> 30) & 0x1) == 0)
2285 pattern = 0x3e000000;
2286 else
2287 pattern = 0x40000000;
2288
2289 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2290 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2291 }
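/* Illustrative check, not part of the original source: 1.0f
   (0x3f800000) satisfies the test above, matching n = 16, r = 0 in the
   formula (+/-) n / 16 * power (2, r); 0.1f (approximately 0x3dcccccd)
   does not, because its low 19 bits are non-zero.  */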
2292
2293 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2294 as an IEEE float without any loss of precision. Store the value in
2295 *FPWORD if so. */
2296
2297 static bfd_boolean
2298 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2299 {
2300 /* If a double-precision floating-point value has the following bit
2301 pattern, it can be expressed in a float:
2302
2303 6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2304 3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2305 n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2306
2307 -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
2308 if Eeee_eeee != 1111_1111
2309
2310 where n, e, s and S are either 0 or 1 independently and where ~ is the
2311 inverse of E. */
2312
2313 uint32_t pattern;
2314 uint32_t high32 = imm >> 32;
2315 uint32_t low32 = imm;
2316
2317 /* Lower 29 bits need to be 0s. */
2318 if ((imm & 0x1fffffff) != 0)
2319 return FALSE;
2320
2321 /* Prepare the pattern for 'Eeeeeeeee'. */
2322 if (((high32 >> 30) & 0x1) == 0)
2323 pattern = 0x38000000;
2324 else
2325 pattern = 0x40000000;
2326
2327 /* Check E~~~. */
2328 if ((high32 & 0x78000000) != pattern)
2329 return FALSE;
2330
2331 /* Check Eeee_eeee != 1111_1111. */
2332 if ((high32 & 0x7ff00000) == 0x47f00000)
2333 return FALSE;
2334
2335 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */
2336 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */
2337 | (low32 >> 29)); /* 3 S bits. */
2338 return TRUE;
2339 }
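/* Illustrative worked example, not part of the original source: the
   double 1.0 is 0x3ff0000000000000; its low 29 bits are zero and its
   exponent is in range, so the function above succeeds and stores the
   single-precision word 0x3f800000 in *FPWORD.  */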
2340
2341 /* Return true if we should treat OPERAND as a double-precision
2342 floating-point operand rather than a single-precision one. */
2343 static bfd_boolean
2344 double_precision_operand_p (const aarch64_opnd_info *operand)
2345 {
2346 /* Check for unsuffixed SVE registers, which are allowed
2347 for LDR and STR but not in instructions that require an
2348 immediate. We get better error messages if we arbitrarily
2349 pick one size, parse the immediate normally, and then
2350 report the match failure in the normal way. */
2351 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2352 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2353 }
2354
2355 /* Parse a floating-point immediate. Return TRUE on success and return the
2356 value in *IMMED in the format of IEEE754 single-precision encoding.
2357 *CCP points to the start of the string; DP_P is TRUE when the immediate
2358 is expected to be in double-precision (N.B. this only matters when
2359 hexadecimal representation is involved). REG_TYPE says which register
2360 names should be treated as registers rather than as symbolic immediates.
2361
2362 This routine accepts any IEEE float; it is up to the callers to reject
2363 invalid ones. */
2364
2365 static bfd_boolean
2366 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
2367 aarch64_reg_type reg_type)
2368 {
2369 char *str = *ccp;
2370 char *fpnum;
2371 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2372 int64_t val = 0;
2373 unsigned fpword = 0;
2374 bfd_boolean hex_p = FALSE;
2375
2376 skip_past_char (&str, '#');
2377
2378 fpnum = str;
2379 skip_whitespace (fpnum);
2380
2381 if (strncmp (fpnum, "0x", 2) == 0)
2382 {
2383 /* Support the hexadecimal representation of the IEEE754 encoding.
2384 Double-precision is expected when DP_P is TRUE, otherwise the
2385 representation should be in single-precision. */
2386 if (! parse_constant_immediate (&str, &val, reg_type))
2387 goto invalid_fp;
2388
2389 if (dp_p)
2390 {
2391 if (!can_convert_double_to_float (val, &fpword))
2392 goto invalid_fp;
2393 }
2394 else if ((uint64_t) val > 0xffffffff)
2395 goto invalid_fp;
2396 else
2397 fpword = val;
2398
2399 hex_p = TRUE;
2400 }
2401 else if (reg_name_p (str, reg_type))
2402 {
2403 set_recoverable_error (_("immediate operand required"));
2404 return FALSE;
2405 }
2406
2407 if (! hex_p)
2408 {
2409 int i;
2410
2411 if ((str = atof_ieee (str, 's', words)) == NULL)
2412 goto invalid_fp;
2413
2414 /* Our FP word must be 32 bits (single-precision FP). */
2415 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2416 {
2417 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2418 fpword |= words[i];
2419 }
2420 }
2421
2422 *immed = fpword;
2423 *ccp = str;
2424 return TRUE;
2425
2426 invalid_fp:
2427 set_fatal_syntax_error (_("invalid floating-point constant"));
2428 return FALSE;
2429 }
2430
2431 /* Less-generic immediate-value read function with the possibility of loading
2432 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2433 instructions.
2434
2435 To prevent the expression parser from pushing a register name into the
2436 symbol table as an undefined symbol, a check is first made to find
2437 out whether STR is a register of type REG_TYPE followed by a comma or
2438 the end of line. Return FALSE if STR is such a register. */
2439
2440 static bfd_boolean
2441 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2442 {
2443 char *ptr = *str;
2444
2445 if (reg_name_p (ptr, reg_type))
2446 {
2447 set_syntax_error (_("immediate operand required"));
2448 return FALSE;
2449 }
2450
2451 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2452
2453 if (inst.reloc.exp.X_op == O_constant)
2454 *imm = inst.reloc.exp.X_add_number;
2455
2456 *str = ptr;
2457
2458 return TRUE;
2459 }
2460
2461 /* Record in RELOC that operand OPERAND of the instruction needs a GAS
2462 internal fixup. If NEED_LIBOPCODES_P is non-zero, the fixup will need
2463 assistance from libopcodes. */
2464
2465 static inline void
2466 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2467 const aarch64_opnd_info *operand,
2468 int need_libopcodes_p)
2469 {
2470 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2471 reloc->opnd = operand->type;
2472 if (need_libopcodes_p)
2473 reloc->need_libopcodes_p = 1;
2474 }
2475
2476 /* Return TRUE if the instruction needs to be fixed up later internally by
2477 the GAS; otherwise return FALSE. */
2478
2479 static inline bfd_boolean
2480 aarch64_gas_internal_fixup_p (void)
2481 {
2482 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2483 }
2484
2485 /* Assign the immediate value to the relevant field in *OPERAND if
2486 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2487 needs an internal fixup in a later stage.
2488 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2489 IMM.VALUE that may get assigned with the constant. */
2490 static inline void
2491 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2492 aarch64_opnd_info *operand,
2493 int addr_off_p,
2494 int need_libopcodes_p,
2495 int skip_p)
2496 {
2497 if (reloc->exp.X_op == O_constant)
2498 {
2499 if (addr_off_p)
2500 operand->addr.offset.imm = reloc->exp.X_add_number;
2501 else
2502 operand->imm.value = reloc->exp.X_add_number;
2503 reloc->type = BFD_RELOC_UNUSED;
2504 }
2505 else
2506 {
2507 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2508 /* Tell libopcodes to ignore this operand or not. This is helpful
2509 when one of the operands needs to be fixed up later but we need
2510 libopcodes to check the other operands. */
2511 operand->skip = skip_p;
2512 }
2513 }
2514
2515 /* Relocation modifiers. Each entry in the table contains the textual
2516 name for the relocation which may be placed before a symbol used as
2517 a load/store offset, or add immediate. It must be surrounded by a
2518 leading and trailing colon, for example:
2519
2520 ldr x0, [x1, #:rello:varsym]
2521 add x0, x1, #:rello:varsym */
2522
2523 struct reloc_table_entry
2524 {
2525 const char *name;
2526 int pc_rel;
2527 bfd_reloc_code_real_type adr_type;
2528 bfd_reloc_code_real_type adrp_type;
2529 bfd_reloc_code_real_type movw_type;
2530 bfd_reloc_code_real_type add_type;
2531 bfd_reloc_code_real_type ldst_type;
2532 bfd_reloc_code_real_type ld_literal_type;
2533 };
2534
2535 static struct reloc_table_entry reloc_table[] = {
2536 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2537 {"lo12", 0,
2538 0, /* adr_type */
2539 0,
2540 0,
2541 BFD_RELOC_AARCH64_ADD_LO12,
2542 BFD_RELOC_AARCH64_LDST_LO12,
2543 0},
2544
2545 /* Higher 21 bits of pc-relative page offset: ADRP */
2546 {"pg_hi21", 1,
2547 0, /* adr_type */
2548 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2549 0,
2550 0,
2551 0,
2552 0},
2553
2554 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2555 {"pg_hi21_nc", 1,
2556 0, /* adr_type */
2557 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2558 0,
2559 0,
2560 0,
2561 0},
2562
2563 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2564 {"abs_g0", 0,
2565 0, /* adr_type */
2566 0,
2567 BFD_RELOC_AARCH64_MOVW_G0,
2568 0,
2569 0,
2570 0},
2571
2572 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2573 {"abs_g0_s", 0,
2574 0, /* adr_type */
2575 0,
2576 BFD_RELOC_AARCH64_MOVW_G0_S,
2577 0,
2578 0,
2579 0},
2580
2581 /* Less significant bits 0-15 of address/value: MOVK, no check */
2582 {"abs_g0_nc", 0,
2583 0, /* adr_type */
2584 0,
2585 BFD_RELOC_AARCH64_MOVW_G0_NC,
2586 0,
2587 0,
2588 0},
2589
2590 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2591 {"abs_g1", 0,
2592 0, /* adr_type */
2593 0,
2594 BFD_RELOC_AARCH64_MOVW_G1,
2595 0,
2596 0,
2597 0},
2598
2599 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2600 {"abs_g1_s", 0,
2601 0, /* adr_type */
2602 0,
2603 BFD_RELOC_AARCH64_MOVW_G1_S,
2604 0,
2605 0,
2606 0},
2607
2608 /* Less significant bits 16-31 of address/value: MOVK, no check */
2609 {"abs_g1_nc", 0,
2610 0, /* adr_type */
2611 0,
2612 BFD_RELOC_AARCH64_MOVW_G1_NC,
2613 0,
2614 0,
2615 0},
2616
2617 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2618 {"abs_g2", 0,
2619 0, /* adr_type */
2620 0,
2621 BFD_RELOC_AARCH64_MOVW_G2,
2622 0,
2623 0,
2624 0},
2625
2626 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2627 {"abs_g2_s", 0,
2628 0, /* adr_type */
2629 0,
2630 BFD_RELOC_AARCH64_MOVW_G2_S,
2631 0,
2632 0,
2633 0},
2634
2635 /* Less significant bits 32-47 of address/value: MOVK, no check */
2636 {"abs_g2_nc", 0,
2637 0, /* adr_type */
2638 0,
2639 BFD_RELOC_AARCH64_MOVW_G2_NC,
2640 0,
2641 0,
2642 0},
2643
2644 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2645 {"abs_g3", 0,
2646 0, /* adr_type */
2647 0,
2648 BFD_RELOC_AARCH64_MOVW_G3,
2649 0,
2650 0,
2651 0},
2652
2653 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2654 {"prel_g0", 1,
2655 0, /* adr_type */
2656 0,
2657 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2658 0,
2659 0,
2660 0},
2661
2662 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2663 {"prel_g0_nc", 1,
2664 0, /* adr_type */
2665 0,
2666 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2667 0,
2668 0,
2669 0},
2670
2671 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2672 {"prel_g1", 1,
2673 0, /* adr_type */
2674 0,
2675 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2676 0,
2677 0,
2678 0},
2679
2680 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2681 {"prel_g1_nc", 1,
2682 0, /* adr_type */
2683 0,
2684 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2685 0,
2686 0,
2687 0},
2688
2689 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2690 {"prel_g2", 1,
2691 0, /* adr_type */
2692 0,
2693 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2694 0,
2695 0,
2696 0},
2697
2698 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2699 {"prel_g2_nc", 1,
2700 0, /* adr_type */
2701 0,
2702 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2703 0,
2704 0,
2705 0},
2706
2707 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2708 {"prel_g3", 1,
2709 0, /* adr_type */
2710 0,
2711 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2712 0,
2713 0,
2714 0},
2715
2716 /* Get to the page containing GOT entry for a symbol. */
2717 {"got", 1,
2718 0, /* adr_type */
2719 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2720 0,
2721 0,
2722 0,
2723 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2724
2725 /* 12 bit offset into the page containing GOT entry for that symbol. */
2726 {"got_lo12", 0,
2727 0, /* adr_type */
2728 0,
2729 0,
2730 0,
2731 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2732 0},
2733
2734 /* 0-15 bits of address/value: MOVK, no check. */
2735 {"gotoff_g0_nc", 0,
2736 0, /* adr_type */
2737 0,
2738 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2739 0,
2740 0,
2741 0},
2742
2743 /* Most significant bits 16-31 of address/value: MOVZ. */
2744 {"gotoff_g1", 0,
2745 0, /* adr_type */
2746 0,
2747 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2748 0,
2749 0,
2750 0},
2751
2752 /* 15 bit offset into the page containing GOT entry for that symbol. */
2753 {"gotoff_lo15", 0,
2754 0, /* adr_type */
2755 0,
2756 0,
2757 0,
2758 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2759 0},
2760
2761 /* Get to the page containing GOT TLS entry for a symbol */
2762 {"gottprel_g0_nc", 0,
2763 0, /* adr_type */
2764 0,
2765 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2766 0,
2767 0,
2768 0},
2769
2770 /* Get to the page containing GOT TLS entry for a symbol */
2771 {"gottprel_g1", 0,
2772 0, /* adr_type */
2773 0,
2774 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2775 0,
2776 0,
2777 0},
2778
2779 /* Get to the page containing GOT TLS entry for a symbol */
2780 {"tlsgd", 0,
2781 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2782 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2783 0,
2784 0,
2785 0,
2786 0},
2787
2788 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2789 {"tlsgd_lo12", 0,
2790 0, /* adr_type */
2791 0,
2792 0,
2793 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2794 0,
2795 0},
2796
2797 /* Lower 16 bits of address/value: MOVK. */
2798 {"tlsgd_g0_nc", 0,
2799 0, /* adr_type */
2800 0,
2801 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2802 0,
2803 0,
2804 0},
2805
2806 /* Most significant bits 16-31 of address/value: MOVZ. */
2807 {"tlsgd_g1", 0,
2808 0, /* adr_type */
2809 0,
2810 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2811 0,
2812 0,
2813 0},
2814
2815 /* Get to the page containing GOT TLS entry for a symbol */
2816 {"tlsdesc", 0,
2817 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2818 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2819 0,
2820 0,
2821 0,
2822 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2823
2824 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2825 {"tlsdesc_lo12", 0,
2826 0, /* adr_type */
2827 0,
2828 0,
2829 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2830 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2831 0},
2832
2833 /* Get to the page containing GOT TLS entry for a symbol.
2834 As with GD, we allocate two consecutive GOT slots
2835 for the module index and module offset; the only difference
2836 from GD is that the module offset should be initialized to
2837 zero without any outstanding runtime relocation. */
2838 {"tlsldm", 0,
2839 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2840 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2841 0,
2842 0,
2843 0,
2844 0},
2845
2846 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2847 {"tlsldm_lo12_nc", 0,
2848 0, /* adr_type */
2849 0,
2850 0,
2851 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2852 0,
2853 0},
2854
2855 /* 12 bit offset into the module TLS base address. */
2856 {"dtprel_lo12", 0,
2857 0, /* adr_type */
2858 0,
2859 0,
2860 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2861 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2862 0},
2863
2864 /* Same as dtprel_lo12, no overflow check. */
2865 {"dtprel_lo12_nc", 0,
2866 0, /* adr_type */
2867 0,
2868 0,
2869 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2870 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2871 0},
2872
2873 /* bits[23:12] of offset to the module TLS base address. */
2874 {"dtprel_hi12", 0,
2875 0, /* adr_type */
2876 0,
2877 0,
2878 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2879 0,
2880 0},
2881
2882 /* bits[15:0] of offset to the module TLS base address. */
2883 {"dtprel_g0", 0,
2884 0, /* adr_type */
2885 0,
2886 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2887 0,
2888 0,
2889 0},
2890
2891 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2892 {"dtprel_g0_nc", 0,
2893 0, /* adr_type */
2894 0,
2895 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2896 0,
2897 0,
2898 0},
2899
2900 /* bits[31:16] of offset to the module TLS base address. */
2901 {"dtprel_g1", 0,
2902 0, /* adr_type */
2903 0,
2904 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2905 0,
2906 0,
2907 0},
2908
2909 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2910 {"dtprel_g1_nc", 0,
2911 0, /* adr_type */
2912 0,
2913 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2914 0,
2915 0,
2916 0},
2917
2918 /* bits[47:32] of offset to the module TLS base address. */
2919 {"dtprel_g2", 0,
2920 0, /* adr_type */
2921 0,
2922 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2923 0,
2924 0,
2925 0},
2926
2927 /* Lower 16 bit offset into GOT entry for a symbol */
2928 {"tlsdesc_off_g0_nc", 0,
2929 0, /* adr_type */
2930 0,
2931 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2932 0,
2933 0,
2934 0},
2935
2936 /* Higher 16 bit offset into GOT entry for a symbol */
2937 {"tlsdesc_off_g1", 0,
2938 0, /* adr_type */
2939 0,
2940 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2941 0,
2942 0,
2943 0},
2944
2945 /* Get to the page containing GOT TLS entry for a symbol */
2946 {"gottprel", 0,
2947 0, /* adr_type */
2948 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2949 0,
2950 0,
2951 0,
2952 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2953
2954 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2955 {"gottprel_lo12", 0,
2956 0, /* adr_type */
2957 0,
2958 0,
2959 0,
2960 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2961 0},
2962
2963 /* Get tp offset for a symbol. */
2964 {"tprel", 0,
2965 0, /* adr_type */
2966 0,
2967 0,
2968 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2969 0,
2970 0},
2971
2972 /* Get tp offset for a symbol. */
2973 {"tprel_lo12", 0,
2974 0, /* adr_type */
2975 0,
2976 0,
2977 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2978 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2979 0},
2980
2981 /* Get tp offset for a symbol. */
2982 {"tprel_hi12", 0,
2983 0, /* adr_type */
2984 0,
2985 0,
2986 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2987 0,
2988 0},
2989
2990 /* Get tp offset for a symbol. */
2991 {"tprel_lo12_nc", 0,
2992 0, /* adr_type */
2993 0,
2994 0,
2995 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2996 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2997 0},
2998
2999 /* Most significant bits 32-47 of address/value: MOVZ. */
3000 {"tprel_g2", 0,
3001 0, /* adr_type */
3002 0,
3003 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
3004 0,
3005 0,
3006 0},
3007
3008 /* Most significant bits 16-31 of address/value: MOVZ. */
3009 {"tprel_g1", 0,
3010 0, /* adr_type */
3011 0,
3012 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3013 0,
3014 0,
3015 0},
3016
3017 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
3018 {"tprel_g1_nc", 0,
3019 0, /* adr_type */
3020 0,
3021 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3022 0,
3023 0,
3024 0},
3025
3026 /* Most significant bits 0-15 of address/value: MOVZ. */
3027 {"tprel_g0", 0,
3028 0, /* adr_type */
3029 0,
3030 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3031 0,
3032 0,
3033 0},
3034
3035 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3036 {"tprel_g0_nc", 0,
3037 0, /* adr_type */
3038 0,
3039 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3040 0,
3041 0,
3042 0},
3043
3044 /* 15-bit offset from the GOT entry to the base address of the GOT table. */
3045 {"gotpage_lo15", 0,
3046 0,
3047 0,
3048 0,
3049 0,
3050 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3051 0},
3052
3053 /* 14-bit offset from the GOT entry to the base address of the GOT table. */
3054 {"gotpage_lo14", 0,
3055 0,
3056 0,
3057 0,
3058 0,
3059 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3060 0},
3061 };
3062
3063 /* Given the address of a pointer pointing to the textual name of a
3064 relocation as may appear in assembler source, attempt to find its
3065 details in reloc_table. The pointer will be updated to the character
3066 after the trailing colon. On failure, NULL will be returned;
3067 otherwise return the reloc_table_entry. */
3068
3069 static struct reloc_table_entry *
3070 find_reloc_table_entry (char **str)
3071 {
3072 unsigned int i;
3073 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3074 {
3075 int length = strlen (reloc_table[i].name);
3076
3077 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3078 && (*str)[length] == ':')
3079 {
3080 *str += (length + 1);
3081 return &reloc_table[i];
3082 }
3083 }
3084
3085 return NULL;
3086 }
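/* Illustrative only, not part of the original source: with *STR
   pointing at "lo12:var]" ("var" being a hypothetical symbol), the loop
   above matches the "lo12" entry, advances *STR past the trailing colon
   so that it points at "var]", and returns the entry whose add_type is
   BFD_RELOC_AARCH64_ADD_LO12.  */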
3087
3088 /* Mode argument to parse_shift and parser_shifter_operand. */
3089 enum parse_shift_mode
3090 {
3091 SHIFTED_NONE, /* no shifter allowed */
3092 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
3093 "#imm{,lsl #n}" */
3094 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
3095 "#imm" */
3096 SHIFTED_LSL, /* bare "lsl #n" */
3097 SHIFTED_MUL, /* bare "mul #n" */
3098 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
3099 SHIFTED_MUL_VL, /* "mul vl" */
3100 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
3101 };
3102
3103 /* Parse a <shift> operator on an AArch64 data processing instruction.
3104 Return TRUE on success; otherwise return FALSE. */
3105 static bfd_boolean
3106 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3107 {
3108 const struct aarch64_name_value_pair *shift_op;
3109 enum aarch64_modifier_kind kind;
3110 expressionS exp;
3111 int exp_has_prefix;
3112 char *s = *str;
3113 char *p = s;
3114
3115 for (p = *str; ISALPHA (*p); p++)
3116 ;
3117
3118 if (p == *str)
3119 {
3120 set_syntax_error (_("shift expression expected"));
3121 return FALSE;
3122 }
3123
3124 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3125
3126 if (shift_op == NULL)
3127 {
3128 set_syntax_error (_("shift operator expected"));
3129 return FALSE;
3130 }
3131
3132 kind = aarch64_get_operand_modifier (shift_op);
3133
3134 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3135 {
3136 set_syntax_error (_("invalid use of 'MSL'"));
3137 return FALSE;
3138 }
3139
3140 if (kind == AARCH64_MOD_MUL
3141 && mode != SHIFTED_MUL
3142 && mode != SHIFTED_MUL_VL)
3143 {
3144 set_syntax_error (_("invalid use of 'MUL'"));
3145 return FALSE;
3146 }
3147
3148 switch (mode)
3149 {
3150 case SHIFTED_LOGIC_IMM:
3151 if (aarch64_extend_operator_p (kind))
3152 {
3153 set_syntax_error (_("extending shift is not permitted"));
3154 return FALSE;
3155 }
3156 break;
3157
3158 case SHIFTED_ARITH_IMM:
3159 if (kind == AARCH64_MOD_ROR)
3160 {
3161 set_syntax_error (_("'ROR' shift is not permitted"));
3162 return FALSE;
3163 }
3164 break;
3165
3166 case SHIFTED_LSL:
3167 if (kind != AARCH64_MOD_LSL)
3168 {
3169 set_syntax_error (_("only 'LSL' shift is permitted"));
3170 return FALSE;
3171 }
3172 break;
3173
3174 case SHIFTED_MUL:
3175 if (kind != AARCH64_MOD_MUL)
3176 {
3177 set_syntax_error (_("only 'MUL' is permitted"));
3178 return FALSE;
3179 }
3180 break;
3181
3182 case SHIFTED_MUL_VL:
3183 /* "MUL VL" consists of two separate tokens. Require the first
3184 token to be "MUL" and look for a following "VL". */
3185 if (kind == AARCH64_MOD_MUL)
3186 {
3187 skip_whitespace (p);
3188 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3189 {
3190 p += 2;
3191 kind = AARCH64_MOD_MUL_VL;
3192 break;
3193 }
3194 }
3195 set_syntax_error (_("only 'MUL VL' is permitted"));
3196 return FALSE;
3197
3198 case SHIFTED_REG_OFFSET:
3199 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3200 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3201 {
3202 set_fatal_syntax_error
3203 (_("invalid shift for the register offset addressing mode"));
3204 return FALSE;
3205 }
3206 break;
3207
3208 case SHIFTED_LSL_MSL:
3209 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3210 {
3211 set_syntax_error (_("invalid shift operator"));
3212 return FALSE;
3213 }
3214 break;
3215
3216 default:
3217 abort ();
3218 }
3219
3220 /* Whitespace can appear here if the next thing is a bare digit. */
3221 skip_whitespace (p);
3222
3223 /* Parse shift amount. */
3224 exp_has_prefix = 0;
3225 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3226 exp.X_op = O_absent;
3227 else
3228 {
3229 if (is_immediate_prefix (*p))
3230 {
3231 p++;
3232 exp_has_prefix = 1;
3233 }
3234 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3235 }
3236 if (kind == AARCH64_MOD_MUL_VL)
3237 /* For consistency, give MUL VL the same shift amount as an implicit
3238 MUL #1. */
3239 operand->shifter.amount = 1;
3240 else if (exp.X_op == O_absent)
3241 {
3242 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3243 {
3244 set_syntax_error (_("missing shift amount"));
3245 return FALSE;
3246 }
3247 operand->shifter.amount = 0;
3248 }
3249 else if (exp.X_op != O_constant)
3250 {
3251 set_syntax_error (_("constant shift amount required"));
3252 return FALSE;
3253 }
3254 /* For parsing purposes, MUL #n has no inherent range. The range
3255 depends on the operand and will be checked by operand-specific
3256 routines. */
3257 else if (kind != AARCH64_MOD_MUL
3258 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3259 {
3260 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3261 return FALSE;
3262 }
3263 else
3264 {
3265 operand->shifter.amount = exp.X_add_number;
3266 operand->shifter.amount_present = 1;
3267 }
3268
3269 operand->shifter.operator_present = 1;
3270 operand->shifter.kind = kind;
3271
3272 *str = p;
3273 return TRUE;
3274 }
3275
3276 /* Parse a <shifter_operand> for a data processing instruction:
3277
3278 #<immediate>
3279 #<immediate>, LSL #imm
3280
3281 Validation of immediate operands is deferred to md_apply_fix.
3282
3283 Return TRUE on success; otherwise return FALSE. */
3284
3285 static bfd_boolean
3286 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3287 enum parse_shift_mode mode)
3288 {
3289 char *p;
3290
3291 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3292 return FALSE;
3293
3294 p = *str;
3295
3296 /* Accept an immediate expression. */
3297 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3298 return FALSE;
3299
3300 /* Accept optional LSL for arithmetic immediate values. */
3301 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3302 if (! parse_shift (&p, operand, SHIFTED_LSL))
3303 return FALSE;
3304
3305 /* Do not accept any shifter for logical immediate values. */
3306 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3307 && parse_shift (&p, operand, mode))
3308 {
3309 set_syntax_error (_("unexpected shift operator"));
3310 return FALSE;
3311 }
3312
3313 *str = p;
3314 return TRUE;
3315 }
3316
3317 /* Parse a <shifter_operand> for a data processing instruction:
3318
3319 <Rm>
3320 <Rm>, <shift>
3321 #<immediate>
3322 #<immediate>, LSL #imm
3323
3324 where <shift> is handled by parse_shift above, and the last two
3325 cases are handled by the function above.
3326
3327 Validation of immediate operands is deferred to md_apply_fix.
3328
3329 Return TRUE on success; otherwise return FALSE. */
3330
3331 static bfd_boolean
3332 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3333 enum parse_shift_mode mode)
3334 {
3335 const reg_entry *reg;
3336 aarch64_opnd_qualifier_t qualifier;
3337 enum aarch64_operand_class opd_class
3338 = aarch64_get_operand_class (operand->type);
3339
3340 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3341 if (reg)
3342 {
3343 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3344 {
3345 set_syntax_error (_("unexpected register in the immediate operand"));
3346 return FALSE;
3347 }
3348
3349 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3350 {
3351 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3352 return FALSE;
3353 }
3354
3355 operand->reg.regno = reg->number;
3356 operand->qualifier = qualifier;
3357
3358 /* Accept optional shift operation on register. */
3359 if (! skip_past_comma (str))
3360 return TRUE;
3361
3362 if (! parse_shift (str, operand, mode))
3363 return FALSE;
3364
3365 return TRUE;
3366 }
3367 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3368 {
3369 set_syntax_error
3370 (_("integer register expected in the extended/shifted operand "
3371 "register"));
3372 return FALSE;
3373 }
3374
3375 /* We have a shifted immediate variable. */
3376 return parse_shifter_operand_imm (str, operand, mode);
3377 }
3378
3379 /* Return TRUE on success; return FALSE otherwise. */
3380
3381 static bfd_boolean
3382 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3383 enum parse_shift_mode mode)
3384 {
3385 char *p = *str;
3386
3387 /* Determine if we have the sequence of characters #: or just :
3388 coming next. If we do, then we check for a :rello: relocation
3389 modifier. If we don't, punt the whole lot to
3390 parse_shifter_operand. */
3391
3392 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3393 {
3394 struct reloc_table_entry *entry;
3395
3396 if (p[0] == '#')
3397 p += 2;
3398 else
3399 p++;
3400 *str = p;
3401
3402 /* Try to parse a relocation. Anything else is an error. */
3403 if (!(entry = find_reloc_table_entry (str)))
3404 {
3405 set_syntax_error (_("unknown relocation modifier"));
3406 return FALSE;
3407 }
3408
3409 if (entry->add_type == 0)
3410 {
3411 set_syntax_error
3412 (_("this relocation modifier is not allowed on this instruction"));
3413 return FALSE;
3414 }
3415
3416 /* Save str before we decompose it. */
3417 p = *str;
3418
3419 /* Next, we parse the expression. */
3420 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3421 return FALSE;
3422
3423 /* Record the relocation type (use the ADD variant here). */
3424 inst.reloc.type = entry->add_type;
3425 inst.reloc.pc_rel = entry->pc_rel;
3426
3427 /* If str is empty, we've reached the end, stop here. */
3428 if (**str == '\0')
3429 return TRUE;
3430
3431 /* Otherwise, we have a shifted reloc modifier, so rewind to
3432 recover the variable name and continue parsing for the shifter. */
3433 *str = p;
3434 return parse_shifter_operand_imm (str, operand, mode);
3435 }
3436
3437 return parse_shifter_operand (str, operand, mode);
3438 }
3439
3440 /* Parse all forms of an address expression. Information is written
3441 to *OPERAND and/or inst.reloc.
3442
3443 The A64 instruction set has the following addressing modes:
3444
3445 Offset
3446 [base] // in SIMD ld/st structure
3447 [base{,#0}] // in ld/st exclusive
3448 [base{,#imm}]
3449 [base,Xm{,LSL #imm}]
3450 [base,Xm,SXTX {#imm}]
3451 [base,Wm,(S|U)XTW {#imm}]
3452 Pre-indexed
3453 [base]! // in ldraa/ldrab
3454 [base,#imm]!
3455 Post-indexed
3456 [base],#imm
3457 [base],Xm // in SIMD ld/st structure
3458 PC-relative (literal)
3459 label
3460 SVE:
3461 [base,#imm,MUL VL]
3462 [base,Zm.D{,LSL #imm}]
3463 [base,Zm.S,(S|U)XTW {#imm}]
3464 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3465 [Zn.S,#imm]
3466 [Zn.D,#imm]
3467 [Zn.S{, Xm}]
3468 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3469 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3470 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3471
3472 (As a convenience, the notation "=immediate" is permitted in conjunction
3473 with the pc-relative literal load instructions to automatically place an
3474 immediate value or symbolic address in a nearby literal pool and generate
3475 a hidden label which references it.)
3476
3477 Upon a successful parsing, the address structure in *OPERAND will be
3478 filled in the following way:
3479
3480 .base_regno = <base>
3481 .offset.is_reg // 1 if the offset is a register
3482 .offset.imm = <imm>
3483 .offset.regno = <Rm>
3484
3485 For different addressing modes defined in the A64 ISA:
3486
3487 Offset
3488 .pcrel=0; .preind=1; .postind=0; .writeback=0
3489 Pre-indexed
3490 .pcrel=0; .preind=1; .postind=0; .writeback=1
3491 Post-indexed
3492 .pcrel=0; .preind=0; .postind=1; .writeback=1
3493 PC-relative (literal)
3494 .pcrel=1; .preind=1; .postind=0; .writeback=0
3495
3496 The shift/extension information, if any, will be stored in .shifter.
3497 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3498 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3499 corresponding register.
3500
3501 BASE_TYPE says which types of base register should be accepted and
3502 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3503 is the type of shifter that is allowed for immediate offsets,
3504 or SHIFTED_NONE if none.
3505
3506 In all other respects, it is the caller's responsibility to check
3507 for addressing modes not supported by the instruction, and to set
3508 inst.reloc.type. */
3509
3510 static bfd_boolean
3511 parse_address_main (char **str, aarch64_opnd_info *operand,
3512 aarch64_opnd_qualifier_t *base_qualifier,
3513 aarch64_opnd_qualifier_t *offset_qualifier,
3514 aarch64_reg_type base_type, aarch64_reg_type offset_type,
3515 enum parse_shift_mode imm_shift_mode)
3516 {
3517 char *p = *str;
3518 const reg_entry *reg;
3519 expressionS *exp = &inst.reloc.exp;
3520
3521 *base_qualifier = AARCH64_OPND_QLF_NIL;
3522 *offset_qualifier = AARCH64_OPND_QLF_NIL;
3523 if (! skip_past_char (&p, '['))
3524 {
3525 /* =immediate or label. */
3526 operand->addr.pcrel = 1;
3527 operand->addr.preind = 1;
3528
3529 /* #:<reloc_op>:<symbol> */
3530 skip_past_char (&p, '#');
3531 if (skip_past_char (&p, ':'))
3532 {
3533 bfd_reloc_code_real_type ty;
3534 struct reloc_table_entry *entry;
3535
3536 /* Try to parse a relocation modifier. Anything else is
3537 an error. */
3538 entry = find_reloc_table_entry (&p);
3539 if (! entry)
3540 {
3541 set_syntax_error (_("unknown relocation modifier"));
3542 return FALSE;
3543 }
3544
3545 switch (operand->type)
3546 {
3547 case AARCH64_OPND_ADDR_PCREL21:
3548 /* adr */
3549 ty = entry->adr_type;
3550 break;
3551
3552 default:
3553 ty = entry->ld_literal_type;
3554 break;
3555 }
3556
3557 if (ty == 0)
3558 {
3559 set_syntax_error
3560 (_("this relocation modifier is not allowed on this "
3561 "instruction"));
3562 return FALSE;
3563 }
3564
3565 /* #:<reloc_op>: */
3566 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3567 {
3568 set_syntax_error (_("invalid relocation expression"));
3569 return FALSE;
3570 }
3571
3572 /* #:<reloc_op>:<expr> */
3573 /* Record the relocation type. */
3574 inst.reloc.type = ty;
3575 inst.reloc.pc_rel = entry->pc_rel;
3576 }
3577 else
3578 {
3579
3580 if (skip_past_char (&p, '='))
3581 /* =immediate; need to generate the literal in the literal pool. */
3582 inst.gen_lit_pool = 1;
3583
3584 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3585 {
3586 set_syntax_error (_("invalid address"));
3587 return FALSE;
3588 }
3589 }
3590
3591 *str = p;
3592 return TRUE;
3593 }
3594
3595 /* [ */
3596
3597 reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3598 if (!reg || !aarch64_check_reg_type (reg, base_type))
3599 {
3600 set_syntax_error (_(get_reg_expected_msg (base_type)));
3601 return FALSE;
3602 }
3603 operand->addr.base_regno = reg->number;
3604
3605 /* [Xn */
3606 if (skip_past_comma (&p))
3607 {
3608 /* [Xn, */
3609 operand->addr.preind = 1;
3610
3611 reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3612 if (reg)
3613 {
3614 if (!aarch64_check_reg_type (reg, offset_type))
3615 {
3616 set_syntax_error (_(get_reg_expected_msg (offset_type)));
3617 return FALSE;
3618 }
3619
3620 /* [Xn,Rm */
3621 operand->addr.offset.regno = reg->number;
3622 operand->addr.offset.is_reg = 1;
3623 /* Shifted index. */
3624 if (skip_past_comma (&p))
3625 {
3626 /* [Xn,Rm, */
3627 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3628 /* Use the diagnostics set in parse_shift, so do not set a new
3629 error message here. */
3630 return FALSE;
3631 }
3632 /* We only accept:
3633 [base,Xm] # For vector plus scalar SVE2 indexing.
3634 [base,Xm{,LSL #imm}]
3635 [base,Xm,SXTX {#imm}]
3636 [base,Wm,(S|U)XTW {#imm}] */
3637 if (operand->shifter.kind == AARCH64_MOD_NONE
3638 || operand->shifter.kind == AARCH64_MOD_LSL
3639 || operand->shifter.kind == AARCH64_MOD_SXTX)
3640 {
3641 if (*offset_qualifier == AARCH64_OPND_QLF_W)
3642 {
3643 set_syntax_error (_("invalid use of 32-bit register offset"));
3644 return FALSE;
3645 }
3646 if (aarch64_get_qualifier_esize (*base_qualifier)
3647 != aarch64_get_qualifier_esize (*offset_qualifier)
3648 && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
3649 || *base_qualifier != AARCH64_OPND_QLF_S_S
3650 || *offset_qualifier != AARCH64_OPND_QLF_X))
3651 {
3652 set_syntax_error (_("offset has different size from base"));
3653 return FALSE;
3654 }
3655 }
3656 else if (*offset_qualifier == AARCH64_OPND_QLF_X)
3657 {
3658 set_syntax_error (_("invalid use of 64-bit register offset"));
3659 return FALSE;
3660 }
3661 }
3662 else
3663 {
3664 /* [Xn,#:<reloc_op>:<symbol> */
3665 skip_past_char (&p, '#');
3666 if (skip_past_char (&p, ':'))
3667 {
3668 struct reloc_table_entry *entry;
3669
3670 /* Try to parse a relocation modifier. Anything else is
3671 an error. */
3672 if (!(entry = find_reloc_table_entry (&p)))
3673 {
3674 set_syntax_error (_("unknown relocation modifier"));
3675 return FALSE;
3676 }
3677
3678 if (entry->ldst_type == 0)
3679 {
3680 set_syntax_error
3681 (_("this relocation modifier is not allowed on this "
3682 "instruction"));
3683 return FALSE;
3684 }
3685
3686 /* [Xn,#:<reloc_op>: */
3687 /* We now have the group relocation table entry corresponding to
3688 the name in the assembler source. Next, we parse the
3689 expression. */
3690 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3691 {
3692 set_syntax_error (_("invalid relocation expression"));
3693 return FALSE;
3694 }
3695
3696 /* [Xn,#:<reloc_op>:<expr> */
3697 /* Record the load/store relocation type. */
3698 inst.reloc.type = entry->ldst_type;
3699 inst.reloc.pc_rel = entry->pc_rel;
3700 }
3701 else
3702 {
3703 if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3704 {
3705 set_syntax_error (_("invalid expression in the address"));
3706 return FALSE;
3707 }
3708 /* [Xn,<expr> */
3709 if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
3710 /* [Xn,<expr>,<shifter> */
3711 if (! parse_shift (&p, operand, imm_shift_mode))
3712 return FALSE;
3713 }
3714 }
3715 }
3716
3717 if (! skip_past_char (&p, ']'))
3718 {
3719 set_syntax_error (_("']' expected"));
3720 return FALSE;
3721 }
3722
3723 if (skip_past_char (&p, '!'))
3724 {
3725 if (operand->addr.preind && operand->addr.offset.is_reg)
3726 {
3727 set_syntax_error (_("register offset not allowed in pre-indexed "
3728 "addressing mode"));
3729 return FALSE;
3730 }
3731 /* [Xn]! */
3732 operand->addr.writeback = 1;
3733 }
3734 else if (skip_past_comma (&p))
3735 {
3736 /* [Xn], */
3737 operand->addr.postind = 1;
3738 operand->addr.writeback = 1;
3739
3740 if (operand->addr.preind)
3741 {
3742 set_syntax_error (_("cannot combine pre- and post-indexing"));
3743 return FALSE;
3744 }
3745
3746 reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
3747 if (reg)
3748 {
3749 /* [Xn],Xm */
3750 if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3751 {
3752 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3753 return FALSE;
3754 }
3755
3756 operand->addr.offset.regno = reg->number;
3757 operand->addr.offset.is_reg = 1;
3758 }
3759 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3760 {
3761 /* [Xn],#expr */
3762 set_syntax_error (_("invalid expression in the address"));
3763 return FALSE;
3764 }
3765 }
3766
3767 /* If at this point neither .preind nor .postind is set, we have a
3768 bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
3769 ldrab; accept [Rn] as a shorthand for [Rn,#0].
3770 For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
3771 [Zn.<T>, xzr]. */
3772 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3773 {
3774 if (operand->addr.writeback)
3775 {
3776 if (operand->type == AARCH64_OPND_ADDR_SIMM10)
3777 {
3778 /* Accept [Rn]! as a shorthand for [Rn,#0]! */
3779 operand->addr.offset.is_reg = 0;
3780 operand->addr.offset.imm = 0;
3781 operand->addr.preind = 1;
3782 }
3783 else
3784 {
3785 /* Reject [Rn]! */
3786 set_syntax_error (_("missing offset in the pre-indexed address"));
3787 return FALSE;
3788 }
3789 }
3790 else
3791 {
3792 operand->addr.preind = 1;
3793 if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
3794 {
3795 operand->addr.offset.is_reg = 1;
3796 operand->addr.offset.regno = REG_ZR;
3797 *offset_qualifier = AARCH64_OPND_QLF_X;
3798 }
3799 else
3800 {
3801 inst.reloc.exp.X_op = O_constant;
3802 inst.reloc.exp.X_add_number = 0;
3803 }
3804 }
3805 }
3806
3807 *str = p;
3808 return TRUE;
3809 }
3810
3811 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3812 on success. */
3813 static bfd_boolean
3814 parse_address (char **str, aarch64_opnd_info *operand)
3815 {
3816 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3817 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3818 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3819 }
3820
3821 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3822 The arguments have the same meaning as for parse_address_main.
3823 Return TRUE on success. */
3824 static bfd_boolean
3825 parse_sve_address (char **str, aarch64_opnd_info *operand,
3826 aarch64_opnd_qualifier_t *base_qualifier,
3827 aarch64_opnd_qualifier_t *offset_qualifier)
3828 {
3829 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3830 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3831 SHIFTED_MUL_VL);
3832 }
3833
3834 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3835 Return TRUE on success; otherwise return FALSE. */
3836 static bfd_boolean
3837 parse_half (char **str, int *internal_fixup_p)
3838 {
3839 char *p = *str;
3840
3841 skip_past_char (&p, '#');
3842
3843 gas_assert (internal_fixup_p);
3844 *internal_fixup_p = 0;
3845
3846 if (*p == ':')
3847 {
3848 struct reloc_table_entry *entry;
3849
3850 /* Try to parse a relocation. Anything else is an error. */
3851 ++p;
3852 if (!(entry = find_reloc_table_entry (&p)))
3853 {
3854 set_syntax_error (_("unknown relocation modifier"));
3855 return FALSE;
3856 }
3857
3858 if (entry->movw_type == 0)
3859 {
3860 set_syntax_error
3861 (_("this relocation modifier is not allowed on this instruction"));
3862 return FALSE;
3863 }
3864
3865 inst.reloc.type = entry->movw_type;
3866 }
3867 else
3868 *internal_fixup_p = 1;
3869
3870 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3871 return FALSE;
3872
3873 *str = p;
3874 return TRUE;
3875 }
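/* Illustrative usage, not part of the original source: the movw_type
   relocations recognised above allow a 32-bit absolute address to be
   built with MOVZ/MOVK ("foo" is a hypothetical symbol):

       movz x0, #:abs_g1:foo
       movk x0, #:abs_g0_nc:foo
 */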
3876
3877 /* Parse an operand for an ADRP instruction:
3878 ADRP <Xd>, <label>
3879 Return TRUE on success; otherwise return FALSE. */
3880
3881 static bfd_boolean
3882 parse_adrp (char **str)
3883 {
3884 char *p;
3885
3886 p = *str;
3887 if (*p == ':')
3888 {
3889 struct reloc_table_entry *entry;
3890
3891 /* Try to parse a relocation. Anything else is an error. */
3892 ++p;
3893 if (!(entry = find_reloc_table_entry (&p)))
3894 {
3895 set_syntax_error (_("unknown relocation modifier"));
3896 return FALSE;
3897 }
3898
3899 if (entry->adrp_type == 0)
3900 {
3901 set_syntax_error
3902 (_("this relocation modifier is not allowed on this instruction"));
3903 return FALSE;
3904 }
3905
3906 inst.reloc.type = entry->adrp_type;
3907 }
3908 else
3909 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3910
3911 inst.reloc.pc_rel = 1;
3912
3913 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3914 return FALSE;
3915
3916 *str = p;
3917 return TRUE;
3918 }
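/* Illustrative usage, not part of the original source: with no explicit
   modifier the bare-label form gets BFD_RELOC_AARCH64_ADR_HI21_PCREL as
   set above, and is normally paired with an ADD carrying the :lo12:
   modifier ("foo" is a hypothetical symbol):

       adrp x0, foo
       add  x0, x0, #:lo12:foo
 */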
3919
3920 /* Miscellaneous. */
3921
3922 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3923 of SIZE tokens in which index I gives the token for field value I,
3924 or is null if field value I is invalid. REG_TYPE says which register
3925 names should be treated as registers rather than as symbolic immediates.
3926
3927 Return true on success, moving *STR past the operand and storing the
3928 field value in *VAL. */
3929
3930 static int
3931 parse_enum_string (char **str, int64_t *val, const char *const *array,
3932 size_t size, aarch64_reg_type reg_type)
3933 {
3934 expressionS exp;
3935 char *p, *q;
3936 size_t i;
3937
3938 /* Match C-like tokens. */
3939 p = q = *str;
3940 while (ISALNUM (*q))
3941 q++;
3942
3943 for (i = 0; i < size; ++i)
3944 if (array[i]
3945 && strncasecmp (array[i], p, q - p) == 0
3946 && array[i][q - p] == 0)
3947 {
3948 *val = i;
3949 *str = q;
3950 return TRUE;
3951 }
3952
3953 if (!parse_immediate_expression (&p, &exp, reg_type))
3954 return FALSE;
3955
3956 if (exp.X_op == O_constant
3957 && (uint64_t) exp.X_add_number < size)
3958 {
3959 *val = exp.X_add_number;
3960 *str = p;
3961 return TRUE;
3962 }
3963
3964 /* Use the default error for this operand. */
3965 return FALSE;
3966 }
3967
3968 /* Parse an option for a preload instruction. Returns the encoding for the
3969 option, or PARSE_FAIL. */
3970
3971 static int
3972 parse_pldop (char **str)
3973 {
3974 char *p, *q;
3975 const struct aarch64_name_value_pair *o;
3976
3977 p = q = *str;
3978 while (ISALNUM (*q))
3979 q++;
3980
3981 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
3982 if (!o)
3983 return PARSE_FAIL;
3984
3985 *str = q;
3986 return o->value;
3987 }
3988
3989 /* Parse an option for a barrier instruction. Returns the encoding for the
3990 option, or PARSE_FAIL. */
3991
3992 static int
3993 parse_barrier (char **str)
3994 {
3995 char *p, *q;
3996 const asm_barrier_opt *o;
3997
3998 p = q = *str;
3999 while (ISALPHA (*q))
4000 q++;
4001
4002 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4003 if (!o)
4004 return PARSE_FAIL;
4005
4006 *str = q;
4007 return o->value;
4008 }
4009
4010 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4011 and return 0 if successful. Otherwise return PARSE_FAIL. */
4012
4013 static int
4014 parse_barrier_psb (char **str,
4015 const struct aarch64_name_value_pair ** hint_opt)
4016 {
4017 char *p, *q;
4018 const struct aarch64_name_value_pair *o;
4019
4020 p = q = *str;
4021 while (ISALPHA (*q))
4022 q++;
4023
4024 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4025 if (!o)
4026 {
4027 set_fatal_syntax_error
4028 ( _("unknown or missing option to PSB/TSB"));
4029 return PARSE_FAIL;
4030 }
4031
4032 if (o->value != 0x11)
4033 {
4034 /* PSB only accepts option name 'CSYNC'. */
4035 set_syntax_error
4036 (_("the specified option is not accepted for PSB/TSB"));
4037 return PARSE_FAIL;
4038 }
4039
4040 *str = q;
4041 *hint_opt = o;
4042 return 0;
4043 }
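/* Illustrative usage, not part of the original source: CSYNC is the
   only operand spelling accepted above, as in:

       psb csync
       tsb csync
 */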
4044
4045 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4046 and return 0 if successful. Otherwise return PARSE_FAIL. */
4047
4048 static int
4049 parse_bti_operand (char **str,
4050 const struct aarch64_name_value_pair ** hint_opt)
4051 {
4052 char *p, *q;
4053 const struct aarch64_name_value_pair *o;
4054
4055 p = q = *str;
4056 while (ISALPHA (*q))
4057 q++;
4058
4059 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4060 if (!o)
4061 {
4062 set_fatal_syntax_error
4063 ( _("unknown option to BTI"));
4064 return PARSE_FAIL;
4065 }
4066
4067 switch (o->value)
4068 {
4069 /* Valid BTI operands. */
4070 case HINT_OPD_C:
4071 case HINT_OPD_J:
4072 case HINT_OPD_JC:
4073 break;
4074
4075 default:
4076 set_syntax_error
4077 (_("unknown option to BTI"));
4078 return PARSE_FAIL;
4079 }
4080
4081 *str = q;
4082 *hint_opt = o;
4083 return 0;
4084 }
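/* Illustrative usage, not part of the original source: the accepted
   operands correspond to the BTI variants

       bti c
       bti j
       bti jc
 */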
4085
4086 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4087 Returns the encoding for the option, or PARSE_FAIL.
4088
4089 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4090 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4091
4092 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4093 field, otherwise as a system register.
4094 */
4095
4096 static int
4097 parse_sys_reg (char **str, htab_t sys_regs,
4098 int imple_defined_p, int pstatefield_p,
4099 uint32_t* flags)
4100 {
4101 char *p, *q;
4102 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4103 const aarch64_sys_reg *o;
4104 int value;
4105
4106 p = buf;
4107 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4108 if (p < buf + (sizeof (buf) - 1))
4109 *p++ = TOLOWER (*q);
4110 *p = '\0';
4111
4112 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4113 valid system register. This is enforced by construction of the hash
4114 table. */
4115 if (p - buf != q - *str)
4116 return PARSE_FAIL;
4117
4118 o = str_hash_find (sys_regs, buf);
4119 if (!o)
4120 {
4121 if (!imple_defined_p)
4122 return PARSE_FAIL;
4123 else
4124 {
4125 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
4126 unsigned int op0, op1, cn, cm, op2;
4127
4128 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
4129 != 5)
4130 return PARSE_FAIL;
4131 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
4132 return PARSE_FAIL;
4133 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
4134 if (flags)
4135 *flags = 0;
4136 }
4137 }
4138 else
4139 {
4140 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
4141 as_bad (_("selected processor does not support PSTATE field "
4142 "name '%s'"), buf);
4143 if (!pstatefield_p
4144 && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
4145 o->value, o->flags, o->features))
4146 as_bad (_("selected processor does not support system register "
4147 "name '%s'"), buf);
4148 if (aarch64_sys_reg_deprecated_p (o->flags))
4149 as_warn (_("system register name '%s' is deprecated and may be "
4150 "removed in a future release"), buf);
4151 value = o->value;
4152 if (flags)
4153 *flags = o->flags;
4154 }
4155
4156 *str = q;
4157 return value;
4158 }
4159
4160 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4161 for the option, or NULL. */
4162
4163 static const aarch64_sys_ins_reg *
4164 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4165 {
4166 char *p, *q;
4167 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4168 const aarch64_sys_ins_reg *o;
4169
4170 p = buf;
4171 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4172 if (p < buf + (sizeof (buf) - 1))
4173 *p++ = TOLOWER (*q);
4174 *p = '\0';
4175
4176 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4177 valid system register. This is enforced by construction of the hash
4178 table. */
4179 if (p - buf != q - *str)
4180 return NULL;
4181
4182 o = str_hash_find (sys_ins_regs, buf);
4183 if (!o)
4184 return NULL;
4185
4186 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4187 o->name, o->value, o->flags, 0))
4188 as_bad (_("selected processor does not support system register "
4189 "name '%s'"), buf);
4190 if (aarch64_sys_reg_deprecated_p (o->flags))
4191 as_warn (_("system register name '%s' is deprecated and may be "
4192 "removed in a future release"), buf);
4193
4194 *str = q;
4195 return o;
4196 }
4197 \f
4198 #define po_char_or_fail(chr) do { \
4199 if (! skip_past_char (&str, chr)) \
4200 goto failure; \
4201 } while (0)
4202
4203 #define po_reg_or_fail(regtype) do { \
4204 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
4205 if (val == PARSE_FAIL) \
4206 { \
4207 set_default_error (); \
4208 goto failure; \
4209 } \
4210 } while (0)
4211
4212 #define po_int_reg_or_fail(reg_type) do { \
4213 reg = aarch64_reg_parse_32_64 (&str, &qualifier); \
4214 if (!reg || !aarch64_check_reg_type (reg, reg_type)) \
4215 { \
4216 set_default_error (); \
4217 goto failure; \
4218 } \
4219 info->reg.regno = reg->number; \
4220 info->qualifier = qualifier; \
4221 } while (0)
4222
4223 #define po_imm_nc_or_fail() do { \
4224 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4225 goto failure; \
4226 } while (0)
4227
4228 #define po_imm_or_fail(min, max) do { \
4229 if (! parse_constant_immediate (&str, &val, imm_reg_type)) \
4230 goto failure; \
4231 if (val < min || val > max) \
4232 { \
4233 set_fatal_syntax_error (_("immediate value out of range "\
4234 #min " to "#max)); \
4235 goto failure; \
4236 } \
4237 } while (0)
4238
4239 #define po_enum_or_fail(array) do { \
4240 if (!parse_enum_string (&str, &val, array, \
4241 ARRAY_SIZE (array), imm_reg_type)) \
4242 goto failure; \
4243 } while (0)
4244
4245 #define po_misc_or_fail(expr) do { \
4246 if (!expr) \
4247 goto failure; \
4248 } while (0)
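/* These po_* helper macros are only meant to be expanded inside
   parse_operands below, where the locals `str', `val', `reg', `rtype',
   `qualifier', `info' and `imm_reg_type' and the `failure' label are in
   scope.  A typical immediate-operand case uses them as follows:

     case AARCH64_OPND_UIMM7:
       po_imm_or_fail (0, 127);
       info->imm.value = val;
       break;
*/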
4249 \f
4250 /* Encode the 12-bit imm field of Add/sub immediate. */
4251 static inline uint32_t
4252 encode_addsub_imm (uint32_t imm)
4253 {
4254 return imm << 10;
4255 }
4256
4257 /* Encode the shift amount field of Add/sub immediate. */
4258 static inline uint32_t
4259 encode_addsub_imm_shift_amount (uint32_t cnt)
4260 {
4261 return cnt << 22;
4262 }
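/* Taken together, the two helpers above place the add/sub immediate in
   bits [21:10] and the shift selector in bits [23:22] of the instruction
   word.  As a worked example (a sketch, assuming the usual ADD/SUB
   (immediate) layout in which shift value 1 selects LSL #12): for
   "add x0, x1, #1, lsl #12" the immediate field would be
   encode_addsub_imm (1), i.e. bit 10 set, combined with
   encode_addsub_imm_shift_amount (1), i.e. bit 22 set.  */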
4263
4264
4265 /* Encode the imm field of the Adr instruction. */
4266 static inline uint32_t
4267 encode_adr_imm (uint32_t imm)
4268 {
4269 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
4270 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
4271 }
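/* For example, with imm == 0x12345 the two low bits (0x1) go to bits
   [30:29] and bits [20:2] (0x48d1) go to bits [23:5], so
   encode_adr_imm (0x12345) == (0x1 << 29) | (0x48d1 << 5).  */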
4272
4273 /* Encode the immediate field of Move wide immediate. */
4274 static inline uint32_t
4275 encode_movw_imm (uint32_t imm)
4276 {
4277 return imm << 5;
4278 }
4279
4280 /* Encode the 26-bit offset of unconditional branch. */
4281 static inline uint32_t
4282 encode_branch_ofs_26 (uint32_t ofs)
4283 {
4284 return ofs & ((1 << 26) - 1);
4285 }
4286
4287 /* Encode the 19-bit offset of conditional branch and compare & branch. */
4288 static inline uint32_t
4289 encode_cond_branch_ofs_19 (uint32_t ofs)
4290 {
4291 return (ofs & ((1 << 19) - 1)) << 5;
4292 }
4293
4294 /* Encode the 19-bit offset of ld literal. */
4295 static inline uint32_t
4296 encode_ld_lit_ofs_19 (uint32_t ofs)
4297 {
4298 return (ofs & ((1 << 19) - 1)) << 5;
4299 }
4300
4301 /* Encode the 14-bit offset of test & branch. */
4302 static inline uint32_t
4303 encode_tst_branch_ofs_14 (uint32_t ofs)
4304 {
4305 return (ofs & ((1 << 14) - 1)) << 5;
4306 }
4307
4308 /* Encode the 16-bit imm field of svc/hvc/smc. */
4309 static inline uint32_t
4310 encode_svc_imm (uint32_t imm)
4311 {
4312 return imm << 5;
4313 }
4314
4315 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
4316 static inline uint32_t
4317 reencode_addsub_switch_add_sub (uint32_t opcode)
4318 {
4319 return opcode ^ (1 << 30);
4320 }
4321
4322 static inline uint32_t
4323 reencode_movzn_to_movz (uint32_t opcode)
4324 {
4325 return opcode | (1 << 30);
4326 }
4327
4328 static inline uint32_t
4329 reencode_movzn_to_movn (uint32_t opcode)
4330 {
4331 return opcode & ~(1 << 30);
4332 }
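/* The reencode_* helpers above work by flipping or forcing bit 30, the
   bit that distinguishes ADD(S) from SUB(S) and MOVZ from MOVN.  For
   instance (a sketch, assuming the standard A64 base encodings),
   reencode_addsub_switch_add_sub (0x91000000) yields 0xd1000000,
   turning "add Xd, Xn, #imm" into "sub Xd, Xn, #imm".  */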
4333
4334 /* Overall per-instruction processing. */
4335
4336 /* We need to be able to fix up arbitrary expressions in some statements.
4337 This is so that we can handle symbols that are an arbitrary distance from
4338 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4339 which returns part of an address in a form which will be valid for
4340 a data instruction. We do this by pushing the expression into a symbol
4341 in the expr_section, and creating a fix for that. */
4342
4343 static fixS *
4344 fix_new_aarch64 (fragS * frag,
4345 int where,
4346 short int size,
4347 expressionS * exp,
4348 int pc_rel,
4349 int reloc)
4350 {
4351 fixS *new_fix;
4352
4353 switch (exp->X_op)
4354 {
4355 case O_constant:
4356 case O_symbol:
4357 case O_add:
4358 case O_subtract:
4359 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4360 break;
4361
4362 default:
4363 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4364 pc_rel, reloc);
4365 break;
4366 }
4367 return new_fix;
4368 }
4369 \f
4370 /* Diagnostics on operands errors. */
4371
4372 /* By default, output a verbose error message.
4373 Verbose error messages can be disabled with -mno-verbose-error. */
4374 static int verbose_error_p = 1;
4375
4376 #ifdef DEBUG_AARCH64
4377 /* N.B. this is only for the purpose of debugging. */
4378 const char* operand_mismatch_kind_names[] =
4379 {
4380 "AARCH64_OPDE_NIL",
4381 "AARCH64_OPDE_RECOVERABLE",
4382 "AARCH64_OPDE_SYNTAX_ERROR",
4383 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
4384 "AARCH64_OPDE_INVALID_VARIANT",
4385 "AARCH64_OPDE_OUT_OF_RANGE",
4386 "AARCH64_OPDE_UNALIGNED",
4387 "AARCH64_OPDE_REG_LIST",
4388 "AARCH64_OPDE_OTHER_ERROR",
4389 };
4390 #endif /* DEBUG_AARCH64 */
4391
4392 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4393
4394 When multiple errors of different kinds are found in the same assembly
4395 line, only the error of the highest severity will be picked up for
4396 issuing the diagnostics. */
4397
4398 static inline bfd_boolean
4399 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
4400 enum aarch64_operand_error_kind rhs)
4401 {
4402 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
4403 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
4404 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
4405 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
4406 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
4407 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
4408 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
4409 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
4410 return lhs > rhs;
4411 }
4412
4413 /* Helper routine to get the mnemonic name from the assembly instruction
4414 line; should only be called for diagnostic purposes, as a string copy
4415 operation is involved, which may affect the runtime performance if
4416 used elsewhere.
4417
4418 static const char*
4419 get_mnemonic_name (const char *str)
4420 {
4421 static char mnemonic[32];
4422 char *ptr;
4423
4424 /* Get the first 31 bytes and assume that the full name is included. */
4425 strncpy (mnemonic, str, 31);
4426 mnemonic[31] = '\0';
4427
4428 /* Scan up to the end of the mnemonic, which must end in white space,
4429 '.', or end of string. */
4430 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
4431 ;
4432
4433 *ptr = '\0';
4434
4435 /* Append '...' to the truncated long name. */
4436 if (ptr - mnemonic == 31)
4437 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
4438
4439 return mnemonic;
4440 }
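/* For example, given the line "ldr x0, [x1, #8]" this returns "ldr"; a
   mnemonic longer than the 31-byte buffer is truncated and reported with
   a trailing "...".  */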
4441
4442 static void
4443 reset_aarch64_instruction (aarch64_instruction *instruction)
4444 {
4445 memset (instruction, '\0', sizeof (aarch64_instruction));
4446 instruction->reloc.type = BFD_RELOC_UNUSED;
4447 }
4448
4449 /* Data structures storing one user error in the assembly code related to
4450 operands. */
4451
4452 struct operand_error_record
4453 {
4454 const aarch64_opcode *opcode;
4455 aarch64_operand_error detail;
4456 struct operand_error_record *next;
4457 };
4458
4459 typedef struct operand_error_record operand_error_record;
4460
4461 struct operand_errors
4462 {
4463 operand_error_record *head;
4464 operand_error_record *tail;
4465 };
4466
4467 typedef struct operand_errors operand_errors;
4468
4469 /* Top-level data structure reporting user errors for the current line of
4470 the assembly code.
4471 The way md_assemble works is that all opcodes sharing the same mnemonic
4472 name are iterated to find a match to the assembly line. In this data
4473 structure, each such opcode will have one operand_error_record
4474 allocated and inserted. In other words, excessive errors related to
4475 a single opcode are disregarded. */
4476 operand_errors operand_error_report;
4477
4478 /* Free record nodes. */
4479 static operand_error_record *free_opnd_error_record_nodes = NULL;
4480
4481 /* Initialize the data structure that stores the operand mismatch
4482 information on assembling one line of the assembly code. */
4483 static void
4484 init_operand_error_report (void)
4485 {
4486 if (operand_error_report.head != NULL)
4487 {
4488 gas_assert (operand_error_report.tail != NULL);
4489 operand_error_report.tail->next = free_opnd_error_record_nodes;
4490 free_opnd_error_record_nodes = operand_error_report.head;
4491 operand_error_report.head = NULL;
4492 operand_error_report.tail = NULL;
4493 return;
4494 }
4495 gas_assert (operand_error_report.tail == NULL);
4496 }
4497
4498 /* Return TRUE if some operand error has been recorded during the
4499 parsing of the current assembly line using the opcode *OPCODE;
4500 otherwise return FALSE. */
4501 static inline bfd_boolean
4502 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4503 {
4504 operand_error_record *record = operand_error_report.head;
4505 return record && record->opcode == opcode;
4506 }
4507
4508 /* Add the error record *NEW_RECORD to operand_error_report. The record's
4509 OPCODE field is initialized with OPCODE.
4510 N.B. only one record for each opcode, i.e. at most one error is
4511 recorded for each instruction template. */
4512
4513 static void
4514 add_operand_error_record (const operand_error_record* new_record)
4515 {
4516 const aarch64_opcode *opcode = new_record->opcode;
4517 operand_error_record* record = operand_error_report.head;
4518
4519 /* The record may have been created for this opcode. If not, we need
4520 to prepare one. */
4521 if (! opcode_has_operand_error_p (opcode))
4522 {
4523 /* Get one empty record. */
4524 if (free_opnd_error_record_nodes == NULL)
4525 {
4526 record = XNEW (operand_error_record);
4527 }
4528 else
4529 {
4530 record = free_opnd_error_record_nodes;
4531 free_opnd_error_record_nodes = record->next;
4532 }
4533 record->opcode = opcode;
4534 /* Insert at the head. */
4535 record->next = operand_error_report.head;
4536 operand_error_report.head = record;
4537 if (operand_error_report.tail == NULL)
4538 operand_error_report.tail = record;
4539 }
4540 else if (record->detail.kind != AARCH64_OPDE_NIL
4541 && record->detail.index <= new_record->detail.index
4542 && operand_error_higher_severity_p (record->detail.kind,
4543 new_record->detail.kind))
4544 {
4545 /* In the case of multiple errors found on operands related with a
4546 single opcode, only record the error of the leftmost operand and
4547 only if the error is of higher severity. */
4548 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
4549 " the existing error %s on operand %d",
4550 operand_mismatch_kind_names[new_record->detail.kind],
4551 new_record->detail.index,
4552 operand_mismatch_kind_names[record->detail.kind],
4553 record->detail.index);
4554 return;
4555 }
4556
4557 record->detail = new_record->detail;
4558 }
4559
4560 static inline void
4561 record_operand_error_info (const aarch64_opcode *opcode,
4562 aarch64_operand_error *error_info)
4563 {
4564 operand_error_record record;
4565 record.opcode = opcode;
4566 record.detail = *error_info;
4567 add_operand_error_record (&record);
4568 }
4569
4570 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4571 error message *ERROR, for operand IDX (count from 0). */
4572
4573 static void
4574 record_operand_error (const aarch64_opcode *opcode, int idx,
4575 enum aarch64_operand_error_kind kind,
4576 const char* error)
4577 {
4578 aarch64_operand_error info;
4579 memset(&info, 0, sizeof (info));
4580 info.index = idx;
4581 info.kind = kind;
4582 info.error = error;
4583 info.non_fatal = FALSE;
4584 record_operand_error_info (opcode, &info);
4585 }
4586
4587 static void
4588 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4589 enum aarch64_operand_error_kind kind,
4590 const char* error, const int *extra_data)
4591 {
4592 aarch64_operand_error info;
4593 info.index = idx;
4594 info.kind = kind;
4595 info.error = error;
4596 info.data[0] = extra_data[0];
4597 info.data[1] = extra_data[1];
4598 info.data[2] = extra_data[2];
4599 info.non_fatal = FALSE;
4600 record_operand_error_info (opcode, &info);
4601 }
4602
4603 static void
4604 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4605 const char* error, int lower_bound,
4606 int upper_bound)
4607 {
4608 int data[3] = {lower_bound, upper_bound, 0};
4609 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4610 error, data);
4611 }
4612
4613 /* Remove the operand error record for *OPCODE. */
4614 static void ATTRIBUTE_UNUSED
4615 remove_operand_error_record (const aarch64_opcode *opcode)
4616 {
4617 if (opcode_has_operand_error_p (opcode))
4618 {
4619 operand_error_record* record = operand_error_report.head;
4620 gas_assert (record != NULL && operand_error_report.tail != NULL);
4621 operand_error_report.head = record->next;
4622 record->next = free_opnd_error_record_nodes;
4623 free_opnd_error_record_nodes = record;
4624 if (operand_error_report.head == NULL)
4625 {
4626 gas_assert (operand_error_report.tail == record);
4627 operand_error_report.tail = NULL;
4628 }
4629 }
4630 }
4631
4632 /* Given the instruction in *INSTR, return the index of the best matched
4633 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4634
4635 Return -1 if there is no qualifier sequence; return the first match
4636 if multiple matches are found. */
4637
4638 static int
4639 find_best_match (const aarch64_inst *instr,
4640 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4641 {
4642 int i, num_opnds, max_num_matched, idx;
4643
4644 num_opnds = aarch64_num_of_operands (instr->opcode);
4645 if (num_opnds == 0)
4646 {
4647 DEBUG_TRACE ("no operand");
4648 return -1;
4649 }
4650
4651 max_num_matched = 0;
4652 idx = 0;
4653
4654 /* For each pattern. */
4655 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4656 {
4657 int j, num_matched;
4658 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4659
4660 /* Most opcodes have far fewer patterns in the list. */
4661 if (empty_qualifier_sequence_p (qualifiers))
4662 {
4663 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4664 break;
4665 }
4666
4667 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4668 if (*qualifiers == instr->operands[j].qualifier)
4669 ++num_matched;
4670
4671 if (num_matched > max_num_matched)
4672 {
4673 max_num_matched = num_matched;
4674 idx = i;
4675 }
4676 }
4677
4678 DEBUG_TRACE ("return with %d", idx);
4679 return idx;
4680 }
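/* As a hypothetical example, if an instruction was parsed with operand
   qualifiers (W, W, X) and the opcode's qualifier list contains the
   sequences (W, W, W) and (X, X, X), the first sequence matches two
   operands and the second only one, so index 0 is returned.  */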
4681
4682 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4683 corresponding operands in *INSTR. */
4684
4685 static inline void
4686 assign_qualifier_sequence (aarch64_inst *instr,
4687 const aarch64_opnd_qualifier_t *qualifiers)
4688 {
4689 int i = 0;
4690 int num_opnds = aarch64_num_of_operands (instr->opcode);
4691 gas_assert (num_opnds);
4692 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4693 instr->operands[i].qualifier = *qualifiers;
4694 }
4695
4696 /* Print operands for diagnostic purposes. */
4697
4698 static void
4699 print_operands (char *buf, const aarch64_opcode *opcode,
4700 const aarch64_opnd_info *opnds)
4701 {
4702 int i;
4703
4704 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4705 {
4706 char str[128];
4707
4708 /* We rely mainly on the opcode operand info; however, we also look
4709 into the inst->operands to support the printing of the optional
4710 operand.
4711 The two operand codes should be the same in all cases, apart from
4712 when the operand can be optional. */
4713 if (opcode->operands[i] == AARCH64_OPND_NIL
4714 || opnds[i].type == AARCH64_OPND_NIL)
4715 break;
4716
4717 /* Generate the operand string in STR. */
4718 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4719 NULL, cpu_variant);
4720
4721 /* Delimiter. */
4722 if (str[0] != '\0')
4723 strcat (buf, i == 0 ? " " : ", ");
4724
4725 /* Append the operand string. */
4726 strcat (buf, str);
4727 }
4728 }
4729
4730 /* Send a string to stderr as information. */
4731
4732 static void
4733 output_info (const char *format, ...)
4734 {
4735 const char *file;
4736 unsigned int line;
4737 va_list args;
4738
4739 file = as_where (&line);
4740 if (file)
4741 {
4742 if (line != 0)
4743 fprintf (stderr, "%s:%u: ", file, line);
4744 else
4745 fprintf (stderr, "%s: ", file);
4746 }
4747 fprintf (stderr, _("Info: "));
4748 va_start (args, format);
4749 vfprintf (stderr, format, args);
4750 va_end (args);
4751 (void) putc ('\n', stderr);
4752 }
4753
4754 /* Output one operand error record. */
4755
4756 static void
4757 output_operand_error_record (const operand_error_record *record, char *str)
4758 {
4759 const aarch64_operand_error *detail = &record->detail;
4760 int idx = detail->index;
4761 const aarch64_opcode *opcode = record->opcode;
4762 enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4763 : AARCH64_OPND_NIL);
4764
4765 typedef void (*handler_t)(const char *format, ...);
4766 handler_t handler = detail->non_fatal ? as_warn : as_bad;
4767
4768 switch (detail->kind)
4769 {
4770 case AARCH64_OPDE_NIL:
4771 gas_assert (0);
4772 break;
4773 case AARCH64_OPDE_SYNTAX_ERROR:
4774 case AARCH64_OPDE_RECOVERABLE:
4775 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4776 case AARCH64_OPDE_OTHER_ERROR:
4777 /* Use the prepared error message if there is one, otherwise use the
4778 operand description string to describe the error. */
4779 if (detail->error != NULL)
4780 {
4781 if (idx < 0)
4782 handler (_("%s -- `%s'"), detail->error, str);
4783 else
4784 handler (_("%s at operand %d -- `%s'"),
4785 detail->error, idx + 1, str);
4786 }
4787 else
4788 {
4789 gas_assert (idx >= 0);
4790 handler (_("operand %d must be %s -- `%s'"), idx + 1,
4791 aarch64_get_operand_desc (opd_code), str);
4792 }
4793 break;
4794
4795 case AARCH64_OPDE_INVALID_VARIANT:
4796 handler (_("operand mismatch -- `%s'"), str);
4797 if (verbose_error_p)
4798 {
4799 /* We will try to correct the erroneous instruction and also provide
4800 more information e.g. all other valid variants.
4801
4802 The string representation of the corrected instruction and other
4803 valid variants are generated by
4804
4805 1) obtaining the intermediate representation of the erroneous
4806 instruction;
4807 2) manipulating the IR, e.g. replacing the operand qualifier;
4808 3) printing out the instruction by calling the printer functions
4809 shared with the disassembler.
4810
4811 The limitation of this method is that the exact input assembly
4812 line cannot be accurately reproduced in some cases, for example an
4813 optional operand present in the actual assembly line will be
4814 omitted in the output; likewise for the optional syntax rules,
4815 e.g. the # before the immediate. Another limitation is that the
4816 assembly symbols and relocation operations in the assembly line
4817 currently cannot be printed out in the error report. Last but not
4818 least, when other errors co-exist with this error, the
4819 'corrected' instruction may still be incorrect, e.g. given
4820 'ldnp h0,h1,[x0,#6]!'
4821 this diagnosis will provide the version:
4822 'ldnp s0,s1,[x0,#6]!'
4823 which is still not right. */
4824 size_t len = strlen (get_mnemonic_name (str));
4825 int i, qlf_idx;
4826 bfd_boolean result;
4827 char buf[2048];
4828 aarch64_inst *inst_base = &inst.base;
4829 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4830
4831 /* Init inst. */
4832 reset_aarch64_instruction (&inst);
4833 inst_base->opcode = opcode;
4834
4835 /* Reset the error report so that there is no side effect on the
4836 following operand parsing. */
4837 init_operand_error_report ();
4838
4839 /* Fill inst. */
4840 result = parse_operands (str + len, opcode)
4841 && programmer_friendly_fixup (&inst);
4842 gas_assert (result);
4843 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4844 NULL, NULL, insn_sequence);
4845 gas_assert (!result);
4846
4847 /* Find the most matched qualifier sequence. */
4848 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4849 gas_assert (qlf_idx > -1);
4850
4851 /* Assign the qualifiers. */
4852 assign_qualifier_sequence (inst_base,
4853 opcode->qualifiers_list[qlf_idx]);
4854
4855 /* Print the hint. */
4856 output_info (_(" did you mean this?"));
4857 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4858 print_operands (buf, opcode, inst_base->operands);
4859 output_info (_(" %s"), buf);
4860
4861 /* Print out other variant(s) if there is any. */
4862 if (qlf_idx != 0
4863 || !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4864 output_info (_(" other valid variant(s):"));
4865
4866 /* For each pattern. */
4867 qualifiers_list = opcode->qualifiers_list;
4868 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4869 {
4870 /* Most opcodes have far fewer patterns in the list.
4871 The first NIL qualifier indicates the end of the list. */
4872 if (empty_qualifier_sequence_p (*qualifiers_list))
4873 break;
4874
4875 if (i != qlf_idx)
4876 {
4877 /* Mnemonics name. */
4878 snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4879
4880 /* Assign the qualifiers. */
4881 assign_qualifier_sequence (inst_base, *qualifiers_list);
4882
4883 /* Print instruction. */
4884 print_operands (buf, opcode, inst_base->operands);
4885
4886 output_info (_(" %s"), buf);
4887 }
4888 }
4889 }
4890 break;
4891
4892 case AARCH64_OPDE_UNTIED_OPERAND:
4893 handler (_("operand %d must be the same register as operand 1 -- `%s'"),
4894 detail->index + 1, str);
4895 break;
4896
4897 case AARCH64_OPDE_OUT_OF_RANGE:
4898 if (detail->data[0] != detail->data[1])
4899 handler (_("%s out of range %d to %d at operand %d -- `%s'"),
4900 detail->error ? detail->error : _("immediate value"),
4901 detail->data[0], detail->data[1], idx + 1, str);
4902 else
4903 handler (_("%s must be %d at operand %d -- `%s'"),
4904 detail->error ? detail->error : _("immediate value"),
4905 detail->data[0], idx + 1, str);
4906 break;
4907
4908 case AARCH64_OPDE_REG_LIST:
4909 if (detail->data[0] == 1)
4910 handler (_("invalid number of registers in the list; "
4911 "only 1 register is expected at operand %d -- `%s'"),
4912 idx + 1, str);
4913 else
4914 handler (_("invalid number of registers in the list; "
4915 "%d registers are expected at operand %d -- `%s'"),
4916 detail->data[0], idx + 1, str);
4917 break;
4918
4919 case AARCH64_OPDE_UNALIGNED:
4920 handler (_("immediate value must be a multiple of "
4921 "%d at operand %d -- `%s'"),
4922 detail->data[0], idx + 1, str);
4923 break;
4924
4925 default:
4926 gas_assert (0);
4927 break;
4928 }
4929 }
4930
4931 /* Process and output the error message about the operand mismatching.
4932
4933 When this function is called, the operand error information has
4934 been collected for an assembly line and there will be multiple
4935 errors in the case of multiple instruction templates; output the
4936 error message that most closely describes the problem.
4937
4938 The errors to be printed can be filtered to print either all errors
4939 or only non-fatal errors. This distinction has to be made because
4940 the error buffer may already be filled with fatal errors we don't want to
4941 print due to the different instruction templates. */
4942
4943 static void
4944 output_operand_error_report (char *str, bfd_boolean non_fatal_only)
4945 {
4946 int largest_error_pos;
4947 const char *msg = NULL;
4948 enum aarch64_operand_error_kind kind;
4949 operand_error_record *curr;
4950 operand_error_record *head = operand_error_report.head;
4951 operand_error_record *record = NULL;
4952
4953 /* No error to report. */
4954 if (head == NULL)
4955 return;
4956
4957 gas_assert (head != NULL && operand_error_report.tail != NULL);
4958
4959 /* Only one error. */
4960 if (head == operand_error_report.tail)
4961 {
4962 /* If the only error is a non-fatal one and we don't want to print it,
4963 just exit. */
4964 if (!non_fatal_only || head->detail.non_fatal)
4965 {
4966 DEBUG_TRACE ("single opcode entry with error kind: %s",
4967 operand_mismatch_kind_names[head->detail.kind]);
4968 output_operand_error_record (head, str);
4969 }
4970 return;
4971 }
4972
4973 /* Find the error kind of the highest severity. */
4974 DEBUG_TRACE ("multiple opcode entries with error kind");
4975 kind = AARCH64_OPDE_NIL;
4976 for (curr = head; curr != NULL; curr = curr->next)
4977 {
4978 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4979 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4980 if (operand_error_higher_severity_p (curr->detail.kind, kind)
4981 && (!non_fatal_only || curr->detail.non_fatal))
4982 kind = curr->detail.kind;
4983 }
4984
4985 gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
4986
4987 /* Pick up one of errors of KIND to report. */
4988 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4989 for (curr = head; curr != NULL; curr = curr->next)
4990 {
4991 /* If we don't want to print non-fatal errors then don't consider them
4992 at all. */
4993 if (curr->detail.kind != kind
4994 || (non_fatal_only && !curr->detail.non_fatal))
4995 continue;
4996 /* If there are multiple errors, pick up the one with the highest
4997 mismatching operand index. In the case of multiple errors with
4998 the equally highest operand index, pick up the first one or the
4999 first one with non-NULL error message. */
5000 if (curr->detail.index > largest_error_pos
5001 || (curr->detail.index == largest_error_pos && msg == NULL
5002 && curr->detail.error != NULL))
5003 {
5004 largest_error_pos = curr->detail.index;
5005 record = curr;
5006 msg = record->detail.error;
5007 }
5008 }
5009
5010 /* The way errors are collected in the back-end is a bit non-intuitive. But
5011 essentially, because each operand template is tried recursively you may
5012 always have errors collected from the previously tried operands. These are
5013 usually skipped if there is one successful match. However, now with the
5014 non-fatal errors we have to ignore those previously collected hard errors
5015 when we're only interested in printing the non-fatal ones. This condition
5016 prevents us from printing errors that are not appropriate: a template did
5017 match, but it also produced non-fatal warnings that it wants to print. */
5018 if (non_fatal_only && !record)
5019 return;
5020
5021 gas_assert (largest_error_pos != -2 && record != NULL);
5022 DEBUG_TRACE ("Pick up error kind %s to report",
5023 operand_mismatch_kind_names[record->detail.kind]);
5024
5025 /* Output. */
5026 output_operand_error_record (record, str);
5027 }
5028 \f
5029 /* Write an AARCH64 instruction to buf - always little-endian. */
5030 static void
5031 put_aarch64_insn (char *buf, uint32_t insn)
5032 {
5033 unsigned char *where = (unsigned char *) buf;
5034 where[0] = insn;
5035 where[1] = insn >> 8;
5036 where[2] = insn >> 16;
5037 where[3] = insn >> 24;
5038 }
5039
5040 static uint32_t
5041 get_aarch64_insn (char *buf)
5042 {
5043 unsigned char *where = (unsigned char *) buf;
5044 uint32_t result;
5045 result = ((where[0] | (where[1] << 8) | (where[2] << 16)
5046 | ((uint32_t) where[3] << 24)));
5047 return result;
5048 }
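/* For example, put_aarch64_insn (buf, 0xd503201f) (the NOP encoding)
   stores the bytes 0x1f, 0x20, 0x03, 0xd5 in that order regardless of
   the host endianness; get_aarch64_insn reverses the operation.  */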
5049
5050 static void
5051 output_inst (struct aarch64_inst *new_inst)
5052 {
5053 char *to = NULL;
5054
5055 to = frag_more (INSN_SIZE);
5056
5057 frag_now->tc_frag_data.recorded = 1;
5058
5059 put_aarch64_insn (to, inst.base.value);
5060
5061 if (inst.reloc.type != BFD_RELOC_UNUSED)
5062 {
5063 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
5064 INSN_SIZE, &inst.reloc.exp,
5065 inst.reloc.pc_rel,
5066 inst.reloc.type);
5067 DEBUG_TRACE ("Prepared relocation fix up");
5068 /* Don't check the addend value against the instruction size,
5069 that's the job of our code in md_apply_fix(). */
5070 fixp->fx_no_overflow = 1;
5071 if (new_inst != NULL)
5072 fixp->tc_fix_data.inst = new_inst;
5073 if (aarch64_gas_internal_fixup_p ())
5074 {
5075 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
5076 fixp->tc_fix_data.opnd = inst.reloc.opnd;
5077 fixp->fx_addnumber = inst.reloc.flags;
5078 }
5079 }
5080
5081 dwarf2_emit_insn (INSN_SIZE);
5082 }
5083
5084 /* Link together opcodes of the same name. */
5085
5086 struct templates
5087 {
5088 aarch64_opcode *opcode;
5089 struct templates *next;
5090 };
5091
5092 typedef struct templates templates;
5093
5094 static templates *
5095 lookup_mnemonic (const char *start, int len)
5096 {
5097 templates *templ = NULL;
5098
5099 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5100 return templ;
5101 }
5102
5103 /* Subroutine of md_assemble, responsible for looking up the primary
5104 opcode from the mnemonic the user wrote. STR points to the
5105 beginning of the mnemonic. */
5106
5107 static templates *
5108 opcode_lookup (char **str)
5109 {
5110 char *end, *base, *dot;
5111 const aarch64_cond *cond;
5112 char condname[16];
5113 int len;
5114
5115 /* Scan up to the end of the mnemonic, which must end in white space,
5116 '.', or end of string. */
5117 dot = 0;
5118 for (base = end = *str; is_part_of_name(*end); end++)
5119 if (*end == '.' && !dot)
5120 dot = end;
5121
5122 if (end == base || dot == base)
5123 return 0;
5124
5125 inst.cond = COND_ALWAYS;
5126
5127 /* Handle a possible condition. */
5128 if (dot)
5129 {
5130 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5131 if (cond)
5132 {
5133 inst.cond = cond->value;
5134 *str = end;
5135 }
5136 else
5137 {
5138 *str = dot;
5139 return 0;
5140 }
5141 len = dot - base;
5142 }
5143 else
5144 {
5145 *str = end;
5146 len = end - base;
5147 }
5148
5149 if (inst.cond == COND_ALWAYS)
5150 {
5151 /* Look for unaffixed mnemonic. */
5152 return lookup_mnemonic (base, len);
5153 }
5154 else if (len <= 13)
5155 {
5156 /* Append ".c" to the mnemonic if conditional. */
5157 memcpy (condname, base, len);
5158 memcpy (condname + len, ".c", 2);
5159 base = condname;
5160 len += 2;
5161 return lookup_mnemonic (base, len);
5162 }
5163
5164 return NULL;
5165 }
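/* For example, for "b.eq" the dot splits the mnemonic: "eq" is looked up
   in aarch64_cond_hsh and recorded in inst.cond, and the opcode is then
   looked up under the name "b.c" (the conditional templates being hashed
   with a ".c" suffix).  A plain "ret" is looked up unaffixed.  */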
5166
5167 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5168 to a corresponding operand qualifier. */
5169
5170 static inline aarch64_opnd_qualifier_t
5171 vectype_to_qualifier (const struct vector_type_el *vectype)
5172 {
5173 /* Element size in bytes indexed by vector_el_type. */
5174 const unsigned char ele_size[5]
5175 = {1, 2, 4, 8, 16};
5176 const unsigned int ele_base [5] =
5177 {
5178 AARCH64_OPND_QLF_V_4B,
5179 AARCH64_OPND_QLF_V_2H,
5180 AARCH64_OPND_QLF_V_2S,
5181 AARCH64_OPND_QLF_V_1D,
5182 AARCH64_OPND_QLF_V_1Q
5183 };
5184
5185 if (!vectype->defined || vectype->type == NT_invtype)
5186 goto vectype_conversion_fail;
5187
5188 if (vectype->type == NT_zero)
5189 return AARCH64_OPND_QLF_P_Z;
5190 if (vectype->type == NT_merge)
5191 return AARCH64_OPND_QLF_P_M;
5192
5193 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
5194
5195 if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
5196 {
5197 /* Special case S_4B. */
5198 if (vectype->type == NT_b && vectype->width == 4)
5199 return AARCH64_OPND_QLF_S_4B;
5200
5201 /* Special case S_2H. */
5202 if (vectype->type == NT_h && vectype->width == 2)
5203 return AARCH64_OPND_QLF_S_2H;
5204
5205 /* Vector element register. */
5206 return AARCH64_OPND_QLF_S_B + vectype->type;
5207 }
5208 else
5209 {
5210 /* Vector register. */
5211 int reg_size = ele_size[vectype->type] * vectype->width;
5212 unsigned offset;
5213 unsigned shift;
5214 if (reg_size != 16 && reg_size != 8 && reg_size != 4)
5215 goto vectype_conversion_fail;
5216
5217 /* The conversion is done by calculating the offset from the base operand
5218 qualifier for the vector type. The operand qualifiers are regular
5219 enough that the offset can be established by shifting the vector width by
5220 a vector-type dependent amount. */
5221 shift = 0;
5222 if (vectype->type == NT_b)
5223 shift = 3;
5224 else if (vectype->type == NT_h || vectype->type == NT_s)
5225 shift = 2;
5226 else if (vectype->type >= NT_d)
5227 shift = 1;
5228 else
5229 gas_assert (0);
5230
5231 offset = ele_base [vectype->type] + (vectype->width >> shift);
5232 gas_assert (AARCH64_OPND_QLF_V_4B <= offset
5233 && offset <= AARCH64_OPND_QLF_V_1Q);
5234 return offset;
5235 }
5236
5237 vectype_conversion_fail:
5238 first_error (_("bad vector arrangement type"));
5239 return AARCH64_OPND_QLF_NIL;
5240 }
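/* A worked example of the vector-register branch above (a sketch,
   assuming the qualifier enum is laid out in the regular order that the
   ele_base table relies on): for "v0.4s" we have type == NT_s and
   width == 4, so reg_size == 16, shift == 2 and the result is
   ele_base[NT_s] + (4 >> 2) == AARCH64_OPND_QLF_V_2S + 1, i.e. the
   qualifier for a .4S arrangement.  */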
5241
5242 /* Process an optional operand that has been omitted from the assembly line.
5243 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
5244 instruction's opcode entry while IDX is the index of this omitted operand.
5245 */
5246
5247 static void
5248 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
5249 int idx, aarch64_opnd_info *operand)
5250 {
5251 aarch64_insn default_value = get_optional_operand_default_value (opcode);
5252 gas_assert (optional_operand_p (opcode, idx));
5253 gas_assert (!operand->present);
5254
5255 switch (type)
5256 {
5257 case AARCH64_OPND_Rd:
5258 case AARCH64_OPND_Rn:
5259 case AARCH64_OPND_Rm:
5260 case AARCH64_OPND_Rt:
5261 case AARCH64_OPND_Rt2:
5262 case AARCH64_OPND_Rt_SP:
5263 case AARCH64_OPND_Rs:
5264 case AARCH64_OPND_Ra:
5265 case AARCH64_OPND_Rt_SYS:
5266 case AARCH64_OPND_Rd_SP:
5267 case AARCH64_OPND_Rn_SP:
5268 case AARCH64_OPND_Rm_SP:
5269 case AARCH64_OPND_Fd:
5270 case AARCH64_OPND_Fn:
5271 case AARCH64_OPND_Fm:
5272 case AARCH64_OPND_Fa:
5273 case AARCH64_OPND_Ft:
5274 case AARCH64_OPND_Ft2:
5275 case AARCH64_OPND_Sd:
5276 case AARCH64_OPND_Sn:
5277 case AARCH64_OPND_Sm:
5278 case AARCH64_OPND_Va:
5279 case AARCH64_OPND_Vd:
5280 case AARCH64_OPND_Vn:
5281 case AARCH64_OPND_Vm:
5282 case AARCH64_OPND_VdD1:
5283 case AARCH64_OPND_VnD1:
5284 operand->reg.regno = default_value;
5285 break;
5286
5287 case AARCH64_OPND_Ed:
5288 case AARCH64_OPND_En:
5289 case AARCH64_OPND_Em:
5290 case AARCH64_OPND_Em16:
5291 case AARCH64_OPND_SM3_IMM2:
5292 operand->reglane.regno = default_value;
5293 break;
5294
5295 case AARCH64_OPND_IDX:
5296 case AARCH64_OPND_BIT_NUM:
5297 case AARCH64_OPND_IMMR:
5298 case AARCH64_OPND_IMMS:
5299 case AARCH64_OPND_SHLL_IMM:
5300 case AARCH64_OPND_IMM_VLSL:
5301 case AARCH64_OPND_IMM_VLSR:
5302 case AARCH64_OPND_CCMP_IMM:
5303 case AARCH64_OPND_FBITS:
5304 case AARCH64_OPND_UIMM4:
5305 case AARCH64_OPND_UIMM3_OP1:
5306 case AARCH64_OPND_UIMM3_OP2:
5307 case AARCH64_OPND_IMM:
5308 case AARCH64_OPND_IMM_2:
5309 case AARCH64_OPND_WIDTH:
5310 case AARCH64_OPND_UIMM7:
5311 case AARCH64_OPND_NZCV:
5312 case AARCH64_OPND_SVE_PATTERN:
5313 case AARCH64_OPND_SVE_PRFOP:
5314 operand->imm.value = default_value;
5315 break;
5316
5317 case AARCH64_OPND_SVE_PATTERN_SCALED:
5318 operand->imm.value = default_value;
5319 operand->shifter.kind = AARCH64_MOD_MUL;
5320 operand->shifter.amount = 1;
5321 break;
5322
5323 case AARCH64_OPND_EXCEPTION:
5324 inst.reloc.type = BFD_RELOC_UNUSED;
5325 break;
5326
5327 case AARCH64_OPND_BARRIER_ISB:
5328 operand->barrier = aarch64_barrier_options + default_value;
5329 break;
5330
5331 case AARCH64_OPND_BTI_TARGET:
5332 operand->hint_option = aarch64_hint_options + default_value;
5333 break;
5334
5335 default:
5336 break;
5337 }
5338 }
5339
5340 /* Process the relocation type for move wide instructions.
5341 Return TRUE on success; otherwise return FALSE. */
5342
5343 static bfd_boolean
5344 process_movw_reloc_info (void)
5345 {
5346 int is32;
5347 unsigned shift;
5348
5349 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
5350
5351 if (inst.base.opcode->op == OP_MOVK)
5352 switch (inst.reloc.type)
5353 {
5354 case BFD_RELOC_AARCH64_MOVW_G0_S:
5355 case BFD_RELOC_AARCH64_MOVW_G1_S:
5356 case BFD_RELOC_AARCH64_MOVW_G2_S:
5357 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
5358 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
5359 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
5360 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
5361 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5362 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5363 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5364 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5365 set_syntax_error
5366 (_("the specified relocation type is not allowed for MOVK"));
5367 return FALSE;
5368 default:
5369 break;
5370 }
5371
5372 switch (inst.reloc.type)
5373 {
5374 case BFD_RELOC_AARCH64_MOVW_G0:
5375 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5376 case BFD_RELOC_AARCH64_MOVW_G0_S:
5377 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5378 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
5379 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
5380 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5381 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5382 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5383 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
5384 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5385 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5386 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5387 shift = 0;
5388 break;
5389 case BFD_RELOC_AARCH64_MOVW_G1:
5390 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5391 case BFD_RELOC_AARCH64_MOVW_G1_S:
5392 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5393 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
5394 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
5395 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5396 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5397 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5398 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
5399 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
5400 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5401 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5402 shift = 16;
5403 break;
5404 case BFD_RELOC_AARCH64_MOVW_G2:
5405 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5406 case BFD_RELOC_AARCH64_MOVW_G2_S:
5407 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
5408 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
5409 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
5410 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5411 if (is32)
5412 {
5413 set_fatal_syntax_error
5414 (_("the specified relocation type is not allowed for 32-bit "
5415 "register"));
5416 return FALSE;
5417 }
5418 shift = 32;
5419 break;
5420 case BFD_RELOC_AARCH64_MOVW_G3:
5421 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
5422 if (is32)
5423 {
5424 set_fatal_syntax_error
5425 (_("the specified relocation type is not allowed for 32-bit "
5426 "register"));
5427 return FALSE;
5428 }
5429 shift = 48;
5430 break;
5431 default:
5432 /* More cases should be added when more MOVW-related relocation types
5433 are supported in GAS. */
5434 gas_assert (aarch64_gas_internal_fixup_p ());
5435 /* The shift amount should have already been set by the parser. */
5436 return TRUE;
5437 }
5438 inst.base.operands[1].shifter.amount = shift;
5439 return TRUE;
5440 }
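/* For example, for "movz x0, #:abs_g1:sym" the parser records
   BFD_RELOC_AARCH64_MOVW_G1, so the switch above sets the shifter amount
   of operand 1 to 16; the same line written with a W register and a
   G2/G3 relocation would be rejected with the 32-bit diagnostic.  */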
5441
5442 /* A primitive log calculator. */
5443
5444 static inline unsigned int
5445 get_logsz (unsigned int size)
5446 {
5447 const unsigned char ls[16] =
5448 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
5449 if (size > 16)
5450 {
5451 gas_assert (0);
5452 return -1;
5453 }
5454 gas_assert (ls[size - 1] != (unsigned char)-1);
5455 return ls[size - 1];
5456 }
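/* get_logsz maps the element sizes used here (1, 2, 4, 8 and 16 bytes)
   to 0, 1, 2, 3 and 4 respectively; any other size trips the assert.  */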
5457
5458 /* Determine and return the real reloc type code for an instruction
5459 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
5460
5461 static inline bfd_reloc_code_real_type
5462 ldst_lo12_determine_real_reloc_type (void)
5463 {
5464 unsigned logsz;
5465 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
5466 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
5467
5468 const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
5469 {
5470 BFD_RELOC_AARCH64_LDST8_LO12,
5471 BFD_RELOC_AARCH64_LDST16_LO12,
5472 BFD_RELOC_AARCH64_LDST32_LO12,
5473 BFD_RELOC_AARCH64_LDST64_LO12,
5474 BFD_RELOC_AARCH64_LDST128_LO12
5475 },
5476 {
5477 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
5478 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
5479 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
5480 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
5481 BFD_RELOC_AARCH64_NONE
5482 },
5483 {
5484 BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
5485 BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
5486 BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
5487 BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
5488 BFD_RELOC_AARCH64_NONE
5489 },
5490 {
5491 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
5492 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
5493 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
5494 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
5495 BFD_RELOC_AARCH64_NONE
5496 },
5497 {
5498 BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
5499 BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
5500 BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
5501 BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
5502 BFD_RELOC_AARCH64_NONE
5503 }
5504 };
5505
5506 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5507 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5508 || (inst.reloc.type
5509 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
5510 || (inst.reloc.type
5511 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
5512 || (inst.reloc.type
5513 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
5514 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
5515
5516 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
5517 opd1_qlf =
5518 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
5519 1, opd0_qlf, 0);
5520 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
5521
5522 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
5523 if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5524 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
5525 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
5526 || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
5527 gas_assert (logsz <= 3);
5528 else
5529 gas_assert (logsz <= 4);
5530
5531 /* In reloc.c, these pseudo relocation types should be defined in the same
5532 order as the above reloc_ldst_lo12 array, because the array index
5533 calculation below relies on this. */
5534 return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
5535 }
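/* For example, for "ldr x0, [x1, #:lo12:sym]" the expected qualifier of
   the address operand implies an 8-byte access, so logsz is 3 and the
   generic BFD_RELOC_AARCH64_LDST_LO12 is narrowed to
   BFD_RELOC_AARCH64_LDST64_LO12.  */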
5536
5537 /* Check whether a register list REGINFO is valid. The registers must be
5538 numbered in increasing order (modulo 32), in increments of one or two.
5539
5540 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5541 increments of two.
5542
5543 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5544
5545 static bfd_boolean
5546 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5547 {
5548 uint32_t i, nb_regs, prev_regno, incr;
5549
5550 nb_regs = 1 + (reginfo & 0x3);
5551 reginfo >>= 2;
5552 prev_regno = reginfo & 0x1f;
5553 incr = accept_alternate ? 2 : 1;
5554
5555 for (i = 1; i < nb_regs; ++i)
5556 {
5557 uint32_t curr_regno;
5558 reginfo >>= 5;
5559 curr_regno = reginfo & 0x1f;
5560 if (curr_regno != ((prev_regno + incr) & 0x1f))
5561 return FALSE;
5562 prev_regno = curr_regno;
5563 }
5564
5565 return TRUE;
5566 }
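/* REGINFO packs the list as produced by parse_vector_reg_list: the low
   two bits hold (number of registers - 1) and each successive 5-bit
   field holds a register number.  So a list such as { v2, v3, v4 } is
   accepted, while { v2, v4, v6 } is only accepted when ACCEPT_ALTERNATE
   is non-zero.  */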
5567
5568 /* Generic instruction operand parser. This does no encoding and no
5569 semantic validation; it merely squirrels values away in the inst
5570 structure. Returns TRUE or FALSE depending on whether the
5571 specified grammar matched. */
5572
5573 static bfd_boolean
5574 parse_operands (char *str, const aarch64_opcode *opcode)
5575 {
5576 int i;
5577 char *backtrack_pos = 0;
5578 const enum aarch64_opnd *operands = opcode->operands;
5579 aarch64_reg_type imm_reg_type;
5580
5581 clear_error ();
5582 skip_whitespace (str);
5583
5584 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5585 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5586 else
5587 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5588
5589 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5590 {
5591 int64_t val;
5592 const reg_entry *reg;
5593 int comma_skipped_p = 0;
5594 aarch64_reg_type rtype;
5595 struct vector_type_el vectype;
5596 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5597 aarch64_opnd_info *info = &inst.base.operands[i];
5598 aarch64_reg_type reg_type;
5599
5600 DEBUG_TRACE ("parse operand %d", i);
5601
5602 /* Assign the operand code. */
5603 info->type = operands[i];
5604
5605 if (optional_operand_p (opcode, i))
5606 {
5607 /* Remember where we are in case we need to backtrack. */
5608 gas_assert (!backtrack_pos);
5609 backtrack_pos = str;
5610 }
5611
5612 /* Expect comma between operands; the backtrack mechanism will take
5613 care of cases of omitted optional operand. */
5614 if (i > 0 && ! skip_past_char (&str, ','))
5615 {
5616 set_syntax_error (_("comma expected between operands"));
5617 goto failure;
5618 }
5619 else
5620 comma_skipped_p = 1;
5621
5622 switch (operands[i])
5623 {
5624 case AARCH64_OPND_Rd:
5625 case AARCH64_OPND_Rn:
5626 case AARCH64_OPND_Rm:
5627 case AARCH64_OPND_Rt:
5628 case AARCH64_OPND_Rt2:
5629 case AARCH64_OPND_Rs:
5630 case AARCH64_OPND_Ra:
5631 case AARCH64_OPND_Rt_SYS:
5632 case AARCH64_OPND_PAIRREG:
5633 case AARCH64_OPND_SVE_Rm:
5634 po_int_reg_or_fail (REG_TYPE_R_Z);
5635 break;
5636
5637 case AARCH64_OPND_Rd_SP:
5638 case AARCH64_OPND_Rn_SP:
5639 case AARCH64_OPND_Rt_SP:
5640 case AARCH64_OPND_SVE_Rn_SP:
5641 case AARCH64_OPND_Rm_SP:
5642 po_int_reg_or_fail (REG_TYPE_R_SP);
5643 break;
5644
5645 case AARCH64_OPND_Rm_EXT:
5646 case AARCH64_OPND_Rm_SFT:
5647 po_misc_or_fail (parse_shifter_operand
5648 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5649 ? SHIFTED_ARITH_IMM
5650 : SHIFTED_LOGIC_IMM)));
5651 if (!info->shifter.operator_present)
5652 {
5653 /* Default to LSL if not present. Libopcodes prefers shifter
5654 kind to be explicit. */
5655 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5656 info->shifter.kind = AARCH64_MOD_LSL;
5657 /* For Rm_EXT, libopcodes will carry out a further check on whether
5658 or not the stack pointer is used in the instruction (recall that
5659 "the extend operator is not optional unless at least one of
5660 'Rd' or 'Rn' is '11111' (i.e. WSP)"). */
5661 }
5662 break;
5663
5664 case AARCH64_OPND_Fd:
5665 case AARCH64_OPND_Fn:
5666 case AARCH64_OPND_Fm:
5667 case AARCH64_OPND_Fa:
5668 case AARCH64_OPND_Ft:
5669 case AARCH64_OPND_Ft2:
5670 case AARCH64_OPND_Sd:
5671 case AARCH64_OPND_Sn:
5672 case AARCH64_OPND_Sm:
5673 case AARCH64_OPND_SVE_VZn:
5674 case AARCH64_OPND_SVE_Vd:
5675 case AARCH64_OPND_SVE_Vm:
5676 case AARCH64_OPND_SVE_Vn:
5677 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5678 if (val == PARSE_FAIL)
5679 {
5680 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5681 goto failure;
5682 }
5683 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5684
5685 info->reg.regno = val;
5686 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5687 break;
5688
5689 case AARCH64_OPND_SVE_Pd:
5690 case AARCH64_OPND_SVE_Pg3:
5691 case AARCH64_OPND_SVE_Pg4_5:
5692 case AARCH64_OPND_SVE_Pg4_10:
5693 case AARCH64_OPND_SVE_Pg4_16:
5694 case AARCH64_OPND_SVE_Pm:
5695 case AARCH64_OPND_SVE_Pn:
5696 case AARCH64_OPND_SVE_Pt:
5697 reg_type = REG_TYPE_PN;
5698 goto vector_reg;
5699
5700 case AARCH64_OPND_SVE_Za_5:
5701 case AARCH64_OPND_SVE_Za_16:
5702 case AARCH64_OPND_SVE_Zd:
5703 case AARCH64_OPND_SVE_Zm_5:
5704 case AARCH64_OPND_SVE_Zm_16:
5705 case AARCH64_OPND_SVE_Zn:
5706 case AARCH64_OPND_SVE_Zt:
5707 reg_type = REG_TYPE_ZN;
5708 goto vector_reg;
5709
5710 case AARCH64_OPND_Va:
5711 case AARCH64_OPND_Vd:
5712 case AARCH64_OPND_Vn:
5713 case AARCH64_OPND_Vm:
5714 reg_type = REG_TYPE_VN;
5715 vector_reg:
5716 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5717 if (val == PARSE_FAIL)
5718 {
5719 first_error (_(get_reg_expected_msg (reg_type)));
5720 goto failure;
5721 }
5722 if (vectype.defined & NTA_HASINDEX)
5723 goto failure;
5724
5725 info->reg.regno = val;
5726 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5727 && vectype.type == NT_invtype)
5728 /* Unqualified Pn and Zn registers are allowed in certain
5729 contexts. Rely on F_STRICT qualifier checking to catch
5730 invalid uses. */
5731 info->qualifier = AARCH64_OPND_QLF_NIL;
5732 else
5733 {
5734 info->qualifier = vectype_to_qualifier (&vectype);
5735 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5736 goto failure;
5737 }
5738 break;
5739
5740 case AARCH64_OPND_VdD1:
5741 case AARCH64_OPND_VnD1:
5742 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5743 if (val == PARSE_FAIL)
5744 {
5745 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5746 goto failure;
5747 }
5748 if (vectype.type != NT_d || vectype.index != 1)
5749 {
5750 set_fatal_syntax_error
5751 (_("the top half of a 128-bit FP/SIMD register is expected"));
5752 goto failure;
5753 }
5754 info->reg.regno = val;
5755 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
5756 here; it is correct for the purpose of encoding/decoding since
5757 only the register number is explicitly encoded in the related
5758 instructions, although this appears a bit hacky. */
5759 info->qualifier = AARCH64_OPND_QLF_S_D;
5760 break;
5761
5762 case AARCH64_OPND_SVE_Zm3_INDEX:
5763 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5764 case AARCH64_OPND_SVE_Zm3_11_INDEX:
5765 case AARCH64_OPND_SVE_Zm4_11_INDEX:
5766 case AARCH64_OPND_SVE_Zm4_INDEX:
5767 case AARCH64_OPND_SVE_Zn_INDEX:
5768 reg_type = REG_TYPE_ZN;
5769 goto vector_reg_index;
5770
5771 case AARCH64_OPND_Ed:
5772 case AARCH64_OPND_En:
5773 case AARCH64_OPND_Em:
5774 case AARCH64_OPND_Em16:
5775 case AARCH64_OPND_SM3_IMM2:
5776 reg_type = REG_TYPE_VN;
5777 vector_reg_index:
5778 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5779 if (val == PARSE_FAIL)
5780 {
5781 first_error (_(get_reg_expected_msg (reg_type)));
5782 goto failure;
5783 }
5784 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5785 goto failure;
5786
5787 info->reglane.regno = val;
5788 info->reglane.index = vectype.index;
5789 info->qualifier = vectype_to_qualifier (&vectype);
5790 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5791 goto failure;
5792 break;
5793
5794 case AARCH64_OPND_SVE_ZnxN:
5795 case AARCH64_OPND_SVE_ZtxN:
5796 reg_type = REG_TYPE_ZN;
5797 goto vector_reg_list;
5798
5799 case AARCH64_OPND_LVn:
5800 case AARCH64_OPND_LVt:
5801 case AARCH64_OPND_LVt_AL:
5802 case AARCH64_OPND_LEt:
5803 reg_type = REG_TYPE_VN;
5804 vector_reg_list:
5805 if (reg_type == REG_TYPE_ZN
5806 && get_opcode_dependent_value (opcode) == 1
5807 && *str != '{')
5808 {
5809 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5810 if (val == PARSE_FAIL)
5811 {
5812 first_error (_(get_reg_expected_msg (reg_type)));
5813 goto failure;
5814 }
5815 info->reglist.first_regno = val;
5816 info->reglist.num_regs = 1;
5817 }
5818 else
5819 {
5820 val = parse_vector_reg_list (&str, reg_type, &vectype);
5821 if (val == PARSE_FAIL)
5822 goto failure;
5823
5824 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5825 {
5826 set_fatal_syntax_error (_("invalid register list"));
5827 goto failure;
5828 }
5829
5830 if (vectype.width != 0 && *str != ',')
5831 {
5832 set_fatal_syntax_error
5833 (_("expected element type rather than vector type"));
5834 goto failure;
5835 }
5836
5837 info->reglist.first_regno = (val >> 2) & 0x1f;
5838 info->reglist.num_regs = (val & 0x3) + 1;
5839 }
5840 if (operands[i] == AARCH64_OPND_LEt)
5841 {
5842 if (!(vectype.defined & NTA_HASINDEX))
5843 goto failure;
5844 info->reglist.has_index = 1;
5845 info->reglist.index = vectype.index;
5846 }
5847 else
5848 {
5849 if (vectype.defined & NTA_HASINDEX)
5850 goto failure;
5851 if (!(vectype.defined & NTA_HASTYPE))
5852 {
5853 if (reg_type == REG_TYPE_ZN)
5854 set_fatal_syntax_error (_("missing type suffix"));
5855 goto failure;
5856 }
5857 }
5858 info->qualifier = vectype_to_qualifier (&vectype);
5859 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5860 goto failure;
5861 break;
5862
5863 case AARCH64_OPND_CRn:
5864 case AARCH64_OPND_CRm:
5865 {
5866 char prefix = *(str++);
5867 if (prefix != 'c' && prefix != 'C')
5868 goto failure;
5869
5870 po_imm_nc_or_fail ();
5871 if (val > 15)
5872 {
5873 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5874 goto failure;
5875 }
5876 info->qualifier = AARCH64_OPND_QLF_CR;
5877 info->imm.value = val;
5878 break;
5879 }
5880
5881 case AARCH64_OPND_SHLL_IMM:
5882 case AARCH64_OPND_IMM_VLSR:
5883 po_imm_or_fail (1, 64);
5884 info->imm.value = val;
5885 break;
5886
5887 case AARCH64_OPND_CCMP_IMM:
5888 case AARCH64_OPND_SIMM5:
5889 case AARCH64_OPND_FBITS:
5890 case AARCH64_OPND_TME_UIMM16:
5891 case AARCH64_OPND_UIMM4:
5892 case AARCH64_OPND_UIMM4_ADDG:
5893 case AARCH64_OPND_UIMM10:
5894 case AARCH64_OPND_UIMM3_OP1:
5895 case AARCH64_OPND_UIMM3_OP2:
5896 case AARCH64_OPND_IMM_VLSL:
5897 case AARCH64_OPND_IMM:
5898 case AARCH64_OPND_IMM_2:
5899 case AARCH64_OPND_WIDTH:
5900 case AARCH64_OPND_SVE_INV_LIMM:
5901 case AARCH64_OPND_SVE_LIMM:
5902 case AARCH64_OPND_SVE_LIMM_MOV:
5903 case AARCH64_OPND_SVE_SHLIMM_PRED:
5904 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5905 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
5906 case AARCH64_OPND_SVE_SHRIMM_PRED:
5907 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5908 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
5909 case AARCH64_OPND_SVE_SIMM5:
5910 case AARCH64_OPND_SVE_SIMM5B:
5911 case AARCH64_OPND_SVE_SIMM6:
5912 case AARCH64_OPND_SVE_SIMM8:
5913 case AARCH64_OPND_SVE_UIMM3:
5914 case AARCH64_OPND_SVE_UIMM7:
5915 case AARCH64_OPND_SVE_UIMM8:
5916 case AARCH64_OPND_SVE_UIMM8_53:
5917 case AARCH64_OPND_IMM_ROT1:
5918 case AARCH64_OPND_IMM_ROT2:
5919 case AARCH64_OPND_IMM_ROT3:
5920 case AARCH64_OPND_SVE_IMM_ROT1:
5921 case AARCH64_OPND_SVE_IMM_ROT2:
5922 case AARCH64_OPND_SVE_IMM_ROT3:
5923 po_imm_nc_or_fail ();
5924 info->imm.value = val;
5925 break;
5926
5927 case AARCH64_OPND_SVE_AIMM:
5928 case AARCH64_OPND_SVE_ASIMM:
5929 po_imm_nc_or_fail ();
5930 info->imm.value = val;
5931 skip_whitespace (str);
5932 if (skip_past_comma (&str))
5933 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5934 else
5935 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5936 break;
5937
5938 case AARCH64_OPND_SVE_PATTERN:
5939 po_enum_or_fail (aarch64_sve_pattern_array);
5940 info->imm.value = val;
5941 break;
5942
5943 case AARCH64_OPND_SVE_PATTERN_SCALED:
5944 po_enum_or_fail (aarch64_sve_pattern_array);
5945 info->imm.value = val;
5946 if (skip_past_comma (&str)
5947 && !parse_shift (&str, info, SHIFTED_MUL))
5948 goto failure;
5949 if (!info->shifter.operator_present)
5950 {
5951 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5952 info->shifter.kind = AARCH64_MOD_MUL;
5953 info->shifter.amount = 1;
5954 }
5955 break;
5956
5957 case AARCH64_OPND_SVE_PRFOP:
5958 po_enum_or_fail (aarch64_sve_prfop_array);
5959 info->imm.value = val;
5960 break;
5961
5962 case AARCH64_OPND_UIMM7:
5963 po_imm_or_fail (0, 127);
5964 info->imm.value = val;
5965 break;
5966
5967 case AARCH64_OPND_IDX:
5968 case AARCH64_OPND_MASK:
5969 case AARCH64_OPND_BIT_NUM:
5970 case AARCH64_OPND_IMMR:
5971 case AARCH64_OPND_IMMS:
5972 po_imm_or_fail (0, 63);
5973 info->imm.value = val;
5974 break;
5975
5976 case AARCH64_OPND_IMM0:
5977 po_imm_nc_or_fail ();
5978 if (val != 0)
5979 {
5980 set_fatal_syntax_error (_("immediate zero expected"));
5981 goto failure;
5982 }
5983 info->imm.value = 0;
5984 break;
5985
5986 case AARCH64_OPND_FPIMM0:
5987 {
5988 int qfloat;
5989 bfd_boolean res1 = FALSE, res2 = FALSE;
5990 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5991 it is probably not worth the effort to support it. */
5992 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5993 imm_reg_type))
5994 && (error_p ()
5995 || !(res2 = parse_constant_immediate (&str, &val,
5996 imm_reg_type))))
5997 goto failure;
5998 if ((res1 && qfloat == 0) || (res2 && val == 0))
5999 {
6000 info->imm.value = 0;
6001 info->imm.is_fp = 1;
6002 break;
6003 }
6004 set_fatal_syntax_error (_("immediate zero expected"));
6005 goto failure;
6006 }
6007
6008 case AARCH64_OPND_IMM_MOV:
6009 {
6010 char *saved = str;
6011 if (reg_name_p (str, REG_TYPE_R_Z_SP)
6012 || reg_name_p (str, REG_TYPE_VN))
6013 goto failure;
6014 str = saved;
6015 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6016 GE_OPT_PREFIX, 1));
6017 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6018 later. fix_mov_imm_insn will try to determine a machine
6019 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6020 message if the immediate cannot be moved by a single
6021 instruction. */
6022 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6023 inst.base.operands[i].skip = 1;
6024 }
6025 break;
6026
6027 case AARCH64_OPND_SIMD_IMM:
6028 case AARCH64_OPND_SIMD_IMM_SFT:
6029 if (! parse_big_immediate (&str, &val, imm_reg_type))
6030 goto failure;
6031 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6032 /* addr_off_p */ 0,
6033 /* need_libopcodes_p */ 1,
6034 /* skip_p */ 1);
6035 /* Parse shift.
6036 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6037 shift, we don't check it here; we leave the checking to
6038 the libopcodes (operand_general_constraint_met_p). By
6039 doing this, we achieve better diagnostics. */
6040 if (skip_past_comma (&str)
6041 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6042 goto failure;
6043 if (!info->shifter.operator_present
6044 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6045 {
6046 /* Default to LSL if not present. Libopcodes prefers shifter
6047 kind to be explicit. */
6048 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6049 info->shifter.kind = AARCH64_MOD_LSL;
6050 }
6051 break;
6052
6053 case AARCH64_OPND_FPIMM:
6054 case AARCH64_OPND_SIMD_FPIMM:
6055 case AARCH64_OPND_SVE_FPIMM8:
6056 {
6057 int qfloat;
6058 bfd_boolean dp_p;
6059
6060 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6061 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6062 || !aarch64_imm_float_p (qfloat))
6063 {
6064 if (!error_p ())
6065 set_fatal_syntax_error (_("invalid floating-point"
6066 " constant"));
6067 goto failure;
6068 }
6069 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6070 inst.base.operands[i].imm.is_fp = 1;
6071 }
6072 break;
6073
6074 case AARCH64_OPND_SVE_I1_HALF_ONE:
6075 case AARCH64_OPND_SVE_I1_HALF_TWO:
6076 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6077 {
6078 int qfloat;
6079 bfd_boolean dp_p;
6080
6081 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6082 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6083 {
6084 if (!error_p ())
6085 set_fatal_syntax_error (_("invalid floating-point"
6086 " constant"));
6087 goto failure;
6088 }
6089 inst.base.operands[i].imm.value = qfloat;
6090 inst.base.operands[i].imm.is_fp = 1;
6091 }
6092 break;
6093
6094 case AARCH64_OPND_LIMM:
6095 po_misc_or_fail (parse_shifter_operand (&str, info,
6096 SHIFTED_LOGIC_IMM));
6097 if (info->shifter.operator_present)
6098 {
6099 set_fatal_syntax_error
6100 (_("shift not allowed for bitmask immediate"));
6101 goto failure;
6102 }
6103 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6104 /* addr_off_p */ 0,
6105 /* need_libopcodes_p */ 1,
6106 /* skip_p */ 1);
6107 break;
6108
6109 case AARCH64_OPND_AIMM:
6110 if (opcode->op == OP_ADD)
6111 /* ADD may have relocation types. */
6112 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6113 SHIFTED_ARITH_IMM));
6114 else
6115 po_misc_or_fail (parse_shifter_operand (&str, info,
6116 SHIFTED_ARITH_IMM));
6117 switch (inst.reloc.type)
6118 {
6119 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6120 info->shifter.amount = 12;
6121 break;
6122 case BFD_RELOC_UNUSED:
6123 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6124 if (info->shifter.kind != AARCH64_MOD_NONE)
6125 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6126 inst.reloc.pc_rel = 0;
6127 break;
6128 default:
6129 break;
6130 }
6131 info->imm.value = 0;
6132 if (!info->shifter.operator_present)
6133 {
6134 /* Default to LSL if not present. Libopcodes prefers shifter
6135 kind to be explicit. */
6136 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6137 info->shifter.kind = AARCH64_MOD_LSL;
6138 }
6139 break;
6140
6141 case AARCH64_OPND_HALF:
6142 {
6143 /* #<imm16> or relocation. */
6144 int internal_fixup_p;
6145 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6146 if (internal_fixup_p)
6147 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6148 skip_whitespace (str);
6149 if (skip_past_comma (&str))
6150 {
6151 /* {, LSL #<shift>} */
6152 if (! aarch64_gas_internal_fixup_p ())
6153 {
6154 set_fatal_syntax_error (_("can't mix relocation modifier "
6155 "with explicit shift"));
6156 goto failure;
6157 }
6158 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6159 }
6160 else
6161 inst.base.operands[i].shifter.amount = 0;
6162 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6163 inst.base.operands[i].imm.value = 0;
6164 if (! process_movw_reloc_info ())
6165 goto failure;
6166 }
6167 break;
6168
6169 case AARCH64_OPND_EXCEPTION:
6170 case AARCH64_OPND_UNDEFINED:
6171 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6172 imm_reg_type));
6173 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6174 /* addr_off_p */ 0,
6175 /* need_libopcodes_p */ 0,
6176 /* skip_p */ 1);
6177 break;
6178
6179 case AARCH64_OPND_NZCV:
6180 {
6181 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6182 if (nzcv != NULL)
6183 {
6184 str += 4;
6185 info->imm.value = nzcv->value;
6186 break;
6187 }
6188 po_imm_or_fail (0, 15);
6189 info->imm.value = val;
6190 }
6191 break;
6192
6193 case AARCH64_OPND_COND:
6194 case AARCH64_OPND_COND1:
6195 {
6196 char *start = str;
6197 do
6198 str++;
6199 while (ISALPHA (*str));
6200 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6201 if (info->cond == NULL)
6202 {
6203 set_syntax_error (_("invalid condition"));
6204 goto failure;
6205 }
6206 else if (operands[i] == AARCH64_OPND_COND1
6207 && (info->cond->value & 0xe) == 0xe)
6208 {
6209 /* Do not allow AL or NV. */
6210 set_default_error ();
6211 goto failure;
6212 }
6213 }
6214 break;
6215
6216 case AARCH64_OPND_ADDR_ADRP:
6217 po_misc_or_fail (parse_adrp (&str));
6218 /* Clear the value as the operand needs to be relocated. */
6219 info->imm.value = 0;
6220 break;
6221
6222 case AARCH64_OPND_ADDR_PCREL14:
6223 case AARCH64_OPND_ADDR_PCREL19:
6224 case AARCH64_OPND_ADDR_PCREL21:
6225 case AARCH64_OPND_ADDR_PCREL26:
6226 po_misc_or_fail (parse_address (&str, info));
6227 if (!info->addr.pcrel)
6228 {
6229 set_syntax_error (_("invalid pc-relative address"));
6230 goto failure;
6231 }
6232 if (inst.gen_lit_pool
6233 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6234 {
6235 /* Only permit "=value" in the literal load instructions.
6236 The literal will be generated by programmer_friendly_fixup. */
6237 set_syntax_error (_("invalid use of \"=immediate\""));
6238 goto failure;
6239 }
6240 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6241 {
6242 set_syntax_error (_("unrecognized relocation suffix"));
6243 goto failure;
6244 }
6245 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6246 {
6247 info->imm.value = inst.reloc.exp.X_add_number;
6248 inst.reloc.type = BFD_RELOC_UNUSED;
6249 }
6250 else
6251 {
6252 info->imm.value = 0;
6253 if (inst.reloc.type == BFD_RELOC_UNUSED)
6254 switch (opcode->iclass)
6255 {
6256 case compbranch:
6257 case condbranch:
6258 /* e.g. CBZ or B.COND */
6259 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6260 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6261 break;
6262 case testbranch:
6263 /* e.g. TBZ */
6264 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6265 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6266 break;
6267 case branch_imm:
6268 /* e.g. B or BL */
6269 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6270 inst.reloc.type =
6271 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6272 : BFD_RELOC_AARCH64_JUMP26;
6273 break;
6274 case loadlit:
6275 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6276 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6277 break;
6278 case pcreladdr:
6279 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6280 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6281 break;
6282 default:
6283 gas_assert (0);
6284 abort ();
6285 }
6286 inst.reloc.pc_rel = 1;
6287 }
6288 break;
6289
6290 case AARCH64_OPND_ADDR_SIMPLE:
6291 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6292 {
6293 /* [<Xn|SP>{, #<simm>}] */
6294 char *start = str;
6295 /* First use the normal address-parsing routines, to get
6296 the usual syntax errors. */
6297 po_misc_or_fail (parse_address (&str, info));
6298 if (info->addr.pcrel || info->addr.offset.is_reg
6299 || !info->addr.preind || info->addr.postind
6300 || info->addr.writeback)
6301 {
6302 set_syntax_error (_("invalid addressing mode"));
6303 goto failure;
6304 }
6305
6306 /* Then retry, matching the specific syntax of these addresses. */
6307 str = start;
6308 po_char_or_fail ('[');
6309 po_reg_or_fail (REG_TYPE_R64_SP);
6310 /* Accept optional ", #0". */
6311 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6312 && skip_past_char (&str, ','))
6313 {
6314 skip_past_char (&str, '#');
6315 if (! skip_past_char (&str, '0'))
6316 {
6317 set_fatal_syntax_error
6318 (_("the optional immediate offset can only be 0"));
6319 goto failure;
6320 }
6321 }
6322 po_char_or_fail (']');
6323 break;
6324 }
6325
6326 case AARCH64_OPND_ADDR_REGOFF:
6327 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6328 po_misc_or_fail (parse_address (&str, info));
6329 regoff_addr:
6330 if (info->addr.pcrel || !info->addr.offset.is_reg
6331 || !info->addr.preind || info->addr.postind
6332 || info->addr.writeback)
6333 {
6334 set_syntax_error (_("invalid addressing mode"));
6335 goto failure;
6336 }
6337 if (!info->shifter.operator_present)
6338 {
6339 /* Default to LSL if not present. Libopcodes prefers shifter
6340 kind to be explicit. */
6341 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6342 info->shifter.kind = AARCH64_MOD_LSL;
6343 }
6344 /* Qualifier to be deduced by libopcodes. */
6345 break;
6346
6347 case AARCH64_OPND_ADDR_SIMM7:
6348 po_misc_or_fail (parse_address (&str, info));
6349 if (info->addr.pcrel || info->addr.offset.is_reg
6350 || (!info->addr.preind && !info->addr.postind))
6351 {
6352 set_syntax_error (_("invalid addressing mode"));
6353 goto failure;
6354 }
6355 if (inst.reloc.type != BFD_RELOC_UNUSED)
6356 {
6357 set_syntax_error (_("relocation not allowed"));
6358 goto failure;
6359 }
6360 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6361 /* addr_off_p */ 1,
6362 /* need_libopcodes_p */ 1,
6363 /* skip_p */ 0);
6364 break;
6365
6366 case AARCH64_OPND_ADDR_SIMM9:
6367 case AARCH64_OPND_ADDR_SIMM9_2:
6368 case AARCH64_OPND_ADDR_SIMM11:
6369 case AARCH64_OPND_ADDR_SIMM13:
6370 po_misc_or_fail (parse_address (&str, info));
6371 if (info->addr.pcrel || info->addr.offset.is_reg
6372 || (!info->addr.preind && !info->addr.postind)
6373 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6374 && info->addr.writeback))
6375 {
6376 set_syntax_error (_("invalid addressing mode"));
6377 goto failure;
6378 }
6379 if (inst.reloc.type != BFD_RELOC_UNUSED)
6380 {
6381 set_syntax_error (_("relocation not allowed"));
6382 goto failure;
6383 }
6384 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6385 /* addr_off_p */ 1,
6386 /* need_libopcodes_p */ 1,
6387 /* skip_p */ 0);
6388 break;
6389
6390 case AARCH64_OPND_ADDR_SIMM10:
6391 case AARCH64_OPND_ADDR_OFFSET:
6392 po_misc_or_fail (parse_address (&str, info));
6393 if (info->addr.pcrel || info->addr.offset.is_reg
6394 || !info->addr.preind || info->addr.postind)
6395 {
6396 set_syntax_error (_("invalid addressing mode"));
6397 goto failure;
6398 }
6399 if (inst.reloc.type != BFD_RELOC_UNUSED)
6400 {
6401 set_syntax_error (_("relocation not allowed"));
6402 goto failure;
6403 }
6404 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6405 /* addr_off_p */ 1,
6406 /* need_libopcodes_p */ 1,
6407 /* skip_p */ 0);
6408 break;
6409
6410 case AARCH64_OPND_ADDR_UIMM12:
6411 po_misc_or_fail (parse_address (&str, info));
6412 if (info->addr.pcrel || info->addr.offset.is_reg
6413 || !info->addr.preind || info->addr.writeback)
6414 {
6415 set_syntax_error (_("invalid addressing mode"));
6416 goto failure;
6417 }
6418 if (inst.reloc.type == BFD_RELOC_UNUSED)
6419 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6420 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6421 || (inst.reloc.type
6422 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6423 || (inst.reloc.type
6424 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6425 || (inst.reloc.type
6426 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6427 || (inst.reloc.type
6428 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6429 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6430 /* Leave qualifier to be determined by libopcodes. */
6431 break;
6432
6433 case AARCH64_OPND_SIMD_ADDR_POST:
6434 /* [<Xn|SP>], <Xm|#<amount>> */
6435 po_misc_or_fail (parse_address (&str, info));
6436 if (!info->addr.postind || !info->addr.writeback)
6437 {
6438 set_syntax_error (_("invalid addressing mode"));
6439 goto failure;
6440 }
6441 if (!info->addr.offset.is_reg)
6442 {
6443 if (inst.reloc.exp.X_op == O_constant)
6444 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6445 else
6446 {
6447 set_fatal_syntax_error
6448 (_("writeback value must be an immediate constant"));
6449 goto failure;
6450 }
6451 }
6452 /* No qualifier. */
6453 break;
6454
6455 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6456 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
6457 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6458 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6459 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6460 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6461 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6462 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6463 case AARCH64_OPND_SVE_ADDR_RI_U6:
6464 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6465 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6466 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6467 /* [X<n>{, #imm, MUL VL}]
6468 [X<n>{, #imm}]
6469 but recognizing SVE registers. */
6470 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6471 &offset_qualifier));
6472 if (base_qualifier != AARCH64_OPND_QLF_X)
6473 {
6474 set_syntax_error (_("invalid addressing mode"));
6475 goto failure;
6476 }
6477 sve_regimm:
6478 if (info->addr.pcrel || info->addr.offset.is_reg
6479 || !info->addr.preind || info->addr.writeback)
6480 {
6481 set_syntax_error (_("invalid addressing mode"));
6482 goto failure;
6483 }
6484 if (inst.reloc.type != BFD_RELOC_UNUSED
6485 || inst.reloc.exp.X_op != O_constant)
6486 {
6487 /* Make sure this has priority over
6488 "invalid addressing mode". */
6489 set_fatal_syntax_error (_("constant offset required"));
6490 goto failure;
6491 }
6492 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6493 break;
6494
6495 case AARCH64_OPND_SVE_ADDR_R:
6496 /* [<Xn|SP>{, <R><m>}]
6497 but recognizing SVE registers. */
6498 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6499 &offset_qualifier));
6500 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6501 {
6502 offset_qualifier = AARCH64_OPND_QLF_X;
6503 info->addr.offset.is_reg = 1;
6504 info->addr.offset.regno = 31;
6505 }
6506 else if (base_qualifier != AARCH64_OPND_QLF_X
6507 || offset_qualifier != AARCH64_OPND_QLF_X)
6508 {
6509 set_syntax_error (_("invalid addressing mode"));
6510 goto failure;
6511 }
6512 goto regoff_addr;
6513
6514 case AARCH64_OPND_SVE_ADDR_RR:
6515 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6516 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6517 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6518 case AARCH64_OPND_SVE_ADDR_RX:
6519 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6520 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6521 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6522 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6523 but recognizing SVE registers. */
6524 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6525 &offset_qualifier));
6526 if (base_qualifier != AARCH64_OPND_QLF_X
6527 || offset_qualifier != AARCH64_OPND_QLF_X)
6528 {
6529 set_syntax_error (_("invalid addressing mode"));
6530 goto failure;
6531 }
6532 goto regoff_addr;
6533
6534 case AARCH64_OPND_SVE_ADDR_RZ:
6535 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6536 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6537 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6538 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6539 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6540 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6541 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6542 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6543 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6544 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6545 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6546 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6547 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6548 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6549 &offset_qualifier));
6550 if (base_qualifier != AARCH64_OPND_QLF_X
6551 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6552 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6553 {
6554 set_syntax_error (_("invalid addressing mode"));
6555 goto failure;
6556 }
6557 info->qualifier = offset_qualifier;
6558 goto regoff_addr;
6559
6560 case AARCH64_OPND_SVE_ADDR_ZX:
6561 /* [Zn.<T>{, <Xm>}]. */
6562 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6563 &offset_qualifier));
6564 /* Things to check:
6565 base_qualifier either S_S or S_D
6566 offset_qualifier must be X
6567 */
6568 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6569 && base_qualifier != AARCH64_OPND_QLF_S_D)
6570 || offset_qualifier != AARCH64_OPND_QLF_X)
6571 {
6572 set_syntax_error (_("invalid addressing mode"));
6573 goto failure;
6574 }
6575 info->qualifier = base_qualifier;
6576 if (!info->addr.offset.is_reg || info->addr.pcrel
6577 || !info->addr.preind || info->addr.writeback
6578 || info->shifter.operator_present != 0)
6579 {
6580 set_syntax_error (_("invalid addressing mode"));
6581 goto failure;
6582 }
6583 info->shifter.kind = AARCH64_MOD_LSL;
6584 break;
6585
6586
6587 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6588 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6589 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6590 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6591 /* [Z<n>.<T>{, #imm}] */
6592 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6593 &offset_qualifier));
6594 if (base_qualifier != AARCH64_OPND_QLF_S_S
6595 && base_qualifier != AARCH64_OPND_QLF_S_D)
6596 {
6597 set_syntax_error (_("invalid addressing mode"));
6598 goto failure;
6599 }
6600 info->qualifier = base_qualifier;
6601 goto sve_regimm;
6602
6603 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6604 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6605 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6606 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6607 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6608
6609 We don't reject:
6610
6611 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6612
6613 here since we get better error messages by leaving it to
6614 the qualifier checking routines. */
6615 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6616 &offset_qualifier));
6617 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6618 && base_qualifier != AARCH64_OPND_QLF_S_D)
6619 || offset_qualifier != base_qualifier)
6620 {
6621 set_syntax_error (_("invalid addressing mode"));
6622 goto failure;
6623 }
6624 info->qualifier = base_qualifier;
6625 goto regoff_addr;
6626
6627 case AARCH64_OPND_SYSREG:
6628 {
6629 uint32_t sysreg_flags;
6630 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6631 &sysreg_flags)) == PARSE_FAIL)
6632 {
6633 set_syntax_error (_("unknown or missing system register name"));
6634 goto failure;
6635 }
6636 inst.base.operands[i].sysreg.value = val;
6637 inst.base.operands[i].sysreg.flags = sysreg_flags;
6638 break;
6639 }
6640
6641 case AARCH64_OPND_PSTATEFIELD:
6642 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6643 == PARSE_FAIL)
6644 {
6645 set_syntax_error (_("unknown or missing PSTATE field name"));
6646 goto failure;
6647 }
6648 inst.base.operands[i].pstatefield = val;
6649 break;
6650
6651 case AARCH64_OPND_SYSREG_IC:
6652 inst.base.operands[i].sysins_op =
6653 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6654 goto sys_reg_ins;
6655
6656 case AARCH64_OPND_SYSREG_DC:
6657 inst.base.operands[i].sysins_op =
6658 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6659 goto sys_reg_ins;
6660
6661 case AARCH64_OPND_SYSREG_AT:
6662 inst.base.operands[i].sysins_op =
6663 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6664 goto sys_reg_ins;
6665
6666 case AARCH64_OPND_SYSREG_SR:
6667 inst.base.operands[i].sysins_op =
6668 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6669 goto sys_reg_ins;
6670
6671 case AARCH64_OPND_SYSREG_TLBI:
6672 inst.base.operands[i].sysins_op =
6673 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6674 sys_reg_ins:
6675 if (inst.base.operands[i].sysins_op == NULL)
6676 {
6677 set_fatal_syntax_error (_("unknown or missing operation name"));
6678 goto failure;
6679 }
6680 break;
6681
6682 case AARCH64_OPND_BARRIER:
6683 case AARCH64_OPND_BARRIER_ISB:
6684 val = parse_barrier (&str);
6685 if (val != PARSE_FAIL
6686 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6687 {
6688 /* ISB only accepts the option name 'sy'. */
6689 set_syntax_error
6690 (_("the specified option is not accepted in ISB"));
6691 /* Turn off backtrack as this optional operand is present. */
6692 backtrack_pos = 0;
6693 goto failure;
6694 }
6695 /* This is an extension to accept a 0..15 immediate. */
6696 if (val == PARSE_FAIL)
6697 po_imm_or_fail (0, 15);
6698 info->barrier = aarch64_barrier_options + val;
6699 break;
6700
6701 case AARCH64_OPND_PRFOP:
6702 val = parse_pldop (&str);
6703 /* This is an extension to accept a 0..31 immediate. */
6704 if (val == PARSE_FAIL)
6705 po_imm_or_fail (0, 31);
6706 inst.base.operands[i].prfop = aarch64_prfops + val;
6707 break;
6708
6709 case AARCH64_OPND_BARRIER_PSB:
6710 val = parse_barrier_psb (&str, &(info->hint_option));
6711 if (val == PARSE_FAIL)
6712 goto failure;
6713 break;
6714
6715 case AARCH64_OPND_BTI_TARGET:
6716 val = parse_bti_operand (&str, &(info->hint_option));
6717 if (val == PARSE_FAIL)
6718 goto failure;
6719 break;
6720
6721 default:
6722 as_fatal (_("unhandled operand code %d"), operands[i]);
6723 }
6724
6725 /* If we get here, this operand was successfully parsed. */
6726 inst.base.operands[i].present = 1;
6727 continue;
6728
6729 failure:
6730 /* The parse routine should already have set the error, but in case
6731 not, set a default one here. */
6732 if (! error_p ())
6733 set_default_error ();
6734
6735 if (! backtrack_pos)
6736 goto parse_operands_return;
6737
6738 {
6739 /* We reach here because this operand is marked as optional, and
6740 either no operand was supplied or the operand was supplied but it
6741 was syntactically incorrect. In the latter case we report an
6742 error. In the former case we perform a few more checks before
6743 dropping through to the code to insert the default operand. */
6744
6745 char *tmp = backtrack_pos;
6746 char endchar = END_OF_INSN;
6747
6748 if (i != (aarch64_num_of_operands (opcode) - 1))
6749 endchar = ',';
6750 skip_past_char (&tmp, ',');
6751
6752 if (*tmp != endchar)
6753 /* The user has supplied an operand in the wrong format. */
6754 goto parse_operands_return;
6755
6756 /* Make sure there is not a comma before the optional operand.
6757 For example the fifth operand of 'sys' is optional:
6758
6759 sys #0,c0,c0,#0, <--- wrong
6760 sys #0,c0,c0,#0 <--- correct. */
6761 if (comma_skipped_p && i && endchar == END_OF_INSN)
6762 {
6763 set_fatal_syntax_error
6764 (_("unexpected comma before the omitted optional operand"));
6765 goto parse_operands_return;
6766 }
6767 }
6768
6769 /* Reaching here means we are dealing with an optional operand that is
6770 omitted from the assembly line. */
6771 gas_assert (optional_operand_p (opcode, i));
6772 info->present = 0;
6773 process_omitted_operand (operands[i], opcode, i, info);
6774
6775 /* Try again, skipping the optional operand at backtrack_pos. */
6776 str = backtrack_pos;
6777 backtrack_pos = 0;
6778
6779 /* Clear any error record after the omitted optional operand has been
6780 successfully handled. */
6781 clear_error ();
6782 }
6783
6784 /* Check if we have parsed all the operands. */
6785 if (*str != '\0' && ! error_p ())
6786 {
6787 /* Set I to the index of the last present operand; this is
6788 for the purpose of diagnostics. */
6789 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6790 ;
6791 set_fatal_syntax_error
6792 (_("unexpected characters following instruction"));
6793 }
6794
6795 parse_operands_return:
6796
6797 if (error_p ())
6798 {
6799 DEBUG_TRACE ("parsing FAIL: %s - %s",
6800 operand_mismatch_kind_names[get_error_kind ()],
6801 get_error_message ());
6802 /* Record the operand error properly; this is useful when there
6803 are multiple instruction templates for a mnemonic name, so that
6804 later on, we can select the error that most closely describes
6805 the problem. */
6806 record_operand_error (opcode, i, get_error_kind (),
6807 get_error_message ());
6808 return FALSE;
6809 }
6810 else
6811 {
6812 DEBUG_TRACE ("parsing SUCCESS");
6813 return TRUE;
6814 }
6815 }
6816
6817 /* Do some fix-ups to provide programmer-friendly features while
6818 keeping libopcodes happy, i.e. libopcodes only accepts
6819 the preferred architectural syntax.
6820 Return FALSE if there is any failure; otherwise return TRUE. */
6821
6822 static bfd_boolean
6823 programmer_friendly_fixup (aarch64_instruction *instr)
6824 {
6825 aarch64_inst *base = &instr->base;
6826 const aarch64_opcode *opcode = base->opcode;
6827 enum aarch64_op op = opcode->op;
6828 aarch64_opnd_info *operands = base->operands;
6829
6830 DEBUG_TRACE ("enter");
6831
6832 switch (opcode->iclass)
6833 {
6834 case testbranch:
6835 /* TBNZ Xn|Wn, #uimm6, label
6836 Test and Branch Not Zero: conditionally jumps to label if bit number
6837 uimm6 in register Xn is not zero. The bit number implies the width of
6838 the register, which may be written and should be disassembled as Wn if
6839 uimm is less than 32. */
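/* Illustrative examples (not taken from the original comment): "tbz w0, #3, lbl"
   is accepted here and re-qualified as an X-register operand for encoding,
   while "tbz w0, #35, lbl" is rejected below with an out-of-range error (0-31). */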
6840 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
6841 {
6842 if (operands[1].imm.value >= 32)
6843 {
6844 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
6845 0, 31);
6846 return FALSE;
6847 }
6848 operands[0].qualifier = AARCH64_OPND_QLF_X;
6849 }
6850 break;
6851 case loadlit:
6852 /* LDR Wt, label | =value
6853 As a convenience, assemblers will typically permit the notation
6854 "=value" in conjunction with the pc-relative literal load instructions
6855 to automatically place an immediate value or symbolic address in a
6856 nearby literal pool and generate a hidden label which references it.
6857 ISREG has been set to 0 in the case of =value. */
6858 if (instr->gen_lit_pool
6859 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
6860 {
6861 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
6862 if (op == OP_LDRSW_LIT)
6863 size = 4;
6864 if (instr->reloc.exp.X_op != O_constant
6865 && instr->reloc.exp.X_op != O_big
6866 && instr->reloc.exp.X_op != O_symbol)
6867 {
6868 record_operand_error (opcode, 1,
6869 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
6870 _("constant expression expected"));
6871 return FALSE;
6872 }
6873 if (! add_to_lit_pool (&instr->reloc.exp, size))
6874 {
6875 record_operand_error (opcode, 1,
6876 AARCH64_OPDE_OTHER_ERROR,
6877 _("literal pool insertion failed"));
6878 return FALSE;
6879 }
6880 }
6881 break;
6882 case log_shift:
6883 case bitfield:
6884 /* UXT[BHW] Wd, Wn
6885 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
6886 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo-instruction which is
6887 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
6888 A programmer-friendly assembler should accept a destination Xd in
6889 place of Wd; however, that is not the preferred form for disassembly.
6890 */
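/* Illustrative example: "uxtb x0, w1" is accepted and assembled exactly as
   "uxtb w0, w1"; the W form is what the disassembler will print. */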
6891 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
6892 && operands[1].qualifier == AARCH64_OPND_QLF_W
6893 && operands[0].qualifier == AARCH64_OPND_QLF_X)
6894 operands[0].qualifier = AARCH64_OPND_QLF_W;
6895 break;
6896
6897 case addsub_ext:
6898 {
6899 /* In the 64-bit form, the final register operand is written as Wm
6900 for all but the (possibly omitted) UXTX/LSL and SXTX
6901 operators.
6902 As a programmer-friendly assembler, we accept e.g.
6903 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
6904 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
6905 int idx = aarch64_operand_index (opcode->operands,
6906 AARCH64_OPND_Rm_EXT);
6907 gas_assert (idx == 1 || idx == 2);
6908 if (operands[0].qualifier == AARCH64_OPND_QLF_X
6909 && operands[idx].qualifier == AARCH64_OPND_QLF_X
6910 && operands[idx].shifter.kind != AARCH64_MOD_LSL
6911 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
6912 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
6913 operands[idx].qualifier = AARCH64_OPND_QLF_W;
6914 }
6915 break;
6916
6917 default:
6918 break;
6919 }
6920
6921 DEBUG_TRACE ("exit with SUCCESS");
6922 return TRUE;
6923 }
6924
6925 /* Check for loads and stores that will cause unpredictable behavior. */
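/* Illustrative cases that trigger the warnings below: "ldr x0, [x0], #8"
   (the base register equals the transfer register with writeback) and
   "ldp x2, x2, [x1]" (a load pair into the same register twice). */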
6926
6927 static void
6928 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6929 {
6930 aarch64_inst *base = &instr->base;
6931 const aarch64_opcode *opcode = base->opcode;
6932 const aarch64_opnd_info *opnds = base->operands;
6933 switch (opcode->iclass)
6934 {
6935 case ldst_pos:
6936 case ldst_imm9:
6937 case ldst_imm10:
6938 case ldst_unscaled:
6939 case ldst_unpriv:
6940 /* Loading/storing the base register is unpredictable if writeback. */
6941 if ((aarch64_get_operand_class (opnds[0].type)
6942 == AARCH64_OPND_CLASS_INT_REG)
6943 && opnds[0].reg.regno == opnds[1].addr.base_regno
6944 && opnds[1].addr.base_regno != REG_SP
6945 /* Exempt STG/STZG/ST2G/STZ2G. */
6946 && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
6947 && opnds[1].addr.writeback)
6948 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6949 break;
6950
6951 case ldstpair_off:
6952 case ldstnapair_offs:
6953 case ldstpair_indexed:
6954 /* Loading/storing the base register is unpredictable if writeback. */
6955 if ((aarch64_get_operand_class (opnds[0].type)
6956 == AARCH64_OPND_CLASS_INT_REG)
6957 && (opnds[0].reg.regno == opnds[2].addr.base_regno
6958 || opnds[1].reg.regno == opnds[2].addr.base_regno)
6959 && opnds[2].addr.base_regno != REG_SP
6960 /* Exempt STGP. */
6961 && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
6962 && opnds[2].addr.writeback)
6963 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6964 /* Load operations must load different registers. */
6965 if ((opcode->opcode & (1 << 22))
6966 && opnds[0].reg.regno == opnds[1].reg.regno)
6967 as_warn (_("unpredictable load of register pair -- `%s'"), str);
6968 break;
6969
6970 case ldstexcl:
6971 /* It is unpredictable if the destination and status registers are the
6972 same. */
6973 if ((aarch64_get_operand_class (opnds[0].type)
6974 == AARCH64_OPND_CLASS_INT_REG)
6975 && (aarch64_get_operand_class (opnds[1].type)
6976 == AARCH64_OPND_CLASS_INT_REG)
6977 && (opnds[0].reg.regno == opnds[1].reg.regno
6978 || opnds[0].reg.regno == opnds[2].reg.regno))
6979 as_warn (_("unpredictable: identical transfer and status registers"
6980 " -- `%s'"),
6981 str);
6982
6983 break;
6984
6985 default:
6986 break;
6987 }
6988 }
6989
6990 static void
6991 force_automatic_sequence_close (void)
6992 {
6993 if (now_instr_sequence.instr)
6994 {
6995 as_warn (_("previous `%s' sequence has not been closed"),
6996 now_instr_sequence.instr->opcode->name);
6997 init_insn_sequence (NULL, &now_instr_sequence);
6998 }
6999 }
7000
7001 /* A wrapper function to interface with libopcodes for encoding; it
7002 records the error message if there is any.
7003
7004 Return TRUE on success; otherwise return FALSE. */
7005
7006 static bfd_boolean
7007 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7008 aarch64_insn *code)
7009 {
7010 aarch64_operand_error error_info;
7011 memset (&error_info, '\0', sizeof (error_info));
7012 error_info.kind = AARCH64_OPDE_NIL;
7013 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7014 && !error_info.non_fatal)
7015 return TRUE;
7016
7017 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7018 record_operand_error_info (opcode, &error_info);
7019 return error_info.non_fatal;
7020 }
7021
7022 #ifdef DEBUG_AARCH64
7023 static inline void
7024 dump_opcode_operands (const aarch64_opcode *opcode)
7025 {
7026 int i = 0;
7027 while (opcode->operands[i] != AARCH64_OPND_NIL)
7028 {
7029 aarch64_verbose ("\t\t opnd%d: %s", i,
7030 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7031 ? aarch64_get_operand_name (opcode->operands[i])
7032 : aarch64_get_operand_desc (opcode->operands[i]));
7033 ++i;
7034 }
7035 }
7036 #endif /* DEBUG_AARCH64 */
7037
7038 /* This is the guts of the machine-dependent assembler. STR points to a
7039 machine dependent instruction. This function is supposed to emit
7040 the frags/bytes it assembles to. */
7041
7042 void
7043 md_assemble (char *str)
7044 {
7045 char *p = str;
7046 templates *template;
7047 aarch64_opcode *opcode;
7048 aarch64_inst *inst_base;
7049 unsigned saved_cond;
7050
7051 /* Align the previous label if needed. */
7052 if (last_label_seen != NULL)
7053 {
7054 symbol_set_frag (last_label_seen, frag_now);
7055 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
7056 S_SET_SEGMENT (last_label_seen, now_seg);
7057 }
7058
7059 /* Update the current insn_sequence from the segment. */
7060 insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;
7061
7062 inst.reloc.type = BFD_RELOC_UNUSED;
7063
7064 DEBUG_TRACE ("\n\n");
7065 DEBUG_TRACE ("==============================");
7066 DEBUG_TRACE ("Enter md_assemble with %s", str);
7067
7068 template = opcode_lookup (&p);
7069 if (!template)
7070 {
7071 /* It wasn't an instruction, but it might be a register alias
7072 created by a directive of the form "alias .req reg". */
7073 if (!create_register_alias (str, p))
7074 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
7075 str);
7076 return;
7077 }
7078
7079 skip_whitespace (p);
7080 if (*p == ',')
7081 {
7082 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
7083 get_mnemonic_name (str), str);
7084 return;
7085 }
7086
7087 init_operand_error_report ();
7088
7089 /* Sections are assumed to start aligned. In an executable section, there
7090 is no MAP_DATA symbol pending, so we only align the address during the
7091 MAP_DATA --> MAP_INSN transition.
7092 For other sections, this is not guaranteed. */
7093 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
7094 if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
7095 frag_align_code (2, 0);
7096
7097 saved_cond = inst.cond;
7098 reset_aarch64_instruction (&inst);
7099 inst.cond = saved_cond;
7100
7101 /* Iterate through all opcode entries with the same mnemonic name. */
7102 do
7103 {
7104 opcode = template->opcode;
7105
7106 DEBUG_TRACE ("opcode %s found", opcode->name);
7107 #ifdef DEBUG_AARCH64
7108 if (debug_dump)
7109 dump_opcode_operands (opcode);
7110 #endif /* DEBUG_AARCH64 */
7111
7112 mapping_state (MAP_INSN);
7113
7114 inst_base = &inst.base;
7115 inst_base->opcode = opcode;
7116
7117 /* Truly conditionally executed instructions, e.g. b.cond. */
7118 if (opcode->flags & F_COND)
7119 {
7120 gas_assert (inst.cond != COND_ALWAYS);
7121 inst_base->cond = get_cond_from_value (inst.cond);
7122 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
7123 }
7124 else if (inst.cond != COND_ALWAYS)
7125 {
7126 /* We shouldn't get here: the assembly looks like a conditional
7127 instruction but the opcode found is unconditional. */
7128 gas_assert (0);
7129 continue;
7130 }
7131
7132 if (parse_operands (p, opcode)
7133 && programmer_friendly_fixup (&inst)
7134 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
7135 {
7136 /* Check that this instruction is supported for this CPU. */
7137 if (!opcode->avariant
7138 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
7139 {
7140 as_bad (_("selected processor does not support `%s'"), str);
7141 return;
7142 }
7143
7144 warn_unpredictable_ldst (&inst, str);
7145
7146 if (inst.reloc.type == BFD_RELOC_UNUSED
7147 || !inst.reloc.need_libopcodes_p)
7148 output_inst (NULL);
7149 else
7150 {
7151 /* If a relocation is generated for the instruction,
7152 store the instruction information for the future fix-up. */
7153 struct aarch64_inst *copy;
7154 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
7155 copy = XNEW (struct aarch64_inst);
7156 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
7157 output_inst (copy);
7158 }
7159
7160 /* Issue non-fatal messages if any. */
7161 output_operand_error_report (str, TRUE);
7162 return;
7163 }
7164
7165 template = template->next;
7166 if (template != NULL)
7167 {
7168 reset_aarch64_instruction (&inst);
7169 inst.cond = saved_cond;
7170 }
7171 }
7172 while (template != NULL);
7173
7174 /* Issue the error messages if any. */
7175 output_operand_error_report (str, FALSE);
7176 }
7177
7178 /* Various frobbings of labels and their addresses. */
7179
7180 void
7181 aarch64_start_line_hook (void)
7182 {
7183 last_label_seen = NULL;
7184 }
7185
7186 void
7187 aarch64_frob_label (symbolS * sym)
7188 {
7189 last_label_seen = sym;
7190
7191 dwarf2_emit_label (sym);
7192 }
7193
7194 void
7195 aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
7196 {
7197 /* Check to see if we have a block to close. */
7198 force_automatic_sequence_close ();
7199 }
7200
7201 int
7202 aarch64_data_in_code (void)
7203 {
7204 if (!strncmp (input_line_pointer + 1, "data:", 5))
7205 {
7206 *input_line_pointer = '/';
7207 input_line_pointer += 5;
7208 *input_line_pointer = 0;
7209 return 1;
7210 }
7211
7212 return 0;
7213 }
7214
7215 char *
7216 aarch64_canonicalize_symbol_name (char *name)
7217 {
7218 int len;
7219
7220 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
7221 *(name + len - 5) = 0;
7222
7223 return name;
7224 }
7225 \f
7226 /* Table of all register names defined by default. The user can
7227 define additional names with .req. Note that all register names
7228 should appear in both upper and lowercase variants. Some registers
7229 also have mixed-case names. */
7230
7231 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
7232 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
7233 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
7234 #define REGSET16(p,t) \
7235 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
7236 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
7237 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
7238 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
7239 #define REGSET31(p,t) \
7240 REGSET16(p, t), \
7241 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
7242 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
7243 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
7244 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
7245 #define REGSET(p,t) \
7246 REGSET31(p,t), REGNUM(p,31,t)
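/* For illustration, REGNUM(x, 0, R_64) expands to the entry
   { "x0", 0, REG_TYPE_R_64, TRUE }, and REGDEF_ALIAS (fp, 29, R_64)
   expands to { "fp", 29, REG_TYPE_R_64, FALSE }. */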
7247
7248 /* These go into aarch64_reg_hsh hash-table. */
7249 static const reg_entry reg_names[] = {
7250 /* Integer registers. */
7251 REGSET31 (x, R_64), REGSET31 (X, R_64),
7252 REGSET31 (w, R_32), REGSET31 (W, R_32),
7253
7254 REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
7255 REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
7256 REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
7257 REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
7258 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
7259 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
7260
7261 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
7262 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
7263
7264 /* Floating-point single precision registers. */
7265 REGSET (s, FP_S), REGSET (S, FP_S),
7266
7267 /* Floating-point double precision registers. */
7268 REGSET (d, FP_D), REGSET (D, FP_D),
7269
7270 /* Floating-point half precision registers. */
7271 REGSET (h, FP_H), REGSET (H, FP_H),
7272
7273 /* Floating-point byte precision registers. */
7274 REGSET (b, FP_B), REGSET (B, FP_B),
7275
7276 /* Floating-point quad precision registers. */
7277 REGSET (q, FP_Q), REGSET (Q, FP_Q),
7278
7279 /* FP/SIMD registers. */
7280 REGSET (v, VN), REGSET (V, VN),
7281
7282 /* SVE vector registers. */
7283 REGSET (z, ZN), REGSET (Z, ZN),
7284
7285 /* SVE predicate registers. */
7286 REGSET16 (p, PN), REGSET16 (P, PN)
7287 };
7288
7289 #undef REGDEF
7290 #undef REGDEF_ALIAS
7291 #undef REGNUM
7292 #undef REGSET16
7293 #undef REGSET31
7294 #undef REGSET
7295
7296 #define N 1
7297 #define n 0
7298 #define Z 1
7299 #define z 0
7300 #define C 1
7301 #define c 0
7302 #define V 1
7303 #define v 0
7304 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
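/* For example, the "nZCv" entry below evaluates to
   B (0, 1, 1, 0) == 0b0110 == 6: only the Z and C flags are set. */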
7305 static const asm_nzcv nzcv_names[] = {
7306 {"nzcv", B (n, z, c, v)},
7307 {"nzcV", B (n, z, c, V)},
7308 {"nzCv", B (n, z, C, v)},
7309 {"nzCV", B (n, z, C, V)},
7310 {"nZcv", B (n, Z, c, v)},
7311 {"nZcV", B (n, Z, c, V)},
7312 {"nZCv", B (n, Z, C, v)},
7313 {"nZCV", B (n, Z, C, V)},
7314 {"Nzcv", B (N, z, c, v)},
7315 {"NzcV", B (N, z, c, V)},
7316 {"NzCv", B (N, z, C, v)},
7317 {"NzCV", B (N, z, C, V)},
7318 {"NZcv", B (N, Z, c, v)},
7319 {"NZcV", B (N, Z, c, V)},
7320 {"NZCv", B (N, Z, C, v)},
7321 {"NZCV", B (N, Z, C, V)}
7322 };
7323
7324 #undef N
7325 #undef n
7326 #undef Z
7327 #undef z
7328 #undef C
7329 #undef c
7330 #undef V
7331 #undef v
7332 #undef B
7333 \f
7334 /* MD interface: bits in the object file. */
7335
7336 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7337 for use in the a.out file, and store them in the array pointed to by buf.
7338 This knows about the endianness of the target machine and does
7339 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
7340 2 (short) and 4 (long). Floating-point numbers are put out as a series
7341 of LITTLENUMS (shorts, here at least). */
7342
7343 void
7344 md_number_to_chars (char *buf, valueT val, int n)
7345 {
7346 if (target_big_endian)
7347 number_to_chars_bigendian (buf, val, n);
7348 else
7349 number_to_chars_littleendian (buf, val, n);
7350 }
7351
7352 /* MD interface: Sections. */
7353
7354 /* Estimate the size of a frag before relaxing. Assume everything fits in
7355 4 bytes. */
7356
7357 int
7358 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
7359 {
7360 fragp->fr_var = 4;
7361 return 4;
7362 }
7363
7364 /* Round up a section size to the appropriate boundary. */
7365
7366 valueT
7367 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
7368 {
7369 return size;
7370 }
7371
7372 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7373 of an rs_align_code fragment.
7374
7375 Here we fill the frag with the appropriate info for padding the
7376 output stream. The resulting frag will consist of a fixed (fr_fix)
7377 and of a repeating (fr_var) part.
7378
7379 The fixed content is always emitted before the repeating content and
7380 these two parts are used as follows in constructing the output:
7381 - the fixed part will be used to align to a valid instruction word
7382 boundary, in case we start at a misaligned address; as no
7383 executable instruction can live at the misaligned location, we
7384 simply fill with zeros;
7385 - the variable part will be used to cover the remaining padding and
7386 we fill using the AArch64 NOP instruction.
7387
7388 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7389 enough storage space for up to 3 bytes for padding back to a valid
7390 instruction alignment and exactly 4 bytes to store the NOP pattern. */
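/* A worked example (illustrative, assuming the requested alignment is a
   multiple of 4): if the padding starts at an address with address % 4 == 2,
   the fixed part is two zero bytes to reach the next word boundary and the
   variable part is the NOP word 0xd503201f, repeated by the write machinery
   as often as the remaining padding requires. */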
7391
7392 void
7393 aarch64_handle_align (fragS * fragP)
7394 {
7395 /* NOP = d503201f */
7396 /* AArch64 instructions are always little-endian. */
7397 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
7398
7399 int bytes, fix, noop_size;
7400 char *p;
7401
7402 if (fragP->fr_type != rs_align_code)
7403 return;
7404
7405 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
7406 p = fragP->fr_literal + fragP->fr_fix;
7407
7408 #ifdef OBJ_ELF
7409 gas_assert (fragP->tc_frag_data.recorded);
7410 #endif
7411
7412 noop_size = sizeof (aarch64_noop);
7413
7414 fix = bytes & (noop_size - 1);
7415 if (fix)
7416 {
7417 #ifdef OBJ_ELF
7418 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
7419 #endif
7420 memset (p, 0, fix);
7421 p += fix;
7422 fragP->fr_fix += fix;
7423 }
7424
7425 if (noop_size)
7426 memcpy (p, aarch64_noop, noop_size);
7427 fragP->fr_var = noop_size;
7428 }
7429
7430 /* Perform target specific initialisation of a frag.
7431 Note - despite the name this initialisation is not done when the frag
7432 is created, but only when its type is assigned. A frag can be created
7433 and used a long time before its type is set, so beware of assuming that
7434 this initialisation is performed first. */
7435
7436 #ifndef OBJ_ELF
7437 void
7438 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
7439 int max_chars ATTRIBUTE_UNUSED)
7440 {
7441 }
7442
7443 #else /* OBJ_ELF is defined. */
7444 void
7445 aarch64_init_frag (fragS * fragP, int max_chars)
7446 {
7447 /* Record a mapping symbol for alignment frags. We will delete this
7448 later if the alignment ends up empty. */
7449 if (!fragP->tc_frag_data.recorded)
7450 fragP->tc_frag_data.recorded = 1;
7451
7452 /* PR 21809: Do not set a mapping state for debug sections
7453 - it just confuses other tools. */
7454 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
7455 return;
7456
7457 switch (fragP->fr_type)
7458 {
7459 case rs_align_test:
7460 case rs_fill:
7461 mapping_state_2 (MAP_DATA, max_chars);
7462 break;
7463 case rs_align:
7464 /* PR 20364: We can get alignment frags in code sections,
7465 so do not just assume that we should use the MAP_DATA state. */
7466 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
7467 break;
7468 case rs_align_code:
7469 mapping_state_2 (MAP_INSN, max_chars);
7470 break;
7471 default:
7472 break;
7473 }
7474 }
7475 \f
7476 /* Initialize the DWARF-2 unwind information for this procedure. */
7477
7478 void
7479 tc_aarch64_frame_initial_instructions (void)
7480 {
7481 cfi_add_CFA_def_cfa (REG_SP, 0);
7482 }
7483 #endif /* OBJ_ELF */
7484
7485 /* Convert REGNAME to a DWARF-2 register number. */
7486
7487 int
7488 tc_aarch64_regname_to_dw2regnum (char *regname)
7489 {
7490 const reg_entry *reg = parse_reg (&regname);
7491 if (reg == NULL)
7492 return -1;
7493
7494 switch (reg->type)
7495 {
7496 case REG_TYPE_SP_32:
7497 case REG_TYPE_SP_64:
7498 case REG_TYPE_R_32:
7499 case REG_TYPE_R_64:
7500 return reg->number;
7501
7502 case REG_TYPE_FP_B:
7503 case REG_TYPE_FP_H:
7504 case REG_TYPE_FP_S:
7505 case REG_TYPE_FP_D:
7506 case REG_TYPE_FP_Q:
7507 return reg->number + 64;
7508
7509 default:
7510 break;
7511 }
7512 return -1;
7513 }
7514
7515 /* Implement DWARF2_ADDR_SIZE. */
7516
7517 int
7518 aarch64_dwarf2_addr_size (void)
7519 {
7520 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7521 if (ilp32_p)
7522 return 4;
7523 #endif
7524 return bfd_arch_bits_per_address (stdoutput) / 8;
7525 }
7526
7527 /* MD interface: Symbol and relocation handling. */
7528
7529 /* Return the address within the segment that a PC-relative fixup is
7530 relative to. For AArch64, PC-relative fixups applied to instructions
7531 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7532
7533 long
7534 md_pcrel_from_section (fixS * fixP, segT seg)
7535 {
7536 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7537
7538 /* If this is pc-relative and we are going to emit a relocation
7539 then we just want to put out any pipeline compensation that the linker
7540 will need. Otherwise we want to use the calculated base. */
7541 if (fixP->fx_pcrel
7542 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7543 || aarch64_force_relocation (fixP)))
7544 base = 0;
7545
7546 /* AArch64 should be consistent for all pc-relative relocations. */
7547 return base + AARCH64_PCREL_OFFSET;
7548 }
7549
7550 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
7551 Otherwise we have no need to default values of symbols. */
7552
7553 symbolS *
7554 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
7555 {
7556 #ifdef OBJ_ELF
7557 if (name[0] == '_' && name[1] == 'G'
7558 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
7559 {
7560 if (!GOT_symbol)
7561 {
7562 if (symbol_find (name))
7563 as_bad (_("GOT already in the symbol table"));
7564
7565 GOT_symbol = symbol_new (name, undefined_section,
7566 &zero_address_frag, 0);
7567 }
7568
7569 return GOT_symbol;
7570 }
7571 #endif
7572
7573 return 0;
7574 }
7575
7576 /* Return non-zero if the indicated VALUE has overflowed the maximum
7577 range expressible by an unsigned number with the indicated number of
7578 BITS. */
7579
7580 static bfd_boolean
7581 unsigned_overflow (valueT value, unsigned bits)
7582 {
7583 valueT lim;
7584 if (bits >= sizeof (valueT) * 8)
7585 return FALSE;
7586 lim = (valueT) 1 << bits;
7587 return (value >= lim);
7588 }
7589
7590
7591 /* Return non-zero if the indicated VALUE has overflowed the maximum
7592 range expressible by a signed number with the indicated number of
7593 BITS. */
7594
7595 static bfd_boolean
7596 signed_overflow (offsetT value, unsigned bits)
7597 {
7598 offsetT lim;
7599 if (bits >= sizeof (offsetT) * 8)
7600 return FALSE;
7601 lim = (offsetT) 1 << (bits - 1);
7602 return (value < -lim || value >= lim);
7603 }
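/* Worked examples for the two helpers above (illustrative): with BITS == 9
   the signed range is [-256, 255], so signed_overflow (-257, 9) is TRUE and
   signed_overflow (255, 9) is FALSE; with BITS == 12 the unsigned range is
   [0, 4095], so unsigned_overflow (4096, 12) is TRUE. */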
7604
7605 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7606 unsigned immediate offset load/store instruction, try to encode it as
7607 an unscaled, 9-bit, signed immediate offset load/store instruction.
7608 Return TRUE if it is successful; otherwise return FALSE.
7609
7610 As a programmer-friendly assembler, we generate LDUR/STUR instructions
7611 in response to the standard LDR/STR mnemonics when the immediate offset is
7612 unambiguous, i.e. when it is negative or unaligned. */
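/* Illustrative examples: "ldr x0, [x1, #-8]" and "ldr x0, [x1, #1]" cannot
   use the scaled unsigned-offset form, so they are re-encoded below as
   "ldur x0, [x1, #-8]" and "ldur x0, [x1, #1]" respectively. */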
7613
7614 static bfd_boolean
7615 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
7616 {
7617 int idx;
7618 enum aarch64_op new_op;
7619 const aarch64_opcode *new_opcode;
7620
7621 gas_assert (instr->opcode->iclass == ldst_pos);
7622
7623 switch (instr->opcode->op)
7624 {
7625 case OP_LDRB_POS:new_op = OP_LDURB; break;
7626 case OP_STRB_POS: new_op = OP_STURB; break;
7627 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
7628 case OP_LDRH_POS: new_op = OP_LDURH; break;
7629 case OP_STRH_POS: new_op = OP_STURH; break;
7630 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
7631 case OP_LDR_POS: new_op = OP_LDUR; break;
7632 case OP_STR_POS: new_op = OP_STUR; break;
7633 case OP_LDRF_POS: new_op = OP_LDURV; break;
7634 case OP_STRF_POS: new_op = OP_STURV; break;
7635 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
7636 case OP_PRFM_POS: new_op = OP_PRFUM; break;
7637 default: new_op = OP_NIL; break;
7638 }
7639
7640 if (new_op == OP_NIL)
7641 return FALSE;
7642
7643 new_opcode = aarch64_get_opcode (new_op);
7644 gas_assert (new_opcode != NULL);
7645
7646 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
7647 instr->opcode->op, new_opcode->op);
7648
7649 aarch64_replace_opcode (instr, new_opcode);
7650
7651 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
7652 qualifier matching may fail because the out-of-date qualifier will
7653 prevent the operand being updated with a new and correct qualifier. */
7654 idx = aarch64_operand_index (instr->opcode->operands,
7655 AARCH64_OPND_ADDR_SIMM9);
7656 gas_assert (idx == 1);
7657 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
7658
7659 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
7660
7661 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
7662 insn_sequence))
7663 return FALSE;
7664
7665 return TRUE;
7666 }
7667
7668 /* Called by fix_insn to fix a MOV immediate alias instruction.
7669
7670 Operand for a generic move immediate instruction, which is an alias
7671 instruction that generates a single MOVZ, MOVN or ORR instruction to load
7672 a 32-bit/64-bit immediate value into a general register. An assembler error
7673 shall result if the immediate cannot be created by a single one of these
7674 instructions. If there is a choice, then to ensure reversibility an
7675 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
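/* Illustrative examples (values chosen for illustration only):
     mov x0, #0x20000              ->  movz x0, #0x2, lsl #16
     mov x0, #0xffffffffffffffff   ->  movn x0, #0x0
     mov x0, #0x00ff00ff00ff00ff   ->  orr  x0, xzr, #0xff00ff00ff00ff
   The last value is a valid bitmask immediate but cannot be produced by a
   single MOVZ or MOVN, so the ORR alias is chosen. */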
7676
7677 static void
7678 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
7679 {
7680 const aarch64_opcode *opcode;
7681
7682 /* Need to check if the destination is SP/ZR. The check has to be done
7683 before any aarch64_replace_opcode. */
7684 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
7685 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
7686
7687 instr->operands[1].imm.value = value;
7688 instr->operands[1].skip = 0;
7689
7690 if (try_mov_wide_p)
7691 {
7692 /* Try the MOVZ alias. */
7693 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
7694 aarch64_replace_opcode (instr, opcode);
7695 if (aarch64_opcode_encode (instr->opcode, instr,
7696 &instr->value, NULL, NULL, insn_sequence))
7697 {
7698 put_aarch64_insn (buf, instr->value);
7699 return;
7700 }
7701 /* Try the MOVN alias. */
7702 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
7703 aarch64_replace_opcode (instr, opcode);
7704 if (aarch64_opcode_encode (instr->opcode, instr,
7705 &instr->value, NULL, NULL, insn_sequence))
7706 {
7707 put_aarch64_insn (buf, instr->value);
7708 return;
7709 }
7710 }
7711
7712 if (try_mov_bitmask_p)
7713 {
7714 /* Try the ORR alias. */
7715 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
7716 aarch64_replace_opcode (instr, opcode);
7717 if (aarch64_opcode_encode (instr->opcode, instr,
7718 &instr->value, NULL, NULL, insn_sequence))
7719 {
7720 put_aarch64_insn (buf, instr->value);
7721 return;
7722 }
7723 }
7724
7725 as_bad_where (fixP->fx_file, fixP->fx_line,
7726 _("immediate cannot be moved by a single instruction"));
7727 }
7728
7729 /* An instruction operand which is immediate related may have a symbol used
7730 in the assembly, e.g.
7731
7732 mov w0, u32
7733 .set u32, 0x00ffff00
7734
7735 At the time when the assembly instruction is parsed, a referenced symbol,
7736 like 'u32' in the above example, may not have been seen; a fixS is created
7737 in such a case and is handled here after symbols have been resolved.
7738 The instruction is fixed up with VALUE using the information in *FIXP plus
7739 extra information in FLAGS.
7740
7741 This function is called by md_apply_fix to fix up instructions that need
7742 a fix-up described above but does not involve any linker-time relocation. */
7743
7744 static void
7745 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
7746 {
7747 int idx;
7748 uint32_t insn;
7749 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7750 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
7751 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
7752
7753 if (new_inst)
7754 {
7755 /* Now the instruction is about to be fixed-up, so the operand that
7756 was previously marked as 'ignored' needs to be unmarked in order
7757 to get the encoding done properly. */
7758 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7759 new_inst->operands[idx].skip = 0;
7760 }
7761
7762 gas_assert (opnd != AARCH64_OPND_NIL);
7763
7764 switch (opnd)
7765 {
7766 case AARCH64_OPND_EXCEPTION:
7767 case AARCH64_OPND_UNDEFINED:
7768 if (unsigned_overflow (value, 16))
7769 as_bad_where (fixP->fx_file, fixP->fx_line,
7770 _("immediate out of range"));
7771 insn = get_aarch64_insn (buf);
7772 insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
7773 put_aarch64_insn (buf, insn);
7774 break;
7775
7776 case AARCH64_OPND_AIMM:
7777 /* ADD or SUB with immediate.
7778 NOTE this assumes we come here with an add/sub shifted reg encoding
7779 3 322|2222|2 2 2 21111 111111
7780 1 098|7654|3 2 1 09876 543210 98765 43210
7781 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
7782 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
7783 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
7784 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
7785 ->
7786 3 322|2222|2 2 221111111111
7787 1 098|7654|3 2 109876543210 98765 43210
7788 11000000 sf 001|0001|shift imm12 Rn Rd ADD
7789 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
7790 51000000 sf 101|0001|shift imm12 Rn Rd SUB
7791 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
7792 Fields sf Rn Rd are already set. */
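/* For example, with ".set off, 0x3000" and "add x0, x1, #off" the resolved
   value 0x3000 does not fit in 12 bits, but it is a multiple of 0x1000 and
   fits in 24 bits, so it is encoded as imm12 = 3 with the LSL #12 shift
   flag set; a negative resolved value first flips ADD into SUB (or ADDS
   into SUBS) and negates the value. */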
7793 insn = get_aarch64_insn (buf);
7794 if (value < 0)
7795 {
7796 /* Add <-> sub. */
7797 insn = reencode_addsub_switch_add_sub (insn);
7798 value = -value;
7799 }
7800
7801 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
7802 && unsigned_overflow (value, 12))
7803 {
7804 /* Try to shift the value by 12 to make it fit. */
7805 if (((value >> 12) << 12) == value
7806 && ! unsigned_overflow (value, 12 + 12))
7807 {
7808 value >>= 12;
7809 insn |= encode_addsub_imm_shift_amount (1);
7810 }
7811 }
7812
7813 if (unsigned_overflow (value, 12))
7814 as_bad_where (fixP->fx_file, fixP->fx_line,
7815 _("immediate out of range"));
7816
7817 insn |= encode_addsub_imm (value);
7818
7819 put_aarch64_insn (buf, insn);
7820 break;
7821
7822 case AARCH64_OPND_SIMD_IMM:
7823 case AARCH64_OPND_SIMD_IMM_SFT:
7824 case AARCH64_OPND_LIMM:
7825 /* Bit mask immediate. */
7826 gas_assert (new_inst != NULL);
7827 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7828 new_inst->operands[idx].imm.value = value;
7829 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7830 &new_inst->value, NULL, NULL, insn_sequence))
7831 put_aarch64_insn (buf, new_inst->value);
7832 else
7833 as_bad_where (fixP->fx_file, fixP->fx_line,
7834 _("invalid immediate"));
7835 break;
7836
7837 case AARCH64_OPND_HALF:
7838 /* 16-bit unsigned immediate. */
7839 if (unsigned_overflow (value, 16))
7840 as_bad_where (fixP->fx_file, fixP->fx_line,
7841 _("immediate out of range"));
7842 insn = get_aarch64_insn (buf);
7843 insn |= encode_movw_imm (value & 0xffff);
7844 put_aarch64_insn (buf, insn);
7845 break;
7846
7847 case AARCH64_OPND_IMM_MOV:
7848 /* Operand for a generic move immediate instruction, which is
7849 an alias instruction that generates a single MOVZ, MOVN or ORR
7850 instruction to load a 32-bit/64-bit immediate value into a general
7851 register. An assembler error shall result if the immediate cannot be
7852 created by a single one of these instructions. If there is a choice,
7853 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
7854 and MOVZ or MOVN to ORR. */
7855 gas_assert (new_inst != NULL);
7856 fix_mov_imm_insn (fixP, buf, new_inst, value);
7857 break;
7858
7859 case AARCH64_OPND_ADDR_SIMM7:
7860 case AARCH64_OPND_ADDR_SIMM9:
7861 case AARCH64_OPND_ADDR_SIMM9_2:
7862 case AARCH64_OPND_ADDR_SIMM10:
7863 case AARCH64_OPND_ADDR_UIMM12:
7864 case AARCH64_OPND_ADDR_SIMM11:
7865 case AARCH64_OPND_ADDR_SIMM13:
7866 /* Immediate offset in an address. */
7867 insn = get_aarch64_insn (buf);
7868
7869 gas_assert (new_inst != NULL && new_inst->value == insn);
7870 gas_assert (new_inst->opcode->operands[1] == opnd
7871 || new_inst->opcode->operands[2] == opnd);
7872
7873 /* Get the index of the address operand. */
7874 if (new_inst->opcode->operands[1] == opnd)
7875 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
7876 idx = 1;
7877 else
7878 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
7879 idx = 2;
7880
7881 /* Update the resolved offset value. */
7882 new_inst->operands[idx].addr.offset.imm = value;
7883
7884 /* Encode/fix-up. */
7885 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7886 &new_inst->value, NULL, NULL, insn_sequence))
7887 {
7888 put_aarch64_insn (buf, new_inst->value);
7889 break;
7890 }
7891 else if (new_inst->opcode->iclass == ldst_pos
7892 && try_to_encode_as_unscaled_ldst (new_inst))
7893 {
7894 put_aarch64_insn (buf, new_inst->value);
7895 break;
7896 }
7897
7898 as_bad_where (fixP->fx_file, fixP->fx_line,
7899 _("immediate offset out of range"));
7900 break;
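/* For example, "ldr x0, [x1, #off]" with ".set off, 1" cannot be encoded
   by the scaled LDR (ldst_pos) form above, which needs a multiple-of-8
   offset, so it falls back to the equivalent unscaled LDUR with a signed
   9-bit offset of 1. */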
7901
7902 default:
7903 gas_assert (0);
7904 as_fatal (_("unhandled operand code %d"), opnd);
7905 }
7906 }
7907
7908 /* Apply a fixup (fixP) to segment data, once it has been determined
7909 by our caller that we have all the info we need to fix it up.
7910
7911 Parameter valP is the pointer to the value of the bits. */
7912
7913 void
7914 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7915 {
7916 offsetT value = *valP;
7917 uint32_t insn;
7918 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7919 int scale;
7920 unsigned flags = fixP->fx_addnumber;
7921
7922 DEBUG_TRACE ("\n\n");
7923 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7924 DEBUG_TRACE ("Enter md_apply_fix");
7925
7926 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7927
7928 /* Note whether this will delete the relocation. */
7929
7930 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7931 fixP->fx_done = 1;
7932
7933 /* Process the relocations. */
7934 switch (fixP->fx_r_type)
7935 {
7936 case BFD_RELOC_NONE:
7937 /* This will need to go in the object file. */
7938 fixP->fx_done = 0;
7939 break;
7940
7941 case BFD_RELOC_8:
7942 case BFD_RELOC_8_PCREL:
7943 if (fixP->fx_done || !seg->use_rela_p)
7944 md_number_to_chars (buf, value, 1);
7945 break;
7946
7947 case BFD_RELOC_16:
7948 case BFD_RELOC_16_PCREL:
7949 if (fixP->fx_done || !seg->use_rela_p)
7950 md_number_to_chars (buf, value, 2);
7951 break;
7952
7953 case BFD_RELOC_32:
7954 case BFD_RELOC_32_PCREL:
7955 if (fixP->fx_done || !seg->use_rela_p)
7956 md_number_to_chars (buf, value, 4);
7957 break;
7958
7959 case BFD_RELOC_64:
7960 case BFD_RELOC_64_PCREL:
7961 if (fixP->fx_done || !seg->use_rela_p)
7962 md_number_to_chars (buf, value, 8);
7963 break;
7964
7965 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7966 /* We claim that these fixups have been processed here, even if
7967 in fact we generate an error because we do not have a reloc
7968 for them, so tc_gen_reloc() will reject them. */
7969 fixP->fx_done = 1;
7970 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7971 {
7972 as_bad_where (fixP->fx_file, fixP->fx_line,
7973 _("undefined symbol %s used as an immediate value"),
7974 S_GET_NAME (fixP->fx_addsy));
7975 goto apply_fix_return;
7976 }
7977 fix_insn (fixP, flags, value);
7978 break;
7979
7980 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7981 if (fixP->fx_done || !seg->use_rela_p)
7982 {
7983 if (value & 3)
7984 as_bad_where (fixP->fx_file, fixP->fx_line,
7985 _("pc-relative load offset not word aligned"));
7986 if (signed_overflow (value, 21))
7987 as_bad_where (fixP->fx_file, fixP->fx_line,
7988 _("pc-relative load offset out of range"));
7989 insn = get_aarch64_insn (buf);
7990 insn |= encode_ld_lit_ofs_19 (value >> 2);
7991 put_aarch64_insn (buf, insn);
7992 }
7993 break;
7994
7995 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7996 if (fixP->fx_done || !seg->use_rela_p)
7997 {
7998 if (signed_overflow (value, 21))
7999 as_bad_where (fixP->fx_file, fixP->fx_line,
8000 _("pc-relative address offset out of range"));
8001 insn = get_aarch64_insn (buf);
8002 insn |= encode_adr_imm (value);
8003 put_aarch64_insn (buf, insn);
8004 }
8005 break;
8006
8007 case BFD_RELOC_AARCH64_BRANCH19:
8008 if (fixP->fx_done || !seg->use_rela_p)
8009 {
8010 if (value & 3)
8011 as_bad_where (fixP->fx_file, fixP->fx_line,
8012 _("conditional branch target not word aligned"));
8013 if (signed_overflow (value, 21))
8014 as_bad_where (fixP->fx_file, fixP->fx_line,
8015 _("conditional branch out of range"));
8016 insn = get_aarch64_insn (buf);
8017 insn |= encode_cond_branch_ofs_19 (value >> 2);
8018 put_aarch64_insn (buf, insn);
8019 }
8020 break;
8021
8022 case BFD_RELOC_AARCH64_TSTBR14:
8023 if (fixP->fx_done || !seg->use_rela_p)
8024 {
8025 if (value & 3)
8026 as_bad_where (fixP->fx_file, fixP->fx_line,
8027 _("conditional branch target not word aligned"));
8028 if (signed_overflow (value, 16))
8029 as_bad_where (fixP->fx_file, fixP->fx_line,
8030 _("conditional branch out of range"));
8031 insn = get_aarch64_insn (buf);
8032 insn |= encode_tst_branch_ofs_14 (value >> 2);
8033 put_aarch64_insn (buf, insn);
8034 }
8035 break;
8036
8037 case BFD_RELOC_AARCH64_CALL26:
8038 case BFD_RELOC_AARCH64_JUMP26:
8039 if (fixP->fx_done || !seg->use_rela_p)
8040 {
8041 if (value & 3)
8042 as_bad_where (fixP->fx_file, fixP->fx_line,
8043 _("branch target not word aligned"));
8044 if (signed_overflow (value, 28))
8045 as_bad_where (fixP->fx_file, fixP->fx_line,
8046 _("branch out of range"));
8047 insn = get_aarch64_insn (buf);
8048 insn |= encode_branch_ofs_26 (value >> 2);
8049 put_aarch64_insn (buf, insn);
8050 }
8051 break;
8052
8053 case BFD_RELOC_AARCH64_MOVW_G0:
8054 case BFD_RELOC_AARCH64_MOVW_G0_NC:
8055 case BFD_RELOC_AARCH64_MOVW_G0_S:
8056 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
8057 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8058 case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
8059 scale = 0;
8060 goto movw_common;
8061 case BFD_RELOC_AARCH64_MOVW_G1:
8062 case BFD_RELOC_AARCH64_MOVW_G1_NC:
8063 case BFD_RELOC_AARCH64_MOVW_G1_S:
8064 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8065 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8066 case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
8067 scale = 16;
8068 goto movw_common;
8069 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8070 scale = 0;
8071 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8072 /* Should always be exported to object file, see
8073 aarch64_force_relocation(). */
8074 gas_assert (!fixP->fx_done);
8075 gas_assert (seg->use_rela_p);
8076 goto movw_common;
8077 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8078 scale = 16;
8079 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8080 /* Should always be exported to object file, see
8081 aarch64_force_relocation(). */
8082 gas_assert (!fixP->fx_done);
8083 gas_assert (seg->use_rela_p);
8084 goto movw_common;
8085 case BFD_RELOC_AARCH64_MOVW_G2:
8086 case BFD_RELOC_AARCH64_MOVW_G2_NC:
8087 case BFD_RELOC_AARCH64_MOVW_G2_S:
8088 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8089 case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
8090 scale = 32;
8091 goto movw_common;
8092 case BFD_RELOC_AARCH64_MOVW_G3:
8093 case BFD_RELOC_AARCH64_MOVW_PREL_G3:
8094 scale = 48;
8095 movw_common:
8096 if (fixP->fx_done || !seg->use_rela_p)
8097 {
8098 insn = get_aarch64_insn (buf);
8099
8100 if (!fixP->fx_done)
8101 {
8102 /* REL signed addend must fit in 16 bits */
8103 if (signed_overflow (value, 16))
8104 as_bad_where (fixP->fx_file, fixP->fx_line,
8105 _("offset out of range"));
8106 }
8107 else
8108 {
8109 /* Check for overflow and scale. */
8110 switch (fixP->fx_r_type)
8111 {
8112 case BFD_RELOC_AARCH64_MOVW_G0:
8113 case BFD_RELOC_AARCH64_MOVW_G1:
8114 case BFD_RELOC_AARCH64_MOVW_G2:
8115 case BFD_RELOC_AARCH64_MOVW_G3:
8116 case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8117 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8118 if (unsigned_overflow (value, scale + 16))
8119 as_bad_where (fixP->fx_file, fixP->fx_line,
8120 _("unsigned value out of range"));
8121 break;
8122 case BFD_RELOC_AARCH64_MOVW_G0_S:
8123 case BFD_RELOC_AARCH64_MOVW_G1_S:
8124 case BFD_RELOC_AARCH64_MOVW_G2_S:
8125 case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8126 case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8127 case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8128 /* NOTE: We can only come here with movz or movn. */
8129 if (signed_overflow (value, scale + 16))
8130 as_bad_where (fixP->fx_file, fixP->fx_line,
8131 _("signed value out of range"));
8132 if (value < 0)
8133 {
8134 /* Force use of MOVN. */
8135 value = ~value;
8136 insn = reencode_movzn_to_movn (insn);
8137 }
8138 else
8139 {
8140 /* Force use of MOVZ. */
8141 insn = reencode_movzn_to_movz (insn);
8142 }
8143 break;
8144 default:
8145 /* Unchecked relocations. */
8146 break;
8147 }
8148 value >>= scale;
8149 }
8150
8151 /* Insert value into MOVN/MOVZ/MOVK instruction. */
8152 insn |= encode_movw_imm (value & 0xffff);
8153
8154 put_aarch64_insn (buf, insn);
8155 }
8156 break;
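/* For example, a locally resolved "movz x0, #:abs_g0_s:sym" where sym
   evaluates to -3: the value fits in signed 16 bits and is negative, so
   the instruction is rewritten as MOVN with immediate ~(-3) = 2,
   i.e. MOVN x0, #2, which still loads -3. */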
8157
8158 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8159 fixP->fx_r_type = (ilp32_p
8160 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
8161 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
8162 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8163 /* Should always be exported to object file, see
8164 aarch64_force_relocation(). */
8165 gas_assert (!fixP->fx_done);
8166 gas_assert (seg->use_rela_p);
8167 break;
8168
8169 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8170 fixP->fx_r_type = (ilp32_p
8171 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
8172 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
8173 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8174 /* Should always be exported to object file, see
8175 aarch64_force_relocation(). */
8176 gas_assert (!fixP->fx_done);
8177 gas_assert (seg->use_rela_p);
8178 break;
8179
8180 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8181 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8182 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8183 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8184 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8185 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8186 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8187 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8188 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8189 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8190 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8191 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8192 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8193 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8194 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8195 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8196 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8197 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8198 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8199 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8200 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8201 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8202 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8203 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8204 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8205 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8206 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8207 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8208 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8209 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8210 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8211 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8212 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8213 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8214 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8215 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8216 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8217 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8218 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8219 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8220 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8221 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8222 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8223 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8224 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8225 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8226 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8227 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8228 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8229 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8230 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8231 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8232 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8233 /* Should always be exported to object file, see
8234 aarch64_force_relocation(). */
8235 gas_assert (!fixP->fx_done);
8236 gas_assert (seg->use_rela_p);
8237 break;
8238
8239 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8240 /* Should always be exported to object file, see
8241 aarch64_force_relocation(). */
8242 fixP->fx_r_type = (ilp32_p
8243 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
8244 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
8245 gas_assert (!fixP->fx_done);
8246 gas_assert (seg->use_rela_p);
8247 break;
8248
8249 case BFD_RELOC_AARCH64_ADD_LO12:
8250 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8251 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8252 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8253 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8254 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8255 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8256 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8257 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8258 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8259 case BFD_RELOC_AARCH64_LDST128_LO12:
8260 case BFD_RELOC_AARCH64_LDST16_LO12:
8261 case BFD_RELOC_AARCH64_LDST32_LO12:
8262 case BFD_RELOC_AARCH64_LDST64_LO12:
8263 case BFD_RELOC_AARCH64_LDST8_LO12:
8264 /* Should always be exported to object file, see
8265 aarch64_force_relocation(). */
8266 gas_assert (!fixP->fx_done);
8267 gas_assert (seg->use_rela_p);
8268 break;
8269
8270 case BFD_RELOC_AARCH64_TLSDESC_ADD:
8271 case BFD_RELOC_AARCH64_TLSDESC_CALL:
8272 case BFD_RELOC_AARCH64_TLSDESC_LDR:
8273 break;
8274
8275 case BFD_RELOC_UNUSED:
8276 /* An error will already have been reported. */
8277 break;
8278
8279 default:
8280 as_bad_where (fixP->fx_file, fixP->fx_line,
8281 _("unexpected %s fixup"),
8282 bfd_get_reloc_code_name (fixP->fx_r_type));
8283 break;
8284 }
8285
8286 apply_fix_return:
8287 /* Free the allocated struct aarch64_inst.
8288 N.B. currently only a very limited number of fix-up types actually use
8289 this field, so the impact on performance should be minimal. */
8290 free (fixP->tc_fix_data.inst);
8291
8292 return;
8293 }
8294
8295 /* Translate internal representation of relocation info to BFD target
8296 format. */
8297
8298 arelent *
8299 tc_gen_reloc (asection * section, fixS * fixp)
8300 {
8301 arelent *reloc;
8302 bfd_reloc_code_real_type code;
8303
8304 reloc = XNEW (arelent);
8305
8306 reloc->sym_ptr_ptr = XNEW (asymbol *);
8307 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8308 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8309
8310 if (fixp->fx_pcrel)
8311 {
8312 if (section->use_rela_p)
8313 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8314 else
8315 fixp->fx_offset = reloc->address;
8316 }
8317 reloc->addend = fixp->fx_offset;
8318
8319 code = fixp->fx_r_type;
8320 switch (code)
8321 {
8322 case BFD_RELOC_16:
8323 if (fixp->fx_pcrel)
8324 code = BFD_RELOC_16_PCREL;
8325 break;
8326
8327 case BFD_RELOC_32:
8328 if (fixp->fx_pcrel)
8329 code = BFD_RELOC_32_PCREL;
8330 break;
8331
8332 case BFD_RELOC_64:
8333 if (fixp->fx_pcrel)
8334 code = BFD_RELOC_64_PCREL;
8335 break;
8336
8337 default:
8338 break;
8339 }
8340
8341 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8342 if (reloc->howto == NULL)
8343 {
8344 as_bad_where (fixp->fx_file, fixp->fx_line,
8345 _("cannot represent %s relocation in this object file"
8346 " format"),
8347 bfd_get_reloc_code_name (code));
8348 return NULL;
8349 }
8350
8351 return reloc;
8352 }
8353
8354 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8355
8356 void
8357 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8358 {
8359 bfd_reloc_code_real_type type;
8360 int pcrel = 0;
8361
8362 /* Pick a reloc.
8363 FIXME: @@ Should look at CPU word size. */
8364 switch (size)
8365 {
8366 case 1:
8367 type = BFD_RELOC_8;
8368 break;
8369 case 2:
8370 type = BFD_RELOC_16;
8371 break;
8372 case 4:
8373 type = BFD_RELOC_32;
8374 break;
8375 case 8:
8376 type = BFD_RELOC_64;
8377 break;
8378 default:
8379 as_bad (_("cannot do %u-byte relocation"), size);
8380 type = BFD_RELOC_UNUSED;
8381 break;
8382 }
8383
8384 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8385 }
8386
8387 int
8388 aarch64_force_relocation (struct fix *fixp)
8389 {
8390 switch (fixp->fx_r_type)
8391 {
8392 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
8393 /* Perform these "immediate" internal relocations
8394 even if the symbol is extern or weak. */
8395 return 0;
8396
8397 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8398 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8399 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8400 /* Pseudo relocs that need to be fixed up according to
8401 ilp32_p. */
8402 return 0;
8403
8404 case BFD_RELOC_AARCH64_ADD_LO12:
8405 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8406 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8407 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8408 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8409 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8410 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8411 case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8412 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8413 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8414 case BFD_RELOC_AARCH64_LDST128_LO12:
8415 case BFD_RELOC_AARCH64_LDST16_LO12:
8416 case BFD_RELOC_AARCH64_LDST32_LO12:
8417 case BFD_RELOC_AARCH64_LDST64_LO12:
8418 case BFD_RELOC_AARCH64_LDST8_LO12:
8419 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8420 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8421 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8422 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8423 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8424 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8425 case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8426 case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8427 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8428 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8429 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8430 case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8431 case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8432 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8433 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8434 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8435 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8436 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8437 case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8438 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8439 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8440 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8441 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8442 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8443 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8444 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8445 case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8446 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8447 case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8448 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8449 case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8450 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8451 case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8452 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8453 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8454 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8455 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8456 case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8457 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8458 case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8459 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8460 case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8461 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8462 case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8463 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8464 case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8465 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8466 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8467 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8468 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8469 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8470 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8471 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8472 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8473 /* Always leave these relocations for the linker. */
8474 return 1;
8475
8476 default:
8477 break;
8478 }
8479
8480 return generic_force_reloc (fixp);
8481 }
8482
8483 #ifdef OBJ_ELF
8484
8485 /* Implement md_after_parse_args. This is the earliest time we need to decide
8486 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8487
8488 void
8489 aarch64_after_parse_args (void)
8490 {
8491 if (aarch64_abi != AARCH64_ABI_NONE)
8492 return;
8493
8494 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8495 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8496 aarch64_abi = AARCH64_ABI_ILP32;
8497 else
8498 aarch64_abi = AARCH64_ABI_LP64;
8499 }
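/* For example, a DEFAULT_ARCH of "aarch64:32" selects the ILP32 ABI here,
   while a plain "aarch64" DEFAULT_ARCH selects LP64. */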
8500
8501 const char *
8502 elf64_aarch64_target_format (void)
8503 {
8504 #ifdef TE_CLOUDABI
8505 /* FIXME: What to do for ilp32_p ? */
8506 if (target_big_endian)
8507 return "elf64-bigaarch64-cloudabi";
8508 else
8509 return "elf64-littleaarch64-cloudabi";
8510 #else
8511 if (target_big_endian)
8512 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8513 else
8514 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8515 #endif
8516 }
8517
8518 void
8519 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
8520 {
8521 elf_frob_symbol (symp, puntp);
8522 }
8523 #endif
8524
8525 /* MD interface: Finalization. */
8526
8527 /* A good place to do this, although this was probably not intended
8528 for this kind of use. We need to dump the literal pool before
8529 references are made to a null symbol pointer. */
8530
8531 void
8532 aarch64_cleanup (void)
8533 {
8534 literal_pool *pool;
8535
8536 for (pool = list_of_pools; pool; pool = pool->next)
8537 {
8538 /* Put it at the end of the relevant section. */
8539 subseg_set (pool->section, pool->sub_section);
8540 s_ltorg (0);
8541 }
8542 }
8543
8544 #ifdef OBJ_ELF
8545 /* Remove any excess mapping symbols generated for alignment frags in
8546 SEC. We may have created a mapping symbol before a zero byte
8547 alignment; remove it if there's a mapping symbol after the
8548 alignment. */
8549 static void
8550 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
8551 void *dummy ATTRIBUTE_UNUSED)
8552 {
8553 segment_info_type *seginfo = seg_info (sec);
8554 fragS *fragp;
8555
8556 if (seginfo == NULL || seginfo->frchainP == NULL)
8557 return;
8558
8559 for (fragp = seginfo->frchainP->frch_root;
8560 fragp != NULL; fragp = fragp->fr_next)
8561 {
8562 symbolS *sym = fragp->tc_frag_data.last_map;
8563 fragS *next = fragp->fr_next;
8564
8565 /* Variable-sized frags have been converted to fixed size by
8566 this point. But if this was variable-sized to start with,
8567 there will be a fixed-size frag after it. So don't handle
8568 next == NULL. */
8569 if (sym == NULL || next == NULL)
8570 continue;
8571
8572 if (S_GET_VALUE (sym) < next->fr_address)
8573 /* Not at the end of this frag. */
8574 continue;
8575 know (S_GET_VALUE (sym) == next->fr_address);
8576
8577 do
8578 {
8579 if (next->tc_frag_data.first_map != NULL)
8580 {
8581 /* Next frag starts with a mapping symbol. Discard this
8582 one. */
8583 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
8584 break;
8585 }
8586
8587 if (next->fr_next == NULL)
8588 {
8589 /* This mapping symbol is at the end of the section. Discard
8590 it. */
8591 know (next->fr_fix == 0 && next->fr_var == 0);
8592 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
8593 break;
8594 }
8595
8596 /* As long as we have empty frags without any mapping symbols,
8597 keep looking. */
8598 /* If the next frag is non-empty and does not start with a
8599 mapping symbol, then this mapping symbol is required. */
8600 if (next->fr_address != next->fr_next->fr_address)
8601 break;
8602
8603 next = next->fr_next;
8604 }
8605 while (next != NULL);
8606 }
8607 }
8608 #endif
8609
8610 /* Adjust the symbol table. */
8611
8612 void
8613 aarch64_adjust_symtab (void)
8614 {
8615 #ifdef OBJ_ELF
8616 /* Remove any overlapping mapping symbols generated by alignment frags. */
8617 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
8618 /* Now do generic ELF adjustments. */
8619 elf_adjust_symtab ();
8620 #endif
8621 }
8622
8623 static void
8624 checked_hash_insert (htab_t table, const char *key, void *value)
8625 {
8626 str_hash_insert (table, key, value, 0);
8627 }
8628
8629 static void
8630 sysreg_hash_insert (htab_t table, const char *key, void *value)
8631 {
8632 gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
8633 checked_hash_insert (table, key, value);
8634 }
8635
8636 static void
8637 fill_instruction_hash_table (void)
8638 {
8639 aarch64_opcode *opcode = aarch64_opcode_table;
8640
8641 while (opcode->name != NULL)
8642 {
8643 templates *templ, *new_templ;
8644 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
8645
8646 new_templ = XNEW (templates);
8647 new_templ->opcode = opcode;
8648 new_templ->next = NULL;
8649
8650 if (!templ)
8651 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8652 else
8653 {
8654 new_templ->next = templ->next;
8655 templ->next = new_templ;
8656 }
8657 ++opcode;
8658 }
8659 }
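/* For example, every aarch64_opcode_table entry named "add" (register,
   immediate and SIMD forms) ends up on a single chain reachable from the
   one "add" key in aarch64_ops_hsh; a new entry is linked in immediately
   after the existing head, so the first-inserted template stays first. */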
8660
8661 static inline void
8662 convert_to_upper (char *dst, const char *src, size_t num)
8663 {
8664 unsigned int i;
8665 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8666 *dst = TOUPPER (*src);
8667 *dst = '\0';
8668 }
8669
8670 /* Assume STR points to a lower-case string; allocate, convert and return
8671 the corresponding upper-case string. */
8672 static inline const char*
8673 get_upper_str (const char *str)
8674 {
8675 char *ret;
8676 size_t len = strlen (str);
8677 ret = XNEWVEC (char, len + 1);
8678 convert_to_upper (ret, str, len);
8679 return ret;
8680 }
8681
8682 /* MD interface: Initialization. */
8683
8684 void
8685 md_begin (void)
8686 {
8687 unsigned mach;
8688 unsigned int i;
8689
8690 aarch64_ops_hsh = str_htab_create ();
8691 aarch64_cond_hsh = str_htab_create ();
8692 aarch64_shift_hsh = str_htab_create ();
8693 aarch64_sys_regs_hsh = str_htab_create ();
8694 aarch64_pstatefield_hsh = str_htab_create ();
8695 aarch64_sys_regs_ic_hsh = str_htab_create ();
8696 aarch64_sys_regs_dc_hsh = str_htab_create ();
8697 aarch64_sys_regs_at_hsh = str_htab_create ();
8698 aarch64_sys_regs_tlbi_hsh = str_htab_create ();
8699 aarch64_sys_regs_sr_hsh = str_htab_create ();
8700 aarch64_reg_hsh = str_htab_create ();
8701 aarch64_barrier_opt_hsh = str_htab_create ();
8702 aarch64_nzcv_hsh = str_htab_create ();
8703 aarch64_pldop_hsh = str_htab_create ();
8704 aarch64_hint_opt_hsh = str_htab_create ();
8705
8706 fill_instruction_hash_table ();
8707
8708 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8709 sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8710 (void *) (aarch64_sys_regs + i));
8711
8712 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8713 sysreg_hash_insert (aarch64_pstatefield_hsh,
8714 aarch64_pstatefields[i].name,
8715 (void *) (aarch64_pstatefields + i));
8716
8717 for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8718 sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
8719 aarch64_sys_regs_ic[i].name,
8720 (void *) (aarch64_sys_regs_ic + i));
8721
8722 for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8723 sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
8724 aarch64_sys_regs_dc[i].name,
8725 (void *) (aarch64_sys_regs_dc + i));
8726
8727 for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8728 sysreg_hash_insert (aarch64_sys_regs_at_hsh,
8729 aarch64_sys_regs_at[i].name,
8730 (void *) (aarch64_sys_regs_at + i));
8731
8732 for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8733 sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
8734 aarch64_sys_regs_tlbi[i].name,
8735 (void *) (aarch64_sys_regs_tlbi + i));
8736
8737 for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
8738 sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
8739 aarch64_sys_regs_sr[i].name,
8740 (void *) (aarch64_sys_regs_sr + i));
8741
8742 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8743 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8744 (void *) (reg_names + i));
8745
8746 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8747 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8748 (void *) (nzcv_names + i));
8749
8750 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8751 {
8752 const char *name = aarch64_operand_modifiers[i].name;
8753 checked_hash_insert (aarch64_shift_hsh, name,
8754 (void *) (aarch64_operand_modifiers + i));
8755 /* Also hash the name in the upper case. */
8756 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8757 (void *) (aarch64_operand_modifiers + i));
8758 }
8759
8760 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8761 {
8762 unsigned int j;
8763 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8764 the same condition code. */
8765 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8766 {
8767 const char *name = aarch64_conds[i].names[j];
8768 if (name == NULL)
8769 break;
8770 checked_hash_insert (aarch64_cond_hsh, name,
8771 (void *) (aarch64_conds + i));
8772 /* Also hash the name in the upper case. */
8773 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8774 (void *) (aarch64_conds + i));
8775 }
8776 }
8777
8778 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8779 {
8780 const char *name = aarch64_barrier_options[i].name;
8781 /* Skip xx00 - the unallocated values of the barrier option field. */
8782 if ((i & 0x3) == 0)
8783 continue;
8784 checked_hash_insert (aarch64_barrier_opt_hsh, name,
8785 (void *) (aarch64_barrier_options + i));
8786 /* Also hash the name in the upper case. */
8787 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8788 (void *) (aarch64_barrier_options + i));
8789 }
8790
8791 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8792 {
8793 const char* name = aarch64_prfops[i].name;
8794 /* Skip the unallocated hint encodings. */
8795 if (name == NULL)
8796 continue;
8797 checked_hash_insert (aarch64_pldop_hsh, name,
8798 (void *) (aarch64_prfops + i));
8799 /* Also hash the name in the upper case. */
8800 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8801 (void *) (aarch64_prfops + i));
8802 }
8803
8804 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8805 {
8806 const char* name = aarch64_hint_options[i].name;
8807 const char* upper_name = get_upper_str(name);
8808
8809 checked_hash_insert (aarch64_hint_opt_hsh, name,
8810 (void *) (aarch64_hint_options + i));
8811
8812 /* Also hash the name in the upper case if not the same. */
8813 if (strcmp (name, upper_name) != 0)
8814 checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
8815 (void *) (aarch64_hint_options + i));
8816 }
8817
8818 /* Set the cpu variant based on the command-line options. */
8819 if (!mcpu_cpu_opt)
8820 mcpu_cpu_opt = march_cpu_opt;
8821
8822 if (!mcpu_cpu_opt)
8823 mcpu_cpu_opt = &cpu_default;
8824
8825 cpu_variant = *mcpu_cpu_opt;
8826
8827 /* Record the CPU type. */
8828 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8829
8830 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8831 }
8832
8833 /* Command line processing. */
8834
8835 const char *md_shortopts = "m:";
8836
8837 #ifdef AARCH64_BI_ENDIAN
8838 #define OPTION_EB (OPTION_MD_BASE + 0)
8839 #define OPTION_EL (OPTION_MD_BASE + 1)
8840 #else
8841 #if TARGET_BYTES_BIG_ENDIAN
8842 #define OPTION_EB (OPTION_MD_BASE + 0)
8843 #else
8844 #define OPTION_EL (OPTION_MD_BASE + 1)
8845 #endif
8846 #endif
8847
8848 struct option md_longopts[] = {
8849 #ifdef OPTION_EB
8850 {"EB", no_argument, NULL, OPTION_EB},
8851 #endif
8852 #ifdef OPTION_EL
8853 {"EL", no_argument, NULL, OPTION_EL},
8854 #endif
8855 {NULL, no_argument, NULL, 0}
8856 };
8857
8858 size_t md_longopts_size = sizeof (md_longopts);
8859
8860 struct aarch64_option_table
8861 {
8862 const char *option; /* Option name to match. */
8863 const char *help; /* Help information. */
8864 int *var; /* Variable to change. */
8865 int value; /* What to change it to. */
8866 char *deprecated; /* If non-null, print this message. */
8867 };
8868
8869 static struct aarch64_option_table aarch64_opts[] = {
8870 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
8871 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
8872 NULL},
8873 #ifdef DEBUG_AARCH64
8874 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
8875 #endif /* DEBUG_AARCH64 */
8876 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
8877 NULL},
8878 {"mno-verbose-error", N_("do not output verbose error messages"),
8879 &verbose_error_p, 0, NULL},
8880 {NULL, NULL, NULL, 0, NULL}
8881 };
8882
8883 struct aarch64_cpu_option_table
8884 {
8885 const char *name;
8886 const aarch64_feature_set value;
8887 /* The canonical name of the CPU, or NULL to use NAME converted to upper
8888 case. */
8889 const char *canonical_name;
8890 };
8891
8892 /* This list should, at a minimum, contain all the cpu names
8893 recognized by GCC. */
8894 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
8895 {"all", AARCH64_ANY, NULL},
8896 {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
8897 AARCH64_FEATURE_CRC), "Cortex-A34"},
8898 {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
8899 AARCH64_FEATURE_CRC), "Cortex-A35"},
8900 {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
8901 AARCH64_FEATURE_CRC), "Cortex-A53"},
8902 {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
8903 AARCH64_FEATURE_CRC), "Cortex-A57"},
8904 {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
8905 AARCH64_FEATURE_CRC), "Cortex-A72"},
8906 {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
8907 AARCH64_FEATURE_CRC), "Cortex-A73"},
8908 {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8909 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
8910 "Cortex-A55"},
8911 {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8912 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
8913 "Cortex-A75"},
8914 {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8915 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
8916 "Cortex-A76"},
8917 {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8918 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
8919 | AARCH64_FEATURE_DOTPROD
8920 | AARCH64_FEATURE_SSBS),
8921 "Cortex-A76AE"},
8922 {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8923 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
8924 | AARCH64_FEATURE_DOTPROD
8925 | AARCH64_FEATURE_SSBS),
8926 "Cortex-A77"},
8927 {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8928 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
8929 | AARCH64_FEATURE_DOTPROD
8930 | AARCH64_FEATURE_SSBS),
8931 "Cortex-A65"},
8932 {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8933 AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
8934 | AARCH64_FEATURE_DOTPROD
8935 | AARCH64_FEATURE_SSBS),
8936 "Cortex-A65AE"},
8937 {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8938 AARCH64_FEATURE_F16
8939 | AARCH64_FEATURE_RCPC
8940 | AARCH64_FEATURE_DOTPROD
8941 | AARCH64_FEATURE_SSBS
8942 | AARCH64_FEATURE_PROFILE),
8943 "Cortex-A78"},
8944 {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8945 AARCH64_FEATURE_F16
8946 | AARCH64_FEATURE_RCPC
8947 | AARCH64_FEATURE_DOTPROD
8948 | AARCH64_FEATURE_SSBS
8949 | AARCH64_FEATURE_PROFILE),
8950 "Cortex-A78AE"},
8951 {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8952 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
8953 | AARCH64_FEATURE_DOTPROD
8954 | AARCH64_FEATURE_PROFILE),
8955 "Ares"},
8956 {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
8957 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8958 "Samsung Exynos M1"},
8959 {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
8960 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
8961 | AARCH64_FEATURE_RDMA),
8962 "Qualcomm Falkor"},
8963 {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8964 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
8965 | AARCH64_FEATURE_DOTPROD
8966 | AARCH64_FEATURE_SSBS),
8967 "Neoverse E1"},
8968 {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8969 AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
8970 | AARCH64_FEATURE_DOTPROD
8971 | AARCH64_FEATURE_PROFILE),
8972 "Neoverse N1"},
8973 {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
8974 AARCH64_FEATURE_BFLOAT16
8975 | AARCH64_FEATURE_I8MM
8976 | AARCH64_FEATURE_F16
8977 | AARCH64_FEATURE_SVE
8978 | AARCH64_FEATURE_SVE2
8979 | AARCH64_FEATURE_SVE2_BITPERM
8980 | AARCH64_FEATURE_MEMTAG
8981 | AARCH64_FEATURE_RNG),
8982 "Neoverse N2"},
8983 {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
8984 AARCH64_FEATURE_PROFILE
8985 | AARCH64_FEATURE_CVADP
8986 | AARCH64_FEATURE_SVE
8987 | AARCH64_FEATURE_SSBS
8988 | AARCH64_FEATURE_RNG
8989 | AARCH64_FEATURE_F16
8990 | AARCH64_FEATURE_BFLOAT16
8991 | AARCH64_FEATURE_I8MM), "Neoverse V1"},
8992 {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
8993 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
8994 | AARCH64_FEATURE_RDMA),
8995 "Qualcomm QDF24XX"},
8996 {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
8997 AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
8998 "Qualcomm Saphira"},
8999 {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
9000 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
9001 "Cavium ThunderX"},
9002 {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
9003 AARCH64_FEATURE_CRYPTO),
9004 "Broadcom Vulcan"},
9005 /* 'xgene-1' is an older name for 'xgene1'; it was used in
9006 earlier releases and is superseded by 'xgene1' in all
9007 tools. */
9008 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
9009 {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
9010 {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
9011 AARCH64_FEATURE_CRC), "APM X-Gene 2"},
9012 {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
9013 {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
9014 AARCH64_FEATURE_F16
9015 | AARCH64_FEATURE_RCPC
9016 | AARCH64_FEATURE_DOTPROD
9017 | AARCH64_FEATURE_SSBS
9018 | AARCH64_FEATURE_PROFILE),
9019 "Cortex-X1"},
9020 {"generic", AARCH64_ARCH_V8, NULL},
9021
9022 {NULL, AARCH64_ARCH_NONE, NULL}
9023 };
9024
9025 struct aarch64_arch_option_table
9026 {
9027 const char *name;
9028 const aarch64_feature_set value;
9029 };
9030
9031 /* This list should, at a minimum, contain all the architecture names
9032 recognized by GCC. */
9033 static const struct aarch64_arch_option_table aarch64_archs[] = {
9034 {"all", AARCH64_ANY},
9035 {"armv8-a", AARCH64_ARCH_V8},
9036 {"armv8.1-a", AARCH64_ARCH_V8_1},
9037 {"armv8.2-a", AARCH64_ARCH_V8_2},
9038 {"armv8.3-a", AARCH64_ARCH_V8_3},
9039 {"armv8.4-a", AARCH64_ARCH_V8_4},
9040 {"armv8.5-a", AARCH64_ARCH_V8_5},
9041 {"armv8.6-a", AARCH64_ARCH_V8_6},
9042 {"armv8-r", AARCH64_ARCH_V8_R},
9043 {NULL, AARCH64_ARCH_NONE}
9044 };
9045
9046 /* ISA extensions. */
9047 struct aarch64_option_cpu_value_table
9048 {
9049 const char *name;
9050 const aarch64_feature_set value;
9051 const aarch64_feature_set require; /* Feature dependencies. */
9052 };
9053
9054 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
9055 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
9056 AARCH64_ARCH_NONE},
9057 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
9058 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
9059 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
9060 AARCH64_ARCH_NONE},
9061 {"lse", AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
9062 AARCH64_ARCH_NONE},
9063 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
9064 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
9065 {"pan", AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
9066 AARCH64_ARCH_NONE},
9067 {"lor", AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
9068 AARCH64_ARCH_NONE},
9069 {"ras", AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
9070 AARCH64_ARCH_NONE},
9071 {"rdma", AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
9072 AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
9073 {"fp16", AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
9074 AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
9075 {"fp16fml", AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
9076 AARCH64_FEATURE (AARCH64_FEATURE_FP
9077 | AARCH64_FEATURE_F16, 0)},
9078 {"profile", AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
9079 AARCH64_ARCH_NONE},
9080 {"sve", AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
9081 AARCH64_FEATURE (AARCH64_FEATURE_F16
9082 | AARCH64_FEATURE_SIMD
9083 | AARCH64_FEATURE_COMPNUM, 0)},
9084 {"tme", AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
9085 AARCH64_ARCH_NONE},
9086 {"compnum", AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
9087 AARCH64_FEATURE (AARCH64_FEATURE_F16
9088 | AARCH64_FEATURE_SIMD, 0)},
9089 {"rcpc", AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
9090 AARCH64_ARCH_NONE},
9091 {"dotprod", AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
9092 AARCH64_ARCH_NONE},
9093 {"sha2", AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
9094 AARCH64_ARCH_NONE},
9095 {"sb", AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
9096 AARCH64_ARCH_NONE},
9097 {"predres", AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
9098 AARCH64_ARCH_NONE},
9099 {"aes", AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
9100 AARCH64_ARCH_NONE},
9101 {"sm4", AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
9102 AARCH64_ARCH_NONE},
9103 {"sha3", AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
9104 AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
9105 {"rng", AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
9106 AARCH64_ARCH_NONE},
9107 {"ssbs", AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
9108 AARCH64_ARCH_NONE},
9109 {"memtag", AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
9110 AARCH64_ARCH_NONE},
9111 {"sve2", AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
9112 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9113 {"sve2-sm4", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
9114 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9115 | AARCH64_FEATURE_SM4, 0)},
9116 {"sve2-aes", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
9117 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9118 | AARCH64_FEATURE_AES, 0)},
9119 {"sve2-sha3", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
9120 AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9121 | AARCH64_FEATURE_SHA3, 0)},
9122 {"sve2-bitperm", AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
9123 AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
9124 {"bf16", AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
9125 AARCH64_ARCH_NONE},
9126 {"i8mm", AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
9127 AARCH64_ARCH_NONE},
9128 {"f32mm", AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
9129 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9130 {"f64mm", AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
9131 AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9132 {NULL, AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
9133 };
9134
9135 struct aarch64_long_option_table
9136 {
9137 const char *option; /* Substring to match. */
9138 const char *help; /* Help information. */
9139 int (*func) (const char *subopt); /* Function to decode sub-option. */
9140 char *deprecated; /* If non-null, print this message. */
9141 };
9142
9143 /* Transitive closure of features depending on set. */
9144 static aarch64_feature_set
9145 aarch64_feature_disable_set (aarch64_feature_set set)
9146 {
9147 const struct aarch64_option_cpu_value_table *opt;
9148 aarch64_feature_set prev = 0;
9149
9150 while (prev != set) {
9151 prev = set;
9152 for (opt = aarch64_features; opt->name != NULL; opt++)
9153 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9154 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9155 }
9156 return set;
9157 }
9158
9159 /* Transitive closure of dependencies of set. */
9160 static aarch64_feature_set
9161 aarch64_feature_enable_set (aarch64_feature_set set)
9162 {
9163 const struct aarch64_option_cpu_value_table *opt;
9164 aarch64_feature_set prev = 0;
9165
9166 while (prev != set) {
9167 prev = set;
9168 for (opt = aarch64_features; opt->name != NULL; opt++)
9169 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9170 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9171 }
9172 return set;
9173 }
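/* For example, enabling "sve" also pulls in its dependencies f16, simd and
   compnum, and transitively fp; conversely, disabling "fp" also disables
   simd, fp16, compnum, sve and everything else that transitively requires
   it. */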
9174
9175 static int
9176 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9177 bfd_boolean ext_only)
9178 {
9179 /* We insist on extensions being added before being removed. We achieve
9180 this by using the ADDING_VALUE variable to indicate whether we are
9181 adding an extension (1) or removing it (0) and only allowing it to
9182 change in the order -1 -> 1 -> 0. */
9183 int adding_value = -1;
9184 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9185
9186 /* Copy the feature set, so that we can modify it. */
9187 *ext_set = **opt_p;
9188 *opt_p = ext_set;
9189
9190 while (str != NULL && *str != 0)
9191 {
9192 const struct aarch64_option_cpu_value_table *opt;
9193 const char *ext = NULL;
9194 int optlen;
9195
9196 if (!ext_only)
9197 {
9198 if (*str != '+')
9199 {
9200 as_bad (_("invalid architectural extension"));
9201 return 0;
9202 }
9203
9204 ext = strchr (++str, '+');
9205 }
9206
9207 if (ext != NULL)
9208 optlen = ext - str;
9209 else
9210 optlen = strlen (str);
9211
9212 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
9213 {
9214 if (adding_value != 0)
9215 adding_value = 0;
9216 optlen -= 2;
9217 str += 2;
9218 }
9219 else if (optlen > 0)
9220 {
9221 if (adding_value == -1)
9222 adding_value = 1;
9223 else if (adding_value != 1)
9224 {
9225 as_bad (_("must specify extensions to add before specifying "
9226 "those to remove"));
9227 return 0;
9228 }
9229 }
9230
9231 if (optlen == 0)
9232 {
9233 as_bad (_("missing architectural extension"));
9234 return 0;
9235 }
9236
9237 gas_assert (adding_value != -1);
9238
9239 for (opt = aarch64_features; opt->name != NULL; opt++)
9240 if (strncmp (opt->name, str, optlen) == 0)
9241 {
9242 aarch64_feature_set set;
9243
9244 /* Add or remove the extension. */
9245 if (adding_value)
9246 {
9247 set = aarch64_feature_enable_set (opt->value);
9248 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9249 }
9250 else
9251 {
9252 set = aarch64_feature_disable_set (opt->value);
9253 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9254 }
9255 break;
9256 }
9257
9258 if (opt->name == NULL)
9259 {
9260 as_bad (_("unknown architectural extension `%s'"), str);
9261 return 0;
9262 }
9263
9264 str = ext;
9265 }
9266
9267 return 1;
9268 }
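/* For example, "-mcpu=cortex-a53+crc+nocrypto" first adds crc and then
   removes crypto, which is accepted; the reverse order "+nocrypto+crc" is
   rejected with "must specify extensions to add before specifying those
   to remove". */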
9269
9270 static int
9271 aarch64_parse_cpu (const char *str)
9272 {
9273 const struct aarch64_cpu_option_table *opt;
9274 const char *ext = strchr (str, '+');
9275 size_t optlen;
9276
9277 if (ext != NULL)
9278 optlen = ext - str;
9279 else
9280 optlen = strlen (str);
9281
9282 if (optlen == 0)
9283 {
9284 as_bad (_("missing cpu name `%s'"), str);
9285 return 0;
9286 }
9287
9288 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9289 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9290 {
9291 mcpu_cpu_opt = &opt->value;
9292 if (ext != NULL)
9293 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
9294
9295 return 1;
9296 }
9297
9298 as_bad (_("unknown cpu `%s'"), str);
9299 return 0;
9300 }
9301
9302 static int
9303 aarch64_parse_arch (const char *str)
9304 {
9305 const struct aarch64_arch_option_table *opt;
9306 const char *ext = strchr (str, '+');
9307 size_t optlen;
9308
9309 if (ext != NULL)
9310 optlen = ext - str;
9311 else
9312 optlen = strlen (str);
9313
9314 if (optlen == 0)
9315 {
9316 as_bad (_("missing architecture name `%s'"), str);
9317 return 0;
9318 }
9319
9320 for (opt = aarch64_archs; opt->name != NULL; opt++)
9321 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9322 {
9323 march_cpu_opt = &opt->value;
9324 if (ext != NULL)
9325 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
9326
9327 return 1;
9328 }
9329
9330 as_bad (_("unknown architecture `%s'\n"), str);
9331 return 0;
9332 }
9333
9334 /* ABIs. */
9335 struct aarch64_option_abi_value_table
9336 {
9337 const char *name;
9338 enum aarch64_abi_type value;
9339 };
9340
9341 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
9342 {"ilp32", AARCH64_ABI_ILP32},
9343 {"lp64", AARCH64_ABI_LP64},
9344 };
9345
9346 static int
9347 aarch64_parse_abi (const char *str)
9348 {
9349 unsigned int i;
9350
9351 if (str[0] == '\0')
9352 {
9353 as_bad (_("missing abi name `%s'"), str);
9354 return 0;
9355 }
9356
9357 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9358 if (strcmp (str, aarch64_abis[i].name) == 0)
9359 {
9360 aarch64_abi = aarch64_abis[i].value;
9361 return 1;
9362 }
9363
9364 as_bad (_("unknown abi `%s'\n"), str);
9365 return 0;
9366 }
9367
9368 static struct aarch64_long_option_table aarch64_long_opts[] = {
9369 #ifdef OBJ_ELF
9370 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
9371 aarch64_parse_abi, NULL},
9372 #endif /* OBJ_ELF */
9373 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
9374 aarch64_parse_cpu, NULL},
9375 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
9376 aarch64_parse_arch, NULL},
9377 {NULL, NULL, 0, NULL}
9378 };
9379
9380 int
9381 md_parse_option (int c, const char *arg)
9382 {
9383 struct aarch64_option_table *opt;
9384 struct aarch64_long_option_table *lopt;
9385
9386 switch (c)
9387 {
9388 #ifdef OPTION_EB
9389 case OPTION_EB:
9390 target_big_endian = 1;
9391 break;
9392 #endif
9393
9394 #ifdef OPTION_EL
9395 case OPTION_EL:
9396 target_big_endian = 0;
9397 break;
9398 #endif
9399
9400 case 'a':
9401 /* Listing option. Just ignore these, we don't support additional
9402 ones. */
9403 return 0;
9404
9405 default:
9406 for (opt = aarch64_opts; opt->option != NULL; opt++)
9407 {
9408 if (c == opt->option[0]
9409 && ((arg == NULL && opt->option[1] == 0)
9410 || streq (arg, opt->option + 1)))
9411 {
9412 /* If the option is deprecated, tell the user. */
9413 if (opt->deprecated != NULL)
9414 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
9415 arg ? arg : "", _(opt->deprecated));
9416
9417 if (opt->var != NULL)
9418 *opt->var = opt->value;
9419
9420 return 1;
9421 }
9422 }
9423
9424 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9425 {
9426 /* These options are expected to have an argument. */
9427 if (c == lopt->option[0]
9428 && arg != NULL
9429 && strncmp (arg, lopt->option + 1,
9430 strlen (lopt->option + 1)) == 0)
9431 {
9432 /* If the option is deprecated, tell the user. */
9433 if (lopt->deprecated != NULL)
9434 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
9435 _(lopt->deprecated));
9436
9437 /* Call the sub-option parser. */
9438 return lopt->func (arg + strlen (lopt->option) - 1);
9439 }
9440 }
9441
9442 return 0;
9443 }
9444
9445 return 1;
9446 }
9447
9448 void
9449 md_show_usage (FILE * fp)
9450 {
9451 struct aarch64_option_table *opt;
9452 struct aarch64_long_option_table *lopt;
9453
9454 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9455
9456 for (opt = aarch64_opts; opt->option != NULL; opt++)
9457 if (opt->help != NULL)
9458 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9459
9460 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9461 if (lopt->help != NULL)
9462 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
9463
9464 #ifdef OPTION_EB
9465 fprintf (fp, _("\
9466 -EB assemble code for a big-endian cpu\n"));
9467 #endif
9468
9469 #ifdef OPTION_EL
9470 fprintf (fp, _("\
9471 -EL assemble code for a little-endian cpu\n"));
9472 #endif
9473 }
9474
9475 /* Parse a .cpu directive. */
9476
9477 static void
9478 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9479 {
9480 const struct aarch64_cpu_option_table *opt;
9481 char saved_char;
9482 char *name;
9483 char *ext;
9484 size_t optlen;
9485
9486 name = input_line_pointer;
9487 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9488 input_line_pointer++;
9489 saved_char = *input_line_pointer;
9490 *input_line_pointer = 0;
9491
9492 ext = strchr (name, '+');
9493
9494 if (ext != NULL)
9495 optlen = ext - name;
9496 else
9497 optlen = strlen (name);
9498
9499 /* Skip the first "all" entry. */
9500 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9501 if (strlen (opt->name) == optlen
9502 && strncmp (name, opt->name, optlen) == 0)
9503 {
9504 mcpu_cpu_opt = &opt->value;
9505 if (ext != NULL)
9506 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9507 return;
9508
9509 cpu_variant = *mcpu_cpu_opt;
9510
9511 *input_line_pointer = saved_char;
9512 demand_empty_rest_of_line ();
9513 return;
9514 }
9515 as_bad (_("unknown cpu `%s'"), name);
9516 *input_line_pointer = saved_char;
9517 ignore_rest_of_line ();
9518 }
9519
9520
9521 /* Parse a .arch directive. */
9522
9523 static void
9524 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9525 {
9526 const struct aarch64_arch_option_table *opt;
9527 char saved_char;
9528 char *name;
9529 char *ext;
9530 size_t optlen;
9531
9532 name = input_line_pointer;
9533 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9534 input_line_pointer++;
9535 saved_char = *input_line_pointer;
9536 *input_line_pointer = 0;
9537
9538 ext = strchr (name, '+');
9539
9540 if (ext != NULL)
9541 optlen = ext - name;
9542 else
9543 optlen = strlen (name);
9544
9545 /* Skip the first "all" entry. */
9546 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9547 if (strlen (opt->name) == optlen
9548 && strncmp (name, opt->name, optlen) == 0)
9549 {
9550 mcpu_cpu_opt = &opt->value;
9551 if (ext != NULL)
9552 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9553 return;
9554
9555 cpu_variant = *mcpu_cpu_opt;
9556
9557 *input_line_pointer = saved_char;
9558 demand_empty_rest_of_line ();
9559 return;
9560 }
9561
9562 as_bad (_("unknown architecture `%s'\n"), name);
9563 *input_line_pointer = saved_char;
9564 ignore_rest_of_line ();
9565 }
9566
9567 /* Parse a .arch_extension directive. */
9568
9569 static void
9570 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9571 {
9572 char saved_char;
9573 char *ext = input_line_pointer;
9574
9575 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9576 input_line_pointer++;
9577 saved_char = *input_line_pointer;
9578 *input_line_pointer = 0;
9579
9580 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9581 return;
9582
9583 cpu_variant = *mcpu_cpu_opt;
9584
9585 *input_line_pointer = saved_char;
9586 demand_empty_rest_of_line ();
9587 }
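/* For example, ".arch_extension memtag" enables the MTE instructions on
   top of the current -mcpu/-march/.arch selection, and
   ".arch_extension nomemtag" disables them again; note that, unlike the
   command-line options, the extension name is given without a leading
   '+'. */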
9588
9589 /* Copy symbol information. */
9590
9591 void
9592 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
9593 {
9594 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
9595 }
9596
9597 #ifdef OBJ_ELF
9598 /* Same as elf_copy_symbol_attributes, but without copying st_other.
9599 This is needed so AArch64 specific st_other values can be independently
9600 specified for an IFUNC resolver (that is called by the dynamic linker)
9601 and the symbol it resolves (aliased to the resolver). In particular,
9602 if a function symbol has special st_other value set via directives,
9603 then attaching an IFUNC resolver to that symbol should not override
9604 the st_other setting. Requiring the directive on the IFUNC resolver
9605 symbol would be unexpected and problematic in C code, where the two
9606 symbols appear as two independent function declarations. */
9607
9608 void
9609 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
9610 {
9611 struct elf_obj_sy *srcelf = symbol_get_obj (src);
9612 struct elf_obj_sy *destelf = symbol_get_obj (dest);
9613 if (srcelf->size)
9614 {
9615 if (destelf->size == NULL)
9616 destelf->size = XNEW (expressionS);
9617 *destelf->size = *srcelf->size;
9618 }
9619 else
9620 {
9621 free (destelf->size);
9622 destelf->size = NULL;
9623 }
9624 S_SET_SIZE (dest, S_GET_SIZE (src));
9625 }
9626 #endif