]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
aarch64: Remove support for CSRE
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2021 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
106
/* Parsed shape/index information for a vector register operand,
   e.g. the ".4s" and "[2]" parts of "v0.4s[2]".  */
struct vector_type_el
{
  /* Element type (NT_b/NT_h/... , or NT_invtype when absent).  */
  enum vector_el_type type;
  /* Bitmask of NTA_HASTYPE / NTA_HASINDEX / NTA_HASVARWIDTH saying
     which pieces of information were actually supplied.  */
  unsigned char defined;
  /* Number of elements, e.g. 4 for "v0.4s"; 0 when the width is
     variable (SVE Zn/Pn) or was not written.  */
  unsigned width;
  /* Element index for forms like "v0.s[2]".  */
  int64_t index;
};
114
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
116
/* Relocation/fixup information attached to the instruction being
   assembled.  */
struct reloc
{
  /* BFD relocation code to emit.  */
  bfd_reloc_code_real_type type;
  /* Expression the relocation applies to.  */
  expressionS exp;
  /* Non-zero for a PC-relative fixup.  */
  int pc_rel;
  /* Operand the relocation is associated with.  */
  enum aarch64_opnd opnd;
  /* FIXUP_F_* flags, e.g. FIXUP_F_HAS_EXPLICIT_SHIFT.  */
  uint32_t flags;
  /* Whether libopcodes is needed to resolve this fixup.  */
  unsigned need_libopcodes_p : 1;
};
126
/* GAS's view of one assembly-line instruction: the libopcodes IR plus
   the parsing and relocation state tracked alongside it.  */
struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
  {
    enum aarch64_operand_error_kind kind;
    const char *error;
  } parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};
144
145 typedef struct aarch64_instruction aarch64_instruction;
146
147 static aarch64_instruction inst;
148
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
183 static inline bfd_boolean
184 error_p (void)
185 {
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188
/* Return the saved error message; NULL when no error is recorded or
   when the error relies on the operand's default description.  */

static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}
194
/* Return the kind of the saved diagnostic (AARCH64_OPDE_NIL if none).  */

static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}
200
201 static inline void
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
203 {
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
206 }
207
/* Record ERROR as an AARCH64_OPDE_RECOVERABLE diagnostic.  */

static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}
213
/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message; the NULL message requests that fallback.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}
221
/* Record ERROR as an AARCH64_OPDE_SYNTAX_ERROR diagnostic.  */

static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
/* Record ERROR as an AARCH64_OPDE_FATAL_SYNTAX_ERROR diagnostic.  */

static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
240 \f
241 /* Return value for certain parsers when the parsing fails; those parsers
242 return the information of the parsed result, e.g. register number, on
243 success. */
244 #define PARSE_FAIL -1
245
246 /* This is an invalid condition code that means no conditional field is
247 present. */
248 #define COND_ALWAYS 0x10
249
250 typedef struct
251 {
252 const char *template;
253 uint32_t value;
254 } asm_nzcv;
255
256 struct reloc_entry
257 {
258 char *name;
259 bfd_reloc_code_real_type reloc;
260 };
261
262 /* Macros to define the register types and masks for the purpose
263 of parsing. */
264
265 #undef AARCH64_REG_TYPES
266 #define AARCH64_REG_TYPES \
267 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
268 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
269 BASIC_REG_TYPE(SP_32) /* wsp */ \
270 BASIC_REG_TYPE(SP_64) /* sp */ \
271 BASIC_REG_TYPE(Z_32) /* wzr */ \
272 BASIC_REG_TYPE(Z_64) /* xzr */ \
273 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
274 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
275 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
276 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
277 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
278 BASIC_REG_TYPE(VN) /* v[0-31] */ \
279 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
280 BASIC_REG_TYPE(PN) /* p[0-15] */ \
281 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
282 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
283 /* Typecheck: same, plus SVE registers. */ \
284 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
285 | REG_TYPE(ZN)) \
286 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
287 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
289 /* Typecheck: same, plus SVE registers. */ \
290 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
291 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
292 | REG_TYPE(ZN)) \
293 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
294 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
295 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
296 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
297 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
298 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
299 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
300 /* Typecheck: any [BHSDQ]P FP. */ \
301 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
302 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
303 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
304 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
306 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
307 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
308 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
309 be used for SVE instructions, since Zn and Pn are valid symbols \
310 in other contexts. */ \
311 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
313 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
314 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
315 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
316 | REG_TYPE(ZN) | REG_TYPE(PN)) \
317 /* Any integer register; used for error messages only. */ \
318 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
320 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
321 /* Pseudo type to mark the end of the enumerator sequence. */ \
322 BASIC_REG_TYPE(MAX)
323
324 #undef BASIC_REG_TYPE
325 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
326 #undef MULTI_REG_TYPE
327 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
328
329 /* Register type enumerators. */
330 typedef enum aarch64_reg_type_
331 {
332 /* A list of REG_TYPE_*. */
333 AARCH64_REG_TYPES
334 } aarch64_reg_type;
335
336 #undef BASIC_REG_TYPE
337 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
338 #undef REG_TYPE
339 #define REG_TYPE(T) (1 << REG_TYPE_##T)
340 #undef MULTI_REG_TYPE
341 #define MULTI_REG_TYPE(T,V) V,
342
/* Structure for a hash table entry for a register.  */
typedef struct
{
  /* Register name as written in assembly, e.g. "x0", "sp", "v31".  */
  const char *name;
  /* Register number encoding.  */
  unsigned char number;
  /* One of the REG_TYPE_* enumerators.  */
  ENUM_BITFIELD (aarch64_reg_type_) type : 8;
  /* NOTE(review): presumably non-zero for registers predefined by GAS
     rather than user-defined aliases -- confirm against users.  */
  unsigned char builtin;
} reg_entry;
351
352 /* Values indexed by aarch64_reg_type to assist the type checking. */
353 static const unsigned reg_type_masks[] =
354 {
355 AARCH64_REG_TYPES
356 };
357
358 #undef BASIC_REG_TYPE
359 #undef REG_TYPE
360 #undef MULTI_REG_TYPE
361 #undef AARCH64_REG_TYPES
362
363 /* Diagnostics used when we don't get a register of the expected type.
364 Note: this has to synchronized with aarch64_reg_type definitions
365 above. */
366 static const char *
367 get_reg_expected_msg (aarch64_reg_type reg_type)
368 {
369 const char *msg;
370
371 switch (reg_type)
372 {
373 case REG_TYPE_R_32:
374 msg = N_("integer 32-bit register expected");
375 break;
376 case REG_TYPE_R_64:
377 msg = N_("integer 64-bit register expected");
378 break;
379 case REG_TYPE_R_N:
380 msg = N_("integer register expected");
381 break;
382 case REG_TYPE_R64_SP:
383 msg = N_("64-bit integer or SP register expected");
384 break;
385 case REG_TYPE_SVE_BASE:
386 msg = N_("base register expected");
387 break;
388 case REG_TYPE_R_Z:
389 msg = N_("integer or zero register expected");
390 break;
391 case REG_TYPE_SVE_OFFSET:
392 msg = N_("offset register expected");
393 break;
394 case REG_TYPE_R_SP:
395 msg = N_("integer or SP register expected");
396 break;
397 case REG_TYPE_R_Z_SP:
398 msg = N_("integer, zero or SP register expected");
399 break;
400 case REG_TYPE_FP_B:
401 msg = N_("8-bit SIMD scalar register expected");
402 break;
403 case REG_TYPE_FP_H:
404 msg = N_("16-bit SIMD scalar or floating-point half precision "
405 "register expected");
406 break;
407 case REG_TYPE_FP_S:
408 msg = N_("32-bit SIMD scalar or floating-point single precision "
409 "register expected");
410 break;
411 case REG_TYPE_FP_D:
412 msg = N_("64-bit SIMD scalar or floating-point double precision "
413 "register expected");
414 break;
415 case REG_TYPE_FP_Q:
416 msg = N_("128-bit SIMD scalar or floating-point quad precision "
417 "register expected");
418 break;
419 case REG_TYPE_R_Z_BHSDQ_V:
420 case REG_TYPE_R_Z_SP_BHSDQ_VZP:
421 msg = N_("register expected");
422 break;
423 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
424 msg = N_("SIMD scalar or floating-point register expected");
425 break;
426 case REG_TYPE_VN: /* any V reg */
427 msg = N_("vector register expected");
428 break;
429 case REG_TYPE_ZN:
430 msg = N_("SVE vector register expected");
431 break;
432 case REG_TYPE_PN:
433 msg = N_("SVE predicate register expected");
434 break;
435 default:
436 as_fatal (_("invalid register type %d"), reg_type);
437 }
438 return msg;
439 }
440
441 /* Some well known registers that we refer to directly elsewhere. */
442 #define REG_SP 31
443 #define REG_ZR 31
444
445 /* Instructions take 4 bytes in the object file. */
446 #define INSN_SIZE 4
447
448 static htab_t aarch64_ops_hsh;
449 static htab_t aarch64_cond_hsh;
450 static htab_t aarch64_shift_hsh;
451 static htab_t aarch64_sys_regs_hsh;
452 static htab_t aarch64_pstatefield_hsh;
453 static htab_t aarch64_sys_regs_ic_hsh;
454 static htab_t aarch64_sys_regs_dc_hsh;
455 static htab_t aarch64_sys_regs_at_hsh;
456 static htab_t aarch64_sys_regs_tlbi_hsh;
457 static htab_t aarch64_sys_regs_sr_hsh;
458 static htab_t aarch64_reg_hsh;
459 static htab_t aarch64_barrier_opt_hsh;
460 static htab_t aarch64_nzcv_hsh;
461 static htab_t aarch64_pldop_hsh;
462 static htab_t aarch64_hint_opt_hsh;
463
464 /* Stuff needed to resolve the label ambiguity
465 As:
466 ...
467 label: <insn>
468 may differ from:
469 ...
470 label:
471 <insn> */
472
473 static symbolS *last_label_seen;
474
475 /* Literal pool structure. Held on a per-section
476 and per-sub-section basis. */
477
478 #define MAX_LITERAL_POOL_SIZE 1024
479 typedef struct literal_expression
480 {
481 expressionS exp;
482 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
483 LITTLENUM_TYPE * bignum;
484 } literal_expression;
485
typedef struct literal_pool
{
  /* The constants waiting to be emitted.  */
  literal_expression literals[MAX_LITERAL_POOL_SIZE];
  /* Index of the first unused slot in LITERALS.  */
  unsigned int next_free_entry;
  /* Pool identifier (presumably used to create a unique label name --
     TODO confirm against pool-emission code).  */
  unsigned int id;
  /* Symbol marking the pool's location.  */
  symbolS *symbol;
  /* Section and sub-section the pool belongs to.  */
  segT section;
  subsegT sub_section;
  /* Size of the pool entries -- NOTE(review): units (bytes vs. log2)
     not visible here; confirm at the use sites.  */
  int size;
  /* Next pool in the linked list headed by list_of_pools.  */
  struct literal_pool *next;
} literal_pool;
497
498 /* Pointer to a linked list of literal pools. */
499 static literal_pool *list_of_pools = NULL;
500 \f
501 /* Pure syntax. */
502
503 /* This array holds the chars that always start a comment. If the
504 pre-processor is disabled, these aren't very useful. */
505 const char comment_chars[] = "";
506
507 /* This array holds the chars that only start a comment at the beginning of
508 a line. If the line seems to have the form '# 123 filename'
509 .line and .file directives will appear in the pre-processed output. */
510 /* Note that input_file.c hand checks for '#' at the beginning of the
511 first line of the input file. This is because the compiler outputs
512 #NO_APP at the beginning of its output. */
513 /* Also note that comments like this one will always work. */
514 const char line_comment_chars[] = "#";
515
516 const char line_separator_chars[] = ";";
517
518 /* Chars that can be used to separate mant
519 from exp in floating point numbers. */
520 const char EXP_CHARS[] = "eE";
521
522 /* Chars that mean this number is a floating point constant. */
523 /* As in 0f12.456 */
524 /* or 0d1.2345e12 */
525
526 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhH";
527
528 /* Prefix character that indicates the start of an immediate value. */
529 #define is_immediate_prefix(C) ((C) == '#')
530
531 /* Separator character handling. */
532
533 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
534
535 static inline bfd_boolean
536 skip_past_char (char **str, char c)
537 {
538 if (**str == c)
539 {
540 (*str)++;
541 return TRUE;
542 }
543 else
544 return FALSE;
545 }
546
547 #define skip_past_comma(str) skip_past_char (str, ',')
548
549 /* Arithmetic expressions (possibly involving symbols). */
550
551 static bfd_boolean in_my_get_expression_p = FALSE;
552
553 /* Third argument to my_get_expression. */
554 #define GE_NO_PREFIX 0
555 #define GE_OPT_PREFIX 1
556
557 /* Return TRUE if the string pointed by *STR is successfully parsed
558 as an valid expression; *EP will be filled with the information of
559 such an expression. Otherwise return FALSE. */
560
static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  /* Optionally consume a leading '#' immediate prefix, depending on
     PREFIX_MODE (GE_NO_PREFIX or GE_OPT_PREFIX).  */
  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Temporarily point the global input_line_pointer at *STR so the
     generic expression parser can be used.  in_my_get_expression_p
     tells md_operand to flag bad operands with O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* Once a '#' prefix has been consumed this can no longer be a
	 register, so the error is fatal for template matching.  */
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  /* Success: advance the caller's pointer and restore the global.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
623
624 /* Turn a string in input_line_pointer into a floating point constant
625 of type TYPE, and store the appropriate bytes in *LITP. The number
626 of LITTLENUMS emitted is stored in *SIZEP. An error message is
627 returned, or NULL on OK. */
628
const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* If this is a bfloat16 type, then parse it slightly differently -
     as it does not follow the IEEE standard exactly.  */
  if (type == 'b')
    {
      char * t;
      LITTLENUM_TYPE words[MAX_LITTLENUMS];
      FLONUM_TYPE generic_float;

      /* Parse with an 8-bit exponent (bfloat16 shares float32's
	 exponent range), keeping the parse details in GENERIC_FLOAT so
	 the sign/class can be inspected below.  */
      t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float);

      if (t)
	input_line_pointer = t;
      else
	return _("invalid floating point number");

      /* Infinities and NaNs need their bfloat16 encodings substituted;
	 ordinary numbers keep what atof_ieee_detail produced.  */
      switch (generic_float.sign)
	{
	/* Is +Inf.  */
	case 'P':
	  words[0] = 0x7f80;
	  break;

	/* Is -Inf.  */
	case 'N':
	  words[0] = 0xff80;
	  break;

	/* Is NaN.  */
	/* bfloat16 has two types of NaN - quiet and signalling.
	   Quiet NaN has bit[6] == 1 && fraction != 0, whereas
	   signalling NaNs have bit[6] == 0 && fraction != 0.
	   Chose this specific encoding as it is the same form
	   as used by other IEEE 754 encodings in GAS.  */
	case 0:
	  words[0] = 0x7fff;
	  break;

	default:
	  break;
	}

      /* A bfloat16 constant occupies a single 16-bit littlenum.  */
      *sizeP = 2;

      md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));

      return NULL;
    }

  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
682
683 /* We handle all bad expressions here, so that we can report the faulty
684 instruction in the error message. */
685 void
686 md_operand (expressionS * exp)
687 {
688 if (in_my_get_expression_p)
689 exp->X_op = O_illegal;
690 }
691
692 /* Immediate values. */
693
694 /* Errors may be set multiple times during parsing or bit encoding
695 (particularly in the Neon bits), but usually the earliest error which is set
696 will be the most meaningful. Avoid overwriting it with later (cascading)
697 errors by calling this function. */
698
static void
first_error (const char *error)
{
  /* Keep the earliest diagnostic; later cascading errors are usually
     less meaningful.  */
  if (error_p ())
    return;
  set_syntax_error (error);
}
705
706 /* Similar to first_error, but this function accepts formatted error
707 message. */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  /* Only format and record the message when no earlier diagnostic has
     been saved; this mirrors first_error.  */
  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* Formatted messages are expected to fit within the buffer.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
730
731 /* Register parsing. */
732
733 /* Generic register parser which is called by other specialized
734 register parsers.
735 CCP points to what should be the beginning of a register name.
736 If it is indeed a valid register name, advance CCP over it and
737 return the reg_entry structure; otherwise return NULL.
738 It does not issue diagnostics. */
739
740 static reg_entry *
741 parse_reg (char **ccp)
742 {
743 char *start = *ccp;
744 char *p;
745 reg_entry *reg;
746
747 #ifdef REGISTER_PREFIX
748 if (*start != REGISTER_PREFIX)
749 return NULL;
750 start++;
751 #endif
752
753 p = start;
754 if (!ISALPHA (*p) || !is_name_beginner (*p))
755 return NULL;
756
757 do
758 p++;
759 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
760
761 reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);
762
763 if (!reg)
764 return NULL;
765
766 *ccp = p;
767 return reg;
768 }
769
770 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
771 return FALSE. */
772 static bfd_boolean
773 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
774 {
775 return (reg_type_masks[type] & (1 << reg->type)) != 0;
776 }
777
778 /* Try to parse a base or offset register. Allow SVE base and offset
779 registers if REG_TYPE includes SVE registers. Return the register
780 entry on success, setting *QUALIFIER to the register qualifier.
781 Return null otherwise.
782
783 Note that this function does not issue any diagnostics. */
784
static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit GPR, WSP or WZR: W qualifier.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit GPR, SP or XZR: X qualifier.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* An SVE Z register is accepted only when REG_TYPE allows it,
	 and it must carry an explicit ".s" or ".d" element size.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
835
836 /* Try to parse a base or offset register. Return the register entry
837 on success, setting *QUALIFIER to the register qualifier. Return null
838 otherwise.
839
840 Note that this function does not issue any diagnostics. */
841
static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  /* Accept any 32/64-bit GPR, zero register or stack pointer.  */
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
847
848 /* Parse the qualifier of a vector register or vector element of type
849 REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
850 succeeds; otherwise return FALSE.
851
852 Accept only one occurrence of:
853 4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
854 b h s d q */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector (Zn) and predicate (Pn) registers never take a numeric
     element count; elsewhere a missing count means a bare "b/h/s/d/q"
     qualifier.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are only valid for SVE or as a single element.  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* Only 64-bit and 128-bit total vector widths are accepted, plus the
     half-width "2h" and "4b" forms.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
933
934 /* *STR contains an SVE zero/merge predication suffix. Parse it into
935 *PARSED_TYPE and point *STR at the end of the suffix. */
936
937 static bfd_boolean
938 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
939 {
940 char *ptr = *str;
941
942 /* Skip '/'. */
943 gas_assert (*ptr == '/');
944 ptr++;
945 switch (TOLOWER (*ptr))
946 {
947 case 'z':
948 parsed_type->type = NT_zero;
949 break;
950 case 'm':
951 parsed_type->type = NT_merge;
952 break;
953 default:
954 if (*ptr != '\0' && *ptr != ',')
955 first_error_fmt (_("unexpected character `%c' in predication type"),
956 *ptr);
957 else
958 first_error (_("missing predication type"));
959 return FALSE;
960 }
961 parsed_type->width = 0;
962 *str = ptr + 1;
963 return TRUE;
964 }
965
966 /* Parse a register of the type TYPE.
967
968 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
969 name or the parsed register is not of TYPE.
970
971 Otherwise return the register number, and optionally fill in the actual
972 type of the register in *RTYPE when multiple alternatives were given, and
973 return the register shape and element index information in *TYPEINFO.
974
975 IN_REG_LIST should be set with TRUE if the caller is parsing a register
976 list. */
977
static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start from "no shape/index information".  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  type = reg->type;

  /* A '.' suffix introduces a vector shape (e.g. v0.4s, z3.d); a '/'
     suffix on a predicate register introduces zero/merge predication
     (e.g. p0/z).  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* The element index must be a constant expression.  */
      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1101
1102 /* Parse register.
1103
1104 Return the register number on success; return PARSE_FAIL otherwise.
1105
1106 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1107 the register (e.g. NEON double or quad reg when either has been requested).
1108
1109 If this is a NEON vector register with additional type information, fill
1110 in the struct pointed to by VECTYPE (if non-NULL).
1111
1112 This parser does not handle register list. */
1113
1114 static int
1115 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1116 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1117 {
1118 struct vector_type_el atype;
1119 char *str = *ccp;
1120 int reg = parse_typed_reg (&str, type, rtype, &atype,
1121 /*in_reg_list= */ FALSE);
1122
1123 if (reg == PARSE_FAIL)
1124 return PARSE_FAIL;
1125
1126 if (vectype)
1127 *vectype = atype;
1128
1129 *ccp = str;
1130
1131 return reg;
1132 }
1133
1134 static inline bfd_boolean
1135 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1136 {
1137 return
1138 e1.type == e2.type
1139 && e1.defined == e2.defined
1140 && e1.width == e2.width && e1.index == e2.index;
1141 }
1142
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  /* The list must be wrapped in curly braces.  */
  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;		/* Register number of the most recent element.  */
  val_range = -1;	/* First register of the current Vn-Vm range.  */
  in_range = 0;		/* Non-zero while parsing the "-Vm" half of a range.  */
  do
    {
      if (in_range)
	{
	  str++;	/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      /* If any element carries an index, the shared index for the whole
	 list is parsed after the closing brace below.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* Ranges such as "v3-v1" that go backwards are rejected.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      if (! error)
	/* Expand the (possibly single-element) range into the packed
	   five-bits-per-register encoding described above.  */
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  /* Parse the shared element index, e.g. the "[1]" in "{v0.s, v1.s}[1]".  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1303
1304 /* Directives: register aliases. */
1305
1306 static reg_entry *
1307 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1308 {
1309 reg_entry *new;
1310 const char *name;
1311
1312 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1313 {
1314 if (new->builtin)
1315 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1316 str);
1317
1318 /* Only warn about a redefinition if it's not defined as the
1319 same register. */
1320 else if (new->number != number || new->type != type)
1321 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1322
1323 return NULL;
1324 }
1325
1326 name = xstrdup (str);
1327 new = XNEW (reg_entry);
1328
1329 new->name = name;
1330 new->number = number;
1331 new->type = type;
1332 new->builtin = FALSE;
1333
1334 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1335
1336 return new;
1337 }
1338
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must name a known register (or alias).  */
  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only attempt the upper-case variant when it actually differs
	 from the name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
1418
1419 /* Should never be called, as .req goes between the alias and the
1420 register name, not at the beginning of the line. */
1421 static void
1422 s_req (int a ATTRIBUTE_UNUSED)
1423 {
1424 as_bad (_("invalid syntax for .req directive"));
1425 }
1426
1427 /* The .unreq directive deletes an alias which was previously defined
1428 by .req. For example:
1429
1430 my_alias .req r11
1431 .unreq my_alias */
1432
1433 static void
1434 s_unreq (int a ATTRIBUTE_UNUSED)
1435 {
1436 char *name;
1437 char saved_char;
1438
1439 name = input_line_pointer;
1440
1441 while (*input_line_pointer != 0
1442 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1443 ++input_line_pointer;
1444
1445 saved_char = *input_line_pointer;
1446 *input_line_pointer = 0;
1447
1448 if (!*name)
1449 as_bad (_("invalid syntax for .unreq directive"));
1450 else
1451 {
1452 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1453
1454 if (!reg)
1455 as_bad (_("unknown register alias '%s'"), name);
1456 else if (reg->builtin)
1457 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1458 name);
1459 else
1460 {
1461 char *p;
1462 char *nbuf;
1463
1464 str_hash_delete (aarch64_reg_hsh, name);
1465 free ((char *) reg->name);
1466 free (reg);
1467
1468 /* Also locate the all upper case and all lower case versions.
1469 Do not complain if we cannot find one or the other as it
1470 was probably deleted above. */
1471
1472 nbuf = strdup (name);
1473 for (p = nbuf; *p; p++)
1474 *p = TOUPPER (*p);
1475 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1476 if (reg)
1477 {
1478 str_hash_delete (aarch64_reg_hsh, nbuf);
1479 free ((char *) reg->name);
1480 free (reg);
1481 }
1482
1483 for (p = nbuf; *p; p++)
1484 *p = TOLOWER (*p);
1485 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1486 if (reg)
1487 {
1488 str_hash_delete (aarch64_reg_hsh, nbuf);
1489 free ((char *) reg->name);
1490 free (reg);
1491 }
1492
1493 free (nbuf);
1494 }
1495 }
1496
1497 *input_line_pointer = saved_char;
1498 demand_empty_rest_of_line ();
1499 }
1500
1501 /* Directives: Instruction set selection. */
1502
1503 #ifdef OBJ_ELF
1504 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1505 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1506 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1507 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1508
/* Create a new mapping symbol for the transition to STATE.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Pick the AAELF64 mapping symbol name for the new state.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  /* Discard the superseded symbol at offset 0.  */
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within one frag are emitted in address order.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1564
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The symbol being removed is also the first of the frag.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding as data, then resume STATE after BYTES of padding.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1592
1593 static void mapping_state_2 (enum mstate state, int max_chars);
1594
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	/* Everything emitted before this point is retroactively data.  */
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1636
1637 /* Same as mapping_state, but MAX_CHARS bytes have already been
1638 allocated. Put the mapping symbol that far back. */
1639
1640 static void
1641 mapping_state_2 (enum mstate state, int max_chars)
1642 {
1643 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1644
1645 if (!SEG_NORMAL (now_seg))
1646 return;
1647
1648 if (mapstate == state)
1649 /* The mapping symbol has already been emitted.
1650 There is nothing else to do. */
1651 return;
1652
1653 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1654 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1655 }
1656 #else
1657 #define mapping_state(x) /* nothing */
1658 #define mapping_state_2(x, y) /* nothing */
1659 #endif
1660
1661 /* Directives: sectioning and alignment. */
1662
1663 static void
1664 s_bss (int ignore ATTRIBUTE_UNUSED)
1665 {
1666 /* We don't support putting frags in the BSS segment, we fake it by
1667 marking in_bss, then looking at s_skip for clues. */
1668 subseg_set (bss_section, 0);
1669 demand_empty_rest_of_line ();
1670 mapping_state (MAP_DATA);
1671 }
1672
1673 static void
1674 s_even (int ignore ATTRIBUTE_UNUSED)
1675 {
1676 /* Never make frag if expect extra pass. */
1677 if (!need_pass_2)
1678 frag_align (1, 0, 0);
1679
1680 record_alignment (now_seg, 1);
1681
1682 demand_empty_rest_of_line ();
1683 }
1684
1685 /* Directives: Literal pools. */
1686
1687 static literal_pool *
1688 find_literal_pool (int size)
1689 {
1690 literal_pool *pool;
1691
1692 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1693 {
1694 if (pool->section == now_seg
1695 && pool->sub_section == now_subseg && pool->size == size)
1696 break;
1697 }
1698
1699 return pool;
1700 }
1701
1702 static literal_pool *
1703 find_or_make_literal_pool (int size)
1704 {
1705 /* Next literal pool ID number. */
1706 static unsigned int latest_pool_num = 1;
1707 literal_pool *pool;
1708
1709 pool = find_literal_pool (size);
1710
1711 if (pool == NULL)
1712 {
1713 /* Create a new pool. */
1714 pool = XNEW (literal_pool);
1715 if (!pool)
1716 return NULL;
1717
1718 /* Currently we always put the literal pool in the current text
1719 section. If we were generating "small" model code where we
1720 knew that all code and initialised data was within 1MB then
1721 we could output literals to mergeable, read-only data
1722 sections. */
1723
1724 pool->next_free_entry = 0;
1725 pool->section = now_seg;
1726 pool->sub_section = now_subseg;
1727 pool->size = size;
1728 pool->next = list_of_pools;
1729 pool->symbol = NULL;
1730
1731 /* Add it to the list. */
1732 list_of_pools = pool;
1733 }
1734
1735 /* New pools, and emptied pools, will have a NULL symbol. */
1736 if (pool->symbol == NULL)
1737 {
1738 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1739 &zero_address_frag, 0);
1740 pool->id = latest_pool_num++;
1741 }
1742
1743 /* Done. */
1744 return pool;
1745 }
1746
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   Return TRUE on success, otherwise return FALSE.  */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool, so identical
     constants or symbol references share one pool slot.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Rewrite *EXP in place so the caller now references the pool slot:
     pool symbol plus the byte offset of this entry.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1806
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns its name.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  /* Let the object format and the target hook into the new symbol.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1857
1858
/* Handle the .ltorg/.pool directive: dump every non-empty literal
   pool (4-, 8- and 16-byte entry sizes) at the current location and
   mark the pools as empty again.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* align 2..4 covers entry sizes 4, 8 and 16 bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* \002 in the name keeps it out of the user namespace.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Now the pool symbol, created earlier by find_or_make_literal_pool,
	 finally gets a definition at the current address.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1917
1918 #ifdef OBJ_ELF
1919 /* Forward declarations for functions below, in the MD interface
1920 section. */
1921 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1922 static struct reloc_table_entry * find_reloc_table_entry (char **);
1923
/* Directives: Data.  */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional "#:reloc_suffix:" after the symbol.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      /* Relocation suffixes are recognised here but not yet
		 supported (see the N.B. above); both paths error out.  */
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1979
1980 /* Mark symbol that it follows a variant PCS convention. */
1981
1982 static void
1983 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
1984 {
1985 char *name;
1986 char c;
1987 symbolS *sym;
1988 asymbol *bfdsym;
1989 elf_symbol_type *elfsym;
1990
1991 c = get_symbol_name (&name);
1992 if (!*name)
1993 as_bad (_("Missing symbol name in directive"));
1994 sym = symbol_find_or_make (name);
1995 restore_line_pointer (c);
1996 demand_empty_rest_of_line ();
1997 bfdsym = symbol_get_bfdsym (sym);
1998 elfsym = elf_symbol_from (bfdsym);
1999 gas_assert (elfsym);
2000 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2001 }
2002 #endif /* OBJ_ELF */
2003
/* Output a 32-bit word, but mark as an instruction.  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      /* Each comma-separated operand must be a 32-bit constant.  */
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instructions are always little-endian on AArch64, so undo
	 emit_expr's big-endian byte ordering by pre-swapping.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2056
2057 static void
2058 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2059 {
2060 demand_empty_rest_of_line ();
2061 struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2062 fde->pauth_key = AARCH64_PAUTH_KEY_B;
2063 }
2064
2065 #ifdef OBJ_ELF
2066 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2067
2068 static void
2069 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2070 {
2071 expressionS exp;
2072
2073 expression (&exp);
2074 frag_grow (4);
2075 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2076 BFD_RELOC_AARCH64_TLSDESC_ADD);
2077
2078 demand_empty_rest_of_line ();
2079 }
2080
2081 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2082
2083 static void
2084 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2085 {
2086 expressionS exp;
2087
2088 /* Since we're just labelling the code, there's no need to define a
2089 mapping symbol. */
2090 expression (&exp);
2091 /* Make sure there is enough room in this frag for the following
2092 blr. This trick only works if the blr follows immediately after
2093 the .tlsdesc directive. */
2094 frag_grow (4);
2095 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2096 BFD_RELOC_AARCH64_TLSDESC_CALL);
2097
2098 demand_empty_rest_of_line ();
2099 }
2100
2101 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2102
2103 static void
2104 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2105 {
2106 expressionS exp;
2107
2108 expression (&exp);
2109 frag_grow (4);
2110 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2111 BFD_RELOC_AARCH64_TLSDESC_LDR);
2112
2113 demand_empty_rest_of_line ();
2114 }
2115 #endif /* OBJ_ELF */
2116
2117 static void s_aarch64_arch (int);
2118 static void s_aarch64_cpu (int);
2119 static void s_aarch64_arch_extension (int);
2120
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* ".pool" is an alias for ".ltorg".  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* For the data directives the integer argument is the size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* The argument selects the float format: 'h' = IEEE half, 'b' = bfloat16.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2154 \f
2155
2156 /* Check whether STR points to a register name followed by a comma or the
2157 end of line; REG_TYPE indicates which register types are checked
2158 against. Return TRUE if STR is such a register name; otherwise return
2159 FALSE. The function does not intend to produce any diagnostics, but since
2160 the register parser aarch64_reg_parse, which is called by this function,
2161 does produce diagnostics, we call clear_error to clear any diagnostics
2162 that may be generated by aarch64_reg_parse.
2163 Also, the function returns FALSE directly if there is any user error
2164 present at the function entry. This prevents the existing diagnostics
2165 state from being spoiled.
2166 The function currently serves parse_constant_immediate and
2167 parse_big_immediate only. */
2168 static bfd_boolean
2169 reg_name_p (char *str, aarch64_reg_type reg_type)
2170 {
2171 int reg;
2172
2173 /* Prevent the diagnostics state from being spoiled. */
2174 if (error_p ())
2175 return FALSE;
2176
2177 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2178
2179 /* Clear the parsing error that may be set by the reg parser. */
2180 clear_error ();
2181
2182 if (reg == PARSE_FAIL)
2183 return FALSE;
2184
2185 skip_whitespace (str);
2186 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2187 return TRUE;
2188
2189 return FALSE;
2190 }
2191
2192 /* Parser functions used exclusively in instruction operands. */
2193
2194 /* Parse an immediate expression which may not be constant.
2195
2196 To prevent the expression parser from pushing a register name
2197 into the symbol table as an undefined symbol, firstly a check is
2198 done to find out whether STR is a register of type REG_TYPE followed
2199 by a comma or the end of line. Return FALSE if STR is such a string. */
2200
2201 static bfd_boolean
2202 parse_immediate_expression (char **str, expressionS *exp,
2203 aarch64_reg_type reg_type)
2204 {
2205 if (reg_name_p (*str, reg_type))
2206 {
2207 set_recoverable_error (_("immediate operand required"));
2208 return FALSE;
2209 }
2210
2211 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2212
2213 if (exp->X_op == O_absent)
2214 {
2215 set_fatal_syntax_error (_("missing immediate expression"));
2216 return FALSE;
2217 }
2218
2219 return TRUE;
2220 }
2221
2222 /* Constant immediate-value read function for use in insn parsing.
2223 STR points to the beginning of the immediate (with the optional
2224 leading #); *VAL receives the value. REG_TYPE says which register
2225 names should be treated as registers rather than as symbolic immediates.
2226
2227 Return TRUE on success; otherwise return FALSE. */
2228
2229 static bfd_boolean
2230 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2231 {
2232 expressionS exp;
2233
2234 if (! parse_immediate_expression (str, &exp, reg_type))
2235 return FALSE;
2236
2237 if (exp.X_op != O_constant)
2238 {
2239 set_syntax_error (_("constant expression required"));
2240 return FALSE;
2241 }
2242
2243 *val = exp.X_add_number;
2244 return TRUE;
2245 }
2246
/* Pack the FMOV-style 8-bit immediate out of the single-precision bit
   pattern IMM: the low seven bits come from IMM bits 25..19 and the
   top bit is the sign (IMM bit 31).  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t exp_mantissa = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0] */
  uint32_t sign = (imm >> (31 - 7)) & 0x80;	/* b[31]    -> b[7]   */
  return exp_mantissa | sign;
}
2253
2254 /* Return TRUE if the single-precision floating-point value encoded in IMM
2255 can be expressed in the AArch64 8-bit signed floating-point format with
2256 3-bit exponent and normalized 4 bits of precision; in other words, the
2257 floating-point value must be expressable as
2258 (+/-) n / 16 * power (2, r)
2259 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2260
2261 static bfd_boolean
2262 aarch64_imm_float_p (uint32_t imm)
2263 {
2264 /* If a single-precision floating-point value has the following bit
2265 pattern, it can be expressed in the AArch64 8-bit floating-point
2266 format:
2267
2268 3 32222222 2221111111111
2269 1 09876543 21098765432109876543210
2270 n Eeeeeexx xxxx0000000000000000000
2271
2272 where n, e and each x are either 0 or 1 independently, with
2273 E == ~ e. */
2274
2275 uint32_t pattern;
2276
2277 /* Prepare the pattern for 'Eeeeee'. */
2278 if (((imm >> 30) & 0x1) == 0)
2279 pattern = 0x3e000000;
2280 else
2281 pattern = 0x40000000;
2282
2283 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2284 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2285 }
2286
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the resulting
   single-precision encoding in *FPWORD if so; *FPWORD is left
   untouched on failure.  */

static bfd_boolean
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  /* If a double-precision floating-point value has the following bit
     pattern, it can be expressed in a float:

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

       -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	 if Eeee_eeee != 1111_1111

     where n, e, s and S are either 0 or 1 independently and where ~ is the
     inverse of E.  */

  uint32_t pattern;
  uint32_t high32 = imm >> 32;
  uint32_t low32 = imm;

  /* Lower 29 bits need to be 0s; any set bit there would be lost in
     the narrower single-precision significand.  */
  if ((imm & 0x1fffffff) != 0)
    return FALSE;

  /* Prepare the pattern for 'Eeeeeeeee'; E must be the inverse of the
     three bits that follow it (see the diagram above).  */
  if (((high32 >> 30) & 0x1) == 0)
    pattern = 0x38000000;
  else
    pattern = 0x40000000;

  /* Check E~~~.  */
  if ((high32 & 0x78000000) != pattern)
    return FALSE;

  /* Check Eeee_eeee != 1111_1111, i.e. the exponent is in range for
     a single-precision (non-Inf/NaN) result.  */
  if ((high32 & 0x7ff00000) == 0x47f00000)
    return FALSE;

  *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	     | (low32 >> 29));			/* 3 S bits.  */
  return TRUE;
}
2334
2335 /* Return true if we should treat OPERAND as a double-precision
2336 floating-point operand rather than a single-precision one. */
2337 static bfd_boolean
2338 double_precision_operand_p (const aarch64_opnd_info *operand)
2339 {
2340 /* Check for unsuffixed SVE registers, which are allowed
2341 for LDR and STR but not in instructions that require an
2342 immediate. We get better error messages if we arbitrarily
2343 pick one size, parse the immediate normally, and then
2344 report the match failure in the normal way. */
2345 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2346 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2347 }
2348
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bfd_boolean
parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bfd_boolean hex_p = FALSE;

  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* A double-precision encoding is only accepted if it converts
	     to single precision without loss.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* Too wide to be a single-precision encoding.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = TRUE;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* Reject a bare register name; parsing it as an expression would
	 pollute the symbol table.  */
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal notation: let the generic IEEE reader produce the
	 single-precision littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return TRUE;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return FALSE;
}
2424
2425 /* Less-generic immediate-value read function with the possibility of loading
2426 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2427 instructions.
2428
2429 To prevent the expression parser from pushing a register name into the
2430 symbol table as an undefined symbol, a check is firstly done to find
2431 out whether STR is a register of type REG_TYPE followed by a comma or
2432 the end of line. Return FALSE if STR is such a register. */
2433
2434 static bfd_boolean
2435 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2436 {
2437 char *ptr = *str;
2438
2439 if (reg_name_p (ptr, reg_type))
2440 {
2441 set_syntax_error (_("immediate operand required"));
2442 return FALSE;
2443 }
2444
2445 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2446
2447 if (inst.reloc.exp.X_op == O_constant)
2448 *imm = inst.reloc.exp.X_add_number;
2449
2450 *str = ptr;
2451
2452 return TRUE;
2453 }
2454
2455 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2456 if NEED_LIBOPCODES is non-zero, the fixup will need
2457 assistance from the libopcodes. */
2458
2459 static inline void
2460 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2461 const aarch64_opnd_info *operand,
2462 int need_libopcodes_p)
2463 {
2464 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2465 reloc->opnd = operand->type;
2466 if (need_libopcodes_p)
2467 reloc->need_libopcodes_p = 1;
2468 };
2469
/* Return TRUE if the instruction currently being assembled needs to be
   fixed up later internally by GAS, i.e. its relocation type has been
   set to BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP; otherwise return FALSE.  */

static inline bfd_boolean
aarch64_gas_internal_fixup_p (void)
{
  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
}
2478
2479 /* Assign the immediate value to the relevant field in *OPERAND if
2480 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2481 needs an internal fixup in a later stage.
2482 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2483 IMM.VALUE that may get assigned with the constant. */
2484 static inline void
2485 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2486 aarch64_opnd_info *operand,
2487 int addr_off_p,
2488 int need_libopcodes_p,
2489 int skip_p)
2490 {
2491 if (reloc->exp.X_op == O_constant)
2492 {
2493 if (addr_off_p)
2494 operand->addr.offset.imm = reloc->exp.X_add_number;
2495 else
2496 operand->imm.value = reloc->exp.X_add_number;
2497 reloc->type = BFD_RELOC_UNUSED;
2498 }
2499 else
2500 {
2501 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2502 /* Tell libopcodes to ignore this operand or not. This is helpful
2503 when one of the operands needs to be fixed up later but we need
2504 libopcodes to check the other operands. */
2505 operand->skip = skip_p;
2506 }
2507 }
2508
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  /* Modifier name as it appears in the source, without the colons.  */
  const char *name;
  /* Non-zero if the resulting relocation is pc-relative.  */
  int pc_rel;
  /* BFD relocation to emit for each class of instruction that may
     carry this modifier; 0 means the modifier is not permitted on
     that class.  */
  bfd_reloc_code_real_type adr_type;
  bfd_reloc_code_real_type adrp_type;
  bfd_reloc_code_real_type movw_type;
  bfd_reloc_code_real_type add_type;
  bfd_reloc_code_real_type ldst_type;
  bfd_reloc_code_real_type ld_literal_type;
};
2528
2529 static struct reloc_table_entry reloc_table[] = {
2530 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2531 {"lo12", 0,
2532 0, /* adr_type */
2533 0,
2534 0,
2535 BFD_RELOC_AARCH64_ADD_LO12,
2536 BFD_RELOC_AARCH64_LDST_LO12,
2537 0},
2538
2539 /* Higher 21 bits of pc-relative page offset: ADRP */
2540 {"pg_hi21", 1,
2541 0, /* adr_type */
2542 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2543 0,
2544 0,
2545 0,
2546 0},
2547
2548 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2549 {"pg_hi21_nc", 1,
2550 0, /* adr_type */
2551 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2552 0,
2553 0,
2554 0,
2555 0},
2556
2557 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2558 {"abs_g0", 0,
2559 0, /* adr_type */
2560 0,
2561 BFD_RELOC_AARCH64_MOVW_G0,
2562 0,
2563 0,
2564 0},
2565
2566 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2567 {"abs_g0_s", 0,
2568 0, /* adr_type */
2569 0,
2570 BFD_RELOC_AARCH64_MOVW_G0_S,
2571 0,
2572 0,
2573 0},
2574
2575 /* Less significant bits 0-15 of address/value: MOVK, no check */
2576 {"abs_g0_nc", 0,
2577 0, /* adr_type */
2578 0,
2579 BFD_RELOC_AARCH64_MOVW_G0_NC,
2580 0,
2581 0,
2582 0},
2583
2584 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2585 {"abs_g1", 0,
2586 0, /* adr_type */
2587 0,
2588 BFD_RELOC_AARCH64_MOVW_G1,
2589 0,
2590 0,
2591 0},
2592
2593 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2594 {"abs_g1_s", 0,
2595 0, /* adr_type */
2596 0,
2597 BFD_RELOC_AARCH64_MOVW_G1_S,
2598 0,
2599 0,
2600 0},
2601
2602 /* Less significant bits 16-31 of address/value: MOVK, no check */
2603 {"abs_g1_nc", 0,
2604 0, /* adr_type */
2605 0,
2606 BFD_RELOC_AARCH64_MOVW_G1_NC,
2607 0,
2608 0,
2609 0},
2610
2611 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2612 {"abs_g2", 0,
2613 0, /* adr_type */
2614 0,
2615 BFD_RELOC_AARCH64_MOVW_G2,
2616 0,
2617 0,
2618 0},
2619
2620 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2621 {"abs_g2_s", 0,
2622 0, /* adr_type */
2623 0,
2624 BFD_RELOC_AARCH64_MOVW_G2_S,
2625 0,
2626 0,
2627 0},
2628
2629 /* Less significant bits 32-47 of address/value: MOVK, no check */
2630 {"abs_g2_nc", 0,
2631 0, /* adr_type */
2632 0,
2633 BFD_RELOC_AARCH64_MOVW_G2_NC,
2634 0,
2635 0,
2636 0},
2637
2638 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2639 {"abs_g3", 0,
2640 0, /* adr_type */
2641 0,
2642 BFD_RELOC_AARCH64_MOVW_G3,
2643 0,
2644 0,
2645 0},
2646
2647 /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2648 {"prel_g0", 1,
2649 0, /* adr_type */
2650 0,
2651 BFD_RELOC_AARCH64_MOVW_PREL_G0,
2652 0,
2653 0,
2654 0},
2655
2656 /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2657 {"prel_g0_nc", 1,
2658 0, /* adr_type */
2659 0,
2660 BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2661 0,
2662 0,
2663 0},
2664
2665 /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2666 {"prel_g1", 1,
2667 0, /* adr_type */
2668 0,
2669 BFD_RELOC_AARCH64_MOVW_PREL_G1,
2670 0,
2671 0,
2672 0},
2673
2674 /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2675 {"prel_g1_nc", 1,
2676 0, /* adr_type */
2677 0,
2678 BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2679 0,
2680 0,
2681 0},
2682
2683 /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2684 {"prel_g2", 1,
2685 0, /* adr_type */
2686 0,
2687 BFD_RELOC_AARCH64_MOVW_PREL_G2,
2688 0,
2689 0,
2690 0},
2691
2692 /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2693 {"prel_g2_nc", 1,
2694 0, /* adr_type */
2695 0,
2696 BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2697 0,
2698 0,
2699 0},
2700
2701 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2702 {"prel_g3", 1,
2703 0, /* adr_type */
2704 0,
2705 BFD_RELOC_AARCH64_MOVW_PREL_G3,
2706 0,
2707 0,
2708 0},
2709
2710 /* Get to the page containing GOT entry for a symbol. */
2711 {"got", 1,
2712 0, /* adr_type */
2713 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2714 0,
2715 0,
2716 0,
2717 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2718
2719 /* 12 bit offset into the page containing GOT entry for that symbol. */
2720 {"got_lo12", 0,
2721 0, /* adr_type */
2722 0,
2723 0,
2724 0,
2725 BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2726 0},
2727
2728 /* 0-15 bits of address/value: MOVk, no check. */
2729 {"gotoff_g0_nc", 0,
2730 0, /* adr_type */
2731 0,
2732 BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2733 0,
2734 0,
2735 0},
2736
2737 /* Most significant bits 16-31 of address/value: MOVZ. */
2738 {"gotoff_g1", 0,
2739 0, /* adr_type */
2740 0,
2741 BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2742 0,
2743 0,
2744 0},
2745
2746 /* 15 bit offset into the page containing GOT entry for that symbol. */
2747 {"gotoff_lo15", 0,
2748 0, /* adr_type */
2749 0,
2750 0,
2751 0,
2752 BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2753 0},
2754
2755 /* Get to the page containing GOT TLS entry for a symbol */
2756 {"gottprel_g0_nc", 0,
2757 0, /* adr_type */
2758 0,
2759 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2760 0,
2761 0,
2762 0},
2763
2764 /* Get to the page containing GOT TLS entry for a symbol */
2765 {"gottprel_g1", 0,
2766 0, /* adr_type */
2767 0,
2768 BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2769 0,
2770 0,
2771 0},
2772
2773 /* Get to the page containing GOT TLS entry for a symbol */
2774 {"tlsgd", 0,
2775 BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2776 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2777 0,
2778 0,
2779 0,
2780 0},
2781
2782 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2783 {"tlsgd_lo12", 0,
2784 0, /* adr_type */
2785 0,
2786 0,
2787 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2788 0,
2789 0},
2790
2791 /* Lower 16 bits address/value: MOVk. */
2792 {"tlsgd_g0_nc", 0,
2793 0, /* adr_type */
2794 0,
2795 BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2796 0,
2797 0,
2798 0},
2799
2800 /* Most significant bits 16-31 of address/value: MOVZ. */
2801 {"tlsgd_g1", 0,
2802 0, /* adr_type */
2803 0,
2804 BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2805 0,
2806 0,
2807 0},
2808
2809 /* Get to the page containing GOT TLS entry for a symbol */
2810 {"tlsdesc", 0,
2811 BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2812 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2813 0,
2814 0,
2815 0,
2816 BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2817
2818 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2819 {"tlsdesc_lo12", 0,
2820 0, /* adr_type */
2821 0,
2822 0,
2823 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2824 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2825 0},
2826
2827 /* Get to the page containing GOT TLS entry for a symbol.
2828 The same as GD, we allocate two consecutive GOT slots
2829 for module index and module offset, the only difference
2830 with GD is the module offset should be initialized to
2831 zero without any outstanding runtime relocation. */
2832 {"tlsldm", 0,
2833 BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2834 BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2835 0,
2836 0,
2837 0,
2838 0},
2839
2840 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2841 {"tlsldm_lo12_nc", 0,
2842 0, /* adr_type */
2843 0,
2844 0,
2845 BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2846 0,
2847 0},
2848
2849 /* 12 bit offset into the module TLS base address. */
2850 {"dtprel_lo12", 0,
2851 0, /* adr_type */
2852 0,
2853 0,
2854 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2855 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2856 0},
2857
2858 /* Same as dtprel_lo12, no overflow check. */
2859 {"dtprel_lo12_nc", 0,
2860 0, /* adr_type */
2861 0,
2862 0,
2863 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2864 BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2865 0},
2866
2867 /* bits[23:12] of offset to the module TLS base address. */
2868 {"dtprel_hi12", 0,
2869 0, /* adr_type */
2870 0,
2871 0,
2872 BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2873 0,
2874 0},
2875
2876 /* bits[15:0] of offset to the module TLS base address. */
2877 {"dtprel_g0", 0,
2878 0, /* adr_type */
2879 0,
2880 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2881 0,
2882 0,
2883 0},
2884
2885 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0. */
2886 {"dtprel_g0_nc", 0,
2887 0, /* adr_type */
2888 0,
2889 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2890 0,
2891 0,
2892 0},
2893
2894 /* bits[31:16] of offset to the module TLS base address. */
2895 {"dtprel_g1", 0,
2896 0, /* adr_type */
2897 0,
2898 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2899 0,
2900 0,
2901 0},
2902
2903 /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1. */
2904 {"dtprel_g1_nc", 0,
2905 0, /* adr_type */
2906 0,
2907 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2908 0,
2909 0,
2910 0},
2911
2912 /* bits[47:32] of offset to the module TLS base address. */
2913 {"dtprel_g2", 0,
2914 0, /* adr_type */
2915 0,
2916 BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2917 0,
2918 0,
2919 0},
2920
2921 /* Lower 16 bit offset into GOT entry for a symbol */
2922 {"tlsdesc_off_g0_nc", 0,
2923 0, /* adr_type */
2924 0,
2925 BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2926 0,
2927 0,
2928 0},
2929
2930 /* Higher 16 bit offset into GOT entry for a symbol */
2931 {"tlsdesc_off_g1", 0,
2932 0, /* adr_type */
2933 0,
2934 BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2935 0,
2936 0,
2937 0},
2938
2939 /* Get to the page containing GOT TLS entry for a symbol */
2940 {"gottprel", 0,
2941 0, /* adr_type */
2942 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2943 0,
2944 0,
2945 0,
2946 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2947
2948 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2949 {"gottprel_lo12", 0,
2950 0, /* adr_type */
2951 0,
2952 0,
2953 0,
2954 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2955 0},
2956
2957 /* Get tp offset for a symbol. */
2958 {"tprel", 0,
2959 0, /* adr_type */
2960 0,
2961 0,
2962 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2963 0,
2964 0},
2965
2966 /* Get tp offset for a symbol. */
2967 {"tprel_lo12", 0,
2968 0, /* adr_type */
2969 0,
2970 0,
2971 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2972 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2973 0},
2974
2975 /* Get tp offset for a symbol. */
2976 {"tprel_hi12", 0,
2977 0, /* adr_type */
2978 0,
2979 0,
2980 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2981 0,
2982 0},
2983
2984 /* Get tp offset for a symbol. */
2985 {"tprel_lo12_nc", 0,
2986 0, /* adr_type */
2987 0,
2988 0,
2989 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2990 BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2991 0},
2992
2993 /* Most significant bits 32-47 of address/value: MOVZ. */
2994 {"tprel_g2", 0,
2995 0, /* adr_type */
2996 0,
2997 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2998 0,
2999 0,
3000 0},
3001
3002 /* Most significant bits 16-31 of address/value: MOVZ. */
3003 {"tprel_g1", 0,
3004 0, /* adr_type */
3005 0,
3006 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3007 0,
3008 0,
3009 0},
3010
3011 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
3012 {"tprel_g1_nc", 0,
3013 0, /* adr_type */
3014 0,
3015 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3016 0,
3017 0,
3018 0},
3019
3020 /* Most significant bits 0-15 of address/value: MOVZ. */
3021 {"tprel_g0", 0,
3022 0, /* adr_type */
3023 0,
3024 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3025 0,
3026 0,
3027 0},
3028
3029 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
3030 {"tprel_g0_nc", 0,
3031 0, /* adr_type */
3032 0,
3033 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3034 0,
3035 0,
3036 0},
3037
3038 /* 15bit offset from got entry to base address of GOT table. */
3039 {"gotpage_lo15", 0,
3040 0,
3041 0,
3042 0,
3043 0,
3044 BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3045 0},
3046
3047 /* 14bit offset from got entry to base address of GOT table. */
3048 {"gotpage_lo14", 0,
3049 0,
3050 0,
3051 0,
3052 0,
3053 BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3054 0},
3055 };
3056
3057 /* Given the address of a pointer pointing to the textual name of a
3058 relocation as may appear in assembler source, attempt to find its
3059 details in reloc_table. The pointer will be updated to the character
3060 after the trailing colon. On failure, NULL will be returned;
3061 otherwise return the reloc_table_entry. */
3062
3063 static struct reloc_table_entry *
3064 find_reloc_table_entry (char **str)
3065 {
3066 unsigned int i;
3067 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3068 {
3069 int length = strlen (reloc_table[i].name);
3070
3071 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3072 && (*str)[length] == ':')
3073 {
3074 *str += (length + 1);
3075 return &reloc_table[i];
3076 }
3077 }
3078
3079 return NULL;
3080 }
3081
/* Mode argument to parse_shift and parse_shifter_operand, restricting
   which shift/extend operators are acceptable in the current context.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3096
3097 /* Parse a <shift> operator on an AArch64 data processing instruction.
3098 Return TRUE on success; otherwise return FALSE. */
3099 static bfd_boolean
3100 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3101 {
3102 const struct aarch64_name_value_pair *shift_op;
3103 enum aarch64_modifier_kind kind;
3104 expressionS exp;
3105 int exp_has_prefix;
3106 char *s = *str;
3107 char *p = s;
3108
3109 for (p = *str; ISALPHA (*p); p++)
3110 ;
3111
3112 if (p == *str)
3113 {
3114 set_syntax_error (_("shift expression expected"));
3115 return FALSE;
3116 }
3117
3118 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3119
3120 if (shift_op == NULL)
3121 {
3122 set_syntax_error (_("shift operator expected"));
3123 return FALSE;
3124 }
3125
3126 kind = aarch64_get_operand_modifier (shift_op);
3127
3128 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3129 {
3130 set_syntax_error (_("invalid use of 'MSL'"));
3131 return FALSE;
3132 }
3133
3134 if (kind == AARCH64_MOD_MUL
3135 && mode != SHIFTED_MUL
3136 && mode != SHIFTED_MUL_VL)
3137 {
3138 set_syntax_error (_("invalid use of 'MUL'"));
3139 return FALSE;
3140 }
3141
3142 switch (mode)
3143 {
3144 case SHIFTED_LOGIC_IMM:
3145 if (aarch64_extend_operator_p (kind))
3146 {
3147 set_syntax_error (_("extending shift is not permitted"));
3148 return FALSE;
3149 }
3150 break;
3151
3152 case SHIFTED_ARITH_IMM:
3153 if (kind == AARCH64_MOD_ROR)
3154 {
3155 set_syntax_error (_("'ROR' shift is not permitted"));
3156 return FALSE;
3157 }
3158 break;
3159
3160 case SHIFTED_LSL:
3161 if (kind != AARCH64_MOD_LSL)
3162 {
3163 set_syntax_error (_("only 'LSL' shift is permitted"));
3164 return FALSE;
3165 }
3166 break;
3167
3168 case SHIFTED_MUL:
3169 if (kind != AARCH64_MOD_MUL)
3170 {
3171 set_syntax_error (_("only 'MUL' is permitted"));
3172 return FALSE;
3173 }
3174 break;
3175
3176 case SHIFTED_MUL_VL:
3177 /* "MUL VL" consists of two separate tokens. Require the first
3178 token to be "MUL" and look for a following "VL". */
3179 if (kind == AARCH64_MOD_MUL)
3180 {
3181 skip_whitespace (p);
3182 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3183 {
3184 p += 2;
3185 kind = AARCH64_MOD_MUL_VL;
3186 break;
3187 }
3188 }
3189 set_syntax_error (_("only 'MUL VL' is permitted"));
3190 return FALSE;
3191
3192 case SHIFTED_REG_OFFSET:
3193 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3194 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3195 {
3196 set_fatal_syntax_error
3197 (_("invalid shift for the register offset addressing mode"));
3198 return FALSE;
3199 }
3200 break;
3201
3202 case SHIFTED_LSL_MSL:
3203 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3204 {
3205 set_syntax_error (_("invalid shift operator"));
3206 return FALSE;
3207 }
3208 break;
3209
3210 default:
3211 abort ();
3212 }
3213
3214 /* Whitespace can appear here if the next thing is a bare digit. */
3215 skip_whitespace (p);
3216
3217 /* Parse shift amount. */
3218 exp_has_prefix = 0;
3219 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3220 exp.X_op = O_absent;
3221 else
3222 {
3223 if (is_immediate_prefix (*p))
3224 {
3225 p++;
3226 exp_has_prefix = 1;
3227 }
3228 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3229 }
3230 if (kind == AARCH64_MOD_MUL_VL)
3231 /* For consistency, give MUL VL the same shift amount as an implicit
3232 MUL #1. */
3233 operand->shifter.amount = 1;
3234 else if (exp.X_op == O_absent)
3235 {
3236 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3237 {
3238 set_syntax_error (_("missing shift amount"));
3239 return FALSE;
3240 }
3241 operand->shifter.amount = 0;
3242 }
3243 else if (exp.X_op != O_constant)
3244 {
3245 set_syntax_error (_("constant shift amount required"));
3246 return FALSE;
3247 }
3248 /* For parsing purposes, MUL #n has no inherent range. The range
3249 depends on the operand and will be checked by operand-specific
3250 routines. */
3251 else if (kind != AARCH64_MOD_MUL
3252 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3253 {
3254 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3255 return FALSE;
3256 }
3257 else
3258 {
3259 operand->shifter.amount = exp.X_add_number;
3260 operand->shifter.amount_present = 1;
3261 }
3262
3263 operand->shifter.operator_present = 1;
3264 operand->shifter.kind = kind;
3265
3266 *str = p;
3267 return TRUE;
3268 }
3269
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
			   enum parse_shift_mode mode)
{
  char *p;

  /* Only the two immediate-capable modes can reach this routine.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return FALSE;

  p = *str;

  /* Accept an immediate expression.  */
  if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
    return FALSE;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return FALSE;

  /* Reject any shifter following a logical immediate value.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return FALSE;
    }

  *str = p;
  return TRUE;
}
3310
3311 /* Parse a <shifter_operand> for a data processing instruction:
3312
3313 <Rm>
3314 <Rm>, <shift>
3315 #<immediate>
3316 #<immediate>, LSL #imm
3317
3318 where <shift> is handled by parse_shift above, and the last two
3319 cases are handled by the function above.
3320
3321 Validation of immediate operands is deferred to md_apply_fix.
3322
3323 Return TRUE on success; otherwise return FALSE. */
3324
3325 static bfd_boolean
3326 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3327 enum parse_shift_mode mode)
3328 {
3329 const reg_entry *reg;
3330 aarch64_opnd_qualifier_t qualifier;
3331 enum aarch64_operand_class opd_class
3332 = aarch64_get_operand_class (operand->type);
3333
3334 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3335 if (reg)
3336 {
3337 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3338 {
3339 set_syntax_error (_("unexpected register in the immediate operand"));
3340 return FALSE;
3341 }
3342
3343 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3344 {
3345 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3346 return FALSE;
3347 }
3348
3349 operand->reg.regno = reg->number;
3350 operand->qualifier = qualifier;
3351
3352 /* Accept optional shift operation on register. */
3353 if (! skip_past_comma (str))
3354 return TRUE;
3355
3356 if (! parse_shift (str, operand, mode))
3357 return FALSE;
3358
3359 return TRUE;
3360 }
3361 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3362 {
3363 set_syntax_error
3364 (_("integer register expected in the extended/shifted operand "
3365 "register"));
3366 return FALSE;
3367 }
3368
3369 /* We have a shifted immediate variable. */
3370 return parse_shifter_operand_imm (str, operand, mode);
3371 }
3372
/* Parse a <shifter_operand> that may be introduced by a relocation
   modifier such as ":lo12:sym"; anything without a modifier is punted
   to parse_shifter_operand.  Return TRUE on success; return FALSE
   otherwise.  */

static bfd_boolean
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
			     enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      if (p[0] == '#')
	p += 2;
      else
	p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
	{
	  set_syntax_error (_("unknown relocation modifier"));
	  return FALSE;
	}

      /* This parsing path only supports modifiers usable on ADD.  */
      if (entry->add_type == 0)
	{
	  set_syntax_error
	    (_("this relocation modifier is not allowed on this instruction"));
	  return FALSE;
	}

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
	return FALSE;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
	return TRUE;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
	 recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3433
3434 /* Parse all forms of an address expression. Information is written
3435 to *OPERAND and/or inst.reloc.
3436
3437 The A64 instruction set has the following addressing modes:
3438
3439 Offset
3440 [base] // in SIMD ld/st structure
3441 [base{,#0}] // in ld/st exclusive
3442 [base{,#imm}]
3443 [base,Xm{,LSL #imm}]
3444 [base,Xm,SXTX {#imm}]
3445 [base,Wm,(S|U)XTW {#imm}]
3446 Pre-indexed
3447 [base]! // in ldraa/ldrab exclusive
3448 [base,#imm]!
3449 Post-indexed
3450 [base],#imm
3451 [base],Xm // in SIMD ld/st structure
3452 PC-relative (literal)
3453 label
3454 SVE:
3455 [base,#imm,MUL VL]
3456 [base,Zm.D{,LSL #imm}]
3457 [base,Zm.S,(S|U)XTW {#imm}]
3458 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3459 [Zn.S,#imm]
3460 [Zn.D,#imm]
3461 [Zn.S{, Xm}]
3462 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3463 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3464 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3465
3466 (As a convenience, the notation "=immediate" is permitted in conjunction
3467 with the pc-relative literal load instructions to automatically place an
3468 immediate value or symbolic address in a nearby literal pool and generate
3469 a hidden label which references it.)
3470
3471 Upon a successful parsing, the address structure in *OPERAND will be
3472 filled in the following way:
3473
3474 .base_regno = <base>
3475 .offset.is_reg // 1 if the offset is a register
3476 .offset.imm = <imm>
3477 .offset.regno = <Rm>
3478
3479 For different addressing modes defined in the A64 ISA:
3480
3481 Offset
3482 .pcrel=0; .preind=1; .postind=0; .writeback=0
3483 Pre-indexed
3484 .pcrel=0; .preind=1; .postind=0; .writeback=1
3485 Post-indexed
3486 .pcrel=0; .preind=0; .postind=1; .writeback=1
3487 PC-relative (literal)
3488 .pcrel=1; .preind=1; .postind=0; .writeback=0
3489
3490 The shift/extension information, if any, will be stored in .shifter.
3491 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3492 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3493 corresponding register.
3494
3495 BASE_TYPE says which types of base register should be accepted and
3496 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3497 is the type of shifter that is allowed for immediate offsets,
3498 or SHIFTED_NONE if none.
3499
3500 In all other respects, it is the caller's responsibility to check
3501 for addressing modes not supported by the instruction, and to set
3502 inst.reloc.type. */
3503
static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand,
		    aarch64_opnd_qualifier_t *base_qualifier,
		    aarch64_opnd_qualifier_t *offset_qualifier,
		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
		    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  /* All relocation/expression results are accumulated in inst.reloc.  */
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  /* An address that does not start with '[' must be a PC-relative form:
     either a literal/label or the "=immediate" literal-pool shorthand.  */
  if (! skip_past_char (&p, '['))
    {
      /* =immediate or label.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
	{
	  bfd_reloc_code_real_type ty;
	  struct reloc_table_entry *entry;

	  /* Try to parse a relocation modifier.  Anything else is
	     an error.  */
	  entry = find_reloc_table_entry (&p);
	  if (! entry)
	    {
	      set_syntax_error (_("unknown relocation modifier"));
	      return FALSE;
	    }

	  /* ADR takes the adr-specific variant of the relocation; every
	     other user of this form is a load-literal instruction.  */
	  switch (operand->type)
	    {
	    case AARCH64_OPND_ADDR_PCREL21:
	      /* adr */
	      ty = entry->adr_type;
	      break;

	    default:
	      ty = entry->ld_literal_type;
	      break;
	    }

	  if (ty == 0)
	    {
	      set_syntax_error
		(_("this relocation modifier is not allowed on this "
		   "instruction"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:  */
	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid relocation expression"));
	      return FALSE;
	    }

	  /* #:<reloc_op>:<expr>  */
	  /* Record the relocation type.  */
	  inst.reloc.type = ty;
	  inst.reloc.pc_rel = entry->pc_rel;
	}
      else
	{

	  if (skip_past_char (&p, '='))
	    /* =immediate; need to generate the literal in the literal pool.  */
	    inst.gen_lit_pool = 1;

	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
	    {
	      set_syntax_error (_("invalid address"));
	      return FALSE;
	    }
	}

      *str = p;
      return TRUE;
    }

  /* [ */

  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
	{
	  if (!aarch64_check_reg_type (reg, offset_type))
	    {
	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
	      return FALSE;
	    }

	  /* [Xn,Rm */
	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	  /* Shifted index.  */
	  if (skip_past_comma (&p))
	    {
	      /* [Xn,Rm, */
	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so not set new
		   error message here.  */
		return FALSE;
	    }
	  /* We only accept:
	     [base,Xm]  # For vector plus scalar SVE2 indexing.
	     [base,Xm{,LSL #imm}]
	     [base,Xm,SXTX {#imm}]
	     [base,Wm,(S|U)XTW {#imm}]  */
	  if (operand->shifter.kind == AARCH64_MOD_NONE
	      || operand->shifter.kind == AARCH64_MOD_LSL
	      || operand->shifter.kind == AARCH64_MOD_SXTX)
	    {
	      /* LSL/SXTX (or no shifter) require a 64-bit offset.  */
	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
		{
		  set_syntax_error (_("invalid use of 32-bit register offset"));
		  return FALSE;
		}
	      if (aarch64_get_qualifier_esize (*base_qualifier)
		  != aarch64_get_qualifier_esize (*offset_qualifier)
		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
		      || *base_qualifier != AARCH64_OPND_QLF_S_S
		      || *offset_qualifier != AARCH64_OPND_QLF_X))
		{
		  set_syntax_error (_("offset has different size from base"));
		  return FALSE;
		}
	    }
	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
	    {
	      /* (S|U)XTW shifters conversely demand a 32-bit offset.  */
	      set_syntax_error (_("invalid use of 64-bit register offset"));
	      return FALSE;
	    }
	}
      else
	{
	  /* [Xn,#:<reloc_op>:<symbol> */
	  skip_past_char (&p, '#');
	  if (skip_past_char (&p, ':'))
	    {
	      struct reloc_table_entry *entry;

	      /* Try to parse a relocation modifier.  Anything else is
		 an error.  */
	      if (!(entry = find_reloc_table_entry (&p)))
		{
		  set_syntax_error (_("unknown relocation modifier"));
		  return FALSE;
		}

	      if (entry->ldst_type == 0)
		{
		  set_syntax_error
		    (_("this relocation modifier is not allowed on this "
		       "instruction"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>: */
	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
		{
		  set_syntax_error (_("invalid relocation expression"));
		  return FALSE;
		}

	      /* [Xn,#:<reloc_op>:<expr> */
	      /* Record the load/store relocation type.  */
	      inst.reloc.type = entry->ldst_type;
	      inst.reloc.pc_rel = entry->pc_rel;
	    }
	  else
	    {
	      if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
		{
		  set_syntax_error (_("invalid expression in the address"));
		  return FALSE;
		}
	      /* [Xn,<expr> */
	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
		/* [Xn,<expr>,<shifter> */
		if (! parse_shift (&p, operand, imm_shift_mode))
		  return FALSE;
	    }
	}
    }

  /* The closing bracket is mandatory for every '['-form address.  */
  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  /* Parse the optional writeback ('!') or post-index (',...') suffix.  */
  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
	{
	  set_syntax_error (_("register offset not allowed in pre-indexed "
			      "addressing mode"));
	  return FALSE;
	}
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
	{
	  set_syntax_error (_("cannot combine pre- and post-indexing"));
	  return FALSE;
	}

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
	{
	  /* [Xn],Xm */
	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
	    {
	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
	      return FALSE;
	    }

	  operand->addr.offset.regno = reg->number;
	  operand->addr.offset.is_reg = 1;
	}
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
	{
	  /* [Xn],#expr */
	  set_syntax_error (_("invalid expression in the address"));
	  return FALSE;
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
	{
	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
	    {
	      /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
	      operand->addr.offset.is_reg = 0;
	      operand->addr.offset.imm = 0;
	      operand->addr.preind = 1;
	    }
	  else
	    {
	      /* Reject [Rn]!   */
	      set_syntax_error (_("missing offset in the pre-indexed address"));
	      return FALSE;
	    }
	}
      else
	{
	  operand->addr.preind = 1;
	  if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
	    {
	      operand->addr.offset.is_reg = 1;
	      operand->addr.offset.regno = REG_ZR;
	      *offset_qualifier = AARCH64_OPND_QLF_X;
	    }
	  else
	    {
	      /* [Rn] is treated as [Rn,#0].  */
	      inst.reloc.exp.X_op = O_constant;
	      inst.reloc.exp.X_add_number = 0;
	    }
	}
    }

  *str = p;
  return TRUE;
}
3804
3805 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3806 on success. */
3807 static bfd_boolean
3808 parse_address (char **str, aarch64_opnd_info *operand)
3809 {
3810 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3811 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3812 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3813 }
3814
3815 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3816 The arguments have the same meaning as for parse_address_main.
3817 Return TRUE on success. */
3818 static bfd_boolean
3819 parse_sve_address (char **str, aarch64_opnd_info *operand,
3820 aarch64_opnd_qualifier_t *base_qualifier,
3821 aarch64_opnd_qualifier_t *offset_qualifier)
3822 {
3823 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3824 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3825 SHIFTED_MUL_VL);
3826 }
3827
3828 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3829 Return TRUE on success; otherwise return FALSE. */
3830 static bfd_boolean
3831 parse_half (char **str, int *internal_fixup_p)
3832 {
3833 char *p = *str;
3834
3835 skip_past_char (&p, '#');
3836
3837 gas_assert (internal_fixup_p);
3838 *internal_fixup_p = 0;
3839
3840 if (*p == ':')
3841 {
3842 struct reloc_table_entry *entry;
3843
3844 /* Try to parse a relocation. Anything else is an error. */
3845 ++p;
3846 if (!(entry = find_reloc_table_entry (&p)))
3847 {
3848 set_syntax_error (_("unknown relocation modifier"));
3849 return FALSE;
3850 }
3851
3852 if (entry->movw_type == 0)
3853 {
3854 set_syntax_error
3855 (_("this relocation modifier is not allowed on this instruction"));
3856 return FALSE;
3857 }
3858
3859 inst.reloc.type = entry->movw_type;
3860 }
3861 else
3862 *internal_fixup_p = 1;
3863
3864 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3865 return FALSE;
3866
3867 *str = p;
3868 return TRUE;
3869 }
3870
3871 /* Parse an operand for an ADRP instruction:
3872 ADRP <Xd>, <label>
3873 Return TRUE on success; otherwise return FALSE. */
3874
3875 static bfd_boolean
3876 parse_adrp (char **str)
3877 {
3878 char *p;
3879
3880 p = *str;
3881 if (*p == ':')
3882 {
3883 struct reloc_table_entry *entry;
3884
3885 /* Try to parse a relocation. Anything else is an error. */
3886 ++p;
3887 if (!(entry = find_reloc_table_entry (&p)))
3888 {
3889 set_syntax_error (_("unknown relocation modifier"));
3890 return FALSE;
3891 }
3892
3893 if (entry->adrp_type == 0)
3894 {
3895 set_syntax_error
3896 (_("this relocation modifier is not allowed on this instruction"));
3897 return FALSE;
3898 }
3899
3900 inst.reloc.type = entry->adrp_type;
3901 }
3902 else
3903 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3904
3905 inst.reloc.pc_rel = 1;
3906
3907 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3908 return FALSE;
3909
3910 *str = p;
3911 return TRUE;
3912 }
3913
3914 /* Miscellaneous. */
3915
3916 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3917 of SIZE tokens in which index I gives the token for field value I,
3918 or is null if field value I is invalid. REG_TYPE says which register
3919 names should be treated as registers rather than as symbolic immediates.
3920
3921 Return true on success, moving *STR past the operand and storing the
3922 field value in *VAL. */
3923
3924 static int
3925 parse_enum_string (char **str, int64_t *val, const char *const *array,
3926 size_t size, aarch64_reg_type reg_type)
3927 {
3928 expressionS exp;
3929 char *p, *q;
3930 size_t i;
3931
3932 /* Match C-like tokens. */
3933 p = q = *str;
3934 while (ISALNUM (*q))
3935 q++;
3936
3937 for (i = 0; i < size; ++i)
3938 if (array[i]
3939 && strncasecmp (array[i], p, q - p) == 0
3940 && array[i][q - p] == 0)
3941 {
3942 *val = i;
3943 *str = q;
3944 return TRUE;
3945 }
3946
3947 if (!parse_immediate_expression (&p, &exp, reg_type))
3948 return FALSE;
3949
3950 if (exp.X_op == O_constant
3951 && (uint64_t) exp.X_add_number < size)
3952 {
3953 *val = exp.X_add_number;
3954 *str = p;
3955 return TRUE;
3956 }
3957
3958 /* Use the default error for this operand. */
3959 return FALSE;
3960 }
3961
3962 /* Parse an option for a preload instruction. Returns the encoding for the
3963 option, or PARSE_FAIL. */
3964
3965 static int
3966 parse_pldop (char **str)
3967 {
3968 char *p, *q;
3969 const struct aarch64_name_value_pair *o;
3970
3971 p = q = *str;
3972 while (ISALNUM (*q))
3973 q++;
3974
3975 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
3976 if (!o)
3977 return PARSE_FAIL;
3978
3979 *str = q;
3980 return o->value;
3981 }
3982
3983 /* Parse an option for a barrier instruction. Returns the encoding for the
3984 option, or PARSE_FAIL. */
3985
3986 static int
3987 parse_barrier (char **str)
3988 {
3989 char *p, *q;
3990 const struct aarch64_name_value_pair *o;
3991
3992 p = q = *str;
3993 while (ISALPHA (*q))
3994 q++;
3995
3996 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3997 if (!o)
3998 return PARSE_FAIL;
3999
4000 *str = q;
4001 return o->value;
4002 }
4003
4004 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4005 return 0 if successful. Otherwise return PARSE_FAIL. */
4006
4007 static int
4008 parse_barrier_psb (char **str,
4009 const struct aarch64_name_value_pair ** hint_opt)
4010 {
4011 char *p, *q;
4012 const struct aarch64_name_value_pair *o;
4013
4014 p = q = *str;
4015 while (ISALPHA (*q))
4016 q++;
4017
4018 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4019 if (!o)
4020 {
4021 set_fatal_syntax_error
4022 ( _("unknown or missing option to PSB/TSB"));
4023 return PARSE_FAIL;
4024 }
4025
4026 if (o->value != 0x11)
4027 {
4028 /* PSB only accepts option name 'CSYNC'. */
4029 set_syntax_error
4030 (_("the specified option is not accepted for PSB/TSB"));
4031 return PARSE_FAIL;
4032 }
4033
4034 *str = q;
4035 *hint_opt = o;
4036 return 0;
4037 }
4038
4039 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4040 return 0 if successful. Otherwise return PARSE_FAIL. */
4041
4042 static int
4043 parse_bti_operand (char **str,
4044 const struct aarch64_name_value_pair ** hint_opt)
4045 {
4046 char *p, *q;
4047 const struct aarch64_name_value_pair *o;
4048
4049 p = q = *str;
4050 while (ISALPHA (*q))
4051 q++;
4052
4053 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4054 if (!o)
4055 {
4056 set_fatal_syntax_error
4057 ( _("unknown option to BTI"));
4058 return PARSE_FAIL;
4059 }
4060
4061 switch (o->value)
4062 {
4063 /* Valid BTI operands. */
4064 case HINT_OPD_C:
4065 case HINT_OPD_J:
4066 case HINT_OPD_JC:
4067 break;
4068
4069 default:
4070 set_syntax_error
4071 (_("unknown option to BTI"));
4072 return PARSE_FAIL;
4073 }
4074
4075 *str = q;
4076 *hint_opt = o;
4077 return 0;
4078 }
4079
4080 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4081 Returns the encoding for the option, or PARSE_FAIL.
4082
4083 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4084 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4085
4086 If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4087 field, otherwise as a system register.
4088 */
4089
4090 static int
4091 parse_sys_reg (char **str, htab_t sys_regs,
4092 int imple_defined_p, int pstatefield_p,
4093 uint32_t* flags)
4094 {
4095 char *p, *q;
4096 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4097 const aarch64_sys_reg *o;
4098 int value;
4099
4100 p = buf;
4101 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4102 if (p < buf + (sizeof (buf) - 1))
4103 *p++ = TOLOWER (*q);
4104 *p = '\0';
4105
4106 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4107 valid system register. This is enforced by construction of the hash
4108 table. */
4109 if (p - buf != q - *str)
4110 return PARSE_FAIL;
4111
4112 o = str_hash_find (sys_regs, buf);
4113 if (!o)
4114 {
4115 if (!imple_defined_p)
4116 return PARSE_FAIL;
4117 else
4118 {
4119 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
4120 unsigned int op0, op1, cn, cm, op2;
4121
4122 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
4123 != 5)
4124 return PARSE_FAIL;
4125 if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
4126 return PARSE_FAIL;
4127 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
4128 if (flags)
4129 *flags = 0;
4130 }
4131 }
4132 else
4133 {
4134 if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
4135 as_bad (_("selected processor does not support PSTATE field "
4136 "name '%s'"), buf);
4137 if (!pstatefield_p
4138 && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
4139 o->value, o->flags, o->features))
4140 as_bad (_("selected processor does not support system register "
4141 "name '%s'"), buf);
4142 if (aarch64_sys_reg_deprecated_p (o->flags))
4143 as_warn (_("system register name '%s' is deprecated and may be "
4144 "removed in a future release"), buf);
4145 value = o->value;
4146 if (flags)
4147 *flags = o->flags;
4148 }
4149
4150 *str = q;
4151 return value;
4152 }
4153
4154 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
4155 for the option, or NULL. */
4156
4157 static const aarch64_sys_ins_reg *
4158 parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
4159 {
4160 char *p, *q;
4161 char buf[AARCH64_MAX_SYSREG_NAME_LEN];
4162 const aarch64_sys_ins_reg *o;
4163
4164 p = buf;
4165 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4166 if (p < buf + (sizeof (buf) - 1))
4167 *p++ = TOLOWER (*q);
4168 *p = '\0';
4169
4170 /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
4171 valid system register. This is enforced by construction of the hash
4172 table. */
4173 if (p - buf != q - *str)
4174 return NULL;
4175
4176 o = str_hash_find (sys_ins_regs, buf);
4177 if (!o)
4178 return NULL;
4179
4180 if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
4181 o->name, o->value, o->flags, 0))
4182 as_bad (_("selected processor does not support system register "
4183 "name '%s'"), buf);
4184 if (aarch64_sys_reg_deprecated_p (o->flags))
4185 as_warn (_("system register name '%s' is deprecated and may be "
4186 "removed in a future release"), buf);
4187
4188 *str = q;
4189 return o;
4190 }
4191 \f
/* Operand-parsing helper macros used by md_assemble's operand loop.  Each
   macro either succeeds, or records a diagnostic and jumps to the local
   "failure" label of the enclosing function.  */

/* Consume the literal character CHR or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of REGTYPE into VAL/RTYPE or fail.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of REG_TYPE, storing its number and
   qualifier into the current operand INFO, or fail.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL with no range check, or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or fail.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
#min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic enum value from ARRAY into VAL, or fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR and fail when it yields false/zero.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4243 \f
/* Encode the 12-bit immediate of an add/sub (immediate) instruction;
   the field occupies bits [21:10].  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  return imm << 10;
}
4250
/* Encode the shift-amount selector of an add/sub (immediate) instruction;
   the field occupies bits [23:22].  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  return cnt << 22;
}
4257
4258
/* Encode the 21-bit immediate of ADR: the low two bits go into immlo
   (bits [30:29]) and the remaining 19 bits into immhi (bits [23:5]).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;
  uint32_t immhi = (imm >> 2) & 0x7ffff;

  return (immlo << 29) | (immhi << 5);
}
4266
/* Encode the 16-bit payload of a move-wide (MOVZ/MOVN/MOVK) instruction;
   the field occupies bits [20:5].  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  return imm << 5;
}
4273
/* Encode the 26-bit offset of an unconditional branch (B/BL); the field
   occupies bits [25:0].  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x03ffffff;
}
4280
/* Encode the 19-bit offset of a conditional branch or compare-and-branch;
   the field occupies bits [23:5].  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4287
/* Encode the 19-bit offset of a load-literal instruction; the field
   occupies bits [23:5].  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}
4294
/* Encode the 14-bit offset of a test-and-branch (TBZ/TBNZ) instruction;
   the field occupies bits [18:5].  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fff) << 5;
}
4301
/* Encode the 16-bit immediate of an exception-generation (SVC/HVC/SMC)
   instruction; the field occupies bits [20:5].  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  return imm << 5;
}
4308
/* Flip an add(s) opcode to the corresponding sub(s) opcode, or vice
   versa, by toggling the op bit (bit 30).  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ 0x40000000;
}
4315
/* Force a MOVZ/MOVN-family opcode to its MOVZ form by setting the opc
   bit (bit 30).  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | 0x40000000;
}
4321
/* Force a MOVZ/MOVN-family opcode to its MOVN form by clearing the opc
   bit (bit 30).  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & 0xbfffffff;
}
4327
4328 /* Overall per-instruction processing. */
4329
4330 /* We need to be able to fix up arbitrary expressions in some statements.
4331 This is so that we can handle symbols that are an arbitrary distance from
4332 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4333 which returns part of an address in a form which will be valid for
4334 a data instruction. We do this by pushing the expression into a symbol
4335 in the expr_section, and creating a fix for that. */
4336
4337 static fixS *
4338 fix_new_aarch64 (fragS * frag,
4339 int where,
4340 short int size,
4341 expressionS * exp,
4342 int pc_rel,
4343 int reloc)
4344 {
4345 fixS *new_fix;
4346
4347 switch (exp->X_op)
4348 {
4349 case O_constant:
4350 case O_symbol:
4351 case O_add:
4352 case O_subtract:
4353 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4354 break;
4355
4356 default:
4357 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4358 pc_rel, reloc);
4359 break;
4360 }
4361 return new_fix;
4362 }
4363 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The array is indexed
   by the aarch64_operand_error_kind value, so the entries must stay in
   the same order as that enum.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4385
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
				 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the severity order being encoded in
     the enum values themselves; assert that the order still holds.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4406
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.
   N.B. the returned pointer refers to a static buffer, so the result is
   only valid until the next call.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4435
4436 static void
4437 reset_aarch64_instruction (aarch64_instruction *instruction)
4438 {
4439 memset (instruction, '\0', sizeof (aarch64_instruction));
4440 instruction->reloc.type = BFD_RELOC_UNUSED;
4441 }
4442
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode this error was found for.  */
  aarch64_operand_error detail;		/* The error itself.  */
  struct operand_error_record *next;	/* Singly-linked list link.  */
};

typedef struct operand_error_record operand_error_record;

struct operand_errors
{
  operand_error_record *head;	/* Most recently inserted record.  */
  operand_error_record *tail;	/* Oldest record in the list.  */
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled to avoid repeated allocation.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4474
4475 /* Initialize the data structure that stores the operand mismatch
4476 information on assembling one line of the assembly code. */
4477 static void
4478 init_operand_error_report (void)
4479 {
4480 if (operand_error_report.head != NULL)
4481 {
4482 gas_assert (operand_error_report.tail != NULL);
4483 operand_error_report.tail->next = free_opnd_error_record_nodes;
4484 free_opnd_error_record_nodes = operand_error_report.head;
4485 operand_error_report.head = NULL;
4486 operand_error_report.tail = NULL;
4487 return;
4488 }
4489 gas_assert (operand_error_report.tail == NULL);
4490 }
4491
4492 /* Return TRUE if some operand error has been recorded during the
4493 parsing of the current assembly line using the opcode *OPCODE;
4494 otherwise return FALSE. */
4495 static inline bfd_boolean
4496 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4497 {
4498 operand_error_record *record = operand_error_report.head;
4499 return record && record->opcode == opcode;
4500 }
4501
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Reuse a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  /* Otherwise RECORD is the existing head record for OPCODE; decide
     whether the new error should replace the one already stored.  */
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  record->detail = new_record->detail;
}
4553
4554 static inline void
4555 record_operand_error_info (const aarch64_opcode *opcode,
4556 aarch64_operand_error *error_info)
4557 {
4558 operand_error_record record;
4559 record.opcode = opcode;
4560 record.detail = *error_info;
4561 add_operand_error_record (&record);
4562 }
4563
4564 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4565 error message *ERROR, for operand IDX (count from 0). */
4566
4567 static void
4568 record_operand_error (const aarch64_opcode *opcode, int idx,
4569 enum aarch64_operand_error_kind kind,
4570 const char* error)
4571 {
4572 aarch64_operand_error info;
4573 memset(&info, 0, sizeof (info));
4574 info.index = idx;
4575 info.kind = kind;
4576 info.error = error;
4577 info.non_fatal = FALSE;
4578 record_operand_error_info (opcode, &info);
4579 }
4580
4581 static void
4582 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4583 enum aarch64_operand_error_kind kind,
4584 const char* error, const int *extra_data)
4585 {
4586 aarch64_operand_error info;
4587 info.index = idx;
4588 info.kind = kind;
4589 info.error = error;
4590 info.data[0] = extra_data[0];
4591 info.data[1] = extra_data[1];
4592 info.data[2] = extra_data[2];
4593 info.non_fatal = FALSE;
4594 record_operand_error_info (opcode, &info);
4595 }
4596
4597 static void
4598 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4599 const char* error, int lower_bound,
4600 int upper_bound)
4601 {
4602 int data[3] = {lower_bound, upper_bound, 0};
4603 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4604 error, data);
4605 }
4606
4607 /* Remove the operand error record for *OPCODE. */
4608 static void ATTRIBUTE_UNUSED
4609 remove_operand_error_record (const aarch64_opcode *opcode)
4610 {
4611 if (opcode_has_operand_error_p (opcode))
4612 {
4613 operand_error_record* record = operand_error_report.head;
4614 gas_assert (record != NULL && operand_error_report.tail != NULL);
4615 operand_error_report.head = record->next;
4616 record->next = free_opnd_error_record_nodes;
4617 free_opnd_error_record_nodes = record;
4618 if (operand_error_report.head == NULL)
4619 {
4620 gas_assert (operand_error_report.tail == record);
4621 operand_error_report.tail = NULL;
4622 }
4623 }
4624 }
4625
4626 /* Given the instruction in *INSTR, return the index of the best matched
4627 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4628
4629 Return -1 if there is no qualifier sequence; return the first match
4630 if there is multiple matches found. */
4631
4632 static int
4633 find_best_match (const aarch64_inst *instr,
4634 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4635 {
4636 int i, num_opnds, max_num_matched, idx;
4637
4638 num_opnds = aarch64_num_of_operands (instr->opcode);
4639 if (num_opnds == 0)
4640 {
4641 DEBUG_TRACE ("no operand");
4642 return -1;
4643 }
4644
4645 max_num_matched = 0;
4646 idx = 0;
4647
4648 /* For each pattern. */
4649 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4650 {
4651 int j, num_matched;
4652 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4653
4654 /* Most opcodes has much fewer patterns in the list. */
4655 if (empty_qualifier_sequence_p (qualifiers))
4656 {
4657 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4658 break;
4659 }
4660
4661 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4662 if (*qualifiers == instr->operands[j].qualifier)
4663 ++num_matched;
4664
4665 if (num_matched > max_num_matched)
4666 {
4667 max_num_matched = num_matched;
4668 idx = i;
4669 }
4670 }
4671
4672 DEBUG_TRACE ("return with %d", idx);
4673 return idx;
4674 }
4675
4676 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4677 corresponding operands in *INSTR. */
4678
4679 static inline void
4680 assign_qualifier_sequence (aarch64_inst *instr,
4681 const aarch64_opnd_qualifier_t *qualifiers)
4682 {
4683 int i = 0;
4684 int num_opnds = aarch64_num_of_operands (instr->opcode);
4685 gas_assert (num_opnds);
4686 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4687 instr->operands[i].qualifier = *qualifiers;
4688 }
4689
4690 /* Print operands for the diagnosis purpose. */
4691
4692 static void
4693 print_operands (char *buf, const aarch64_opcode *opcode,
4694 const aarch64_opnd_info *opnds)
4695 {
4696 int i;
4697
4698 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4699 {
4700 char str[128];
4701
4702 /* We regard the opcode operand info more, however we also look into
4703 the inst->operands to support the disassembling of the optional
4704 operand.
4705 The two operand code should be the same in all cases, apart from
4706 when the operand can be optional. */
4707 if (opcode->operands[i] == AARCH64_OPND_NIL
4708 || opnds[i].type == AARCH64_OPND_NIL)
4709 break;
4710
4711 /* Generate the operand string in STR. */
4712 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4713 NULL, cpu_variant);
4714
4715 /* Delimiter. */
4716 if (str[0] != '\0')
4717 strcat (buf, i == 0 ? " " : ", ");
4718
4719 /* Append the operand string. */
4720 strcat (buf, str);
4721 }
4722 }
4723
/* Send to stderr a string as information, prefixed with the current
   input file name and, when known, the line number.  */

static void
output_info (const char *format, ...)
{
  unsigned int line;
  const char *file = as_where (&line);
  va_list args;

  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4747
/* Output one operand error record: emit a warning (non-fatal errors) or an
   error (fatal ones) describing RECORD, quoting the assembly line STR.  For
   AARCH64_OPDE_INVALID_VARIANT a "did you mean" hint plus the other valid
   qualifier variants may also be printed.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX < 0 means the error is not tied to a specific operand.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Non-fatal problems are reported via as_warn, fatal ones via as_bad.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Re-parse the operands; this is expected to succeed
	     since the mismatch is only in the qualifiers.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* The encode is expected to fail here (non-zero return) — we are
	     diagnosing an invalid variant after all.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the valid lower/upper bounds.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected number of registers.  */
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] holds the required alignment.  */
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4924
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bfd_boolean non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      /* When NON_FATAL_ONLY, fatal records are skipped, so KIND may remain
	 AARCH64_OPDE_NIL (checked by the assertion below).  */
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5022 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Emit the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
5033
/* Read a 32-bit AArch64 instruction from BUF - always little-endian.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;

  /* Assemble the value from four little-endian bytes.  */
  return ((uint32_t) p[0]
	  | ((uint32_t) p[1] << 8)
	  | ((uint32_t) p[2] << 16)
	  | ((uint32_t) p[3] << 24));
}
5043
/* Emit the assembled instruction held in the global INST to the current
   frag and, when a relocation is pending, create the corresponding fix-up.
   NEW_INST, when non-NULL, is attached to the fix-up for later use (see
   the md_apply_fix note below).  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room in the current frag for one 4-byte instruction.  */
  to = frag_more (INSN_SIZE);

  /* Mark this frag as containing AArch64 code.  */
  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      /* Internal fixups carry the operand and flags so the back-end can
	 finish encoding later.  */
      if (aarch64_gas_internal_fixup_p ())
	{
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Record the instruction for DWARF line-number information.  */
  dwarf2_emit_insn (INSN_SIZE);
}
5077
/* Link together opcodes of the same name.  */

struct templates
{
  /* One opcode entry sharing the mnemonic.  */
  aarch64_opcode *opcode;
  /* Next template with the same mnemonic, or NULL at the end of the
     chain.  */
  struct templates *next;
};

typedef struct templates templates;
5087
5088 static templates *
5089 lookup_mnemonic (const char *start, int len)
5090 {
5091 templates *templ = NULL;
5092
5093 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5094 return templ;
5095 }
5096
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.  On success *STR is advanced past the
   mnemonic (including any ".cond" suffix); on failure it is left at
   the character that could not be consumed.  Also sets inst.cond.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  Remember only the first '.'.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Reject an empty mnemonic or one that begins with '.'.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      /* The text between the '.' and the end of the name is looked up as
	 a condition code.  */
      cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown condition: leave *STR at the '.' for diagnosis.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional */
      /* LEN <= 13 keeps LEN + 2 within condname[16]; the copy is not
	 NUL-terminated since lookup_mnemonic takes an explicit length.  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5160
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL
   (after recording an error via first_error) on failure.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the qualifier for a specific
     width is computed as an offset from these (see below).  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* Predicate qualifiers (zeroing / merging).  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register: relies on the S_B..S_Q qualifiers being
	 laid out in the same order as vector_el_type.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit vector registers are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can established by shifting the vector width by
	 a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5235
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* The default comes from the opcode table entry.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default value is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Register-lane operands: the default value is the register number of
       the lane operand.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default value is the immediate itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    /* Omitted "MUL #n" scaling defaults to MUL #1.  */
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    /* An omitted exception immediate needs no relocation.  */
    case AARCH64_OPND_EXCEPTION:
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    /* The default value indexes the barrier-option table.  */
    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    /* The default value indexes the hint-option table.  */
    case AARCH64_OPND_BTI_TARGET:
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5334
/* Process the relocation type for move wide instructions.
   Derive the implicit LSL amount (0/16/32/48) for operand 1 from the
   relocation group number, and reject relocation types that are invalid
   for MOVK or for 32-bit registers.
   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      /* These relocation types are rejected for MOVK (diagnosed below).  */
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  switch (inst.reloc.type)
    {
    /* Group 0 relocations: bits [15:0], no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* Group 1 relocations: bits [31:16], LSL #16.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* Group 2 relocations: bits [47:32], LSL #32; 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    /* Group 3 relocations: bits [63:48], LSL #48; 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5436
/* A primitive log calculator.  Return log2 of SIZE for SIZE in
   {1, 2, 4, 8, 16}; any other value is a caller bug (asserted).  */

static inline unsigned int
get_logsz (unsigned int size)
{
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject 0 as well as values above 16: SIZE == 0 would otherwise make
     the unsigned index SIZE - 1 wrap around and read ls[] out of
     bounds.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  /* Non-power-of-two sizes map to the (unsigned char)-1 sentinel.  */
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5452
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.  The
   choice depends on the transfer size implied by the qualifier of
   operand 1 (8/16/32/64/128 bits) and on which of the five pseudo
   reloc families is in use.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: one per pseudo reloc family, in the same order as the pseudo
     reloc codes; columns: log2 of the transfer size in bytes.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If operand 1's qualifier is still unknown, derive the expected one
     from the qualifier list and operand 0's qualifier.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* The TLS families have no 128-bit variant (see the NONE entries).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5531
5532 /* Check whether a register list REGINFO is valid. The registers must be
5533 numbered in increasing order (modulo 32), in increments of one or two.
5534
5535 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5536 increments of two.
5537
5538 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5539
5540 static bfd_boolean
5541 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5542 {
5543 uint32_t i, nb_regs, prev_regno, incr;
5544
5545 nb_regs = 1 + (reginfo & 0x3);
5546 reginfo >>= 2;
5547 prev_regno = reginfo & 0x1f;
5548 incr = accept_alternate ? 2 : 1;
5549
5550 for (i = 1; i < nb_regs; ++i)
5551 {
5552 uint32_t curr_regno;
5553 reginfo >>= 5;
5554 curr_regno = reginfo & 0x1f;
5555 if (curr_regno != ((prev_regno + incr) & 0x1f))
5556 return FALSE;
5557 prev_regno = curr_regno;
5558 }
5559
5560 return TRUE;
5561 }
5562
5563 /* Generic instruction operand parser. This does no encoding and no
5564 semantic validation; it merely squirrels values away in the inst
5565 structure. Returns TRUE or FALSE depending on whether the
5566 specified grammar matched. */
5567
5568 static bfd_boolean
5569 parse_operands (char *str, const aarch64_opcode *opcode)
5570 {
5571 int i;
5572 char *backtrack_pos = 0;
5573 const enum aarch64_opnd *operands = opcode->operands;
5574 aarch64_reg_type imm_reg_type;
5575
5576 clear_error ();
5577 skip_whitespace (str);
5578
5579 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5580 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5581 else
5582 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5583
5584 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5585 {
5586 int64_t val;
5587 const reg_entry *reg;
5588 int comma_skipped_p = 0;
5589 aarch64_reg_type rtype;
5590 struct vector_type_el vectype;
5591 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5592 aarch64_opnd_info *info = &inst.base.operands[i];
5593 aarch64_reg_type reg_type;
5594
5595 DEBUG_TRACE ("parse operand %d", i);
5596
5597 /* Assign the operand code. */
5598 info->type = operands[i];
5599
5600 if (optional_operand_p (opcode, i))
5601 {
5602 /* Remember where we are in case we need to backtrack. */
5603 gas_assert (!backtrack_pos);
5604 backtrack_pos = str;
5605 }
5606
5607 /* Expect comma between operands; the backtrack mechanism will take
5608 care of cases of omitted optional operand. */
5609 if (i > 0 && ! skip_past_char (&str, ','))
5610 {
5611 set_syntax_error (_("comma expected between operands"));
5612 goto failure;
5613 }
5614 else
5615 comma_skipped_p = 1;
5616
5617 switch (operands[i])
5618 {
5619 case AARCH64_OPND_Rd:
5620 case AARCH64_OPND_Rn:
5621 case AARCH64_OPND_Rm:
5622 case AARCH64_OPND_Rt:
5623 case AARCH64_OPND_Rt2:
5624 case AARCH64_OPND_Rs:
5625 case AARCH64_OPND_Ra:
5626 case AARCH64_OPND_Rt_LS64:
5627 case AARCH64_OPND_Rt_SYS:
5628 case AARCH64_OPND_PAIRREG:
5629 case AARCH64_OPND_SVE_Rm:
5630 po_int_reg_or_fail (REG_TYPE_R_Z);
5631
5632 /* In LS64 load/store instructions Rt register number must be even
5633 and <=22. */
5634 if (operands[i] == AARCH64_OPND_Rt_LS64)
5635 {
5636 /* We've already checked if this is valid register.
5637 This will check if register number (Rt) is not undefined for LS64
5638 instructions:
5639 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
5640 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
5641 {
5642 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
5643 goto failure;
5644 }
5645 }
5646 break;
5647
5648 case AARCH64_OPND_Rd_SP:
5649 case AARCH64_OPND_Rn_SP:
5650 case AARCH64_OPND_Rt_SP:
5651 case AARCH64_OPND_SVE_Rn_SP:
5652 case AARCH64_OPND_Rm_SP:
5653 po_int_reg_or_fail (REG_TYPE_R_SP);
5654 break;
5655
5656 case AARCH64_OPND_Rm_EXT:
5657 case AARCH64_OPND_Rm_SFT:
5658 po_misc_or_fail (parse_shifter_operand
5659 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5660 ? SHIFTED_ARITH_IMM
5661 : SHIFTED_LOGIC_IMM)));
5662 if (!info->shifter.operator_present)
5663 {
5664 /* Default to LSL if not present. Libopcodes prefers shifter
5665 kind to be explicit. */
5666 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5667 info->shifter.kind = AARCH64_MOD_LSL;
5668 /* For Rm_EXT, libopcodes will carry out further check on whether
5669 or not stack pointer is used in the instruction (Recall that
5670 "the extend operator is not optional unless at least one of
5671 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5672 }
5673 break;
5674
5675 case AARCH64_OPND_Fd:
5676 case AARCH64_OPND_Fn:
5677 case AARCH64_OPND_Fm:
5678 case AARCH64_OPND_Fa:
5679 case AARCH64_OPND_Ft:
5680 case AARCH64_OPND_Ft2:
5681 case AARCH64_OPND_Sd:
5682 case AARCH64_OPND_Sn:
5683 case AARCH64_OPND_Sm:
5684 case AARCH64_OPND_SVE_VZn:
5685 case AARCH64_OPND_SVE_Vd:
5686 case AARCH64_OPND_SVE_Vm:
5687 case AARCH64_OPND_SVE_Vn:
5688 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5689 if (val == PARSE_FAIL)
5690 {
5691 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5692 goto failure;
5693 }
5694 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5695
5696 info->reg.regno = val;
5697 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5698 break;
5699
5700 case AARCH64_OPND_SVE_Pd:
5701 case AARCH64_OPND_SVE_Pg3:
5702 case AARCH64_OPND_SVE_Pg4_5:
5703 case AARCH64_OPND_SVE_Pg4_10:
5704 case AARCH64_OPND_SVE_Pg4_16:
5705 case AARCH64_OPND_SVE_Pm:
5706 case AARCH64_OPND_SVE_Pn:
5707 case AARCH64_OPND_SVE_Pt:
5708 reg_type = REG_TYPE_PN;
5709 goto vector_reg;
5710
5711 case AARCH64_OPND_SVE_Za_5:
5712 case AARCH64_OPND_SVE_Za_16:
5713 case AARCH64_OPND_SVE_Zd:
5714 case AARCH64_OPND_SVE_Zm_5:
5715 case AARCH64_OPND_SVE_Zm_16:
5716 case AARCH64_OPND_SVE_Zn:
5717 case AARCH64_OPND_SVE_Zt:
5718 reg_type = REG_TYPE_ZN;
5719 goto vector_reg;
5720
5721 case AARCH64_OPND_Va:
5722 case AARCH64_OPND_Vd:
5723 case AARCH64_OPND_Vn:
5724 case AARCH64_OPND_Vm:
5725 reg_type = REG_TYPE_VN;
5726 vector_reg:
5727 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5728 if (val == PARSE_FAIL)
5729 {
5730 first_error (_(get_reg_expected_msg (reg_type)));
5731 goto failure;
5732 }
5733 if (vectype.defined & NTA_HASINDEX)
5734 goto failure;
5735
5736 info->reg.regno = val;
5737 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5738 && vectype.type == NT_invtype)
5739 /* Unqualified Pn and Zn registers are allowed in certain
5740 contexts. Rely on F_STRICT qualifier checking to catch
5741 invalid uses. */
5742 info->qualifier = AARCH64_OPND_QLF_NIL;
5743 else
5744 {
5745 info->qualifier = vectype_to_qualifier (&vectype);
5746 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5747 goto failure;
5748 }
5749 break;
5750
5751 case AARCH64_OPND_VdD1:
5752 case AARCH64_OPND_VnD1:
5753 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5754 if (val == PARSE_FAIL)
5755 {
5756 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5757 goto failure;
5758 }
5759 if (vectype.type != NT_d || vectype.index != 1)
5760 {
5761 set_fatal_syntax_error
5762 (_("the top half of a 128-bit FP/SIMD register is expected"));
5763 goto failure;
5764 }
5765 info->reg.regno = val;
5766 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5767 here; it is correct for the purpose of encoding/decoding since
5768 only the register number is explicitly encoded in the related
5769 instructions, although this appears a bit hacky. */
5770 info->qualifier = AARCH64_OPND_QLF_S_D;
5771 break;
5772
5773 case AARCH64_OPND_SVE_Zm3_INDEX:
5774 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5775 case AARCH64_OPND_SVE_Zm3_11_INDEX:
5776 case AARCH64_OPND_SVE_Zm4_11_INDEX:
5777 case AARCH64_OPND_SVE_Zm4_INDEX:
5778 case AARCH64_OPND_SVE_Zn_INDEX:
5779 reg_type = REG_TYPE_ZN;
5780 goto vector_reg_index;
5781
5782 case AARCH64_OPND_Ed:
5783 case AARCH64_OPND_En:
5784 case AARCH64_OPND_Em:
5785 case AARCH64_OPND_Em16:
5786 case AARCH64_OPND_SM3_IMM2:
5787 reg_type = REG_TYPE_VN;
5788 vector_reg_index:
5789 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5790 if (val == PARSE_FAIL)
5791 {
5792 first_error (_(get_reg_expected_msg (reg_type)));
5793 goto failure;
5794 }
5795 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5796 goto failure;
5797
5798 info->reglane.regno = val;
5799 info->reglane.index = vectype.index;
5800 info->qualifier = vectype_to_qualifier (&vectype);
5801 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5802 goto failure;
5803 break;
5804
5805 case AARCH64_OPND_SVE_ZnxN:
5806 case AARCH64_OPND_SVE_ZtxN:
5807 reg_type = REG_TYPE_ZN;
5808 goto vector_reg_list;
5809
5810 case AARCH64_OPND_LVn:
5811 case AARCH64_OPND_LVt:
5812 case AARCH64_OPND_LVt_AL:
5813 case AARCH64_OPND_LEt:
5814 reg_type = REG_TYPE_VN;
5815 vector_reg_list:
5816 if (reg_type == REG_TYPE_ZN
5817 && get_opcode_dependent_value (opcode) == 1
5818 && *str != '{')
5819 {
5820 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5821 if (val == PARSE_FAIL)
5822 {
5823 first_error (_(get_reg_expected_msg (reg_type)));
5824 goto failure;
5825 }
5826 info->reglist.first_regno = val;
5827 info->reglist.num_regs = 1;
5828 }
5829 else
5830 {
5831 val = parse_vector_reg_list (&str, reg_type, &vectype);
5832 if (val == PARSE_FAIL)
5833 goto failure;
5834
5835 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5836 {
5837 set_fatal_syntax_error (_("invalid register list"));
5838 goto failure;
5839 }
5840
5841 if (vectype.width != 0 && *str != ',')
5842 {
5843 set_fatal_syntax_error
5844 (_("expected element type rather than vector type"));
5845 goto failure;
5846 }
5847
5848 info->reglist.first_regno = (val >> 2) & 0x1f;
5849 info->reglist.num_regs = (val & 0x3) + 1;
5850 }
5851 if (operands[i] == AARCH64_OPND_LEt)
5852 {
5853 if (!(vectype.defined & NTA_HASINDEX))
5854 goto failure;
5855 info->reglist.has_index = 1;
5856 info->reglist.index = vectype.index;
5857 }
5858 else
5859 {
5860 if (vectype.defined & NTA_HASINDEX)
5861 goto failure;
5862 if (!(vectype.defined & NTA_HASTYPE))
5863 {
5864 if (reg_type == REG_TYPE_ZN)
5865 set_fatal_syntax_error (_("missing type suffix"));
5866 goto failure;
5867 }
5868 }
5869 info->qualifier = vectype_to_qualifier (&vectype);
5870 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5871 goto failure;
5872 break;
5873
5874 case AARCH64_OPND_CRn:
5875 case AARCH64_OPND_CRm:
5876 {
5877 char prefix = *(str++);
5878 if (prefix != 'c' && prefix != 'C')
5879 goto failure;
5880
5881 po_imm_nc_or_fail ();
5882 if (val > 15)
5883 {
5884 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5885 goto failure;
5886 }
5887 info->qualifier = AARCH64_OPND_QLF_CR;
5888 info->imm.value = val;
5889 break;
5890 }
5891
5892 case AARCH64_OPND_SHLL_IMM:
5893 case AARCH64_OPND_IMM_VLSR:
5894 po_imm_or_fail (1, 64);
5895 info->imm.value = val;
5896 break;
5897
5898 case AARCH64_OPND_CCMP_IMM:
5899 case AARCH64_OPND_SIMM5:
5900 case AARCH64_OPND_FBITS:
5901 case AARCH64_OPND_TME_UIMM16:
5902 case AARCH64_OPND_UIMM4:
5903 case AARCH64_OPND_UIMM4_ADDG:
5904 case AARCH64_OPND_UIMM10:
5905 case AARCH64_OPND_UIMM3_OP1:
5906 case AARCH64_OPND_UIMM3_OP2:
5907 case AARCH64_OPND_IMM_VLSL:
5908 case AARCH64_OPND_IMM:
5909 case AARCH64_OPND_IMM_2:
5910 case AARCH64_OPND_WIDTH:
5911 case AARCH64_OPND_SVE_INV_LIMM:
5912 case AARCH64_OPND_SVE_LIMM:
5913 case AARCH64_OPND_SVE_LIMM_MOV:
5914 case AARCH64_OPND_SVE_SHLIMM_PRED:
5915 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5916 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
5917 case AARCH64_OPND_SVE_SHRIMM_PRED:
5918 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5919 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
5920 case AARCH64_OPND_SVE_SIMM5:
5921 case AARCH64_OPND_SVE_SIMM5B:
5922 case AARCH64_OPND_SVE_SIMM6:
5923 case AARCH64_OPND_SVE_SIMM8:
5924 case AARCH64_OPND_SVE_UIMM3:
5925 case AARCH64_OPND_SVE_UIMM7:
5926 case AARCH64_OPND_SVE_UIMM8:
5927 case AARCH64_OPND_SVE_UIMM8_53:
5928 case AARCH64_OPND_IMM_ROT1:
5929 case AARCH64_OPND_IMM_ROT2:
5930 case AARCH64_OPND_IMM_ROT3:
5931 case AARCH64_OPND_SVE_IMM_ROT1:
5932 case AARCH64_OPND_SVE_IMM_ROT2:
5933 case AARCH64_OPND_SVE_IMM_ROT3:
5934 po_imm_nc_or_fail ();
5935 info->imm.value = val;
5936 break;
5937
5938 case AARCH64_OPND_SVE_AIMM:
5939 case AARCH64_OPND_SVE_ASIMM:
5940 po_imm_nc_or_fail ();
5941 info->imm.value = val;
5942 skip_whitespace (str);
5943 if (skip_past_comma (&str))
5944 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5945 else
5946 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5947 break;
5948
5949 case AARCH64_OPND_SVE_PATTERN:
5950 po_enum_or_fail (aarch64_sve_pattern_array);
5951 info->imm.value = val;
5952 break;
5953
5954 case AARCH64_OPND_SVE_PATTERN_SCALED:
5955 po_enum_or_fail (aarch64_sve_pattern_array);
5956 info->imm.value = val;
5957 if (skip_past_comma (&str)
5958 && !parse_shift (&str, info, SHIFTED_MUL))
5959 goto failure;
5960 if (!info->shifter.operator_present)
5961 {
5962 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5963 info->shifter.kind = AARCH64_MOD_MUL;
5964 info->shifter.amount = 1;
5965 }
5966 break;
5967
5968 case AARCH64_OPND_SVE_PRFOP:
5969 po_enum_or_fail (aarch64_sve_prfop_array);
5970 info->imm.value = val;
5971 break;
5972
5973 case AARCH64_OPND_UIMM7:
5974 po_imm_or_fail (0, 127);
5975 info->imm.value = val;
5976 break;
5977
5978 case AARCH64_OPND_IDX:
5979 case AARCH64_OPND_MASK:
5980 case AARCH64_OPND_BIT_NUM:
5981 case AARCH64_OPND_IMMR:
5982 case AARCH64_OPND_IMMS:
5983 po_imm_or_fail (0, 63);
5984 info->imm.value = val;
5985 break;
5986
5987 case AARCH64_OPND_IMM0:
5988 po_imm_nc_or_fail ();
5989 if (val != 0)
5990 {
5991 set_fatal_syntax_error (_("immediate zero expected"));
5992 goto failure;
5993 }
5994 info->imm.value = 0;
5995 break;
5996
5997 case AARCH64_OPND_FPIMM0:
5998 {
5999 int qfloat;
6000 bfd_boolean res1 = FALSE, res2 = FALSE;
6001 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6002 it is probably not worth the effort to support it. */
6003 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
6004 imm_reg_type))
6005 && (error_p ()
6006 || !(res2 = parse_constant_immediate (&str, &val,
6007 imm_reg_type))))
6008 goto failure;
6009 if ((res1 && qfloat == 0) || (res2 && val == 0))
6010 {
6011 info->imm.value = 0;
6012 info->imm.is_fp = 1;
6013 break;
6014 }
6015 set_fatal_syntax_error (_("immediate zero expected"));
6016 goto failure;
6017 }
6018
6019 case AARCH64_OPND_IMM_MOV:
6020 {
6021 char *saved = str;
6022 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6023 reg_name_p (str, REG_TYPE_VN))
6024 goto failure;
6025 str = saved;
6026 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6027 GE_OPT_PREFIX, 1));
6028 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6029 later. fix_mov_imm_insn will try to determine a machine
6030 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6031 message if the immediate cannot be moved by a single
6032 instruction. */
6033 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6034 inst.base.operands[i].skip = 1;
6035 }
6036 break;
6037
6038 case AARCH64_OPND_SIMD_IMM:
6039 case AARCH64_OPND_SIMD_IMM_SFT:
6040 if (! parse_big_immediate (&str, &val, imm_reg_type))
6041 goto failure;
6042 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6043 /* addr_off_p */ 0,
6044 /* need_libopcodes_p */ 1,
6045 /* skip_p */ 1);
6046 /* Parse shift.
6047 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6048 shift, we don't check it here; we leave the checking to
6049 the libopcodes (operand_general_constraint_met_p). By
6050 doing this, we achieve better diagnostics. */
6051 if (skip_past_comma (&str)
6052 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6053 goto failure;
6054 if (!info->shifter.operator_present
6055 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6056 {
6057 /* Default to LSL if not present. Libopcodes prefers shifter
6058 kind to be explicit. */
6059 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6060 info->shifter.kind = AARCH64_MOD_LSL;
6061 }
6062 break;
6063
6064 case AARCH64_OPND_FPIMM:
6065 case AARCH64_OPND_SIMD_FPIMM:
6066 case AARCH64_OPND_SVE_FPIMM8:
6067 {
6068 int qfloat;
6069 bfd_boolean dp_p;
6070
6071 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6072 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6073 || !aarch64_imm_float_p (qfloat))
6074 {
6075 if (!error_p ())
6076 set_fatal_syntax_error (_("invalid floating-point"
6077 " constant"));
6078 goto failure;
6079 }
6080 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6081 inst.base.operands[i].imm.is_fp = 1;
6082 }
6083 break;
6084
6085 case AARCH64_OPND_SVE_I1_HALF_ONE:
6086 case AARCH64_OPND_SVE_I1_HALF_TWO:
6087 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6088 {
6089 int qfloat;
6090 bfd_boolean dp_p;
6091
6092 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6093 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6094 {
6095 if (!error_p ())
6096 set_fatal_syntax_error (_("invalid floating-point"
6097 " constant"));
6098 goto failure;
6099 }
6100 inst.base.operands[i].imm.value = qfloat;
6101 inst.base.operands[i].imm.is_fp = 1;
6102 }
6103 break;
6104
6105 case AARCH64_OPND_LIMM:
6106 po_misc_or_fail (parse_shifter_operand (&str, info,
6107 SHIFTED_LOGIC_IMM));
6108 if (info->shifter.operator_present)
6109 {
6110 set_fatal_syntax_error
6111 (_("shift not allowed for bitmask immediate"));
6112 goto failure;
6113 }
6114 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6115 /* addr_off_p */ 0,
6116 /* need_libopcodes_p */ 1,
6117 /* skip_p */ 1);
6118 break;
6119
6120 case AARCH64_OPND_AIMM:
6121 if (opcode->op == OP_ADD)
6122 /* ADD may have relocation types. */
6123 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6124 SHIFTED_ARITH_IMM));
6125 else
6126 po_misc_or_fail (parse_shifter_operand (&str, info,
6127 SHIFTED_ARITH_IMM));
6128 switch (inst.reloc.type)
6129 {
6130 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6131 info->shifter.amount = 12;
6132 break;
6133 case BFD_RELOC_UNUSED:
6134 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6135 if (info->shifter.kind != AARCH64_MOD_NONE)
6136 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6137 inst.reloc.pc_rel = 0;
6138 break;
6139 default:
6140 break;
6141 }
6142 info->imm.value = 0;
6143 if (!info->shifter.operator_present)
6144 {
6145 /* Default to LSL if not present. Libopcodes prefers shifter
6146 kind to be explicit. */
6147 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6148 info->shifter.kind = AARCH64_MOD_LSL;
6149 }
6150 break;
6151
6152 case AARCH64_OPND_HALF:
6153 {
6154 /* #<imm16> or relocation. */
6155 int internal_fixup_p;
6156 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6157 if (internal_fixup_p)
6158 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6159 skip_whitespace (str);
6160 if (skip_past_comma (&str))
6161 {
6162 /* {, LSL #<shift>} */
6163 if (! aarch64_gas_internal_fixup_p ())
6164 {
6165 set_fatal_syntax_error (_("can't mix relocation modifier "
6166 "with explicit shift"));
6167 goto failure;
6168 }
6169 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6170 }
6171 else
6172 inst.base.operands[i].shifter.amount = 0;
6173 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6174 inst.base.operands[i].imm.value = 0;
6175 if (! process_movw_reloc_info ())
6176 goto failure;
6177 }
6178 break;
6179
6180 case AARCH64_OPND_EXCEPTION:
6181 case AARCH64_OPND_UNDEFINED:
6182 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6183 imm_reg_type));
6184 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6185 /* addr_off_p */ 0,
6186 /* need_libopcodes_p */ 0,
6187 /* skip_p */ 1);
6188 break;
6189
6190 case AARCH64_OPND_NZCV:
6191 {
6192 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6193 if (nzcv != NULL)
6194 {
6195 str += 4;
6196 info->imm.value = nzcv->value;
6197 break;
6198 }
6199 po_imm_or_fail (0, 15);
6200 info->imm.value = val;
6201 }
6202 break;
6203
6204 case AARCH64_OPND_COND:
6205 case AARCH64_OPND_COND1:
6206 {
6207 char *start = str;
6208 do
6209 str++;
6210 while (ISALPHA (*str));
6211 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6212 if (info->cond == NULL)
6213 {
6214 set_syntax_error (_("invalid condition"));
6215 goto failure;
6216 }
6217 else if (operands[i] == AARCH64_OPND_COND1
6218 && (info->cond->value & 0xe) == 0xe)
6219 {
6220 /* Do not allow AL or NV. */
6221 set_default_error ();
6222 goto failure;
6223 }
6224 }
6225 break;
6226
6227 case AARCH64_OPND_ADDR_ADRP:
6228 po_misc_or_fail (parse_adrp (&str));
6229 /* Clear the value as operand needs to be relocated. */
6230 info->imm.value = 0;
6231 break;
6232
6233 case AARCH64_OPND_ADDR_PCREL14:
6234 case AARCH64_OPND_ADDR_PCREL19:
6235 case AARCH64_OPND_ADDR_PCREL21:
6236 case AARCH64_OPND_ADDR_PCREL26:
6237 po_misc_or_fail (parse_address (&str, info));
6238 if (!info->addr.pcrel)
6239 {
6240 set_syntax_error (_("invalid pc-relative address"));
6241 goto failure;
6242 }
6243 if (inst.gen_lit_pool
6244 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6245 {
6246 /* Only permit "=value" in the literal load instructions.
6247 The literal will be generated by programmer_friendly_fixup. */
6248 set_syntax_error (_("invalid use of \"=immediate\""));
6249 goto failure;
6250 }
6251 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6252 {
6253 set_syntax_error (_("unrecognized relocation suffix"));
6254 goto failure;
6255 }
6256 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6257 {
6258 info->imm.value = inst.reloc.exp.X_add_number;
6259 inst.reloc.type = BFD_RELOC_UNUSED;
6260 }
6261 else
6262 {
6263 info->imm.value = 0;
6264 if (inst.reloc.type == BFD_RELOC_UNUSED)
6265 switch (opcode->iclass)
6266 {
6267 case compbranch:
6268 case condbranch:
6269 /* e.g. CBZ or B.COND */
6270 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6271 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6272 break;
6273 case testbranch:
6274 /* e.g. TBZ */
6275 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6276 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6277 break;
6278 case branch_imm:
6279 /* e.g. B or BL */
6280 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6281 inst.reloc.type =
6282 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6283 : BFD_RELOC_AARCH64_JUMP26;
6284 break;
6285 case loadlit:
6286 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6287 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6288 break;
6289 case pcreladdr:
6290 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6291 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6292 break;
6293 default:
6294 gas_assert (0);
6295 abort ();
6296 }
6297 inst.reloc.pc_rel = 1;
6298 }
6299 break;
6300
6301 case AARCH64_OPND_ADDR_SIMPLE:
6302 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6303 {
6304 /* [<Xn|SP>{, #<simm>}] */
6305 char *start = str;
6306 /* First use the normal address-parsing routines, to get
6307 the usual syntax errors. */
6308 po_misc_or_fail (parse_address (&str, info));
6309 if (info->addr.pcrel || info->addr.offset.is_reg
6310 || !info->addr.preind || info->addr.postind
6311 || info->addr.writeback)
6312 {
6313 set_syntax_error (_("invalid addressing mode"));
6314 goto failure;
6315 }
6316
6317 /* Then retry, matching the specific syntax of these addresses. */
6318 str = start;
6319 po_char_or_fail ('[');
6320 po_reg_or_fail (REG_TYPE_R64_SP);
6321 /* Accept optional ", #0". */
6322 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6323 && skip_past_char (&str, ','))
6324 {
6325 skip_past_char (&str, '#');
6326 if (! skip_past_char (&str, '0'))
6327 {
6328 set_fatal_syntax_error
6329 (_("the optional immediate offset can only be 0"));
6330 goto failure;
6331 }
6332 }
6333 po_char_or_fail (']');
6334 break;
6335 }
6336
6337 case AARCH64_OPND_ADDR_REGOFF:
6338 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6339 po_misc_or_fail (parse_address (&str, info));
6340 regoff_addr:
6341 if (info->addr.pcrel || !info->addr.offset.is_reg
6342 || !info->addr.preind || info->addr.postind
6343 || info->addr.writeback)
6344 {
6345 set_syntax_error (_("invalid addressing mode"));
6346 goto failure;
6347 }
6348 if (!info->shifter.operator_present)
6349 {
6350 /* Default to LSL if not present. Libopcodes prefers shifter
6351 kind to be explicit. */
6352 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6353 info->shifter.kind = AARCH64_MOD_LSL;
6354 }
6355 /* Qualifier to be deduced by libopcodes. */
6356 break;
6357
6358 case AARCH64_OPND_ADDR_SIMM7:
6359 po_misc_or_fail (parse_address (&str, info));
6360 if (info->addr.pcrel || info->addr.offset.is_reg
6361 || (!info->addr.preind && !info->addr.postind))
6362 {
6363 set_syntax_error (_("invalid addressing mode"));
6364 goto failure;
6365 }
6366 if (inst.reloc.type != BFD_RELOC_UNUSED)
6367 {
6368 set_syntax_error (_("relocation not allowed"));
6369 goto failure;
6370 }
6371 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6372 /* addr_off_p */ 1,
6373 /* need_libopcodes_p */ 1,
6374 /* skip_p */ 0);
6375 break;
6376
6377 case AARCH64_OPND_ADDR_SIMM9:
6378 case AARCH64_OPND_ADDR_SIMM9_2:
6379 case AARCH64_OPND_ADDR_SIMM11:
6380 case AARCH64_OPND_ADDR_SIMM13:
6381 po_misc_or_fail (parse_address (&str, info));
6382 if (info->addr.pcrel || info->addr.offset.is_reg
6383 || (!info->addr.preind && !info->addr.postind)
6384 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6385 && info->addr.writeback))
6386 {
6387 set_syntax_error (_("invalid addressing mode"));
6388 goto failure;
6389 }
6390 if (inst.reloc.type != BFD_RELOC_UNUSED)
6391 {
6392 set_syntax_error (_("relocation not allowed"));
6393 goto failure;
6394 }
6395 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6396 /* addr_off_p */ 1,
6397 /* need_libopcodes_p */ 1,
6398 /* skip_p */ 0);
6399 break;
6400
6401 case AARCH64_OPND_ADDR_SIMM10:
6402 case AARCH64_OPND_ADDR_OFFSET:
6403 po_misc_or_fail (parse_address (&str, info));
6404 if (info->addr.pcrel || info->addr.offset.is_reg
6405 || !info->addr.preind || info->addr.postind)
6406 {
6407 set_syntax_error (_("invalid addressing mode"));
6408 goto failure;
6409 }
6410 if (inst.reloc.type != BFD_RELOC_UNUSED)
6411 {
6412 set_syntax_error (_("relocation not allowed"));
6413 goto failure;
6414 }
6415 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6416 /* addr_off_p */ 1,
6417 /* need_libopcodes_p */ 1,
6418 /* skip_p */ 0);
6419 break;
6420
6421 case AARCH64_OPND_ADDR_UIMM12:
6422 po_misc_or_fail (parse_address (&str, info));
6423 if (info->addr.pcrel || info->addr.offset.is_reg
6424 || !info->addr.preind || info->addr.writeback)
6425 {
6426 set_syntax_error (_("invalid addressing mode"));
6427 goto failure;
6428 }
6429 if (inst.reloc.type == BFD_RELOC_UNUSED)
6430 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6431 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6432 || (inst.reloc.type
6433 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6434 || (inst.reloc.type
6435 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6436 || (inst.reloc.type
6437 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6438 || (inst.reloc.type
6439 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6440 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6441 /* Leave qualifier to be determined by libopcodes. */
6442 break;
6443
6444 case AARCH64_OPND_SIMD_ADDR_POST:
6445 /* [<Xn|SP>], <Xm|#<amount>> */
6446 po_misc_or_fail (parse_address (&str, info));
6447 if (!info->addr.postind || !info->addr.writeback)
6448 {
6449 set_syntax_error (_("invalid addressing mode"));
6450 goto failure;
6451 }
6452 if (!info->addr.offset.is_reg)
6453 {
6454 if (inst.reloc.exp.X_op == O_constant)
6455 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6456 else
6457 {
6458 set_fatal_syntax_error
6459 (_("writeback value must be an immediate constant"));
6460 goto failure;
6461 }
6462 }
6463 /* No qualifier. */
6464 break;
6465
6466 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6467 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
6468 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6469 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6470 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6471 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6472 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6473 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6474 case AARCH64_OPND_SVE_ADDR_RI_U6:
6475 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6476 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6477 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6478 /* [X<n>{, #imm, MUL VL}]
6479 [X<n>{, #imm}]
6480 but recognizing SVE registers. */
6481 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6482 &offset_qualifier));
6483 if (base_qualifier != AARCH64_OPND_QLF_X)
6484 {
6485 set_syntax_error (_("invalid addressing mode"));
6486 goto failure;
6487 }
6488 sve_regimm:
6489 if (info->addr.pcrel || info->addr.offset.is_reg
6490 || !info->addr.preind || info->addr.writeback)
6491 {
6492 set_syntax_error (_("invalid addressing mode"));
6493 goto failure;
6494 }
6495 if (inst.reloc.type != BFD_RELOC_UNUSED
6496 || inst.reloc.exp.X_op != O_constant)
6497 {
6498 /* Make sure this has priority over
6499 "invalid addressing mode". */
6500 set_fatal_syntax_error (_("constant offset required"));
6501 goto failure;
6502 }
6503 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6504 break;
6505
6506 case AARCH64_OPND_SVE_ADDR_R:
6507 /* [<Xn|SP>{, <R><m>}]
6508 but recognizing SVE registers. */
6509 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6510 &offset_qualifier));
6511 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6512 {
6513 offset_qualifier = AARCH64_OPND_QLF_X;
6514 info->addr.offset.is_reg = 1;
6515 info->addr.offset.regno = 31;
6516 }
6517 else if (base_qualifier != AARCH64_OPND_QLF_X
6518 || offset_qualifier != AARCH64_OPND_QLF_X)
6519 {
6520 set_syntax_error (_("invalid addressing mode"));
6521 goto failure;
6522 }
6523 goto regoff_addr;
6524
6525 case AARCH64_OPND_SVE_ADDR_RR:
6526 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6527 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6528 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6529 case AARCH64_OPND_SVE_ADDR_RX:
6530 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6531 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6532 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6533 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6534 but recognizing SVE registers. */
6535 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6536 &offset_qualifier));
6537 if (base_qualifier != AARCH64_OPND_QLF_X
6538 || offset_qualifier != AARCH64_OPND_QLF_X)
6539 {
6540 set_syntax_error (_("invalid addressing mode"));
6541 goto failure;
6542 }
6543 goto regoff_addr;
6544
6545 case AARCH64_OPND_SVE_ADDR_RZ:
6546 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6547 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6548 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6549 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6550 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6551 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6552 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6553 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6554 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6555 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6556 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6557 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6558 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6559 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6560 &offset_qualifier));
6561 if (base_qualifier != AARCH64_OPND_QLF_X
6562 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6563 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6564 {
6565 set_syntax_error (_("invalid addressing mode"));
6566 goto failure;
6567 }
6568 info->qualifier = offset_qualifier;
6569 goto regoff_addr;
6570
6571 case AARCH64_OPND_SVE_ADDR_ZX:
6572 /* [Zn.<T>{, <Xm>}]. */
6573 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6574 &offset_qualifier));
6575 /* Things to check:
6576 base_qualifier either S_S or S_D
6577 offset_qualifier must be X
6578 */
6579 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6580 && base_qualifier != AARCH64_OPND_QLF_S_D)
6581 || offset_qualifier != AARCH64_OPND_QLF_X)
6582 {
6583 set_syntax_error (_("invalid addressing mode"));
6584 goto failure;
6585 }
6586 info->qualifier = base_qualifier;
6587 if (!info->addr.offset.is_reg || info->addr.pcrel
6588 || !info->addr.preind || info->addr.writeback
6589 || info->shifter.operator_present != 0)
6590 {
6591 set_syntax_error (_("invalid addressing mode"));
6592 goto failure;
6593 }
6594 info->shifter.kind = AARCH64_MOD_LSL;
6595 break;
6596
6597
6598 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6599 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6600 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6601 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6602 /* [Z<n>.<T>{, #imm}] */
6603 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6604 &offset_qualifier));
6605 if (base_qualifier != AARCH64_OPND_QLF_S_S
6606 && base_qualifier != AARCH64_OPND_QLF_S_D)
6607 {
6608 set_syntax_error (_("invalid addressing mode"));
6609 goto failure;
6610 }
6611 info->qualifier = base_qualifier;
6612 goto sve_regimm;
6613
6614 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6615 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6616 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6617 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6618 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6619
6620 We don't reject:
6621
6622 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6623
6624 here since we get better error messages by leaving it to
6625 the qualifier checking routines. */
6626 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6627 &offset_qualifier));
6628 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6629 && base_qualifier != AARCH64_OPND_QLF_S_D)
6630 || offset_qualifier != base_qualifier)
6631 {
6632 set_syntax_error (_("invalid addressing mode"));
6633 goto failure;
6634 }
6635 info->qualifier = base_qualifier;
6636 goto regoff_addr;
6637
6638 case AARCH64_OPND_SYSREG:
6639 {
6640 uint32_t sysreg_flags;
6641 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6642 &sysreg_flags)) == PARSE_FAIL)
6643 {
6644 set_syntax_error (_("unknown or missing system register name"));
6645 goto failure;
6646 }
6647 inst.base.operands[i].sysreg.value = val;
6648 inst.base.operands[i].sysreg.flags = sysreg_flags;
6649 break;
6650 }
6651
6652 case AARCH64_OPND_PSTATEFIELD:
6653 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6654 == PARSE_FAIL)
6655 {
6656 set_syntax_error (_("unknown or missing PSTATE field name"));
6657 goto failure;
6658 }
6659 inst.base.operands[i].pstatefield = val;
6660 break;
6661
6662 case AARCH64_OPND_SYSREG_IC:
6663 inst.base.operands[i].sysins_op =
6664 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6665 goto sys_reg_ins;
6666
6667 case AARCH64_OPND_SYSREG_DC:
6668 inst.base.operands[i].sysins_op =
6669 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6670 goto sys_reg_ins;
6671
6672 case AARCH64_OPND_SYSREG_AT:
6673 inst.base.operands[i].sysins_op =
6674 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6675 goto sys_reg_ins;
6676
6677 case AARCH64_OPND_SYSREG_SR:
6678 inst.base.operands[i].sysins_op =
6679 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6680 goto sys_reg_ins;
6681
6682 case AARCH64_OPND_SYSREG_TLBI:
6683 inst.base.operands[i].sysins_op =
6684 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6685 sys_reg_ins:
6686 if (inst.base.operands[i].sysins_op == NULL)
6687 {
6688 set_fatal_syntax_error ( _("unknown or missing operation name"));
6689 goto failure;
6690 }
6691 break;
6692
6693 case AARCH64_OPND_BARRIER:
6694 case AARCH64_OPND_BARRIER_ISB:
6695 val = parse_barrier (&str);
6696 if (val != PARSE_FAIL
6697 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6698 {
6699 /* ISB only accepts options name 'sy'. */
6700 set_syntax_error
6701 (_("the specified option is not accepted in ISB"));
6702 /* Turn off backtrack as this optional operand is present. */
6703 backtrack_pos = 0;
6704 goto failure;
6705 }
6706 if (val != PARSE_FAIL
6707 && operands[i] == AARCH64_OPND_BARRIER)
6708 {
6709 /* Regular barriers accept options CRm (C0-C15).
6710 DSB nXS barrier variant accepts values > 15. */
6711 if (val < 0 || val > 15)
6712 {
6713 set_syntax_error (_("the specified option is not accepted in DSB"));
6714 goto failure;
6715 }
6716 }
6717 /* This is an extension to accept a 0..15 immediate. */
6718 if (val == PARSE_FAIL)
6719 po_imm_or_fail (0, 15);
6720 info->barrier = aarch64_barrier_options + val;
6721 break;
6722
6723 case AARCH64_OPND_BARRIER_DSB_NXS:
6724 val = parse_barrier (&str);
6725 if (val != PARSE_FAIL)
6726 {
6727 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
6728 if (!(val == 16 || val == 20 || val == 24 || val == 28))
6729 {
6730 set_syntax_error (_("the specified option is not accepted in DSB"));
6731 /* Turn off backtrack as this optional operand is present. */
6732 backtrack_pos = 0;
6733 goto failure;
6734 }
6735 }
6736 else
6737 {
6738 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
6739 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
6740 if (! parse_constant_immediate (&str, &val, imm_reg_type))
6741 goto failure;
6742 if (!(val == 16 || val == 20 || val == 24 || val == 28))
6743 {
6744 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
6745 goto failure;
6746 }
6747 }
6748 /* Option index is encoded as 2-bit value in val<3:2>. */
6749 val = (val >> 2) - 4;
6750 info->barrier = aarch64_barrier_dsb_nxs_options + val;
6751 break;
6752
6753 case AARCH64_OPND_PRFOP:
6754 val = parse_pldop (&str);
6755 /* This is an extension to accept a 0..31 immediate. */
6756 if (val == PARSE_FAIL)
6757 po_imm_or_fail (0, 31);
6758 inst.base.operands[i].prfop = aarch64_prfops + val;
6759 break;
6760
6761 case AARCH64_OPND_BARRIER_PSB:
6762 val = parse_barrier_psb (&str, &(info->hint_option));
6763 if (val == PARSE_FAIL)
6764 goto failure;
6765 break;
6766
6767 case AARCH64_OPND_BTI_TARGET:
6768 val = parse_bti_operand (&str, &(info->hint_option));
6769 if (val == PARSE_FAIL)
6770 goto failure;
6771 break;
6772
6773 default:
6774 as_fatal (_("unhandled operand code %d"), operands[i]);
6775 }
6776
6777 /* If we get here, this operand was successfully parsed. */
6778 inst.base.operands[i].present = 1;
6779 continue;
6780
6781 failure:
6782 /* The parse routine should already have set the error, but in case
6783 not, set a default one here. */
6784 if (! error_p ())
6785 set_default_error ();
6786
6787 if (! backtrack_pos)
6788 goto parse_operands_return;
6789
6790 {
6791 /* We reach here because this operand is marked as optional, and
6792 either no operand was supplied or the operand was supplied but it
6793 was syntactically incorrect. In the latter case we report an
6794 error. In the former case we perform a few more checks before
6795 dropping through to the code to insert the default operand. */
6796
6797 char *tmp = backtrack_pos;
6798 char endchar = END_OF_INSN;
6799
6800 if (i != (aarch64_num_of_operands (opcode) - 1))
6801 endchar = ',';
6802 skip_past_char (&tmp, ',');
6803
6804 if (*tmp != endchar)
6805 /* The user has supplied an operand in the wrong format. */
6806 goto parse_operands_return;
6807
6808 /* Make sure there is not a comma before the optional operand.
6809 For example the fifth operand of 'sys' is optional:
6810
6811 sys #0,c0,c0,#0, <--- wrong
6812 sys #0,c0,c0,#0 <--- correct. */
6813 if (comma_skipped_p && i && endchar == END_OF_INSN)
6814 {
6815 set_fatal_syntax_error
6816 (_("unexpected comma before the omitted optional operand"));
6817 goto parse_operands_return;
6818 }
6819 }
6820
6821 /* Reaching here means we are dealing with an optional operand that is
6822 omitted from the assembly line. */
6823 gas_assert (optional_operand_p (opcode, i));
6824 info->present = 0;
6825 process_omitted_operand (operands[i], opcode, i, info);
6826
6827 /* Try again, skipping the optional operand at backtrack_pos. */
6828 str = backtrack_pos;
6829 backtrack_pos = 0;
6830
6831 /* Clear any error record after the omitted optional operand has been
6832 successfully handled. */
6833 clear_error ();
6834 }
6835
6836 /* Check if we have parsed all the operands. */
6837 if (*str != '\0' && ! error_p ())
6838 {
6839 /* Set I to the index of the last present operand; this is
6840 for the purpose of diagnostics. */
6841 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6842 ;
6843 set_fatal_syntax_error
6844 (_("unexpected characters following instruction"));
6845 }
6846
6847 parse_operands_return:
6848
6849 if (error_p ())
6850 {
6851 DEBUG_TRACE ("parsing FAIL: %s - %s",
6852 operand_mismatch_kind_names[get_error_kind ()],
6853 get_error_message ());
6854 /* Record the operand error properly; this is useful when there
6855 are multiple instruction templates for a mnemonic name, so that
6856 later on, we can select the error that most closely describes
6857 the problem. */
6858 record_operand_error (opcode, i, get_error_kind (),
6859 get_error_message ());
6860 return FALSE;
6861 }
6862 else
6863 {
6864 DEBUG_TRACE ("parsing SUCCESS");
6865 return TRUE;
6866 }
6867 }
6868
/* It does some fix-up to provide some programmer friendly feature while
   keeping the libopcodes happy, i.e. libopcodes only accepts
   the preferred architectural syntax.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bfd_boolean
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  /* Dispatch on the instruction class; classes not listed need no
     fix-up and fall through to the default success path.  */
  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* A Wn register restricts the bit number to 0..31.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return FALSE;
	    }
	  /* Present the register to libopcodes in the preferred Xn
	     form; the immediate alone determines the width.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW always loads a 32-bit literal regardless of the
	     destination register width.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return FALSE;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return FALSE;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	/* The extended-register operand is always the second or third
	   operand of this class.  */
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return TRUE;
}
6976
/* Check for loads and stores that will cause unpredictable behavior.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      /* Operand 0 is the transfer register, operand 1 the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* Loading/storing the base register is unpredictable if writeback.  */
      /* Operands 0 and 1 are the register pair, operand 2 the address.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	     || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.  */
      /* NOTE(review): bit 22 appears to distinguish load from store in
	 this class -- confirm against the opcode table.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      /* It is unpredictable if the destination and status registers are the
	 same.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[1].reg.regno
	      || opnds[0].reg.regno == opnds[2].reg.regno))
	as_warn (_("unpredictable: identical transfer and status registers"
		   " --`%s'"),
		 str);

      break;

    default:
      break;
    }
}
7041
7042 static void
7043 force_automatic_sequence_close (void)
7044 {
7045 if (now_instr_sequence.instr)
7046 {
7047 as_warn (_("previous `%s' sequence has not been closed"),
7048 now_instr_sequence.instr->opcode->name);
7049 init_insn_sequence (NULL, &now_instr_sequence);
7050 }
7051 }
7052
7053 /* A wrapper function to interface with libopcodes on encoding and
7054 record the error message if there is any.
7055
7056 Return TRUE on success; otherwise return FALSE. */
7057
7058 static bfd_boolean
7059 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7060 aarch64_insn *code)
7061 {
7062 aarch64_operand_error error_info;
7063 memset (&error_info, '\0', sizeof (error_info));
7064 error_info.kind = AARCH64_OPDE_NIL;
7065 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7066 && !error_info.non_fatal)
7067 return TRUE;
7068
7069 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7070 record_operand_error_info (opcode, &error_info);
7071 return error_info.non_fatal;
7072 }
7073
#ifdef DEBUG_AARCH64
/* Debug helper: print each operand of OPCODE, preferring the operand's
   short name and falling back to its description when the name is
   empty.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  int i = 0;
  while (opcode->operands[i] != AARCH64_OPND_NIL)
    {
      /* Look the name up once instead of up to twice per operand.  */
      const char *name = aarch64_get_operand_name (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i,
		       name[0] != '\0'
		       ? name
		       : aarch64_get_operand_desc (opcode->operands[i]));
      ++i;
    }
}
#endif /* DEBUG_AARCH64 */
7089
7090 /* This is the guts of the machine-dependent assembler. STR points to a
7091 machine dependent instruction. This function is supposed to emit
7092 the frags/bytes it assembles to. */
7093
void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* P is advanced past the mnemonic; a NULL result means STR does not
     start with a known mnemonic.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition across the per-template instruction reset;
     it was filled in when the mnemonic was parsed.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* A template succeeds only if parsing, fix-up and encoding all
	 succeed; otherwise fall through to the next candidate.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, TRUE);
	  return;
	}

      /* This template failed; reset the instruction state before
	 trying the next one with the same mnemonic.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, FALSE);
}
7229
7230 /* Various frobbings of labels and their addresses. */
7231
void
aarch64_start_line_hook (void)
{
  /* Called at the start of each input line: forget the label seen on
     the previous line so md_assemble does not re-anchor it.  */
  last_label_seen = NULL;
}
7237
void
aarch64_frob_label (symbolS * sym)
{
  /* Remember the most recent label so md_assemble can re-anchor its
     frag and value when an instruction follows it.  */
  last_label_seen = sym;

  /* Let the DWARF-2 machinery know about the label too.  */
  dwarf2_emit_label (sym);
}
7245
void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  An instruction sequence
     must not be left open across a section change.  */
  force_automatic_sequence_close ();
}
7252
7253 int
7254 aarch64_data_in_code (void)
7255 {
7256 if (!strncmp (input_line_pointer + 1, "data:", 5))
7257 {
7258 *input_line_pointer = '/';
7259 input_line_pointer += 5;
7260 *input_line_pointer = 0;
7261 return 1;
7262 }
7263
7264 return 0;
7265 }
7266
/* Strip a trailing "/data" marker from NAME, in place, and return NAME.
   Uses size_t for the length (the original stored strlen's result in an
   int, which can truncate for pathologically long names).  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  /* The marker is only stripped when something precedes it, so a bare
     "/data" symbol is left alone.  */
  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
7277 \f
7278 /* Table of all register names defined by default. The user can
7279 define additional names with .req. Note that all register names
7280 should appear in both upper and lowercase variants. Some registers
7281 also have mixed-case names. */
7282
/* Table-building helpers: REGDEF creates a canonical entry, REGDEF_ALIAS
   a non-canonical alias, and REGNUM pastes the prefix and number into a
   name (e.g. x0).  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Registers 0..15 with prefix P (used for the SVE predicates).  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* Registers 0..30 -- number 31 is special-cased (SP/ZR) for the
   integer registers.  */
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
/* The full run 0..31, for register files with no special number 31.  */
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  /* Procedure-call-standard aliases for the scratch, frame and link
     registers.  */
  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  /* Register number 31: stack pointer ...  */
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  /* ... or zero register, depending on context.  */
  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers.  There are only sixteen of these.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7347
/* Single-letter helpers for the NZCV table below: an upper-case letter
   means the corresponding flag bit is set, lower-case means clear.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the four flag bits into a 4-bit immediate, N in the MSB.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All sixteen spellings of the NZCV flags operand, one per bit
   combination, so the spelling itself encodes the mask.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7385 \f
7386 /* MD interface: bits in the object file. */
7387
7388 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7389 for use in the a.out file, and stores them in the array pointed to by buf.
7390 This knows about the endian-ness of the target machine and does
7391 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7392 2 (short) and 4 (long) Floating numbers are put out as a series of
7393 LITTLENUMS (shorts, here at least). */
7394
7395 void
7396 md_number_to_chars (char *buf, valueT val, int n)
7397 {
7398 if (target_big_endian)
7399 number_to_chars_bigendian (buf, val, n);
7400 else
7401 number_to_chars_littleendian (buf, val, n);
7402 }
7403
7404 /* MD interface: Sections. */
7405
7406 /* Estimate the size of a frag before relaxing. Assume everything fits in
7407 4 bytes. */
7408
int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  /* Every AArch64 instruction is 4 bytes, so that is both the variable
     part and the total estimate.  */
  fragp->fr_var = 4;
  return 4;
}
7415
7416 /* Round up a section size to the appropriate boundary. */
7417
valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No rounding is applied; sections keep their natural size.  */
  return size;
}
7423
7424 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7425 of an rs_align_code fragment.
7426
7427 Here we fill the frag with the appropriate info for padding the
7428 output stream. The resulting frag will consist of a fixed (fr_fix)
7429 and of a repeating (fr_var) part.
7430
7431 The fixed content is always emitted before the repeating content and
7432 these two parts are used as follows in constructing the output:
7433 - the fixed part will be used to align to a valid instruction word
7434 boundary, in case that we start at a misaligned address; as no
7435 executable instruction can live at the misaligned location, we
7436 simply fill with zeros;
7437 - the variable part will be used to cover the remaining padding and
7438 we fill using the AArch64 NOP instruction.
7439
7440 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7441 enough storage space for up to 3 bytes for padding the back to a valid
7442 instruction alignment and exactly 4 bytes to store the NOP pattern. */
7443
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding this frag must supply, and where it starts.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach instruction-word alignment; zero-filled since
     no instruction can start at a misaligned address.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating (fr_var) part of the frag is a single NOP.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
7481
7482 /* Perform target specific initialisation of a frag.
7483 Note - despite the name this initialisation is not done when the frag
7484 is created, but only when its type is assigned. A frag can be created
7485 and used a long time before its type is set, so beware of assuming that
7486 this initialisation is performed first. */
7487
#ifndef OBJ_ELF
/* Non-ELF targets need no per-frag initialisation.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state ($d data / $x insn) from the frag type.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}
7527 \f
7528 /* Initialize the DWARF-2 unwind information for this procedure. */
7529
void
tc_aarch64_frame_initial_instructions (void)
{
  /* On function entry the CFA is SP with a zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
7535 #endif /* OBJ_ELF */
7536
7537 /* Convert REGNAME to a DWARF-2 register number. */
7538
7539 int
7540 tc_aarch64_regname_to_dw2regnum (char *regname)
7541 {
7542 const reg_entry *reg = parse_reg (&regname);
7543 if (reg == NULL)
7544 return -1;
7545
7546 switch (reg->type)
7547 {
7548 case REG_TYPE_SP_32:
7549 case REG_TYPE_SP_64:
7550 case REG_TYPE_R_32:
7551 case REG_TYPE_R_64:
7552 return reg->number;
7553
7554 case REG_TYPE_FP_B:
7555 case REG_TYPE_FP_H:
7556 case REG_TYPE_FP_S:
7557 case REG_TYPE_FP_D:
7558 case REG_TYPE_FP_Q:
7559 return reg->number + 64;
7560
7561 default:
7562 break;
7563 }
7564 return -1;
7565 }
7566
7567 /* Implement DWARF2_ADDR_SIZE. */
7568
int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32 uses 32-bit addresses even though the architecture is
     64-bit.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7578
7579 /* MD interface: Symbol and relocation handling. */
7580
7581 /* Return the address within the segment that a PC-relative fixup is
7582 relative to. For AArch64 PC-relative fixups applied to instructions
7583 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7584
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Address of the fixup itself within the output.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || aarch64_force_relocation (fixP)))
    base = 0;

  /* AArch64 should be consistent for all pc-relative relocations.  */
  return base + AARCH64_PCREL_OFFSET;
}
7601
7602 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
7603 Otherwise we have no need to default values of symbols. */
7604
symbolS *
md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap two-character check before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      /* Create the GOT symbol lazily, exactly once.  */
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return 0;
}
7627
7628 /* Return non-zero if the indicated VALUE has overflowed the maximum
7629 range expressible by a unsigned number with the indicated number of
7630 BITS. */
7631
7632 static bfd_boolean
7633 unsigned_overflow (valueT value, unsigned bits)
7634 {
7635 valueT lim;
7636 if (bits >= sizeof (valueT) * 8)
7637 return FALSE;
7638 lim = (valueT) 1 << bits;
7639 return (value >= lim);
7640 }
7641
7642
7643 /* Return non-zero if the indicated VALUE has overflowed the maximum
7644 range expressible by an signed number with the indicated number of
7645 BITS. */
7646
7647 static bfd_boolean
7648 signed_overflow (offsetT value, unsigned bits)
7649 {
7650 offsetT lim;
7651 if (bits >= sizeof (offsetT) * 8)
7652 return FALSE;
7653 lim = (offsetT) 1 << (bits - 1);
7654 return (value < -lim || value >= lim);
7655 }
7656
/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
   unsigned immediate offset load/store instruction, try to encode it as
   an unscaled, 9-bit, signed immediate offset load/store instruction.
   Return TRUE if it is successful; otherwise return FALSE.

   As a programmer-friendly assembler, LDUR/STUR instructions can be generated
   in response to the standard LDR/STR mnemonics when the immediate offset is
   unambiguous, i.e. when it is negative or unaligned.  */

static bfd_boolean
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled counterpart; opcodes
     with no counterpart yield OP_NIL and the conversion is refused.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return FALSE;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  /* Switch the instruction over to the unscaled opcode.  */
  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the new opcode; failure here means the operands do
     not fit the unscaled form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return FALSE;

  return TRUE;
}
7719
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to loads
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversability an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  The wide (MOVZ/MOVN) forms are not
     tried for an SP destination, and the bitmask (ORR) form is not tried
     for a ZR destination.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  /* Install the now-resolved immediate and make sure the operand is no
     longer skipped by the encoder.  */
  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias.  (OP_MOV_IMM_WIDEN is the MOVN-based form of
	 the MOV alias.)  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* None of the single-instruction forms could represent VALUE.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
7780
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Address of the instruction bytes inside the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate; EXCEPTION operands go through the SVC-imm
	 encoder, UNDEFINED immediates are OR-ed in directly.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		   3  322|2222|2  2  2 21111 111111
		   1  098|7654|3  2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
	 ->
		   3  322|2222|2 2  221111111111
		   1  098|7654|3 2  109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  A negative immediate is encoded via the
	     opposite operation with a positive immediate.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only possible
	     when the low 12 bits are clear and the value fits in 24 bits.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction so the
	 encoder can validate the bitmask pattern.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      /* Fall back to the programmer-friendly LDUR/STUR form when the
	 scaled-offset encoding cannot represent the offset.  */
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7959
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* Address of the bytes being patched inside the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  /* FLAGS carries the FIXUP_F_* bits stashed at parse time.  */
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    /* Plain data relocations: patch the bytes in place when resolved
       here, or when the target uses in-place (REL-style) addends.  */
    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* LDR (literal): 19-bit word-scaled pc-relative offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* ADR: 21-bit byte-granular pc-relative offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* Conditional branch: 19-bit word-scaled offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* TBZ/TBNZ: 14-bit word-scaled offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* BL/B: 26-bit word-scaled offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVZ/MOVN/MOVK group: SCALE selects which 16-bit chunk of the
       value the instruction materialises.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo reloc: narrowed to the 32- or 64-bit variant now that the
	 ABI (ilp32_p) is known.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Pseudo reloc: narrowed according to ilp32_p, as above.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* TLS relocations: mark the symbol thread-local and leave the
       relocation itself for the linker.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* GOT- and page-relative relocations: never resolved here.  */
    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      /* Marker relocations for TLS descriptor sequences; nothing to
	 patch in the instruction bytes.  */
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated the struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually use
     this field, so the impact on the performance should be minimal .  */
  free (fixP->tc_fix_data.inst);

  return;
}
8346
8347 /* Translate internal representation of relocation info to BFD target
8348 format. */
8349
8350 arelent *
8351 tc_gen_reloc (asection * section, fixS * fixp)
8352 {
8353 arelent *reloc;
8354 bfd_reloc_code_real_type code;
8355
8356 reloc = XNEW (arelent);
8357
8358 reloc->sym_ptr_ptr = XNEW (asymbol *);
8359 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8360 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8361
8362 if (fixp->fx_pcrel)
8363 {
8364 if (section->use_rela_p)
8365 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8366 else
8367 fixp->fx_offset = reloc->address;
8368 }
8369 reloc->addend = fixp->fx_offset;
8370
8371 code = fixp->fx_r_type;
8372 switch (code)
8373 {
8374 case BFD_RELOC_16:
8375 if (fixp->fx_pcrel)
8376 code = BFD_RELOC_16_PCREL;
8377 break;
8378
8379 case BFD_RELOC_32:
8380 if (fixp->fx_pcrel)
8381 code = BFD_RELOC_32_PCREL;
8382 break;
8383
8384 case BFD_RELOC_64:
8385 if (fixp->fx_pcrel)
8386 code = BFD_RELOC_64_PCREL;
8387 break;
8388
8389 default:
8390 break;
8391 }
8392
8393 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8394 if (reloc->howto == NULL)
8395 {
8396 as_bad_where (fixp->fx_file, fixp->fx_line,
8397 _
8398 ("cannot represent %s relocation in this object file format"),
8399 bfd_get_reloc_code_name (code));
8400 return NULL;
8401 }
8402
8403 return reloc;
8404 }
8405
8406 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8407
8408 void
8409 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8410 {
8411 bfd_reloc_code_real_type type;
8412 int pcrel = 0;
8413
8414 /* Pick a reloc.
8415 FIXME: @@ Should look at CPU word size. */
8416 switch (size)
8417 {
8418 case 1:
8419 type = BFD_RELOC_8;
8420 break;
8421 case 2:
8422 type = BFD_RELOC_16;
8423 break;
8424 case 4:
8425 type = BFD_RELOC_32;
8426 break;
8427 case 8:
8428 type = BFD_RELOC_64;
8429 break;
8430 default:
8431 as_bad (_("cannot do %u-byte relocation"), size);
8432 type = BFD_RELOC_UNUSED;
8433 break;
8434 }
8435
8436 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8437 }
8438
/* Decide whether the fixup FIXP must be kept as a relocation for the
   linker.  Returns non-zero to force emitting the relocation, zero to
   let the assembler resolve it (subject to generic_force_reloc).  */

int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  /* Everything else follows the generic rule.  */
  return generic_force_reloc (fixp);
}
8534
8535 #ifdef OBJ_ELF
8536
8537 /* Implement md_after_parse_args. This is the earliest time we need to decide
8538 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8539
8540 void
8541 aarch64_after_parse_args (void)
8542 {
8543 if (aarch64_abi != AARCH64_ABI_NONE)
8544 return;
8545
8546 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8547 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8548 aarch64_abi = AARCH64_ABI_ILP32;
8549 else
8550 aarch64_abi = AARCH64_ABI_LP64;
8551 }
8552
8553 const char *
8554 elf64_aarch64_target_format (void)
8555 {
8556 #ifdef TE_CLOUDABI
8557 /* FIXME: What to do for ilp32_p ? */
8558 if (target_big_endian)
8559 return "elf64-bigaarch64-cloudabi";
8560 else
8561 return "elf64-littleaarch64-cloudabi";
8562 #else
8563 if (target_big_endian)
8564 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8565 else
8566 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8567 #endif
8568 }
8569
/* Symbol-frobbing hook: delegate to the generic ELF implementation.  */

void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8575 #endif
8576
8577 /* MD interface: Finalization. */
8578
8579 /* A good place to do this, although this was probably not intended
8580 for this kind of use. We need to dump the literal pool before
8581 references are made to a null symbol pointer. */
8582
8583 void
8584 aarch64_cleanup (void)
8585 {
8586 literal_pool *pool;
8587
8588 for (pool = list_of_pools; pool; pool = pool->next)
8589 {
8590 /* Put it at the end of the relevant section. */
8591 subseg_set (pool->section, pool->sub_section);
8592 s_ltorg (0);
8593 }
8594 }
8595
8596 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* SYM is the last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary to NEXT; scan forward through
	 empty frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8660 #endif
8661
8662 /* Adjust the symbol table. */
8663
/* Target hook to adjust the symbol table before it is written out.
   For ELF this prunes redundant mapping symbols, then applies the
   generic ELF adjustments; for other object formats it is a no-op.  */
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8674
/* Insert KEY -> VALUE into TABLE.  The final 0 selects the
   non-replacing mode of str_hash_insert — presumably an existing
   entry for KEY is kept; confirm against str_hash_insert's contract.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
8680
/* Like checked_hash_insert, but assert that KEY fits within the maximum
   system-register name length before inserting.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
8687
8688 static void
8689 fill_instruction_hash_table (void)
8690 {
8691 aarch64_opcode *opcode = aarch64_opcode_table;
8692
8693 while (opcode->name != NULL)
8694 {
8695 templates *templ, *new_templ;
8696 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
8697
8698 new_templ = XNEW (templates);
8699 new_templ->opcode = opcode;
8700 new_templ->next = NULL;
8701
8702 if (!templ)
8703 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8704 else
8705 {
8706 new_templ->next = templ->next;
8707 templ->next = new_templ;
8708 }
8709 ++opcode;
8710 }
8711 }
8712
8713 static inline void
8714 convert_to_upper (char *dst, const char *src, size_t num)
8715 {
8716 unsigned int i;
8717 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8718 *dst = TOUPPER (*src);
8719 *dst = '\0';
8720 }
8721
8722 /* Assume STR point to a lower-case string, allocate, convert and return
8723 the corresponding upper-case string. */
8724 static inline const char*
8725 get_upper_str (const char *str)
8726 {
8727 char *ret;
8728 size_t len = strlen (str);
8729 ret = XNEWVEC (char, len + 1);
8730 convert_to_upper (ret, str, len);
8731 return ret;
8732 }
8733
8734 /* MD interface: Initialization. */
8735
/* MD hook: one-time assembler initialization.  Creates every lookup
   hash table, fills them from the opcodes-library tables, resolves the
   cpu/arch command-line selection into cpu_variant, and records the
   BFD machine type.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create all lookup tables up front.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and the IC/DC/AT/TLBI/SR operation names.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* Register names and NZCV flag combinations.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Operand modifiers (shift/extend), in both lower and upper case.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options:
     -mcpu wins over -march; fall back to the built-in default.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
8894
/* Command line processing.  */

/* Short options: only "-m<...>"; its argument is decoded in
   md_parse_option via the aarch64_opts/aarch64_long_opts tables.  */
const char *md_shortopts = "m:";

/* -EB/-EL are only offered for the endiannesses the target supports.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8921
/* Table entry for a simple flag-setting "-m..." option.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Simple "-m..." options that just set a flag.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8944
/* Table entry mapping a -mcpu= name to its feature set.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9095
/* Table entry mapping a -march= name to its feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {NULL, AARCH64_ARCH_NONE}
};
9117
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* The "+ext"/"+noext" extension names accepted after a cpu/arch name,
   each with the feature set it enables and the features it requires
   (used for the transitive closures in aarch64_feature_*_set).  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_ARCH_NONE},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_ARCH_NONE},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
9212
/* Table entry for a "-m<name>=<value>" style option whose value is
   decoded by a dedicated parser function.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;			/* If non-null, print this message.  */
};
9220
9221 /* Transitive closure of features depending on set. */
9222 static aarch64_feature_set
9223 aarch64_feature_disable_set (aarch64_feature_set set)
9224 {
9225 const struct aarch64_option_cpu_value_table *opt;
9226 aarch64_feature_set prev = 0;
9227
9228 while (prev != set) {
9229 prev = set;
9230 for (opt = aarch64_features; opt->name != NULL; opt++)
9231 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9232 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9233 }
9234 return set;
9235 }
9236
9237 /* Transitive closure of dependencies of set. */
9238 static aarch64_feature_set
9239 aarch64_feature_enable_set (aarch64_feature_set set)
9240 {
9241 const struct aarch64_option_cpu_value_table *opt;
9242 aarch64_feature_set prev = 0;
9243
9244 while (prev != set) {
9245 prev = set;
9246 for (opt = aarch64_features; opt->name != NULL; opt++)
9247 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9248 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9249 }
9250 return set;
9251 }
9252
9253 static int
9254 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9255 bfd_boolean ext_only)
9256 {
9257 /* We insist on extensions being added before being removed. We achieve
9258 this by using the ADDING_VALUE variable to indicate whether we are
9259 adding an extension (1) or removing it (0) and only allowing it to
9260 change in the order -1 -> 1 -> 0. */
9261 int adding_value = -1;
9262 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9263
9264 /* Copy the feature set, so that we can modify it. */
9265 *ext_set = **opt_p;
9266 *opt_p = ext_set;
9267
9268 while (str != NULL && *str != 0)
9269 {
9270 const struct aarch64_option_cpu_value_table *opt;
9271 const char *ext = NULL;
9272 int optlen;
9273
9274 if (!ext_only)
9275 {
9276 if (*str != '+')
9277 {
9278 as_bad (_("invalid architectural extension"));
9279 return 0;
9280 }
9281
9282 ext = strchr (++str, '+');
9283 }
9284
9285 if (ext != NULL)
9286 optlen = ext - str;
9287 else
9288 optlen = strlen (str);
9289
9290 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
9291 {
9292 if (adding_value != 0)
9293 adding_value = 0;
9294 optlen -= 2;
9295 str += 2;
9296 }
9297 else if (optlen > 0)
9298 {
9299 if (adding_value == -1)
9300 adding_value = 1;
9301 else if (adding_value != 1)
9302 {
9303 as_bad (_("must specify extensions to add before specifying "
9304 "those to remove"));
9305 return FALSE;
9306 }
9307 }
9308
9309 if (optlen == 0)
9310 {
9311 as_bad (_("missing architectural extension"));
9312 return 0;
9313 }
9314
9315 gas_assert (adding_value != -1);
9316
9317 for (opt = aarch64_features; opt->name != NULL; opt++)
9318 if (strncmp (opt->name, str, optlen) == 0)
9319 {
9320 aarch64_feature_set set;
9321
9322 /* Add or remove the extension. */
9323 if (adding_value)
9324 {
9325 set = aarch64_feature_enable_set (opt->value);
9326 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9327 }
9328 else
9329 {
9330 set = aarch64_feature_disable_set (opt->value);
9331 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9332 }
9333 break;
9334 }
9335
9336 if (opt->name == NULL)
9337 {
9338 as_bad (_("unknown architectural extension `%s'"), str);
9339 return 0;
9340 }
9341
9342 str = ext;
9343 };
9344
9345 return 1;
9346 }
9347
9348 static int
9349 aarch64_parse_cpu (const char *str)
9350 {
9351 const struct aarch64_cpu_option_table *opt;
9352 const char *ext = strchr (str, '+');
9353 size_t optlen;
9354
9355 if (ext != NULL)
9356 optlen = ext - str;
9357 else
9358 optlen = strlen (str);
9359
9360 if (optlen == 0)
9361 {
9362 as_bad (_("missing cpu name `%s'"), str);
9363 return 0;
9364 }
9365
9366 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9367 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9368 {
9369 mcpu_cpu_opt = &opt->value;
9370 if (ext != NULL)
9371 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
9372
9373 return 1;
9374 }
9375
9376 as_bad (_("unknown cpu `%s'"), str);
9377 return 0;
9378 }
9379
9380 static int
9381 aarch64_parse_arch (const char *str)
9382 {
9383 const struct aarch64_arch_option_table *opt;
9384 const char *ext = strchr (str, '+');
9385 size_t optlen;
9386
9387 if (ext != NULL)
9388 optlen = ext - str;
9389 else
9390 optlen = strlen (str);
9391
9392 if (optlen == 0)
9393 {
9394 as_bad (_("missing architecture name `%s'"), str);
9395 return 0;
9396 }
9397
9398 for (opt = aarch64_archs; opt->name != NULL; opt++)
9399 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9400 {
9401 march_cpu_opt = &opt->value;
9402 if (ext != NULL)
9403 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
9404
9405 return 1;
9406 }
9407
9408 as_bad (_("unknown architecture `%s'\n"), str);
9409 return 0;
9410 }
9411
/* ABIs.  */

/* Table entry mapping a -mabi= name to the internal ABI enumerator.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

/* Recognized -mabi= values.  Note: no NULL sentinel — iterate with
   ARRAY_SIZE, not by looking for a terminator.  */
static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32",	AARCH64_ABI_ILP32},
  {"lp64",	AARCH64_ABI_LP64},
};
9423
9424 static int
9425 aarch64_parse_abi (const char *str)
9426 {
9427 unsigned int i;
9428
9429 if (str[0] == '\0')
9430 {
9431 as_bad (_("missing abi name `%s'"), str);
9432 return 0;
9433 }
9434
9435 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9436 if (strcmp (str, aarch64_abis[i].name) == 0)
9437 {
9438 aarch64_abi = aarch64_abis[i].value;
9439 return 1;
9440 }
9441
9442 as_bad (_("unknown abi `%s'\n"), str);
9443 return 0;
9444 }
9445
/* "-m<name>=<value>" options and the parser each value is handed to.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
9457
/* MD hook: handle option character C with argument ARG.  Returns 1 if
   the option was consumed, 0 if it is unknown to this backend.
   Endianness options are handled directly; everything else is matched
   against the simple-flag table (exact match) and then the
   name=value table (prefix match).  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  /* Exact match: first char against C, remainder against ARG
	     (an option with no remainder matches a NULL ARG).  */
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text after the '='.
		 ARG lacks the leading option char, hence the "- 1".  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
9525
9526 void
9527 md_show_usage (FILE * fp)
9528 {
9529 struct aarch64_option_table *opt;
9530 struct aarch64_long_option_table *lopt;
9531
9532 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9533
9534 for (opt = aarch64_opts; opt->option != NULL; opt++)
9535 if (opt->help != NULL)
9536 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9537
9538 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9539 if (lopt->help != NULL)
9540 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
9541
9542 #ifdef OPTION_EB
9543 fprintf (fp, _("\
9544 -EB assemble code for a big-endian cpu\n"));
9545 #endif
9546
9547 #ifdef OPTION_EL
9548 fprintf (fp, _("\
9549 -EL assemble code for a little-endian cpu\n"));
9550 #endif
9551 }
9552
/* Parse a .cpu directive: "<name>[+ext...]".  On success updates
   mcpu_cpu_opt and cpu_variant; on error the directive is diagnosed
   and the rest of the line ignored.  */

static void
s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_cpu_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  /* Temporarily NUL-terminate the operand so it can be matched as a
     plain string; the saved character is restored before returning.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry.  */
  for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
	&& strncmp (name, opt->name, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	if (ext != NULL)
	  /* NOTE(review): on extension-parse failure this returns
	     without restoring saved_char — looks like an oversight;
	     confirm whether upstream intends the early return.  */
	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
	    return;

	cpu_variant = *mcpu_cpu_opt;

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
9597


/* Parse a .arch directive: "<name>[+ext...]".  On success updates
   mcpu_cpu_opt and cpu_variant; on error the directive is diagnosed
   and the rest of the line ignored.  */

static void
s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_arch_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  /* Temporarily NUL-terminate the operand so it can be matched as a
     plain string; the saved character is restored before returning.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry.  */
  for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
	&& strncmp (name, opt->name, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	if (ext != NULL)
	  /* NOTE(review): on extension-parse failure this returns
	     without restoring saved_char — looks like an oversight;
	     confirm whether upstream intends the early return.  */
	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
	    return;

	cpu_variant = *mcpu_cpu_opt;

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
9644
9645 /* Parse a .arch_extension directive. */
9646
9647 static void
9648 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9649 {
9650 char saved_char;
9651 char *ext = input_line_pointer;;
9652
9653 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9654 input_line_pointer++;
9655 saved_char = *input_line_pointer;
9656 *input_line_pointer = 0;
9657
9658 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9659 return;
9660
9661 cpu_variant = *mcpu_cpu_opt;
9662
9663 *input_line_pointer = saved_char;
9664 demand_empty_rest_of_line ();
9665 }
9666
9667 /* Copy symbol information. */
9668
/* Copy the AArch64-specific symbol flag word from SRC to DEST.  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
9674
9675 #ifdef OBJ_ELF
9676 /* Same as elf_copy_symbol_attributes, but without copying st_other.
9677 This is needed so AArch64 specific st_other values can be independently
9678 specified for an IFUNC resolver (that is called by the dynamic linker)
9679 and the symbol it resolves (aliased to the resolver). In particular,
9680 if a function symbol has special st_other value set via directives,
9681 then attaching an IFUNC resolver to that symbol should not override
9682 the st_other setting. Requiring the directive on the IFUNC resolver
9683 symbol would be unexpected and problematic in C code, where the two
9684 symbols appear as two independent function declarations. */
9685
9686 void
9687 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
9688 {
9689 struct elf_obj_sy *srcelf = symbol_get_obj (src);
9690 struct elf_obj_sy *destelf = symbol_get_obj (dest);
9691 if (srcelf->size)
9692 {
9693 if (destelf->size == NULL)
9694 destelf->size = XNEW (expressionS);
9695 *destelf->size = *srcelf->size;
9696 }
9697 else
9698 {
9699 free (destelf->size);
9700 destelf->size = NULL;
9701 }
9702 S_SET_SIZE (dest, S_GET_SIZE (src));
9703 }
9704 #endif