]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-aarch64.c
[PATCH][GAS] aarch64: Add atomic 64-byte load/store instructions for Armv8.7
[thirdparty/binutils-gdb.git] / gas / config / tc-aarch64.c
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2020 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
90 enum vector_el_type
91 {
92 NT_invtype = -1,
93 NT_b,
94 NT_h,
95 NT_s,
96 NT_d,
97 NT_q,
98 NT_zero,
99 NT_merge
100 };
101
102 /* Bits for DEFINED field in vector_type_el. */
103 #define NTA_HASTYPE 1
104 #define NTA_HASINDEX 2
105 #define NTA_HASVARWIDTH 4
106
107 struct vector_type_el
108 {
109 enum vector_el_type type;
110 unsigned char defined;
111 unsigned width;
112 int64_t index;
113 };
114
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
116
117 struct reloc
118 {
119 bfd_reloc_code_real_type type;
120 expressionS exp;
121 int pc_rel;
122 enum aarch64_opnd opnd;
123 uint32_t flags;
124 unsigned need_libopcodes_p : 1;
125 };
126
127 struct aarch64_instruction
128 {
129 /* libopcodes structure for instruction intermediate representation. */
130 aarch64_inst base;
131 /* Record assembly errors found during the parsing. */
132 struct
133 {
134 enum aarch64_operand_error_kind kind;
135 const char *error;
136 } parsing_error;
137 /* The condition that appears in the assembly line. */
138 int cond;
139 /* Relocation information (including the GAS internal fixup). */
140 struct reloc reloc;
141 /* Need to generate an immediate in the literal pool. */
142 unsigned gen_lit_pool : 1;
143 };
144
145 typedef struct aarch64_instruction aarch64_instruction;
146
147 static aarch64_instruction inst;
148
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
183 static inline bfd_boolean
184 error_p (void)
185 {
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188
189 static inline const char *
190 get_error_message (void)
191 {
192 return inst.parsing_error.error;
193 }
194
195 static inline enum aarch64_operand_error_kind
196 get_error_kind (void)
197 {
198 return inst.parsing_error.kind;
199 }
200
201 static inline void
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
203 {
204 inst.parsing_error.kind = kind;
205 inst.parsing_error.error = error;
206 }
207
208 static inline void
209 set_recoverable_error (const char *error)
210 {
211 set_error (AARCH64_OPDE_RECOVERABLE, error);
212 }
213
214 /* Use the DESC field of the corresponding aarch64_operand entry to compose
215 the error message. */
216 static inline void
217 set_default_error (void)
218 {
219 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
220 }
221
222 static inline void
223 set_syntax_error (const char *error)
224 {
225 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
226 }
227
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231 if (! error_p ())
232 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234
235 static inline void
236 set_fatal_syntax_error (const char *error)
237 {
238 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
239 }
240 \f
241 /* Return value for certain parsers when the parsing fails; those parsers
242 return the information of the parsed result, e.g. register number, on
243 success. */
244 #define PARSE_FAIL -1
245
246 /* This is an invalid condition code that means no conditional field is
247 present. */
248 #define COND_ALWAYS 0x10
249
250 typedef struct
251 {
252 const char *template;
253 uint32_t value;
254 } asm_nzcv;
255
256 struct reloc_entry
257 {
258 char *name;
259 bfd_reloc_code_real_type reloc;
260 };
261
262 /* Macros to define the register types and masks for the purpose
263 of parsing. */
264
265 #undef AARCH64_REG_TYPES
266 #define AARCH64_REG_TYPES \
267 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
268 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
269 BASIC_REG_TYPE(SP_32) /* wsp */ \
270 BASIC_REG_TYPE(SP_64) /* sp */ \
271 BASIC_REG_TYPE(Z_32) /* wzr */ \
272 BASIC_REG_TYPE(Z_64) /* xzr */ \
273 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
274 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
275 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
276 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
277 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
278 BASIC_REG_TYPE(VN) /* v[0-31] */ \
279 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
280 BASIC_REG_TYPE(PN) /* p[0-15] */ \
281 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
282 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
283 /* Typecheck: same, plus SVE registers. */ \
284 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
285 | REG_TYPE(ZN)) \
286 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
287 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
289 /* Typecheck: same, plus SVE registers. */ \
290 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
291 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
292 | REG_TYPE(ZN)) \
293 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
294 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
295 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
296 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
297 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
298 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
299 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
300 /* Typecheck: any [BHSDQ]P FP. */ \
301 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
302 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
303 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
304 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
306 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
307 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
308 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
309 be used for SVE instructions, since Zn and Pn are valid symbols \
310 in other contexts. */ \
311 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
313 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
314 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
315 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
316 | REG_TYPE(ZN) | REG_TYPE(PN)) \
317 /* Any integer register; used for error messages only. */ \
318 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
320 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
321 /* Pseudo type to mark the end of the enumerator sequence. */ \
322 BASIC_REG_TYPE(MAX)
323
324 #undef BASIC_REG_TYPE
325 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
326 #undef MULTI_REG_TYPE
327 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
328
329 /* Register type enumerators. */
330 typedef enum aarch64_reg_type_
331 {
332 /* A list of REG_TYPE_*. */
333 AARCH64_REG_TYPES
334 } aarch64_reg_type;
335
336 #undef BASIC_REG_TYPE
337 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
338 #undef REG_TYPE
339 #define REG_TYPE(T) (1 << REG_TYPE_##T)
340 #undef MULTI_REG_TYPE
341 #define MULTI_REG_TYPE(T,V) V,
342
343 /* Structure for a hash table entry for a register. */
344 typedef struct
345 {
346 const char *name;
347 unsigned char number;
348 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
349 unsigned char builtin;
350 } reg_entry;
351
352 /* Values indexed by aarch64_reg_type to assist the type checking. */
353 static const unsigned reg_type_masks[] =
354 {
355 AARCH64_REG_TYPES
356 };
357
358 #undef BASIC_REG_TYPE
359 #undef REG_TYPE
360 #undef MULTI_REG_TYPE
361 #undef AARCH64_REG_TYPES
362
/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be synchronized with the aarch64_reg_type
   definitions above.  */
static const char *
get_reg_expected_msg (aarch64_reg_type reg_type)
{
  const char *msg;

  switch (reg_type)
    {
    case REG_TYPE_R_32:
      msg = N_("integer 32-bit register expected");
      break;
    case REG_TYPE_R_64:
      msg = N_("integer 64-bit register expected");
      break;
    case REG_TYPE_R_N:
      msg = N_("integer register expected");
      break;
    case REG_TYPE_R64_SP:
      msg = N_("64-bit integer or SP register expected");
      break;
    case REG_TYPE_SVE_BASE:
      msg = N_("base register expected");
      break;
    case REG_TYPE_R_Z:
      msg = N_("integer or zero register expected");
      break;
    case REG_TYPE_SVE_OFFSET:
      msg = N_("offset register expected");
      break;
    case REG_TYPE_R_SP:
      msg = N_("integer or SP register expected");
      break;
    case REG_TYPE_R_Z_SP:
      msg = N_("integer, zero or SP register expected");
      break;
    case REG_TYPE_FP_B:
      msg = N_("8-bit SIMD scalar register expected");
      break;
    case REG_TYPE_FP_H:
      msg = N_("16-bit SIMD scalar or floating-point half precision "
	       "register expected");
      break;
    case REG_TYPE_FP_S:
      msg = N_("32-bit SIMD scalar or floating-point single precision "
	       "register expected");
      break;
    case REG_TYPE_FP_D:
      msg = N_("64-bit SIMD scalar or floating-point double precision "
	       "register expected");
      break;
    case REG_TYPE_FP_Q:
      msg = N_("128-bit SIMD scalar or floating-point quad precision "
	       "register expected");
      break;
    case REG_TYPE_R_Z_BHSDQ_V:
    case REG_TYPE_R_Z_SP_BHSDQ_VZP:
      msg = N_("register expected");
      break;
    case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
      msg = N_("SIMD scalar or floating-point register expected");
      break;
    case REG_TYPE_VN:		/* any V reg  */
      msg = N_("vector register expected");
      break;
    case REG_TYPE_ZN:
      msg = N_("SVE vector register expected");
      break;
    case REG_TYPE_PN:
      msg = N_("SVE predicate register expected");
      break;
    default:
      /* Unreachable for any type listed in AARCH64_REG_TYPES;
	 as_fatal does not return.  */
      as_fatal (_("invalid register type %d"), reg_type);
    }
  return msg;
}
440
441 /* Some well known registers that we refer to directly elsewhere. */
442 #define REG_SP 31
443 #define REG_ZR 31
444
445 /* Instructions take 4 bytes in the object file. */
446 #define INSN_SIZE 4
447
448 static htab_t aarch64_ops_hsh;
449 static htab_t aarch64_cond_hsh;
450 static htab_t aarch64_shift_hsh;
451 static htab_t aarch64_sys_regs_hsh;
452 static htab_t aarch64_pstatefield_hsh;
453 static htab_t aarch64_sys_regs_ic_hsh;
454 static htab_t aarch64_sys_regs_dc_hsh;
455 static htab_t aarch64_sys_regs_at_hsh;
456 static htab_t aarch64_sys_regs_tlbi_hsh;
457 static htab_t aarch64_sys_regs_sr_hsh;
458 static htab_t aarch64_reg_hsh;
459 static htab_t aarch64_barrier_opt_hsh;
460 static htab_t aarch64_nzcv_hsh;
461 static htab_t aarch64_pldop_hsh;
462 static htab_t aarch64_hint_opt_hsh;
463
464 /* Stuff needed to resolve the label ambiguity
465 As:
466 ...
467 label: <insn>
468 may differ from:
469 ...
470 label:
471 <insn> */
472
473 static symbolS *last_label_seen;
474
475 /* Literal pool structure. Held on a per-section
476 and per-sub-section basis. */
477
478 #define MAX_LITERAL_POOL_SIZE 1024
479 typedef struct literal_expression
480 {
481 expressionS exp;
482 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
483 LITTLENUM_TYPE * bignum;
484 } literal_expression;
485
486 typedef struct literal_pool
487 {
488 literal_expression literals[MAX_LITERAL_POOL_SIZE];
489 unsigned int next_free_entry;
490 unsigned int id;
491 symbolS *symbol;
492 segT section;
493 subsegT sub_section;
494 int size;
495 struct literal_pool *next;
496 } literal_pool;
497
498 /* Pointer to a linked list of literal pools. */
499 static literal_pool *list_of_pools = NULL;
500 \f
501 /* Pure syntax. */
502
503 /* This array holds the chars that always start a comment. If the
504 pre-processor is disabled, these aren't very useful. */
505 const char comment_chars[] = "";
506
507 /* This array holds the chars that only start a comment at the beginning of
508 a line. If the line seems to have the form '# 123 filename'
509 .line and .file directives will appear in the pre-processed output. */
510 /* Note that input_file.c hand checks for '#' at the beginning of the
511 first line of the input file. This is because the compiler outputs
512 #NO_APP at the beginning of its output. */
513 /* Also note that comments like this one will always work. */
514 const char line_comment_chars[] = "#";
515
516 const char line_separator_chars[] = ";";
517
518 /* Chars that can be used to separate mant
519 from exp in floating point numbers. */
520 const char EXP_CHARS[] = "eE";
521
522 /* Chars that mean this number is a floating point constant. */
523 /* As in 0f12.456 */
524 /* or 0d1.2345e12 */
525
526 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhH";
527
528 /* Prefix character that indicates the start of an immediate value. */
529 #define is_immediate_prefix(C) ((C) == '#')
530
531 /* Separator character handling. */
532
533 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
534
535 static inline bfd_boolean
536 skip_past_char (char **str, char c)
537 {
538 if (**str == c)
539 {
540 (*str)++;
541 return TRUE;
542 }
543 else
544 return FALSE;
545 }
546
547 #define skip_past_comma(str) skip_past_char (str, ',')
548
549 /* Arithmetic expressions (possibly involving symbols). */
550
551 static bfd_boolean in_my_get_expression_p = FALSE;
552
553 /* Third argument to my_get_expression. */
554 #define GE_NO_PREFIX 0
555 #define GE_OPT_PREFIX 1
556
/* Return TRUE if the string pointed to by *STR is successfully parsed
   as a valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   PREFIX_MODE is GE_NO_PREFIX or GE_OPT_PREFIX; in the latter case an
   optional leading '#' immediate prefix is consumed.  If REJECT_ABSENT
   is non-zero, an O_absent result is also treated as a failure.  On
   both success and failure *STR is advanced to where parsing stopped.  */

static bfd_boolean
my_get_expression (expressionS * ep, char **str, int prefix_mode,
		   int reject_absent)
{
  char *save_in;
  segT seg;
  int prefix_present_p = 0;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX:
      break;
    case GE_OPT_PREFIX:
      /* Consume an optional '#' before the expression proper.  */
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present_p = 1;
	}
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Temporarily point the global input_line_pointer at *STR so the
     generic expression () parser can be used; in_my_get_expression_p
     lets md_operand flag bad operands back to us as O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression_p = TRUE;
  seg = expression (ep);
  in_my_get_expression_p = FALSE;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand ().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (prefix_present_p && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return FALSE;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return FALSE;
    }
#else
  (void) seg;
#endif

  /* Restore the global input pointer and report where we stopped.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return TRUE;
}
623
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* If this is a bfloat16 type, then parse it slightly differently -
     as it does not follow the IEEE standard exactly.  */
  if (type == 'b')
    {
      char * t;
      LITTLENUM_TYPE words[MAX_LITTLENUMS];
      FLONUM_TYPE generic_float;

      /* One littlenum of output with an 8-bit exponent, matching the
	 bfloat16 layout; special values are fixed up below.  */
      t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float);

      if (t)
	input_line_pointer = t;
      else
	return _("invalid floating point number");

      switch (generic_float.sign)
	{
	/* Is +Inf.  */
	case 'P':
	  words[0] = 0x7f80;
	  break;

	/* Is -Inf.  */
	case 'N':
	  words[0] = 0xff80;
	  break;

	/* Is NaN.  */
	/* bfloat16 has two types of NaN - quiet and signalling.
	   Quiet NaN has bit[6] == 1 && fraction != 0, whereas
	   signalling NaNs have bit[6] == 0 && fraction != 0.
	   Chose this specific encoding as it is the same form
	   as used by other IEEE 754 encodings in GAS.  */
	case 0:
	  words[0] = 0x7fff;
	  break;

	default:
	  break;
	}

      /* A bfloat16 constant occupies a single 16-bit littlenum.  */
      *sizeP = 2;

      md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));

      return NULL;
    }

  /* All other float types follow the standard IEEE path.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
682
683 /* We handle all bad expressions here, so that we can report the faulty
684 instruction in the error message. */
685 void
686 md_operand (expressionS * exp)
687 {
688 if (in_my_get_expression_p)
689 exp->X_op = O_illegal;
690 }
691
692 /* Immediate values. */
693
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which
   is set will be the most meaningful.  This function records ERROR only
   when nothing has been recorded yet, so later (cascading) errors do
   not overwrite it.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
705
/* Similar to first_error, but this function accepts a formatted error
   message: the message is recorded only when no earlier error has been
   recorded for the current instruction.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single static buffer will not cause error messages for
     different instructions to pollute each other; this is because at the
     end of processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* The formatted message is expected to fit within the buffer.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
730
731 /* Register parsing. */
732
/* Generic register parser which is called by other specialized
   register parsers.
   CCP points to what should be the beginning of a register name.
   If it is indeed a valid register name, advance CCP over it and
   return the reg_entry structure; otherwise return NULL.
   It does not issue diagnostics.  */

static reg_entry *
parse_reg (char **ccp)
{
  char *start = *ccp;
  char *p;
  reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif

  /* A register name must begin with a letter...  */
  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  /* ...and continues with letters, digits or underscores.  */
  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the candidate name up in the table of known registers.  */
  reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}
769
770 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
771 return FALSE. */
772 static bfd_boolean
773 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
774 {
775 return (reg_type_masks[type] & (1 << reg->type)) != 0;
776 }
777
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit general registers (including WSP and WZR) take the W
       qualifier.  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit general registers (including SP and XZR) take the X
       qualifier.  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* An SVE Z register is only accepted when REG_TYPE allows it,
	 and only with an explicit ".s" or ".d" element-size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Skip the ".s"/".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
835
836 /* Try to parse a base or offset register. Return the register entry
837 on success, setting *QUALIFIER to the register qualifier. Return null
838 otherwise.
839
840 Note that this function does not issue any diagnostics. */
841
842 static const reg_entry *
843 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
844 {
845 return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
846 }
847
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bfd_boolean
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector/predicate registers, and bare element-size suffixes
     like "Vn.b", carry no leading element count: record width 0.  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  /* Otherwise the suffix starts with a decimal element count, which
     must be one of 1, 2, 4, 8 or 16.  */
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return FALSE;
    }

 elt_size:
  /* Map the element-size letter to its type and size in bits.  */
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* A 'q' element is only accepted for SVE registers or as the
	 "1q" form.  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return FALSE;
    }
  /* A non-zero element count must describe a 64-bit or 128-bit vector,
     or one of the short 2h/4b forms.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return FALSE;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return TRUE;
}
933
/* *STR contains an SVE zero/merge predication suffix.  Parse it into
   *PARSED_TYPE and point *STR at the end of the suffix.  Return TRUE
   on success; record an error and return FALSE otherwise.  */

static bfd_boolean
parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;

  /* Skip '/'.  */
  gas_assert (*ptr == '/');
  ptr++;
  switch (TOLOWER (*ptr))
    {
    case 'z':
      /* "/z": zeroing predication.  */
      parsed_type->type = NT_zero;
      break;
    case 'm':
      /* "/m": merging predication.  */
      parsed_type->type = NT_merge;
      break;
    default:
      if (*ptr != '\0' && *ptr != ',')
	first_error_fmt (_("unexpected character `%c' in predication type"),
			 *ptr);
      else
	first_error (_("missing predication type"));
      return FALSE;
    }
  /* Predication suffixes carry no width.  */
  parsed_type->width = 0;
  *str = ptr + 1;
  return TRUE;
}
965
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bfd_boolean is_typed_vecreg = FALSE;

  /* Start from an "untyped, unindexed" description.  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE down to the type actually parsed.  */
  type = reg->type;

  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      /* A '.' introduces a vector shape suffix; a '/' on a predicate
	 register introduces a zero/merge predication suffix.  */
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = TRUE;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* The element index must be a constant expression.  */
      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1101
1102 /* Parse register.
1103
1104 Return the register number on success; return PARSE_FAIL otherwise.
1105
1106 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1107 the register (e.g. NEON double or quad reg when either has been requested).
1108
1109 If this is a NEON vector register with additional type information, fill
1110 in the struct pointed to by VECTYPE (if non-NULL).
1111
1112 This parser does not handle register list. */
1113
1114 static int
1115 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1116 aarch64_reg_type *rtype, struct vector_type_el *vectype)
1117 {
1118 struct vector_type_el atype;
1119 char *str = *ccp;
1120 int reg = parse_typed_reg (&str, type, rtype, &atype,
1121 /*in_reg_list= */ FALSE);
1122
1123 if (reg == PARSE_FAIL)
1124 return PARSE_FAIL;
1125
1126 if (vectype)
1127 *vectype = atype;
1128
1129 *ccp = str;
1130
1131 return reg;
1132 }
1133
1134 static inline bfd_boolean
1135 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1136 {
1137 return
1138 e1.type == e2.type
1139 && e1.defined == e2.defined
1140 && e1.width == e2.width && e1.index == e2.index;
1141 }
1142
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bfd_boolean error = FALSE;
  bfd_boolean expect_index = FALSE;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  /* typeinfo_first records the shape of the first register so that all
     later registers can be checked against it.  */
  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  /* The register before the '-' is the low end of the range.  */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ TRUE);
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = TRUE;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = TRUE;
	  continue;
	}

      /* If any element carries an index, the whole list must end with
	 one; the index itself is parsed after the closing '}'.  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = TRUE;

      if (in_range)
	{
	  /* Ranges must be non-decreasing, e.g. {v0.4s-v3.4s}.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = TRUE;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = TRUE;
	    }
	}
      /* Pack each register of the (possibly one-element) range into
	 5-bit fields of ret_val, lowest register first.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Note the comma operator: when no ',' follows, tentatively set
     in_range and continue only if the next character is '-'.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = TRUE;
    }
  str++;

  skip_whitespace (str);

  /* Parse the trailing [index] shared by all elements of the list.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = TRUE;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = TRUE;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = TRUE;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = TRUE;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = TRUE;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  /* Low 2 bits: register count minus one; 5-bit register numbers above.  */
  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1303
1304 /* Directives: register aliases. */
1305
1306 static reg_entry *
1307 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1308 {
1309 reg_entry *new;
1310 const char *name;
1311
1312 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1313 {
1314 if (new->builtin)
1315 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1316 str);
1317
1318 /* Only warn about a redefinition if it's not defined as the
1319 same register. */
1320 else if (new->number != number || new->type != type)
1321 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1322
1323 return NULL;
1324 }
1325
1326 name = xstrdup (str);
1327 new = XNEW (reg_entry);
1328
1329 new->name = name;
1330 new->number = number;
1331 new->type = type;
1332 new->builtin = FALSE;
1333
1334 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1335
1336 return new;
1337 }
1338
1339 /* Look for the .req directive. This is of the form:
1340
1341 new_register_name .req existing_register_name
1342
1343 If we find one, or if it looks sufficiently like one that we want to
1344 handle any error here, return TRUE. Otherwise return FALSE. */
1345
static bfd_boolean
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  /* The right-hand side must already be a known register or alias.  */
  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      /* Return TRUE: the statement was a (malformed) .req, so the
	 caller should not try to parse it as anything else.  */
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only attempt the upper-case variant when it differs from the
	 name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  /* insert_reg_alias copies the name, so the working buffer is always
     freed here.  */
  free (nbuf);
  return TRUE;
}
1418
1419 /* Should never be called, as .req goes between the alias and the
1420 register name, not at the beginning of the line. */
/* Handler for a stand-alone ".req" pseudo-op.  Should never be called,
   as .req goes between the alias and the register name, not at the
   beginning of the line; reaching here therefore means the directive
   was used with invalid syntax.  */
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1426
1427 /* The .unreq directive deletes an alias which was previously defined
1428 by .req. For example:
1429
1430 my_alias .req r11
1431 .unreq my_alias */
1432
1433 static void
1434 s_unreq (int a ATTRIBUTE_UNUSED)
1435 {
1436 char *name;
1437 char saved_char;
1438
1439 name = input_line_pointer;
1440
1441 while (*input_line_pointer != 0
1442 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1443 ++input_line_pointer;
1444
1445 saved_char = *input_line_pointer;
1446 *input_line_pointer = 0;
1447
1448 if (!*name)
1449 as_bad (_("invalid syntax for .unreq directive"));
1450 else
1451 {
1452 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1453
1454 if (!reg)
1455 as_bad (_("unknown register alias '%s'"), name);
1456 else if (reg->builtin)
1457 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1458 name);
1459 else
1460 {
1461 char *p;
1462 char *nbuf;
1463
1464 str_hash_delete (aarch64_reg_hsh, name);
1465 free ((char *) reg->name);
1466 free (reg);
1467
1468 /* Also locate the all upper case and all lower case versions.
1469 Do not complain if we cannot find one or the other as it
1470 was probably deleted above. */
1471
1472 nbuf = strdup (name);
1473 for (p = nbuf; *p; p++)
1474 *p = TOUPPER (*p);
1475 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1476 if (reg)
1477 {
1478 str_hash_delete (aarch64_reg_hsh, nbuf);
1479 free ((char *) reg->name);
1480 free (reg);
1481 }
1482
1483 for (p = nbuf; *p; p++)
1484 *p = TOLOWER (*p);
1485 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1486 if (reg)
1487 {
1488 str_hash_delete (aarch64_reg_hsh, nbuf);
1489 free ((char *) reg->name);
1490 free (reg);
1491 }
1492
1493 free (nbuf);
1494 }
1495 }
1496
1497 *input_line_pointer = saved_char;
1498 demand_empty_rest_of_line ();
1499 }
1500
1501 /* Directives: Instruction set selection. */
1502
1503 #ifdef OBJ_ELF
1504 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1505 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1506 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1507 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1508
1509 /* Create a new mapping symbol for the transition to STATE. */
1510
static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  /* Pick the AAELF64 mapping symbol name for the new state:
     $d marks data, $x marks A64 instructions.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  /* A symbol at offset 0 supersedes any previous one there.  */
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      /* Mapping symbols within a frag are emitted in address order;
	 an equal-valued predecessor is replaced by the new symbol.  */
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1564
1565 /* We must sometimes convert a region marked as code to data during
1566 code alignment, if an odd number of bytes have to be padded. The
1567 code mapping symbol is pushed to an aligned address. */
1568
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol at exactly the padding start,
     remove it: it is about to be replaced by the $d / STATE pair.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* The symbol being removed is also the frag's first map.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the BYTES of padding as data, then resume STATE after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1592
1593 static void mapping_state_2 (enum mstate state, int max_chars);
1594
1595 /* Set the mapping state to STATE. Only call this when about to
1596 emit some STATE bytes to the file. */
1597
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      /* The section started with data before its first instruction;
	 retroactively mark that prefix with $d at offset 0.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  /* Emit the mapping symbol for STATE at the current location.  */
  mapping_state_2 (state, 0);
}
1636
1637 /* Same as mapping_state, but MAX_CHARS bytes have already been
1638 allocated. Put the mapping symbol that far back. */
1639
1640 static void
1641 mapping_state_2 (enum mstate state, int max_chars)
1642 {
1643 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1644
1645 if (!SEG_NORMAL (now_seg))
1646 return;
1647
1648 if (mapstate == state)
1649 /* The mapping symbol has already been emitted.
1650 There is nothing else to do. */
1651 return;
1652
1653 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1654 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1655 }
1656 #else
1657 #define mapping_state(x) /* nothing */
1658 #define mapping_state_2(x, y) /* nothing */
1659 #endif
1660
1661 /* Directives: sectioning and alignment. */
1662
/* Handle the .bss directive: switch to the BSS section and mark the
   current mapping state as data.  */
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1672
/* Handle the .even directive: align the current location to a 2-byte
   boundary and record that alignment on the section.  */
static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1684
1685 /* Directives: Literal pools. */
1686
1687 static literal_pool *
1688 find_literal_pool (int size)
1689 {
1690 literal_pool *pool;
1691
1692 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1693 {
1694 if (pool->section == now_seg
1695 && pool->sub_section == now_subseg && pool->size == size)
1696 break;
1697 }
1698
1699 return pool;
1700 }
1701
1702 static literal_pool *
1703 find_or_make_literal_pool (int size)
1704 {
1705 /* Next literal pool ID number. */
1706 static unsigned int latest_pool_num = 1;
1707 literal_pool *pool;
1708
1709 pool = find_literal_pool (size);
1710
1711 if (pool == NULL)
1712 {
1713 /* Create a new pool. */
1714 pool = XNEW (literal_pool);
1715 if (!pool)
1716 return NULL;
1717
1718 /* Currently we always put the literal pool in the current text
1719 section. If we were generating "small" model code where we
1720 knew that all code and initialised data was within 1MB then
1721 we could output literals to mergeable, read-only data
1722 sections. */
1723
1724 pool->next_free_entry = 0;
1725 pool->section = now_seg;
1726 pool->sub_section = now_subseg;
1727 pool->size = size;
1728 pool->next = list_of_pools;
1729 pool->symbol = NULL;
1730
1731 /* Add it to the list. */
1732 list_of_pools = pool;
1733 }
1734
1735 /* New pools, and emptied pools, will have a NULL symbol. */
1736 if (pool->symbol == NULL)
1737 {
1738 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1739 &zero_address_frag, 0);
1740 pool->id = latest_pool_num++;
1741 }
1742
1743 /* Done. */
1744 return pool;
1745 }
1746
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   On success *EXP is rewritten to an O_symbol reference into the pool
   (symbol plus byte offset of the entry).
   Return TRUE on success, otherwise return FALSE.  */
static bfd_boolean
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool, so identical
     constants and symbol references share one entry.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return FALSE;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Redirect the caller's expression at the pool entry: the pool
     label plus the entry's byte offset.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return TRUE;
}
1806
1807 /* Can't use symbol_new here, so have to create a symbol and then at
1808 a later date assign it a value. That's what these functions do. */
1809
static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the symbol owns stable
     storage independent of the caller's buffer.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending after the table is frozen would corrupt output.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  /* Run the object-format and target hooks for a newly placed symbol.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
1857
1858
/* Handle the .ltorg/.pool directive: dump every non-empty literal
   pool (4-, 8- and 16-byte entry sizes) for the current (sub)section
   at the current location, then mark the pools as empty.  */
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Visit the pools of entry size 4 (align 2), 8 (align 3) and
     16 (align 4) bytes.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
         Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      /* Literal data must be marked $d for disassembly.  */
      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* \002 keeps the generated label out of the user namespace.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Pin the pool's symbol (created earlier with a fake location)
	 to the spot where the pool is actually emitted.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  /* The saved bignum copy is no longer needed once emitted.  */
	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1917
1918 #ifdef OBJ_ELF
1919 /* Forward declarations for functions below, in the MD interface
1920 section. */
1921 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1922 static struct reloc_table_entry * find_reloc_table_entry (char **);
1923
1924 /* Directives: Data. */
1925 /* N.B. the support for relocation suffix in this directive needs to be
1926 implemented properly. */
1927
static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* Data directives always switch the mapping state to $d.  */
  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Look for an optional relocation specifier of the form
	     "#:name:" or ":name:" after the symbol.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		/* Relocation suffixes on data directives are parsed
		   but not yet supported; reject them explicitly.  */
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1979
1980 /* Mark symbol that it follows a variant PCS convention. */
1981
1982 static void
1983 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
1984 {
1985 char *name;
1986 char c;
1987 symbolS *sym;
1988 asymbol *bfdsym;
1989 elf_symbol_type *elfsym;
1990
1991 c = get_symbol_name (&name);
1992 if (!*name)
1993 as_bad (_("Missing symbol name in directive"));
1994 sym = symbol_find_or_make (name);
1995 restore_line_pointer (c);
1996 demand_empty_rest_of_line ();
1997 bfdsym = symbol_get_bfdsym (sym);
1998 elfsym = elf_symbol_from (bfdsym);
1999 gas_assert (elfsym);
2000 elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2001 }
2002 #endif /* OBJ_ELF */
2003
2004 /* Output a 32-bit word, but mark as an instruction. */
2005
static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instruction words are always emitted little-endian; on a
	 big-endian target the constant must be byte-swapped first.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2056
/* Handle the .cfi_b_key_frame directive: record on the current FDE
   that return addresses in this frame are signed with the B pointer
   authentication key.  Assumes an open CFI frame (frch_cfi_data set
   by a preceding .cfi_startproc) -- TODO confirm no guard is needed.  */
static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2064
2065 #ifdef OBJ_ELF
2066 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction. */
2067
static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the add that must follow
     immediately after this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2080
2081 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
2082
static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2100
2101 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction. */
2102
static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands on the ldr that must follow
     immediately after this directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2115 #endif /* OBJ_ELF */
2116
2117 static void s_aarch64_arch (int);
2118 static void s_aarch64_cpu (int);
2119 static void s_aarch64_arch_extension (int);
2120
2121 /* This table describes all the machine specific pseudo-ops the assembler
2122 has to support. The fields are:
2123 pseudo-op name without dot
2124 function to call to execute this pseudo-op
2125 Integer arg to pass to the function. */
2126
const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* .pool is an alias for .ltorg: both flush literal pools.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* The integer argument is the data size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* The character argument selects the float format for float_cons.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2154 \f
2155
2156 /* Check whether STR points to a register name followed by a comma or the
2157 end of line; REG_TYPE indicates which register types are checked
2158 against. Return TRUE if STR is such a register name; otherwise return
2159 FALSE. The function does not intend to produce any diagnostics, but since
2160 the register parser aarch64_reg_parse, which is called by this function,
2161 does produce diagnostics, we call clear_error to clear any diagnostics
2162 that may be generated by aarch64_reg_parse.
2163 Also, the function returns FALSE directly if there is any user error
2164 present at the function entry. This prevents the existing diagnostics
2165 state from being spoiled.
2166 The function currently serves parse_constant_immediate and
2167 parse_big_immediate only. */
2168 static bfd_boolean
2169 reg_name_p (char *str, aarch64_reg_type reg_type)
2170 {
2171 int reg;
2172
2173 /* Prevent the diagnostics state from being spoiled. */
2174 if (error_p ())
2175 return FALSE;
2176
2177 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2178
2179 /* Clear the parsing error that may be set by the reg parser. */
2180 clear_error ();
2181
2182 if (reg == PARSE_FAIL)
2183 return FALSE;
2184
2185 skip_whitespace (str);
2186 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2187 return TRUE;
2188
2189 return FALSE;
2190 }
2191
2192 /* Parser functions used exclusively in instruction operands. */
2193
2194 /* Parse an immediate expression which may not be constant.
2195
2196 To prevent the expression parser from pushing a register name
2197 into the symbol table as an undefined symbol, firstly a check is
2198 done to find out whether STR is a register of type REG_TYPE followed
2199 by a comma or the end of line. Return FALSE if STR is such a string. */
2200
static bfd_boolean
parse_immediate_expression (char **str, expressionS *exp,
			    aarch64_reg_type reg_type)
{
  /* Refuse a bare register name so the expression parser does not
     intern it as an undefined symbol; this error is recoverable
     because the caller may retry the operand as a register.  */
  if (reg_name_p (*str, reg_type))
    {
      set_recoverable_error (_("immediate operand required"));
      return FALSE;
    }

  my_get_expression (exp, str, GE_OPT_PREFIX, 1);

  if (exp->X_op == O_absent)
    {
      set_fatal_syntax_error (_("missing immediate expression"));
      return FALSE;
    }

  return TRUE;
}
2221
2222 /* Constant immediate-value read function for use in insn parsing.
2223 STR points to the beginning of the immediate (with the optional
2224 leading #); *VAL receives the value. REG_TYPE says which register
2225 names should be treated as registers rather than as symbolic immediates.
2226
2227 Return TRUE on success; otherwise return FALSE. */
2228
2229 static bfd_boolean
2230 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2231 {
2232 expressionS exp;
2233
2234 if (! parse_immediate_expression (str, &exp, reg_type))
2235 return FALSE;
2236
2237 if (exp.X_op != O_constant)
2238 {
2239 set_syntax_error (_("constant expression required"));
2240 return FALSE;
2241 }
2242
2243 *val = exp.X_add_number;
2244 return TRUE;
2245 }
2246
2247 static uint32_t
2248 encode_imm_float_bits (uint32_t imm)
2249 {
2250 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2251 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2252 }
2253
2254 /* Return TRUE if the single-precision floating-point value encoded in IMM
2255 can be expressed in the AArch64 8-bit signed floating-point format with
2256 3-bit exponent and normalized 4 bits of precision; in other words, the
2257 floating-point value must be expressable as
2258 (+/-) n / 16 * power (2, r)
2259 where n and r are integers such that 16 <= n <=31 and -3 <= r <= 4. */
2260
2261 static bfd_boolean
2262 aarch64_imm_float_p (uint32_t imm)
2263 {
2264 /* If a single-precision floating-point value has the following bit
2265 pattern, it can be expressed in the AArch64 8-bit floating-point
2266 format:
2267
2268 3 32222222 2221111111111
2269 1 09876543 21098765432109876543210
2270 n Eeeeeexx xxxx0000000000000000000
2271
2272 where n, e and each x are either 0 or 1 independently, with
2273 E == ~ e. */
2274
2275 uint32_t pattern;
2276
2277 /* Prepare the pattern for 'Eeeeee'. */
2278 if (((imm >> 30) & 0x1) == 0)
2279 pattern = 0x3e000000;
2280 else
2281 pattern = 0x40000000;
2282
2283 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2284 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2285 }
2286
2287 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2288 as an IEEE float without any loss of precision. Store the value in
2289 *FPWORD if so. */
2290
2291 static bfd_boolean
2292 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2293 {
2294 /* If a double-precision floating-point value has the following bit
2295 pattern, it can be expressed in a float:
2296
2297 6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2298 3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2299 n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2300
2301 -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
2302 if Eeee_eeee != 1111_1111
2303
2304 where n, e, s and S are either 0 or 1 independently and where ~ is the
2305 inverse of E. */
2306
2307 uint32_t pattern;
2308 uint32_t high32 = imm >> 32;
2309 uint32_t low32 = imm;
2310
2311 /* Lower 29 bits need to be 0s. */
2312 if ((imm & 0x1fffffff) != 0)
2313 return FALSE;
2314
2315 /* Prepare the pattern for 'Eeeeeeeee'. */
2316 if (((high32 >> 30) & 0x1) == 0)
2317 pattern = 0x38000000;
2318 else
2319 pattern = 0x40000000;
2320
2321 /* Check E~~~. */
2322 if ((high32 & 0x78000000) != pattern)
2323 return FALSE;
2324
2325 /* Check Eeee_eeee != 1111_1111. */
2326 if ((high32 & 0x7ff00000) == 0x47f00000)
2327 return FALSE;
2328
2329 *fpword = ((high32 & 0xc0000000) /* 1 n bit and 1 E bit. */
2330 | ((high32 << 3) & 0x3ffffff8) /* 7 e and 20 s bits. */
2331 | (low32 >> 29)); /* 3 S bits. */
2332 return TRUE;
2333 }
2334
2335 /* Return true if we should treat OPERAND as a double-precision
2336 floating-point operand rather than a single-precision one. */
2337 static bfd_boolean
2338 double_precision_operand_p (const aarch64_opnd_info *operand)
2339 {
2340 /* Check for unsuffixed SVE registers, which are allowed
2341 for LDR and STR but not in instructions that require an
2342 immediate. We get better error messages if we arbitrarily
2343 pick one size, parse the immediate normally, and then
2344 report the match failure in the normal way. */
2345 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2346 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2347 }
2348
2349 /* Parse a floating-point immediate. Return TRUE on success and return the
2350 value in *IMMED in the format of IEEE754 single-precision encoding.
2351 *CCP points to the start of the string; DP_P is TRUE when the immediate
2352 is expected to be in double-precision (N.B. this only matters when
2353 hexadecimal representation is involved). REG_TYPE says which register
2354 names should be treated as registers rather than as symbolic immediates.
2355
2356 This routine accepts any IEEE float; it is up to the callers to reject
2357 invalid ones. */
2358
2359 static bfd_boolean
2360 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
2361 aarch64_reg_type reg_type)
2362 {
2363 char *str = *ccp;
2364 char *fpnum;
2365 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2366 int64_t val = 0;
2367 unsigned fpword = 0;
2368 bfd_boolean hex_p = FALSE;
2369
2370 skip_past_char (&str, '#');
2371
2372 fpnum = str;
2373 skip_whitespace (fpnum);
2374
2375 if (strncmp (fpnum, "0x", 2) == 0)
2376 {
2377 /* Support the hexadecimal representation of the IEEE754 encoding.
2378 Double-precision is expected when DP_P is TRUE, otherwise the
2379 representation should be in single-precision. */
2380 if (! parse_constant_immediate (&str, &val, reg_type))
2381 goto invalid_fp;
2382
2383 if (dp_p)
2384 {
2385 if (!can_convert_double_to_float (val, &fpword))
2386 goto invalid_fp;
2387 }
2388 else if ((uint64_t) val > 0xffffffff)
2389 goto invalid_fp;
2390 else
2391 fpword = val;
2392
2393 hex_p = TRUE;
2394 }
2395 else if (reg_name_p (str, reg_type))
2396 {
2397 set_recoverable_error (_("immediate operand required"));
2398 return FALSE;
2399 }
2400
2401 if (! hex_p)
2402 {
2403 int i;
2404
2405 if ((str = atof_ieee (str, 's', words)) == NULL)
2406 goto invalid_fp;
2407
2408 /* Our FP word must be 32 bits (single-precision FP). */
2409 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2410 {
2411 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2412 fpword |= words[i];
2413 }
2414 }
2415
2416 *immed = fpword;
2417 *ccp = str;
2418 return TRUE;
2419
2420 invalid_fp:
2421 set_fatal_syntax_error (_("invalid floating-point constant"));
2422 return FALSE;
2423 }
2424
2425 /* Less-generic immediate-value read function with the possibility of loading
2426 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2427 instructions.
2428
2429 To prevent the expression parser from pushing a register name into the
2430 symbol table as an undefined symbol, a check is firstly done to find
2431 out whether STR is a register of type REG_TYPE followed by a comma or
2432 the end of line. Return FALSE if STR is such a register. */
2433
2434 static bfd_boolean
2435 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2436 {
2437 char *ptr = *str;
2438
2439 if (reg_name_p (ptr, reg_type))
2440 {
2441 set_syntax_error (_("immediate operand required"));
2442 return FALSE;
2443 }
2444
2445 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2446
2447 if (inst.reloc.exp.X_op == O_constant)
2448 *imm = inst.reloc.exp.X_add_number;
2449
2450 *str = ptr;
2451
2452 return TRUE;
2453 }
2454
2455 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2456 if NEED_LIBOPCODES is non-zero, the fixup will need
2457 assistance from the libopcodes. */
2458
2459 static inline void
2460 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2461 const aarch64_opnd_info *operand,
2462 int need_libopcodes_p)
2463 {
2464 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2465 reloc->opnd = operand->type;
2466 if (need_libopcodes_p)
2467 reloc->need_libopcodes_p = 1;
2468 };
2469
2470 /* Return TRUE if the instruction needs to be fixed up later internally by
2471 the GAS; otherwise return FALSE. */
2472
2473 static inline bfd_boolean
2474 aarch64_gas_internal_fixup_p (void)
2475 {
2476 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2477 }
2478
2479 /* Assign the immediate value to the relevant field in *OPERAND if
2480 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2481 needs an internal fixup in a later stage.
2482 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2483 IMM.VALUE that may get assigned with the constant. */
2484 static inline void
2485 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2486 aarch64_opnd_info *operand,
2487 int addr_off_p,
2488 int need_libopcodes_p,
2489 int skip_p)
2490 {
2491 if (reloc->exp.X_op == O_constant)
2492 {
2493 if (addr_off_p)
2494 operand->addr.offset.imm = reloc->exp.X_add_number;
2495 else
2496 operand->imm.value = reloc->exp.X_add_number;
2497 reloc->type = BFD_RELOC_UNUSED;
2498 }
2499 else
2500 {
2501 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2502 /* Tell libopcodes to ignore this operand or not. This is helpful
2503 when one of the operands needs to be fixed up later but we need
2504 libopcodes to check the other operands. */
2505 operand->skip = skip_p;
2506 }
2507 }
2508
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;			/* Modifier text, without colons.  */
  int pc_rel;				/* Non-zero if PC-relative.  */
  bfd_reloc_code_real_type adr_type;	/* Reloc used with ADR (0: none).  */
  bfd_reloc_code_real_type adrp_type;	/* Reloc used with ADRP.  */
  bfd_reloc_code_real_type movw_type;	/* Reloc used with MOVZ/MOVN/MOVK.  */
  bfd_reloc_code_real_type add_type;	/* Reloc used with ADD immediate.  */
  bfd_reloc_code_real_type ldst_type;	/* Reloc used with LDR/STR offset.  */
  bfd_reloc_code_real_type ld_literal_type;  /* Reloc used with LDR
						(literal).  */
};
2528
/* Field order per entry: name, pc_rel, adr_type, adrp_type, movw_type,
   add_type, ldst_type, ld_literal_type (see struct reloc_table_entry).  */
static struct reloc_table_entry reloc_table[] = {
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVK, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVK.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3056
3057 /* Given the address of a pointer pointing to the textual name of a
3058 relocation as may appear in assembler source, attempt to find its
3059 details in reloc_table. The pointer will be updated to the character
3060 after the trailing colon. On failure, NULL will be returned;
3061 otherwise return the reloc_table_entry. */
3062
3063 static struct reloc_table_entry *
3064 find_reloc_table_entry (char **str)
3065 {
3066 unsigned int i;
3067 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3068 {
3069 int length = strlen (reloc_table[i].name);
3070
3071 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3072 && (*str)[length] == ':')
3073 {
3074 *str += (length + 1);
3075 return &reloc_table[i];
3076 }
3077 }
3078
3079 return NULL;
3080 }
3081
/* Mode argument to parse_shift and parse_shifter_operand.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3096
3097 /* Parse a <shift> operator on an AArch64 data processing instruction.
3098 Return TRUE on success; otherwise return FALSE. */
3099 static bfd_boolean
3100 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3101 {
3102 const struct aarch64_name_value_pair *shift_op;
3103 enum aarch64_modifier_kind kind;
3104 expressionS exp;
3105 int exp_has_prefix;
3106 char *s = *str;
3107 char *p = s;
3108
3109 for (p = *str; ISALPHA (*p); p++)
3110 ;
3111
3112 if (p == *str)
3113 {
3114 set_syntax_error (_("shift expression expected"));
3115 return FALSE;
3116 }
3117
3118 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3119
3120 if (shift_op == NULL)
3121 {
3122 set_syntax_error (_("shift operator expected"));
3123 return FALSE;
3124 }
3125
3126 kind = aarch64_get_operand_modifier (shift_op);
3127
3128 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3129 {
3130 set_syntax_error (_("invalid use of 'MSL'"));
3131 return FALSE;
3132 }
3133
3134 if (kind == AARCH64_MOD_MUL
3135 && mode != SHIFTED_MUL
3136 && mode != SHIFTED_MUL_VL)
3137 {
3138 set_syntax_error (_("invalid use of 'MUL'"));
3139 return FALSE;
3140 }
3141
3142 switch (mode)
3143 {
3144 case SHIFTED_LOGIC_IMM:
3145 if (aarch64_extend_operator_p (kind))
3146 {
3147 set_syntax_error (_("extending shift is not permitted"));
3148 return FALSE;
3149 }
3150 break;
3151
3152 case SHIFTED_ARITH_IMM:
3153 if (kind == AARCH64_MOD_ROR)
3154 {
3155 set_syntax_error (_("'ROR' shift is not permitted"));
3156 return FALSE;
3157 }
3158 break;
3159
3160 case SHIFTED_LSL:
3161 if (kind != AARCH64_MOD_LSL)
3162 {
3163 set_syntax_error (_("only 'LSL' shift is permitted"));
3164 return FALSE;
3165 }
3166 break;
3167
3168 case SHIFTED_MUL:
3169 if (kind != AARCH64_MOD_MUL)
3170 {
3171 set_syntax_error (_("only 'MUL' is permitted"));
3172 return FALSE;
3173 }
3174 break;
3175
3176 case SHIFTED_MUL_VL:
3177 /* "MUL VL" consists of two separate tokens. Require the first
3178 token to be "MUL" and look for a following "VL". */
3179 if (kind == AARCH64_MOD_MUL)
3180 {
3181 skip_whitespace (p);
3182 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3183 {
3184 p += 2;
3185 kind = AARCH64_MOD_MUL_VL;
3186 break;
3187 }
3188 }
3189 set_syntax_error (_("only 'MUL VL' is permitted"));
3190 return FALSE;
3191
3192 case SHIFTED_REG_OFFSET:
3193 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3194 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3195 {
3196 set_fatal_syntax_error
3197 (_("invalid shift for the register offset addressing mode"));
3198 return FALSE;
3199 }
3200 break;
3201
3202 case SHIFTED_LSL_MSL:
3203 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3204 {
3205 set_syntax_error (_("invalid shift operator"));
3206 return FALSE;
3207 }
3208 break;
3209
3210 default:
3211 abort ();
3212 }
3213
3214 /* Whitespace can appear here if the next thing is a bare digit. */
3215 skip_whitespace (p);
3216
3217 /* Parse shift amount. */
3218 exp_has_prefix = 0;
3219 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3220 exp.X_op = O_absent;
3221 else
3222 {
3223 if (is_immediate_prefix (*p))
3224 {
3225 p++;
3226 exp_has_prefix = 1;
3227 }
3228 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3229 }
3230 if (kind == AARCH64_MOD_MUL_VL)
3231 /* For consistency, give MUL VL the same shift amount as an implicit
3232 MUL #1. */
3233 operand->shifter.amount = 1;
3234 else if (exp.X_op == O_absent)
3235 {
3236 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3237 {
3238 set_syntax_error (_("missing shift amount"));
3239 return FALSE;
3240 }
3241 operand->shifter.amount = 0;
3242 }
3243 else if (exp.X_op != O_constant)
3244 {
3245 set_syntax_error (_("constant shift amount required"));
3246 return FALSE;
3247 }
3248 /* For parsing purposes, MUL #n has no inherent range. The range
3249 depends on the operand and will be checked by operand-specific
3250 routines. */
3251 else if (kind != AARCH64_MOD_MUL
3252 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3253 {
3254 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3255 return FALSE;
3256 }
3257 else
3258 {
3259 operand->shifter.amount = exp.X_add_number;
3260 operand->shifter.amount_present = 1;
3261 }
3262
3263 operand->shifter.operator_present = 1;
3264 operand->shifter.kind = kind;
3265
3266 *str = p;
3267 return TRUE;
3268 }
3269
3270 /* Parse a <shifter_operand> for a data processing instruction:
3271
3272 #<immediate>
3273 #<immediate>, LSL #imm
3274
3275 Validation of immediate operands is deferred to md_apply_fix.
3276
3277 Return TRUE on success; otherwise return FALSE. */
3278
3279 static bfd_boolean
3280 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3281 enum parse_shift_mode mode)
3282 {
3283 char *p;
3284
3285 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3286 return FALSE;
3287
3288 p = *str;
3289
3290 /* Accept an immediate expression. */
3291 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3292 return FALSE;
3293
3294 /* Accept optional LSL for arithmetic immediate values. */
3295 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3296 if (! parse_shift (&p, operand, SHIFTED_LSL))
3297 return FALSE;
3298
3299 /* Not accept any shifter for logical immediate values. */
3300 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3301 && parse_shift (&p, operand, mode))
3302 {
3303 set_syntax_error (_("unexpected shift operator"));
3304 return FALSE;
3305 }
3306
3307 *str = p;
3308 return TRUE;
3309 }
3310
3311 /* Parse a <shifter_operand> for a data processing instruction:
3312
3313 <Rm>
3314 <Rm>, <shift>
3315 #<immediate>
3316 #<immediate>, LSL #imm
3317
3318 where <shift> is handled by parse_shift above, and the last two
3319 cases are handled by the function above.
3320
3321 Validation of immediate operands is deferred to md_apply_fix.
3322
3323 Return TRUE on success; otherwise return FALSE. */
3324
3325 static bfd_boolean
3326 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3327 enum parse_shift_mode mode)
3328 {
3329 const reg_entry *reg;
3330 aarch64_opnd_qualifier_t qualifier;
3331 enum aarch64_operand_class opd_class
3332 = aarch64_get_operand_class (operand->type);
3333
3334 reg = aarch64_reg_parse_32_64 (str, &qualifier);
3335 if (reg)
3336 {
3337 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3338 {
3339 set_syntax_error (_("unexpected register in the immediate operand"));
3340 return FALSE;
3341 }
3342
3343 if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3344 {
3345 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3346 return FALSE;
3347 }
3348
3349 operand->reg.regno = reg->number;
3350 operand->qualifier = qualifier;
3351
3352 /* Accept optional shift operation on register. */
3353 if (! skip_past_comma (str))
3354 return TRUE;
3355
3356 if (! parse_shift (str, operand, mode))
3357 return FALSE;
3358
3359 return TRUE;
3360 }
3361 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3362 {
3363 set_syntax_error
3364 (_("integer register expected in the extended/shifted operand "
3365 "register"));
3366 return FALSE;
3367 }
3368
3369 /* We have a shifted immediate variable. */
3370 return parse_shifter_operand_imm (str, operand, mode);
3371 }
3372
3373 /* Return TRUE on success; return FALSE otherwise. */
3374
3375 static bfd_boolean
3376 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3377 enum parse_shift_mode mode)
3378 {
3379 char *p = *str;
3380
3381 /* Determine if we have the sequence of characters #: or just :
3382 coming next. If we do, then we check for a :rello: relocation
3383 modifier. If we don't, punt the whole lot to
3384 parse_shifter_operand. */
3385
3386 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3387 {
3388 struct reloc_table_entry *entry;
3389
3390 if (p[0] == '#')
3391 p += 2;
3392 else
3393 p++;
3394 *str = p;
3395
3396 /* Try to parse a relocation. Anything else is an error. */
3397 if (!(entry = find_reloc_table_entry (str)))
3398 {
3399 set_syntax_error (_("unknown relocation modifier"));
3400 return FALSE;
3401 }
3402
3403 if (entry->add_type == 0)
3404 {
3405 set_syntax_error
3406 (_("this relocation modifier is not allowed on this instruction"));
3407 return FALSE;
3408 }
3409
3410 /* Save str before we decompose it. */
3411 p = *str;
3412
3413 /* Next, we parse the expression. */
3414 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3415 return FALSE;
3416
3417 /* Record the relocation type (use the ADD variant here). */
3418 inst.reloc.type = entry->add_type;
3419 inst.reloc.pc_rel = entry->pc_rel;
3420
3421 /* If str is empty, we've reached the end, stop here. */
3422 if (**str == '\0')
3423 return TRUE;
3424
3425 /* Otherwise, we have a shifted reloc modifier, so rewind to
3426 recover the variable name and continue parsing for the shifter. */
3427 *str = p;
3428 return parse_shifter_operand_imm (str, operand, mode);
3429 }
3430
3431 return parse_shifter_operand (str, operand, mode);
3432 }
3433
3434 /* Parse all forms of an address expression. Information is written
3435 to *OPERAND and/or inst.reloc.
3436
3437 The A64 instruction set has the following addressing modes:
3438
3439 Offset
3440 [base] // in SIMD ld/st structure
3441 [base{,#0}] // in ld/st exclusive
3442 [base{,#imm}]
3443 [base,Xm{,LSL #imm}]
3444 [base,Xm,SXTX {#imm}]
3445 [base,Wm,(S|U)XTW {#imm}]
3446 Pre-indexed
3447 [base]! // in ldraa/ldrab exclusive
3448 [base,#imm]!
3449 Post-indexed
3450 [base],#imm
3451 [base],Xm // in SIMD ld/st structure
3452 PC-relative (literal)
3453 label
3454 SVE:
3455 [base,#imm,MUL VL]
3456 [base,Zm.D{,LSL #imm}]
3457 [base,Zm.S,(S|U)XTW {#imm}]
3458 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3459 [Zn.S,#imm]
3460 [Zn.D,#imm]
3461 [Zn.S{, Xm}]
3462 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3463 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3464 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3465
3466 (As a convenience, the notation "=immediate" is permitted in conjunction
3467 with the pc-relative literal load instructions to automatically place an
3468 immediate value or symbolic address in a nearby literal pool and generate
3469 a hidden label which references it.)
3470
3471 Upon a successful parsing, the address structure in *OPERAND will be
3472 filled in the following way:
3473
3474 .base_regno = <base>
3475 .offset.is_reg // 1 if the offset is a register
3476 .offset.imm = <imm>
3477 .offset.regno = <Rm>
3478
3479 For different addressing modes defined in the A64 ISA:
3480
3481 Offset
3482 .pcrel=0; .preind=1; .postind=0; .writeback=0
3483 Pre-indexed
3484 .pcrel=0; .preind=1; .postind=0; .writeback=1
3485 Post-indexed
3486 .pcrel=0; .preind=0; .postind=1; .writeback=1
3487 PC-relative (literal)
3488 .pcrel=1; .preind=1; .postind=0; .writeback=0
3489
3490 The shift/extension information, if any, will be stored in .shifter.
3491 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3492 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3493 corresponding register.
3494
3495 BASE_TYPE says which types of base register should be accepted and
3496 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3497 is the type of shifter that is allowed for immediate offsets,
3498 or SHIFTED_NONE if none.
3499
3500 In all other respects, it is the caller's responsibility to check
3501 for addressing modes not supported by the instruction, and to set
3502 inst.reloc.type. */
3503
static bfd_boolean
parse_address_main (char **str, aarch64_opnd_info *operand,
                    aarch64_opnd_qualifier_t *base_qualifier,
                    aarch64_opnd_qualifier_t *offset_qualifier,
                    aarch64_reg_type base_type, aarch64_reg_type offset_type,
                    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  /* Relocation/expression state accumulates in the global INST.  */
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': =immediate or label (PC-relative literal) form.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol>  */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
        {
          bfd_reloc_code_real_type ty;
          struct reloc_table_entry *entry;

          /* Try to parse a relocation modifier.  Anything else is
             an error.  */
          entry = find_reloc_table_entry (&p);
          if (! entry)
            {
              set_syntax_error (_("unknown relocation modifier"));
              return FALSE;
            }

          /* ADR uses a different relocation variant from the literal
             loads; pick the right one for this operand.  */
          switch (operand->type)
            {
            case AARCH64_OPND_ADDR_PCREL21:
              /* adr */
              ty = entry->adr_type;
              break;

            default:
              ty = entry->ld_literal_type;
              break;
            }

          if (ty == 0)
            {
              set_syntax_error
                (_("this relocation modifier is not allowed on this "
                   "instruction"));
              return FALSE;
            }

          /* #:<reloc_op>:  */
          if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
            {
              set_syntax_error (_("invalid relocation expression"));
              return FALSE;
            }

          /* #:<reloc_op>:<expr>  */
          /* Record the relocation type.  */
          inst.reloc.type = ty;
          inst.reloc.pc_rel = entry->pc_rel;
        }
      else
        {

          if (skip_past_char (&p, '='))
            /* =immediate; need to generate the literal in the literal pool. */
            inst.gen_lit_pool = 1;

          if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
            {
              set_syntax_error (_("invalid address"));
              return FALSE;
            }
        }

      *str = p;
      return TRUE;
    }

  /* [ */

  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return FALSE;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
        {
          if (!aarch64_check_reg_type (reg, offset_type))
            {
              set_syntax_error (_(get_reg_expected_msg (offset_type)));
              return FALSE;
            }

          /* [Xn,Rm */
          operand->addr.offset.regno = reg->number;
          operand->addr.offset.is_reg = 1;
          /* Shifted index.  */
          if (skip_past_comma (&p))
            {
              /* [Xn,Rm, */
              if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
                /* Use the diagnostics set in parse_shift, so not set new
                   error message here.  */
                return FALSE;
            }
          /* We only accept:
             [base,Xm]  # For vector plus scalar SVE2 indexing.
             [base,Xm{,LSL #imm}]
             [base,Xm,SXTX {#imm}]
             [base,Wm,(S|U)XTW {#imm}]  */
          if (operand->shifter.kind == AARCH64_MOD_NONE
              || operand->shifter.kind == AARCH64_MOD_LSL
              || operand->shifter.kind == AARCH64_MOD_SXTX)
            {
              /* 64-bit extend modes require a 64-bit offset register.  */
              if (*offset_qualifier == AARCH64_OPND_QLF_W)
                {
                  set_syntax_error (_("invalid use of 32-bit register offset"));
                  return FALSE;
                }
              /* Base and offset must normally agree in element size; the
                 SVE2 vector-plus-scalar form ([Zn.S, Xm]) is the one
                 allowed exception.  */
              if (aarch64_get_qualifier_esize (*base_qualifier)
                  != aarch64_get_qualifier_esize (*offset_qualifier)
                  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
                      || *base_qualifier != AARCH64_OPND_QLF_S_S
                      || *offset_qualifier != AARCH64_OPND_QLF_X))
                {
                  set_syntax_error (_("offset has different size from base"));
                  return FALSE;
                }
            }
          else if (*offset_qualifier == AARCH64_OPND_QLF_X)
            {
              set_syntax_error (_("invalid use of 64-bit register offset"));
              return FALSE;
            }
        }
      else
        {
          /* [Xn,#:<reloc_op>:<symbol>  */
          skip_past_char (&p, '#');
          if (skip_past_char (&p, ':'))
            {
              struct reloc_table_entry *entry;

              /* Try to parse a relocation modifier.  Anything else is
                 an error.  */
              if (!(entry = find_reloc_table_entry (&p)))
                {
                  set_syntax_error (_("unknown relocation modifier"));
                  return FALSE;
                }

              if (entry->ldst_type == 0)
                {
                  set_syntax_error
                    (_("this relocation modifier is not allowed on this "
                       "instruction"));
                  return FALSE;
                }

              /* [Xn,#:<reloc_op>:  */
              /* We now have the group relocation table entry corresponding to
                 the name in the assembler source.  Next, we parse the
                 expression.  */
              if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
                {
                  set_syntax_error (_("invalid relocation expression"));
                  return FALSE;
                }

              /* [Xn,#:<reloc_op>:<expr>  */
              /* Record the load/store relocation type.  */
              inst.reloc.type = entry->ldst_type;
              inst.reloc.pc_rel = entry->pc_rel;
            }
          else
            {
              if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
                {
                  set_syntax_error (_("invalid expression in the address"));
                  return FALSE;
                }
              /* [Xn,<expr>  */
              if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
                /* [Xn,<expr>,<shifter>  */
                if (! parse_shift (&p, operand, imm_shift_mode))
                  return FALSE;
            }
        }
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return FALSE;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
        {
          set_syntax_error (_("register offset not allowed in pre-indexed "
                              "addressing mode"));
          return FALSE;
        }
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
        {
          set_syntax_error (_("cannot combine pre- and post-indexing"));
          return FALSE;
        }

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
        {
          /* [Xn],Xm */
          if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
            {
              set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
              return FALSE;
            }

          operand->addr.offset.regno = reg->number;
          operand->addr.offset.is_reg = 1;
        }
      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
        {
          /* [Xn],#expr */
          set_syntax_error (_("invalid expression in the address"));
          return FALSE;
        }
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
        {
          if (operand->type == AARCH64_OPND_ADDR_SIMM10)
            {
              /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
              operand->addr.offset.is_reg = 0;
              operand->addr.offset.imm = 0;
              operand->addr.preind = 1;
            }
          else
            {
              /* Reject [Rn]!   */
              set_syntax_error (_("missing offset in the pre-indexed address"));
              return FALSE;
            }
        }
      else
        {
          operand->addr.preind = 1;
          if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
            {
              /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
              operand->addr.offset.is_reg = 1;
              operand->addr.offset.regno = REG_ZR;
              *offset_qualifier = AARCH64_OPND_QLF_X;
            }
          else
            {
              /* [Rn] is shorthand for [Rn,#0].  */
              inst.reloc.exp.X_op = O_constant;
              inst.reloc.exp.X_add_number = 0;
            }
        }
    }

  *str = p;
  return TRUE;
}
3804
3805 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3806 on success. */
3807 static bfd_boolean
3808 parse_address (char **str, aarch64_opnd_info *operand)
3809 {
3810 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3811 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3812 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3813 }
3814
3815 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3816 The arguments have the same meaning as for parse_address_main.
3817 Return TRUE on success. */
3818 static bfd_boolean
3819 parse_sve_address (char **str, aarch64_opnd_info *operand,
3820 aarch64_opnd_qualifier_t *base_qualifier,
3821 aarch64_opnd_qualifier_t *offset_qualifier)
3822 {
3823 return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3824 REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3825 SHIFTED_MUL_VL);
3826 }
3827
3828 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3829 Return TRUE on success; otherwise return FALSE. */
3830 static bfd_boolean
3831 parse_half (char **str, int *internal_fixup_p)
3832 {
3833 char *p = *str;
3834
3835 skip_past_char (&p, '#');
3836
3837 gas_assert (internal_fixup_p);
3838 *internal_fixup_p = 0;
3839
3840 if (*p == ':')
3841 {
3842 struct reloc_table_entry *entry;
3843
3844 /* Try to parse a relocation. Anything else is an error. */
3845 ++p;
3846 if (!(entry = find_reloc_table_entry (&p)))
3847 {
3848 set_syntax_error (_("unknown relocation modifier"));
3849 return FALSE;
3850 }
3851
3852 if (entry->movw_type == 0)
3853 {
3854 set_syntax_error
3855 (_("this relocation modifier is not allowed on this instruction"));
3856 return FALSE;
3857 }
3858
3859 inst.reloc.type = entry->movw_type;
3860 }
3861 else
3862 *internal_fixup_p = 1;
3863
3864 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3865 return FALSE;
3866
3867 *str = p;
3868 return TRUE;
3869 }
3870
3871 /* Parse an operand for an ADRP instruction:
3872 ADRP <Xd>, <label>
3873 Return TRUE on success; otherwise return FALSE. */
3874
3875 static bfd_boolean
3876 parse_adrp (char **str)
3877 {
3878 char *p;
3879
3880 p = *str;
3881 if (*p == ':')
3882 {
3883 struct reloc_table_entry *entry;
3884
3885 /* Try to parse a relocation. Anything else is an error. */
3886 ++p;
3887 if (!(entry = find_reloc_table_entry (&p)))
3888 {
3889 set_syntax_error (_("unknown relocation modifier"));
3890 return FALSE;
3891 }
3892
3893 if (entry->adrp_type == 0)
3894 {
3895 set_syntax_error
3896 (_("this relocation modifier is not allowed on this instruction"));
3897 return FALSE;
3898 }
3899
3900 inst.reloc.type = entry->adrp_type;
3901 }
3902 else
3903 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3904
3905 inst.reloc.pc_rel = 1;
3906
3907 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3908 return FALSE;
3909
3910 *str = p;
3911 return TRUE;
3912 }
3913
3914 /* Miscellaneous. */
3915
3916 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
3917 of SIZE tokens in which index I gives the token for field value I,
3918 or is null if field value I is invalid. REG_TYPE says which register
3919 names should be treated as registers rather than as symbolic immediates.
3920
3921 Return true on success, moving *STR past the operand and storing the
3922 field value in *VAL. */
3923
3924 static int
3925 parse_enum_string (char **str, int64_t *val, const char *const *array,
3926 size_t size, aarch64_reg_type reg_type)
3927 {
3928 expressionS exp;
3929 char *p, *q;
3930 size_t i;
3931
3932 /* Match C-like tokens. */
3933 p = q = *str;
3934 while (ISALNUM (*q))
3935 q++;
3936
3937 for (i = 0; i < size; ++i)
3938 if (array[i]
3939 && strncasecmp (array[i], p, q - p) == 0
3940 && array[i][q - p] == 0)
3941 {
3942 *val = i;
3943 *str = q;
3944 return TRUE;
3945 }
3946
3947 if (!parse_immediate_expression (&p, &exp, reg_type))
3948 return FALSE;
3949
3950 if (exp.X_op == O_constant
3951 && (uint64_t) exp.X_add_number < size)
3952 {
3953 *val = exp.X_add_number;
3954 *str = p;
3955 return TRUE;
3956 }
3957
3958 /* Use the default error for this operand. */
3959 return FALSE;
3960 }
3961
3962 /* Parse an option for a preload instruction. Returns the encoding for the
3963 option, or PARSE_FAIL. */
3964
3965 static int
3966 parse_pldop (char **str)
3967 {
3968 char *p, *q;
3969 const struct aarch64_name_value_pair *o;
3970
3971 p = q = *str;
3972 while (ISALNUM (*q))
3973 q++;
3974
3975 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
3976 if (!o)
3977 return PARSE_FAIL;
3978
3979 *str = q;
3980 return o->value;
3981 }
3982
3983 /* Parse an option for a barrier instruction. Returns the encoding for the
3984 option, or PARSE_FAIL. */
3985
3986 static int
3987 parse_barrier (char **str)
3988 {
3989 char *p, *q;
3990 const struct aarch64_name_value_pair *o;
3991
3992 p = q = *str;
3993 while (ISALPHA (*q))
3994 q++;
3995
3996 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3997 if (!o)
3998 return PARSE_FAIL;
3999
4000 *str = q;
4001 return o->value;
4002 }
4003
4004 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4005 return 0 if successful. Otherwise return PARSE_FAIL. */
4006
4007 static int
4008 parse_barrier_psb (char **str,
4009 const struct aarch64_name_value_pair ** hint_opt)
4010 {
4011 char *p, *q;
4012 const struct aarch64_name_value_pair *o;
4013
4014 p = q = *str;
4015 while (ISALPHA (*q))
4016 q++;
4017
4018 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4019 if (!o)
4020 {
4021 set_fatal_syntax_error
4022 ( _("unknown or missing option to PSB/TSB"));
4023 return PARSE_FAIL;
4024 }
4025
4026 if (o->value != 0x11)
4027 {
4028 /* PSB only accepts option name 'CSYNC'. */
4029 set_syntax_error
4030 (_("the specified option is not accepted for PSB/TSB"));
4031 return PARSE_FAIL;
4032 }
4033
4034 *str = q;
4035 *hint_opt = o;
4036 return 0;
4037 }
4038
4039 /* Parse an operand for CSR (CSRE instruction). */
4040
4041 static int
4042 parse_csr_operand (char **str)
4043 {
4044 char *p, *q;
4045
4046 p = q = *str;
4047 while (ISALPHA (*q))
4048 q++;
4049
4050 /* Instruction has only one operand PDEC which encodes Rt field of the
4051 operation to 0b11111. */
4052 if (strcasecmp(p, "pdec"))
4053 {
4054 set_syntax_error (_("CSR instruction accepts only PDEC"));
4055 return PARSE_FAIL;
4056 }
4057
4058 *str = q;
4059 return 0;
4060 }
4061
4062 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4063 return 0 if successful. Otherwise return PARSE_FAIL. */
4064
4065 static int
4066 parse_bti_operand (char **str,
4067 const struct aarch64_name_value_pair ** hint_opt)
4068 {
4069 char *p, *q;
4070 const struct aarch64_name_value_pair *o;
4071
4072 p = q = *str;
4073 while (ISALPHA (*q))
4074 q++;
4075
4076 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4077 if (!o)
4078 {
4079 set_fatal_syntax_error
4080 ( _("unknown option to BTI"));
4081 return PARSE_FAIL;
4082 }
4083
4084 switch (o->value)
4085 {
4086 /* Valid BTI operands. */
4087 case HINT_OPD_C:
4088 case HINT_OPD_J:
4089 case HINT_OPD_JC:
4090 break;
4091
4092 default:
4093 set_syntax_error
4094 (_("unknown option to BTI"));
4095 return PARSE_FAIL;
4096 }
4097
4098 *str = q;
4099 *hint_opt = o;
4100 return 0;
4101 }
4102
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-null, the matched register's flag bits are stored
   through it (0 for an implementation-defined S..._... name).  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
               int imple_defined_p, int pstatefield_p,
               uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, stopping at the
     first character that cannot be part of a register name; the copy is
     silently truncated at sizeof (buf) - 1 characters.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
        return PARSE_FAIL;
      else
        {
          /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
          unsigned int op0, op1, cn, cm, op2;

          if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
              != 5)
            return PARSE_FAIL;
          /* Range-check each field of the encoding.  */
          if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
            return PARSE_FAIL;
          /* Pack the fields into the MSR/MRS register encoding.  */
          value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
          if (flags)
            *flags = 0;
        }
    }
  else
    {
      /* Known name: diagnose names the selected processor does not
         support, and warn about deprecated names, but still return the
         encoding so assembly can continue.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
        as_bad (_("selected processor does not support PSTATE field "
                  "name '%s'"), buf);
      if (!pstatefield_p
          && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
                                               o->value, o->flags, o->features))
        as_bad (_("selected processor does not support system register "
                  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
        as_warn (_("system register name '%s' is deprecated and may be "
                   "removed in a future release"), buf);
      value = o->value;
      if (flags)
        *flags = o->flags;
    }

  *str = q;
  return value;
}
4176
/* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
   for the option, or NULL.  */

static const aarch64_sys_ins_reg *
parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_ins_reg *o;

  /* Copy a lower-cased version of the name into BUF, stopping at the
     first character that cannot be part of a register name; the copy is
     silently truncated at sizeof (buf) - 1 characters.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return NULL;

  o = str_hash_find (sys_ins_regs, buf);
  if (!o)
    return NULL;

  /* Diagnose unsupported or deprecated names, but still return the
     entry so assembly can continue.  */
  if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
                                        o->name, o->value, o->flags, 0))
    as_bad (_("selected processor does not support system register "
              "name '%s'"), buf);
  if (aarch64_sys_reg_deprecated_p (o->flags))
    as_warn (_("system register name '%s' is deprecated and may be "
               "removed in a future release"), buf);

  *str = q;
  return o;
}
4214 \f
/* Operand-parsing helper macros.  All of them expect STR and a `failure'
   label to be in scope in the enclosing parse loop, and jump to that
   label on any parse error.  */

/* Skip CHR in the input, or fail if it is absent.  */
#define po_char_or_fail(chr) do {                               \
    if (! skip_past_char (&str, chr))                           \
      goto failure;                                             \
} while (0)

/* Parse a register of REGTYPE into VAL (also setting RTYPE), failing
   with the default error on mismatch.  */
#define po_reg_or_fail(regtype) do {                            \
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);      \
    if (val == PARSE_FAIL)                                      \
      {                                                         \
        set_default_error ();                                   \
        goto failure;                                           \
      }                                                         \
  } while (0)

/* Parse a 32/64-bit integer register of REG_TYPE, storing its number
   and qualifier into the current operand INFO.  */
#define po_int_reg_or_fail(reg_type) do {                       \
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);           \
    if (!reg || !aarch64_check_reg_type (reg, reg_type))        \
      {                                                         \
        set_default_error ();                                   \
        goto failure;                                           \
      }                                                         \
    info->reg.regno = reg->number;                              \
    info->qualifier = qualifier;                                \
  } while (0)

/* Parse a constant immediate into VAL with no range check.  */
#define po_imm_nc_or_fail() do {                                \
    if (! parse_constant_immediate (&str, &val, imm_reg_type))  \
      goto failure;                                             \
  } while (0)

/* Parse a constant immediate into VAL and range-check it against
   [MIN, MAX], failing with a fatal error when out of range.  */
#define po_imm_or_fail(min, max) do {                           \
    if (! parse_constant_immediate (&str, &val, imm_reg_type))  \
      goto failure;                                             \
    if (val < min || val > max)                                 \
      {                                                         \
        set_fatal_syntax_error (_("immediate value out of range "\
                                  #min " to "#max));            \
        goto failure;                                           \
      }                                                         \
  } while (0)

/* Parse a symbolic-enum operand (see parse_enum_string) into VAL.  */
#define po_enum_or_fail(array) do {                             \
    if (!parse_enum_string (&str, &val, array,                  \
                            ARRAY_SIZE (array), imm_reg_type))  \
      goto failure;                                             \
  } while (0)

/* Evaluate EXPR, failing if it yields false.  */
#define po_misc_or_fail(expr) do {                              \
    if (!expr)                                                  \
      goto failure;                                             \
  } while (0)
4266 \f
/* Helpers that place already-validated operand values into their
   instruction-word bit fields.  Each returns the shifted/masked field,
   ready to be OR-ed into the opcode.  */

/* Encode the 12-bit imm field of Add/sub immediate (bits [21:10]).  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  return imm << 10;
}

/* Encode the shift amount field of Add/sub immediate (bit 22).  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  return cnt << 22;
}

/* Encode the imm field of Adr: immlo [1:0] -> [30:29],
   immhi [20:2] -> [23:5].  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t immlo = imm & 0x3;
  uint32_t immhi = (imm >> 2) & 0x7ffff;

  return (immlo << 29) | (immhi << 5);
}

/* Encode the immediate field of Move wide immediate (bits [20:5]).  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  return imm << 5;
}

/* Encode the 26-bit offset of unconditional branch (bits [25:0]).  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  return ofs & 0x03ffffff;
}

/* Encode the 19-bit offset of conditional branch and compare & branch
   (bits [23:5]).  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}

/* Encode the 19-bit offset of ld literal (bits [23:5]).  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  return (ofs & 0x7ffff) << 5;
}

/* Encode the 14-bit offset of test & branch (bits [18:5]).  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  return (ofs & 0x3fff) << 5;
}

/* Encode the 16-bit imm field of svc/hvc/smc (bits [20:5]).  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  return imm << 5;
}

/* Reencode add(s) to sub(s), or sub(s) to add(s), by flipping bit 30.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  return opcode ^ 0x40000000;
}

/* Force bit 30 on: reencode MOVN/MOVZ-class opcode as MOVZ.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | 0x40000000;
}

/* Force bit 30 off: reencode MOVN/MOVZ-class opcode as MOVN.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(uint32_t) 0x40000000;
}
4350
4351 /* Overall per-instruction processing. */
4352
4353 /* We need to be able to fix up arbitrary expressions in some statements.
4354 This is so that we can handle symbols that are an arbitrary distance from
4355 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4356 which returns part of an address in a form which will be valid for
4357 a data instruction. We do this by pushing the expression into a symbol
4358 in the expr_section, and creating a fix for that. */
4359
4360 static fixS *
4361 fix_new_aarch64 (fragS * frag,
4362 int where,
4363 short int size,
4364 expressionS * exp,
4365 int pc_rel,
4366 int reloc)
4367 {
4368 fixS *new_fix;
4369
4370 switch (exp->X_op)
4371 {
4372 case O_constant:
4373 case O_symbol:
4374 case O_add:
4375 case O_subtract:
4376 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4377 break;
4378
4379 default:
4380 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4381 pc_rel, reloc);
4382 break;
4383 }
4384 return new_fix;
4385 }
4386 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;

#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The strings must be
   kept in the same order as the aarch64_operand_error_kind enumerators,
   since they are indexed by error kind.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4408
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bfd_boolean
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
                                 enum aarch64_operand_error_kind rhs)
{
  /* The comparison below relies on the enumerators being declared in
     ascending order of severity; these asserts verify that assumption
     at runtime so a reordering of the enum cannot silently break it.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4429
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.

   NOTE: the result points to a static buffer, so it is only valid until
   the next call and the routine is not reentrant.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4458
4459 static void
4460 reset_aarch64_instruction (aarch64_instruction *instruction)
4461 {
4462 memset (instruction, '\0', sizeof (aarch64_instruction));
4463 instruction->reloc.type = BFD_RELOC_UNUSED;
4464 }
4465
/* Data structures storing one user error in the assembly code related to
   operands.  */

/* One recorded operand error, linked into a list; at most one node is
   kept per opcode template tried against the current line.  */
struct operand_error_record
{
  const aarch64_opcode *opcode;         /* Opcode template this error is for.  */
  aarch64_operand_error detail;         /* Kind, operand index and message.  */
  struct operand_error_record *next;    /* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly-linked list of error records, with head and tail pointers so
   the whole list can be spliced onto the free list in O(1).  */
struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;

/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled between assembly lines to avoid
   reallocating on every line.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4497
4498 /* Initialize the data structure that stores the operand mismatch
4499 information on assembling one line of the assembly code. */
4500 static void
4501 init_operand_error_report (void)
4502 {
4503 if (operand_error_report.head != NULL)
4504 {
4505 gas_assert (operand_error_report.tail != NULL);
4506 operand_error_report.tail->next = free_opnd_error_record_nodes;
4507 free_opnd_error_record_nodes = operand_error_report.head;
4508 operand_error_report.head = NULL;
4509 operand_error_report.tail = NULL;
4510 return;
4511 }
4512 gas_assert (operand_error_report.tail == NULL);
4513 }
4514
4515 /* Return TRUE if some operand error has been recorded during the
4516 parsing of the current assembly line using the opcode *OPCODE;
4517 otherwise return FALSE. */
4518 static inline bfd_boolean
4519 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4520 {
4521 operand_error_record *record = operand_error_report.head;
4522 return record && record->opcode == opcode;
4523 }
4524
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record, reusing a free node when available.  */
      if (free_opnd_error_record_nodes == NULL)
        {
          record = XNEW (operand_error_record);
        }
      else
        {
          record = free_opnd_error_record_nodes;
          free_opnd_error_record_nodes = record->next;
        }
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
        operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
           && record->detail.index <= new_record->detail.index
           && operand_error_higher_severity_p (record->detail.kind,
                                               new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
         single opcode, only record the error of the leftmost operand and
         only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
                   " the existing error %s on operand %d",
                   operand_mismatch_kind_names[new_record->detail.kind],
                   new_record->detail.index,
                   operand_mismatch_kind_names[record->detail.kind],
                   record->detail.index);
      return;
    }

  /* Either a freshly-allocated record or a less-severe existing one:
     store (or overwrite with) the new error's detail.  */
  record->detail = new_record->detail;
}
4576
4577 static inline void
4578 record_operand_error_info (const aarch64_opcode *opcode,
4579 aarch64_operand_error *error_info)
4580 {
4581 operand_error_record record;
4582 record.opcode = opcode;
4583 record.detail = *error_info;
4584 add_operand_error_record (&record);
4585 }
4586
4587 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4588 error message *ERROR, for operand IDX (count from 0). */
4589
4590 static void
4591 record_operand_error (const aarch64_opcode *opcode, int idx,
4592 enum aarch64_operand_error_kind kind,
4593 const char* error)
4594 {
4595 aarch64_operand_error info;
4596 memset(&info, 0, sizeof (info));
4597 info.index = idx;
4598 info.kind = kind;
4599 info.error = error;
4600 info.non_fatal = FALSE;
4601 record_operand_error_info (opcode, &info);
4602 }
4603
4604 static void
4605 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4606 enum aarch64_operand_error_kind kind,
4607 const char* error, const int *extra_data)
4608 {
4609 aarch64_operand_error info;
4610 info.index = idx;
4611 info.kind = kind;
4612 info.error = error;
4613 info.data[0] = extra_data[0];
4614 info.data[1] = extra_data[1];
4615 info.data[2] = extra_data[2];
4616 info.non_fatal = FALSE;
4617 record_operand_error_info (opcode, &info);
4618 }
4619
4620 static void
4621 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4622 const char* error, int lower_bound,
4623 int upper_bound)
4624 {
4625 int data[3] = {lower_bound, upper_bound, 0};
4626 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4627 error, data);
4628 }
4629
4630 /* Remove the operand error record for *OPCODE. */
4631 static void ATTRIBUTE_UNUSED
4632 remove_operand_error_record (const aarch64_opcode *opcode)
4633 {
4634 if (opcode_has_operand_error_p (opcode))
4635 {
4636 operand_error_record* record = operand_error_report.head;
4637 gas_assert (record != NULL && operand_error_report.tail != NULL);
4638 operand_error_report.head = record->next;
4639 record->next = free_opnd_error_record_nodes;
4640 free_opnd_error_record_nodes = record;
4641 if (operand_error_report.head == NULL)
4642 {
4643 gas_assert (operand_error_report.tail == record);
4644 operand_error_report.tail = NULL;
4645 }
4646 }
4647 }
4648
4649 /* Given the instruction in *INSTR, return the index of the best matched
4650 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4651
4652 Return -1 if there is no qualifier sequence; return the first match
4653 if there is multiple matches found. */
4654
4655 static int
4656 find_best_match (const aarch64_inst *instr,
4657 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4658 {
4659 int i, num_opnds, max_num_matched, idx;
4660
4661 num_opnds = aarch64_num_of_operands (instr->opcode);
4662 if (num_opnds == 0)
4663 {
4664 DEBUG_TRACE ("no operand");
4665 return -1;
4666 }
4667
4668 max_num_matched = 0;
4669 idx = 0;
4670
4671 /* For each pattern. */
4672 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4673 {
4674 int j, num_matched;
4675 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4676
4677 /* Most opcodes has much fewer patterns in the list. */
4678 if (empty_qualifier_sequence_p (qualifiers))
4679 {
4680 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4681 break;
4682 }
4683
4684 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4685 if (*qualifiers == instr->operands[j].qualifier)
4686 ++num_matched;
4687
4688 if (num_matched > max_num_matched)
4689 {
4690 max_num_matched = num_matched;
4691 idx = i;
4692 }
4693 }
4694
4695 DEBUG_TRACE ("return with %d", idx);
4696 return idx;
4697 }
4698
4699 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4700 corresponding operands in *INSTR. */
4701
4702 static inline void
4703 assign_qualifier_sequence (aarch64_inst *instr,
4704 const aarch64_opnd_qualifier_t *qualifiers)
4705 {
4706 int i = 0;
4707 int num_opnds = aarch64_num_of_operands (instr->opcode);
4708 gas_assert (num_opnds);
4709 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4710 instr->operands[i].qualifier = *qualifiers;
4711 }
4712
4713 /* Print operands for the diagnosis purpose. */
4714
4715 static void
4716 print_operands (char *buf, const aarch64_opcode *opcode,
4717 const aarch64_opnd_info *opnds)
4718 {
4719 int i;
4720
4721 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4722 {
4723 char str[128];
4724
4725 /* We regard the opcode operand info more, however we also look into
4726 the inst->operands to support the disassembling of the optional
4727 operand.
4728 The two operand code should be the same in all cases, apart from
4729 when the operand can be optional. */
4730 if (opcode->operands[i] == AARCH64_OPND_NIL
4731 || opnds[i].type == AARCH64_OPND_NIL)
4732 break;
4733
4734 /* Generate the operand string in STR. */
4735 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4736 NULL, cpu_variant);
4737
4738 /* Delimiter. */
4739 if (str[0] != '\0')
4740 strcat (buf, i == 0 ? " " : ", ");
4741
4742 /* Append the operand string. */
4743 strcat (buf, str);
4744 }
4745 }
4746
/* Send to stderr a string as information.  */

static void
output_info (const char *format, ...)
{
  const char *file;
  unsigned int line;
  va_list args;

  /* Prefix with the current file (and line, when known), matching the
     usual diagnostic format.  */
  file = as_where (&line);
  if (file != NULL)
    {
      if (line == 0)
	fprintf (stderr, "%s: ", file);
      else
	fprintf (stderr, "%s:%u: ", file, line);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4770
/* Output one operand error record.  RECORD describes the failure; STR is
   the original assembly line, quoted in every message.  Non-fatal records
   are emitted as warnings, all others as errors.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX may be -1 (operand unknown); only index the operand array when it
     is a real operand position.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  /* Route the message through as_warn for non-fatal records, as_bad
     otherwise.  */
  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      /* A record must always carry a real error kind.  */
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bfd_boolean result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  Parsing and the programmer-friendly fixup are
	     asserted to succeed; only the encoding was at fault.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is asserted to fail here: we are rebuilding the IR of
	     an instruction already known to be an invalid variant.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_(" did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_(" %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_(" other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_(" %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the inclusive bounds; when they are equal
	 only one value is legal, so use the simpler wording.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      /* data[0] holds the expected number of registers.  */
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      /* data[0] holds the required alignment.  */
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
4947
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bfd_boolean non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  When filtering to
     non-fatal errors only, fatal records do not take part in this scan.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5045 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Emit the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
5056
/* Read a 32-bit AArch64 instruction from BUF, stored little-endian.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  unsigned char *p = (unsigned char *) buf;

  /* Widen each byte to uint32_t before shifting so the high byte's shift
     happens in unsigned 32-bit arithmetic.  */
  return ((uint32_t) p[0]
	  | ((uint32_t) p[1] << 8)
	  | ((uint32_t) p[2] << 16)
	  | ((uint32_t) p[3] << 24));
}
5066
/* Emit the 4-byte instruction currently held in the global `inst' into the
   output frag, attaching a fixup when a relocation is pending.  NEW_INST,
   if non-NULL, is recorded on the fixup for later processing.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve room for one instruction in the current frag.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  /* Instructions are always stored little-endian.  */
  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
	 that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand code and flags so
	     md_apply_fix can finish the encoding itself.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  dwarf2_emit_insn (INSN_SIZE);
}
5100
/* Link together opcodes of the same name.  */

struct templates
{
  aarch64_opcode *opcode;	/* One opcode entry for this mnemonic.  */
  struct templates *next;	/* Next entry sharing the same mnemonic.  */
};

typedef struct templates templates;
5110
5111 static templates *
5112 lookup_mnemonic (const char *start, int len)
5113 {
5114 templates *templ = NULL;
5115
5116 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5117 return templ;
5118 }
5119
5120 /* Subroutine of md_assemble, responsible for looking up the primary
5121 opcode from the mnemonic the user wrote. STR points to the
5122 beginning of the mnemonic. */
5123
5124 static templates *
5125 opcode_lookup (char **str)
5126 {
5127 char *end, *base, *dot;
5128 const aarch64_cond *cond;
5129 char condname[16];
5130 int len;
5131
5132 /* Scan up to the end of the mnemonic, which must end in white space,
5133 '.', or end of string. */
5134 dot = 0;
5135 for (base = end = *str; is_part_of_name(*end); end++)
5136 if (*end == '.' && !dot)
5137 dot = end;
5138
5139 if (end == base || dot == base)
5140 return 0;
5141
5142 inst.cond = COND_ALWAYS;
5143
5144 /* Handle a possible condition. */
5145 if (dot)
5146 {
5147 cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5148 if (cond)
5149 {
5150 inst.cond = cond->value;
5151 *str = end;
5152 }
5153 else
5154 {
5155 *str = dot;
5156 return 0;
5157 }
5158 len = dot - base;
5159 }
5160 else
5161 {
5162 *str = end;
5163 len = end - base;
5164 }
5165
5166 if (inst.cond == COND_ALWAYS)
5167 {
5168 /* Look for unaffixed mnemonic. */
5169 return lookup_mnemonic (base, len);
5170 }
5171 else if (len <= 13)
5172 {
5173 /* append ".c" to mnemonic if conditional */
5174 memcpy (condname, base, len);
5175 memcpy (condname + len, ".c", 2);
5176 base = condname;
5177 len += 2;
5178 return lookup_mnemonic (base, len);
5179 }
5180
5181 return NULL;
5182 }
5183
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL and
   records a syntax error on failure.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the width-derived offset is
     added to these below.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate suffixes /z and /m map straight to qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register: relies on the S_B..S_Q qualifiers being
	 laid out in the same order as NT_b..NT_q.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit vector shapes are valid.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5258
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* The default encoding for an omitted operand comes from the opcode
     table entry.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default value is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Vector-element (register lane) operands.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default value is the immediate itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      /* An omitted multiplier defaults to MUL #1.  */
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      /* No immediate was parsed, so make sure no relocation is pending.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* Index into the barrier option table.  */
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      /* Index into the hint option table.  */
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      /* Other operand types have no default to fill in.  */
      break;
    }
}
5356
/* Process the relocation type for move wide instructions.
   Sets operand 1's shifter amount from the relocation's 16-bit chunk
   (G0/G1/G2/G3 -> shift 0/16/32/48).
   Return TRUE on success; otherwise return FALSE.  */

static bfd_boolean
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not overwrite bits it is supposed to keep, so the
     sign-extending (_S), PC-relative group and TLS group relocations
     below are rejected for it.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return FALSE;
      default:
	break;
      }

  switch (inst.reloc.type)
    {
    /* G0 relocations: bits [15:0], no shift.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1 relocations: bits [31:16], LSL #16.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2 relocations: bits [47:32], LSL #32 -- 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 32;
      break;
    /* G3 relocations: bits [63:48], LSL #48 -- 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return FALSE;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
	 are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return TRUE;
    }
  inst.base.operands[1].shifter.amount = shift;
  return TRUE;
}
5458
/* A primitive log calculator.  Return log2 of SIZE for SIZE in
   {1, 2, 4, 8, 16}; assert and return (unsigned int) -1 otherwise.  */

static inline unsigned int
get_logsz (unsigned int size)
{
  /* ls[n - 1] is log2(n) for the powers of two, -1 for everything else.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Guard both ends of the range: SIZE == 0 would otherwise index
     ls[-1], an out-of-bounds read.  */
  if (size < 1 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5474
/* Determine and return the real reloc type code for an instruction
   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.
   The row is selected by the pseudo reloc kind, the column by the
   log2 of the transfer size implied by operand 1's qualifier.  */

static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows: plain LO12, TLSLD DTPREL, TLSLD DTPREL_NC, TLSLE TPREL,
     TLSLE TPREL_NC.  Columns: 8/16/32/64/128-bit accesses; the TLS
     rows have no 128-bit variant.  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* Operand 1 may have no explicit qualifier yet; infer it from
     operand 0's qualifier via the opcode's qualifier list.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  /* TLS rows stop at 64-bit accesses (see the table above).  */
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array.  Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5553
5554 /* Check whether a register list REGINFO is valid. The registers must be
5555 numbered in increasing order (modulo 32), in increments of one or two.
5556
5557 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5558 increments of two.
5559
5560 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5561
5562 static bfd_boolean
5563 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5564 {
5565 uint32_t i, nb_regs, prev_regno, incr;
5566
5567 nb_regs = 1 + (reginfo & 0x3);
5568 reginfo >>= 2;
5569 prev_regno = reginfo & 0x1f;
5570 incr = accept_alternate ? 2 : 1;
5571
5572 for (i = 1; i < nb_regs; ++i)
5573 {
5574 uint32_t curr_regno;
5575 reginfo >>= 5;
5576 curr_regno = reginfo & 0x1f;
5577 if (curr_regno != ((prev_regno + incr) & 0x1f))
5578 return FALSE;
5579 prev_regno = curr_regno;
5580 }
5581
5582 return TRUE;
5583 }
5584
5585 /* Generic instruction operand parser. This does no encoding and no
5586 semantic validation; it merely squirrels values away in the inst
5587 structure. Returns TRUE or FALSE depending on whether the
5588 specified grammar matched. */
5589
5590 static bfd_boolean
5591 parse_operands (char *str, const aarch64_opcode *opcode)
5592 {
5593 int i;
5594 char *backtrack_pos = 0;
5595 const enum aarch64_opnd *operands = opcode->operands;
5596 aarch64_reg_type imm_reg_type;
5597
5598 clear_error ();
5599 skip_whitespace (str);
5600
5601 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5602 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5603 else
5604 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5605
5606 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5607 {
5608 int64_t val;
5609 const reg_entry *reg;
5610 int comma_skipped_p = 0;
5611 aarch64_reg_type rtype;
5612 struct vector_type_el vectype;
5613 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5614 aarch64_opnd_info *info = &inst.base.operands[i];
5615 aarch64_reg_type reg_type;
5616
5617 DEBUG_TRACE ("parse operand %d", i);
5618
5619 /* Assign the operand code. */
5620 info->type = operands[i];
5621
5622 if (optional_operand_p (opcode, i))
5623 {
5624 /* Remember where we are in case we need to backtrack. */
5625 gas_assert (!backtrack_pos);
5626 backtrack_pos = str;
5627 }
5628
5629 /* Expect comma between operands; the backtrack mechanism will take
5630 care of cases of omitted optional operand. */
5631 if (i > 0 && ! skip_past_char (&str, ','))
5632 {
5633 set_syntax_error (_("comma expected between operands"));
5634 goto failure;
5635 }
5636 else
5637 comma_skipped_p = 1;
5638
5639 switch (operands[i])
5640 {
5641 case AARCH64_OPND_Rd:
5642 case AARCH64_OPND_Rn:
5643 case AARCH64_OPND_Rm:
5644 case AARCH64_OPND_Rt:
5645 case AARCH64_OPND_Rt2:
5646 case AARCH64_OPND_Rs:
5647 case AARCH64_OPND_Ra:
5648 case AARCH64_OPND_Rt_SYS:
5649 case AARCH64_OPND_PAIRREG:
5650 case AARCH64_OPND_SVE_Rm:
5651 po_int_reg_or_fail (REG_TYPE_R_Z);
5652 break;
5653
5654 case AARCH64_OPND_Rd_SP:
5655 case AARCH64_OPND_Rn_SP:
5656 case AARCH64_OPND_Rt_SP:
5657 case AARCH64_OPND_SVE_Rn_SP:
5658 case AARCH64_OPND_Rm_SP:
5659 po_int_reg_or_fail (REG_TYPE_R_SP);
5660 break;
5661
5662 case AARCH64_OPND_Rm_EXT:
5663 case AARCH64_OPND_Rm_SFT:
5664 po_misc_or_fail (parse_shifter_operand
5665 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5666 ? SHIFTED_ARITH_IMM
5667 : SHIFTED_LOGIC_IMM)));
5668 if (!info->shifter.operator_present)
5669 {
5670 /* Default to LSL if not present. Libopcodes prefers shifter
5671 kind to be explicit. */
5672 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5673 info->shifter.kind = AARCH64_MOD_LSL;
5674 /* For Rm_EXT, libopcodes will carry out further check on whether
5675 or not stack pointer is used in the instruction (Recall that
5676 "the extend operator is not optional unless at least one of
5677 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5678 }
5679 break;
5680
5681 case AARCH64_OPND_Fd:
5682 case AARCH64_OPND_Fn:
5683 case AARCH64_OPND_Fm:
5684 case AARCH64_OPND_Fa:
5685 case AARCH64_OPND_Ft:
5686 case AARCH64_OPND_Ft2:
5687 case AARCH64_OPND_Sd:
5688 case AARCH64_OPND_Sn:
5689 case AARCH64_OPND_Sm:
5690 case AARCH64_OPND_SVE_VZn:
5691 case AARCH64_OPND_SVE_Vd:
5692 case AARCH64_OPND_SVE_Vm:
5693 case AARCH64_OPND_SVE_Vn:
5694 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5695 if (val == PARSE_FAIL)
5696 {
5697 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5698 goto failure;
5699 }
5700 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5701
5702 info->reg.regno = val;
5703 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5704 break;
5705
5706 case AARCH64_OPND_SVE_Pd:
5707 case AARCH64_OPND_SVE_Pg3:
5708 case AARCH64_OPND_SVE_Pg4_5:
5709 case AARCH64_OPND_SVE_Pg4_10:
5710 case AARCH64_OPND_SVE_Pg4_16:
5711 case AARCH64_OPND_SVE_Pm:
5712 case AARCH64_OPND_SVE_Pn:
5713 case AARCH64_OPND_SVE_Pt:
5714 reg_type = REG_TYPE_PN;
5715 goto vector_reg;
5716
5717 case AARCH64_OPND_SVE_Za_5:
5718 case AARCH64_OPND_SVE_Za_16:
5719 case AARCH64_OPND_SVE_Zd:
5720 case AARCH64_OPND_SVE_Zm_5:
5721 case AARCH64_OPND_SVE_Zm_16:
5722 case AARCH64_OPND_SVE_Zn:
5723 case AARCH64_OPND_SVE_Zt:
5724 reg_type = REG_TYPE_ZN;
5725 goto vector_reg;
5726
5727 case AARCH64_OPND_Va:
5728 case AARCH64_OPND_Vd:
5729 case AARCH64_OPND_Vn:
5730 case AARCH64_OPND_Vm:
5731 reg_type = REG_TYPE_VN;
5732 vector_reg:
5733 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5734 if (val == PARSE_FAIL)
5735 {
5736 first_error (_(get_reg_expected_msg (reg_type)));
5737 goto failure;
5738 }
5739 if (vectype.defined & NTA_HASINDEX)
5740 goto failure;
5741
5742 info->reg.regno = val;
5743 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5744 && vectype.type == NT_invtype)
5745 /* Unqualified Pn and Zn registers are allowed in certain
5746 contexts. Rely on F_STRICT qualifier checking to catch
5747 invalid uses. */
5748 info->qualifier = AARCH64_OPND_QLF_NIL;
5749 else
5750 {
5751 info->qualifier = vectype_to_qualifier (&vectype);
5752 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5753 goto failure;
5754 }
5755 break;
5756
5757 case AARCH64_OPND_VdD1:
5758 case AARCH64_OPND_VnD1:
5759 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5760 if (val == PARSE_FAIL)
5761 {
5762 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5763 goto failure;
5764 }
5765 if (vectype.type != NT_d || vectype.index != 1)
5766 {
5767 set_fatal_syntax_error
5768 (_("the top half of a 128-bit FP/SIMD register is expected"));
5769 goto failure;
5770 }
5771 info->reg.regno = val;
5772 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5773 here; it is correct for the purpose of encoding/decoding since
5774 only the register number is explicitly encoded in the related
5775 instructions, although this appears a bit hacky. */
5776 info->qualifier = AARCH64_OPND_QLF_S_D;
5777 break;
5778
5779 case AARCH64_OPND_SVE_Zm3_INDEX:
5780 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5781 case AARCH64_OPND_SVE_Zm3_11_INDEX:
5782 case AARCH64_OPND_SVE_Zm4_11_INDEX:
5783 case AARCH64_OPND_SVE_Zm4_INDEX:
5784 case AARCH64_OPND_SVE_Zn_INDEX:
5785 reg_type = REG_TYPE_ZN;
5786 goto vector_reg_index;
5787
5788 case AARCH64_OPND_Ed:
5789 case AARCH64_OPND_En:
5790 case AARCH64_OPND_Em:
5791 case AARCH64_OPND_Em16:
5792 case AARCH64_OPND_SM3_IMM2:
5793 reg_type = REG_TYPE_VN;
5794 vector_reg_index:
5795 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5796 if (val == PARSE_FAIL)
5797 {
5798 first_error (_(get_reg_expected_msg (reg_type)));
5799 goto failure;
5800 }
5801 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5802 goto failure;
5803
5804 info->reglane.regno = val;
5805 info->reglane.index = vectype.index;
5806 info->qualifier = vectype_to_qualifier (&vectype);
5807 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5808 goto failure;
5809 break;
5810
5811 case AARCH64_OPND_SVE_ZnxN:
5812 case AARCH64_OPND_SVE_ZtxN:
5813 reg_type = REG_TYPE_ZN;
5814 goto vector_reg_list;
5815
5816 case AARCH64_OPND_LVn:
5817 case AARCH64_OPND_LVt:
5818 case AARCH64_OPND_LVt_AL:
5819 case AARCH64_OPND_LEt:
5820 reg_type = REG_TYPE_VN;
5821 vector_reg_list:
5822 if (reg_type == REG_TYPE_ZN
5823 && get_opcode_dependent_value (opcode) == 1
5824 && *str != '{')
5825 {
5826 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5827 if (val == PARSE_FAIL)
5828 {
5829 first_error (_(get_reg_expected_msg (reg_type)));
5830 goto failure;
5831 }
5832 info->reglist.first_regno = val;
5833 info->reglist.num_regs = 1;
5834 }
5835 else
5836 {
5837 val = parse_vector_reg_list (&str, reg_type, &vectype);
5838 if (val == PARSE_FAIL)
5839 goto failure;
5840
5841 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5842 {
5843 set_fatal_syntax_error (_("invalid register list"));
5844 goto failure;
5845 }
5846
5847 if (vectype.width != 0 && *str != ',')
5848 {
5849 set_fatal_syntax_error
5850 (_("expected element type rather than vector type"));
5851 goto failure;
5852 }
5853
5854 info->reglist.first_regno = (val >> 2) & 0x1f;
5855 info->reglist.num_regs = (val & 0x3) + 1;
5856 }
5857 if (operands[i] == AARCH64_OPND_LEt)
5858 {
5859 if (!(vectype.defined & NTA_HASINDEX))
5860 goto failure;
5861 info->reglist.has_index = 1;
5862 info->reglist.index = vectype.index;
5863 }
5864 else
5865 {
5866 if (vectype.defined & NTA_HASINDEX)
5867 goto failure;
5868 if (!(vectype.defined & NTA_HASTYPE))
5869 {
5870 if (reg_type == REG_TYPE_ZN)
5871 set_fatal_syntax_error (_("missing type suffix"));
5872 goto failure;
5873 }
5874 }
5875 info->qualifier = vectype_to_qualifier (&vectype);
5876 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5877 goto failure;
5878 break;
5879
5880 case AARCH64_OPND_CRn:
5881 case AARCH64_OPND_CRm:
5882 {
5883 char prefix = *(str++);
5884 if (prefix != 'c' && prefix != 'C')
5885 goto failure;
5886
5887 po_imm_nc_or_fail ();
5888 if (val > 15)
5889 {
5890 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5891 goto failure;
5892 }
5893 info->qualifier = AARCH64_OPND_QLF_CR;
5894 info->imm.value = val;
5895 break;
5896 }
5897
5898 case AARCH64_OPND_SHLL_IMM:
5899 case AARCH64_OPND_IMM_VLSR:
5900 po_imm_or_fail (1, 64);
5901 info->imm.value = val;
5902 break;
5903
5904 case AARCH64_OPND_CCMP_IMM:
5905 case AARCH64_OPND_SIMM5:
5906 case AARCH64_OPND_FBITS:
5907 case AARCH64_OPND_TME_UIMM16:
5908 case AARCH64_OPND_UIMM4:
5909 case AARCH64_OPND_UIMM4_ADDG:
5910 case AARCH64_OPND_UIMM10:
5911 case AARCH64_OPND_UIMM3_OP1:
5912 case AARCH64_OPND_UIMM3_OP2:
5913 case AARCH64_OPND_IMM_VLSL:
5914 case AARCH64_OPND_IMM:
5915 case AARCH64_OPND_IMM_2:
5916 case AARCH64_OPND_WIDTH:
5917 case AARCH64_OPND_SVE_INV_LIMM:
5918 case AARCH64_OPND_SVE_LIMM:
5919 case AARCH64_OPND_SVE_LIMM_MOV:
5920 case AARCH64_OPND_SVE_SHLIMM_PRED:
5921 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5922 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
5923 case AARCH64_OPND_SVE_SHRIMM_PRED:
5924 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5925 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
5926 case AARCH64_OPND_SVE_SIMM5:
5927 case AARCH64_OPND_SVE_SIMM5B:
5928 case AARCH64_OPND_SVE_SIMM6:
5929 case AARCH64_OPND_SVE_SIMM8:
5930 case AARCH64_OPND_SVE_UIMM3:
5931 case AARCH64_OPND_SVE_UIMM7:
5932 case AARCH64_OPND_SVE_UIMM8:
5933 case AARCH64_OPND_SVE_UIMM8_53:
5934 case AARCH64_OPND_IMM_ROT1:
5935 case AARCH64_OPND_IMM_ROT2:
5936 case AARCH64_OPND_IMM_ROT3:
5937 case AARCH64_OPND_SVE_IMM_ROT1:
5938 case AARCH64_OPND_SVE_IMM_ROT2:
5939 case AARCH64_OPND_SVE_IMM_ROT3:
5940 po_imm_nc_or_fail ();
5941 info->imm.value = val;
5942 break;
5943
5944 case AARCH64_OPND_SVE_AIMM:
5945 case AARCH64_OPND_SVE_ASIMM:
5946 po_imm_nc_or_fail ();
5947 info->imm.value = val;
5948 skip_whitespace (str);
5949 if (skip_past_comma (&str))
5950 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5951 else
5952 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5953 break;
5954
5955 case AARCH64_OPND_SVE_PATTERN:
5956 po_enum_or_fail (aarch64_sve_pattern_array);
5957 info->imm.value = val;
5958 break;
5959
5960 case AARCH64_OPND_SVE_PATTERN_SCALED:
5961 po_enum_or_fail (aarch64_sve_pattern_array);
5962 info->imm.value = val;
5963 if (skip_past_comma (&str)
5964 && !parse_shift (&str, info, SHIFTED_MUL))
5965 goto failure;
5966 if (!info->shifter.operator_present)
5967 {
5968 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5969 info->shifter.kind = AARCH64_MOD_MUL;
5970 info->shifter.amount = 1;
5971 }
5972 break;
5973
5974 case AARCH64_OPND_SVE_PRFOP:
5975 po_enum_or_fail (aarch64_sve_prfop_array);
5976 info->imm.value = val;
5977 break;
5978
5979 case AARCH64_OPND_UIMM7:
5980 po_imm_or_fail (0, 127);
5981 info->imm.value = val;
5982 break;
5983
5984 case AARCH64_OPND_IDX:
5985 case AARCH64_OPND_MASK:
5986 case AARCH64_OPND_BIT_NUM:
5987 case AARCH64_OPND_IMMR:
5988 case AARCH64_OPND_IMMS:
5989 po_imm_or_fail (0, 63);
5990 info->imm.value = val;
5991 break;
5992
5993 case AARCH64_OPND_IMM0:
5994 po_imm_nc_or_fail ();
5995 if (val != 0)
5996 {
5997 set_fatal_syntax_error (_("immediate zero expected"));
5998 goto failure;
5999 }
6000 info->imm.value = 0;
6001 break;
6002
6003 case AARCH64_OPND_FPIMM0:
6004 {
6005 int qfloat;
6006 bfd_boolean res1 = FALSE, res2 = FALSE;
6007 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6008 it is probably not worth the effort to support it. */
6009 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
6010 imm_reg_type))
6011 && (error_p ()
6012 || !(res2 = parse_constant_immediate (&str, &val,
6013 imm_reg_type))))
6014 goto failure;
6015 if ((res1 && qfloat == 0) || (res2 && val == 0))
6016 {
6017 info->imm.value = 0;
6018 info->imm.is_fp = 1;
6019 break;
6020 }
6021 set_fatal_syntax_error (_("immediate zero expected"));
6022 goto failure;
6023 }
6024
6025 case AARCH64_OPND_IMM_MOV:
6026 {
6027 char *saved = str;
6028 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6029 reg_name_p (str, REG_TYPE_VN))
6030 goto failure;
6031 str = saved;
6032 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6033 GE_OPT_PREFIX, 1));
6034 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6035 later. fix_mov_imm_insn will try to determine a machine
6036 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6037 message if the immediate cannot be moved by a single
6038 instruction. */
6039 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6040 inst.base.operands[i].skip = 1;
6041 }
6042 break;
6043
6044 case AARCH64_OPND_SIMD_IMM:
6045 case AARCH64_OPND_SIMD_IMM_SFT:
6046 if (! parse_big_immediate (&str, &val, imm_reg_type))
6047 goto failure;
6048 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6049 /* addr_off_p */ 0,
6050 /* need_libopcodes_p */ 1,
6051 /* skip_p */ 1);
6052 /* Parse shift.
6053 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6054 shift, we don't check it here; we leave the checking to
6055 the libopcodes (operand_general_constraint_met_p). By
6056 doing this, we achieve better diagnostics. */
6057 if (skip_past_comma (&str)
6058 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6059 goto failure;
6060 if (!info->shifter.operator_present
6061 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6062 {
6063 /* Default to LSL if not present. Libopcodes prefers shifter
6064 kind to be explicit. */
6065 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6066 info->shifter.kind = AARCH64_MOD_LSL;
6067 }
6068 break;
6069
6070 case AARCH64_OPND_FPIMM:
6071 case AARCH64_OPND_SIMD_FPIMM:
6072 case AARCH64_OPND_SVE_FPIMM8:
6073 {
6074 int qfloat;
6075 bfd_boolean dp_p;
6076
6077 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6078 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6079 || !aarch64_imm_float_p (qfloat))
6080 {
6081 if (!error_p ())
6082 set_fatal_syntax_error (_("invalid floating-point"
6083 " constant"));
6084 goto failure;
6085 }
6086 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6087 inst.base.operands[i].imm.is_fp = 1;
6088 }
6089 break;
6090
6091 case AARCH64_OPND_SVE_I1_HALF_ONE:
6092 case AARCH64_OPND_SVE_I1_HALF_TWO:
6093 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6094 {
6095 int qfloat;
6096 bfd_boolean dp_p;
6097
6098 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6099 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6100 {
6101 if (!error_p ())
6102 set_fatal_syntax_error (_("invalid floating-point"
6103 " constant"));
6104 goto failure;
6105 }
6106 inst.base.operands[i].imm.value = qfloat;
6107 inst.base.operands[i].imm.is_fp = 1;
6108 }
6109 break;
6110
6111 case AARCH64_OPND_LIMM:
6112 po_misc_or_fail (parse_shifter_operand (&str, info,
6113 SHIFTED_LOGIC_IMM));
6114 if (info->shifter.operator_present)
6115 {
6116 set_fatal_syntax_error
6117 (_("shift not allowed for bitmask immediate"));
6118 goto failure;
6119 }
6120 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6121 /* addr_off_p */ 0,
6122 /* need_libopcodes_p */ 1,
6123 /* skip_p */ 1);
6124 break;
6125
6126 case AARCH64_OPND_AIMM:
6127 if (opcode->op == OP_ADD)
6128 /* ADD may have relocation types. */
6129 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6130 SHIFTED_ARITH_IMM));
6131 else
6132 po_misc_or_fail (parse_shifter_operand (&str, info,
6133 SHIFTED_ARITH_IMM));
6134 switch (inst.reloc.type)
6135 {
6136 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6137 info->shifter.amount = 12;
6138 break;
6139 case BFD_RELOC_UNUSED:
6140 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6141 if (info->shifter.kind != AARCH64_MOD_NONE)
6142 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6143 inst.reloc.pc_rel = 0;
6144 break;
6145 default:
6146 break;
6147 }
6148 info->imm.value = 0;
6149 if (!info->shifter.operator_present)
6150 {
6151 /* Default to LSL if not present. Libopcodes prefers shifter
6152 kind to be explicit. */
6153 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6154 info->shifter.kind = AARCH64_MOD_LSL;
6155 }
6156 break;
6157
6158 case AARCH64_OPND_HALF:
6159 {
6160 /* #<imm16> or relocation. */
6161 int internal_fixup_p;
6162 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6163 if (internal_fixup_p)
6164 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6165 skip_whitespace (str);
6166 if (skip_past_comma (&str))
6167 {
6168 /* {, LSL #<shift>} */
6169 if (! aarch64_gas_internal_fixup_p ())
6170 {
6171 set_fatal_syntax_error (_("can't mix relocation modifier "
6172 "with explicit shift"));
6173 goto failure;
6174 }
6175 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6176 }
6177 else
6178 inst.base.operands[i].shifter.amount = 0;
6179 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6180 inst.base.operands[i].imm.value = 0;
6181 if (! process_movw_reloc_info ())
6182 goto failure;
6183 }
6184 break;
6185
6186 case AARCH64_OPND_EXCEPTION:
6187 case AARCH64_OPND_UNDEFINED:
6188 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6189 imm_reg_type));
6190 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6191 /* addr_off_p */ 0,
6192 /* need_libopcodes_p */ 0,
6193 /* skip_p */ 1);
6194 break;
6195
6196 case AARCH64_OPND_NZCV:
6197 {
6198 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6199 if (nzcv != NULL)
6200 {
6201 str += 4;
6202 info->imm.value = nzcv->value;
6203 break;
6204 }
6205 po_imm_or_fail (0, 15);
6206 info->imm.value = val;
6207 }
6208 break;
6209
6210 case AARCH64_OPND_COND:
6211 case AARCH64_OPND_COND1:
6212 {
6213 char *start = str;
6214 do
6215 str++;
6216 while (ISALPHA (*str));
6217 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6218 if (info->cond == NULL)
6219 {
6220 set_syntax_error (_("invalid condition"));
6221 goto failure;
6222 }
6223 else if (operands[i] == AARCH64_OPND_COND1
6224 && (info->cond->value & 0xe) == 0xe)
6225 {
6226 /* Do not allow AL or NV. */
6227 set_default_error ();
6228 goto failure;
6229 }
6230 }
6231 break;
6232
6233 case AARCH64_OPND_ADDR_ADRP:
6234 po_misc_or_fail (parse_adrp (&str));
6235 /* Clear the value as operand needs to be relocated. */
6236 info->imm.value = 0;
6237 break;
6238
6239 case AARCH64_OPND_ADDR_PCREL14:
6240 case AARCH64_OPND_ADDR_PCREL19:
6241 case AARCH64_OPND_ADDR_PCREL21:
6242 case AARCH64_OPND_ADDR_PCREL26:
6243 po_misc_or_fail (parse_address (&str, info));
6244 if (!info->addr.pcrel)
6245 {
6246 set_syntax_error (_("invalid pc-relative address"));
6247 goto failure;
6248 }
6249 if (inst.gen_lit_pool
6250 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6251 {
6252 /* Only permit "=value" in the literal load instructions.
6253 The literal will be generated by programmer_friendly_fixup. */
6254 set_syntax_error (_("invalid use of \"=immediate\""));
6255 goto failure;
6256 }
6257 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6258 {
6259 set_syntax_error (_("unrecognized relocation suffix"));
6260 goto failure;
6261 }
6262 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6263 {
6264 info->imm.value = inst.reloc.exp.X_add_number;
6265 inst.reloc.type = BFD_RELOC_UNUSED;
6266 }
6267 else
6268 {
6269 info->imm.value = 0;
6270 if (inst.reloc.type == BFD_RELOC_UNUSED)
6271 switch (opcode->iclass)
6272 {
6273 case compbranch:
6274 case condbranch:
6275 /* e.g. CBZ or B.COND */
6276 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6277 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6278 break;
6279 case testbranch:
6280 /* e.g. TBZ */
6281 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6282 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6283 break;
6284 case branch_imm:
6285 /* e.g. B or BL */
6286 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6287 inst.reloc.type =
6288 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6289 : BFD_RELOC_AARCH64_JUMP26;
6290 break;
6291 case loadlit:
6292 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6293 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6294 break;
6295 case pcreladdr:
6296 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6297 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6298 break;
6299 default:
6300 gas_assert (0);
6301 abort ();
6302 }
6303 inst.reloc.pc_rel = 1;
6304 }
6305 break;
6306
6307 case AARCH64_OPND_ADDR_SIMPLE:
6308 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6309 {
6310 /* [<Xn|SP>{, #<simm>}] */
6311 char *start = str;
6312 /* First use the normal address-parsing routines, to get
6313 the usual syntax errors. */
6314 po_misc_or_fail (parse_address (&str, info));
6315 if (info->addr.pcrel || info->addr.offset.is_reg
6316 || !info->addr.preind || info->addr.postind
6317 || info->addr.writeback)
6318 {
6319 set_syntax_error (_("invalid addressing mode"));
6320 goto failure;
6321 }
6322
6323 /* Then retry, matching the specific syntax of these addresses. */
6324 str = start;
6325 po_char_or_fail ('[');
6326 po_reg_or_fail (REG_TYPE_R64_SP);
6327 /* Accept optional ", #0". */
6328 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6329 && skip_past_char (&str, ','))
6330 {
6331 skip_past_char (&str, '#');
6332 if (! skip_past_char (&str, '0'))
6333 {
6334 set_fatal_syntax_error
6335 (_("the optional immediate offset can only be 0"));
6336 goto failure;
6337 }
6338 }
6339 po_char_or_fail (']');
6340 break;
6341 }
6342
6343 case AARCH64_OPND_ADDR_REGOFF:
6344 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6345 po_misc_or_fail (parse_address (&str, info));
6346 regoff_addr:
6347 if (info->addr.pcrel || !info->addr.offset.is_reg
6348 || !info->addr.preind || info->addr.postind
6349 || info->addr.writeback)
6350 {
6351 set_syntax_error (_("invalid addressing mode"));
6352 goto failure;
6353 }
6354 if (!info->shifter.operator_present)
6355 {
6356 /* Default to LSL if not present. Libopcodes prefers shifter
6357 kind to be explicit. */
6358 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6359 info->shifter.kind = AARCH64_MOD_LSL;
6360 }
6361 /* Qualifier to be deduced by libopcodes. */
6362 break;
6363
6364 case AARCH64_OPND_ADDR_SIMM7:
6365 po_misc_or_fail (parse_address (&str, info));
6366 if (info->addr.pcrel || info->addr.offset.is_reg
6367 || (!info->addr.preind && !info->addr.postind))
6368 {
6369 set_syntax_error (_("invalid addressing mode"));
6370 goto failure;
6371 }
6372 if (inst.reloc.type != BFD_RELOC_UNUSED)
6373 {
6374 set_syntax_error (_("relocation not allowed"));
6375 goto failure;
6376 }
6377 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6378 /* addr_off_p */ 1,
6379 /* need_libopcodes_p */ 1,
6380 /* skip_p */ 0);
6381 break;
6382
6383 case AARCH64_OPND_ADDR_SIMM9:
6384 case AARCH64_OPND_ADDR_SIMM9_2:
6385 case AARCH64_OPND_ADDR_SIMM11:
6386 case AARCH64_OPND_ADDR_SIMM13:
6387 po_misc_or_fail (parse_address (&str, info));
6388 if (info->addr.pcrel || info->addr.offset.is_reg
6389 || (!info->addr.preind && !info->addr.postind)
6390 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6391 && info->addr.writeback))
6392 {
6393 set_syntax_error (_("invalid addressing mode"));
6394 goto failure;
6395 }
6396 if (inst.reloc.type != BFD_RELOC_UNUSED)
6397 {
6398 set_syntax_error (_("relocation not allowed"));
6399 goto failure;
6400 }
6401 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6402 /* addr_off_p */ 1,
6403 /* need_libopcodes_p */ 1,
6404 /* skip_p */ 0);
6405 break;
6406
6407 case AARCH64_OPND_ADDR_SIMM10:
6408 case AARCH64_OPND_ADDR_OFFSET:
6409 po_misc_or_fail (parse_address (&str, info));
6410 if (info->addr.pcrel || info->addr.offset.is_reg
6411 || !info->addr.preind || info->addr.postind)
6412 {
6413 set_syntax_error (_("invalid addressing mode"));
6414 goto failure;
6415 }
6416 if (inst.reloc.type != BFD_RELOC_UNUSED)
6417 {
6418 set_syntax_error (_("relocation not allowed"));
6419 goto failure;
6420 }
6421 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6422 /* addr_off_p */ 1,
6423 /* need_libopcodes_p */ 1,
6424 /* skip_p */ 0);
6425 break;
6426
6427 case AARCH64_OPND_ADDR_UIMM12:
6428 po_misc_or_fail (parse_address (&str, info));
6429 if (info->addr.pcrel || info->addr.offset.is_reg
6430 || !info->addr.preind || info->addr.writeback)
6431 {
6432 set_syntax_error (_("invalid addressing mode"));
6433 goto failure;
6434 }
6435 if (inst.reloc.type == BFD_RELOC_UNUSED)
6436 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6437 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6438 || (inst.reloc.type
6439 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6440 || (inst.reloc.type
6441 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6442 || (inst.reloc.type
6443 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6444 || (inst.reloc.type
6445 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6446 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6447 /* Leave qualifier to be determined by libopcodes. */
6448 break;
6449
6450 case AARCH64_OPND_SIMD_ADDR_POST:
6451 /* [<Xn|SP>], <Xm|#<amount>> */
6452 po_misc_or_fail (parse_address (&str, info));
6453 if (!info->addr.postind || !info->addr.writeback)
6454 {
6455 set_syntax_error (_("invalid addressing mode"));
6456 goto failure;
6457 }
6458 if (!info->addr.offset.is_reg)
6459 {
6460 if (inst.reloc.exp.X_op == O_constant)
6461 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6462 else
6463 {
6464 set_fatal_syntax_error
6465 (_("writeback value must be an immediate constant"));
6466 goto failure;
6467 }
6468 }
6469 /* No qualifier. */
6470 break;
6471
6472 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6473 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
6474 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6475 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6476 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6477 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6478 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6479 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6480 case AARCH64_OPND_SVE_ADDR_RI_U6:
6481 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6482 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6483 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6484 /* [X<n>{, #imm, MUL VL}]
6485 [X<n>{, #imm}]
6486 but recognizing SVE registers. */
6487 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6488 &offset_qualifier));
6489 if (base_qualifier != AARCH64_OPND_QLF_X)
6490 {
6491 set_syntax_error (_("invalid addressing mode"));
6492 goto failure;
6493 }
6494 sve_regimm:
6495 if (info->addr.pcrel || info->addr.offset.is_reg
6496 || !info->addr.preind || info->addr.writeback)
6497 {
6498 set_syntax_error (_("invalid addressing mode"));
6499 goto failure;
6500 }
6501 if (inst.reloc.type != BFD_RELOC_UNUSED
6502 || inst.reloc.exp.X_op != O_constant)
6503 {
6504 /* Make sure this has priority over
6505 "invalid addressing mode". */
6506 set_fatal_syntax_error (_("constant offset required"));
6507 goto failure;
6508 }
6509 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6510 break;
6511
6512 case AARCH64_OPND_SVE_ADDR_R:
6513 /* [<Xn|SP>{, <R><m>}]
6514 but recognizing SVE registers. */
6515 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6516 &offset_qualifier));
6517 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6518 {
6519 offset_qualifier = AARCH64_OPND_QLF_X;
6520 info->addr.offset.is_reg = 1;
6521 info->addr.offset.regno = 31;
6522 }
6523 else if (base_qualifier != AARCH64_OPND_QLF_X
6524 || offset_qualifier != AARCH64_OPND_QLF_X)
6525 {
6526 set_syntax_error (_("invalid addressing mode"));
6527 goto failure;
6528 }
6529 goto regoff_addr;
6530
6531 case AARCH64_OPND_SVE_ADDR_RR:
6532 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6533 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6534 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6535 case AARCH64_OPND_SVE_ADDR_RX:
6536 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6537 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6538 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6539 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6540 but recognizing SVE registers. */
6541 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6542 &offset_qualifier));
6543 if (base_qualifier != AARCH64_OPND_QLF_X
6544 || offset_qualifier != AARCH64_OPND_QLF_X)
6545 {
6546 set_syntax_error (_("invalid addressing mode"));
6547 goto failure;
6548 }
6549 goto regoff_addr;
6550
6551 case AARCH64_OPND_SVE_ADDR_RZ:
6552 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6553 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6554 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6555 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6556 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6557 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6558 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6559 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6560 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6561 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6562 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6563 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6564 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6565 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6566 &offset_qualifier));
6567 if (base_qualifier != AARCH64_OPND_QLF_X
6568 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6569 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6570 {
6571 set_syntax_error (_("invalid addressing mode"));
6572 goto failure;
6573 }
6574 info->qualifier = offset_qualifier;
6575 goto regoff_addr;
6576
6577 case AARCH64_OPND_SVE_ADDR_ZX:
6578 /* [Zn.<T>{, <Xm>}]. */
6579 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6580 &offset_qualifier));
6581 /* Things to check:
6582 base_qualifier either S_S or S_D
6583 offset_qualifier must be X
6584 */
6585 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6586 && base_qualifier != AARCH64_OPND_QLF_S_D)
6587 || offset_qualifier != AARCH64_OPND_QLF_X)
6588 {
6589 set_syntax_error (_("invalid addressing mode"));
6590 goto failure;
6591 }
6592 info->qualifier = base_qualifier;
6593 if (!info->addr.offset.is_reg || info->addr.pcrel
6594 || !info->addr.preind || info->addr.writeback
6595 || info->shifter.operator_present != 0)
6596 {
6597 set_syntax_error (_("invalid addressing mode"));
6598 goto failure;
6599 }
6600 info->shifter.kind = AARCH64_MOD_LSL;
6601 break;
6602
6603
6604 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6605 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6606 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6607 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6608 /* [Z<n>.<T>{, #imm}] */
6609 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6610 &offset_qualifier));
6611 if (base_qualifier != AARCH64_OPND_QLF_S_S
6612 && base_qualifier != AARCH64_OPND_QLF_S_D)
6613 {
6614 set_syntax_error (_("invalid addressing mode"));
6615 goto failure;
6616 }
6617 info->qualifier = base_qualifier;
6618 goto sve_regimm;
6619
6620 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6621 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6622 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6623 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6624 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6625
6626 We don't reject:
6627
6628 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6629
6630 here since we get better error messages by leaving it to
6631 the qualifier checking routines. */
6632 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6633 &offset_qualifier));
6634 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6635 && base_qualifier != AARCH64_OPND_QLF_S_D)
6636 || offset_qualifier != base_qualifier)
6637 {
6638 set_syntax_error (_("invalid addressing mode"));
6639 goto failure;
6640 }
6641 info->qualifier = base_qualifier;
6642 goto regoff_addr;
6643
6644 case AARCH64_OPND_SYSREG:
6645 {
6646 uint32_t sysreg_flags;
6647 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6648 &sysreg_flags)) == PARSE_FAIL)
6649 {
6650 set_syntax_error (_("unknown or missing system register name"));
6651 goto failure;
6652 }
6653 inst.base.operands[i].sysreg.value = val;
6654 inst.base.operands[i].sysreg.flags = sysreg_flags;
6655 break;
6656 }
6657
6658 case AARCH64_OPND_PSTATEFIELD:
6659 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6660 == PARSE_FAIL)
6661 {
6662 set_syntax_error (_("unknown or missing PSTATE field name"));
6663 goto failure;
6664 }
6665 inst.base.operands[i].pstatefield = val;
6666 break;
6667
6668 case AARCH64_OPND_SYSREG_IC:
6669 inst.base.operands[i].sysins_op =
6670 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6671 goto sys_reg_ins;
6672
6673 case AARCH64_OPND_SYSREG_DC:
6674 inst.base.operands[i].sysins_op =
6675 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6676 goto sys_reg_ins;
6677
6678 case AARCH64_OPND_SYSREG_AT:
6679 inst.base.operands[i].sysins_op =
6680 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6681 goto sys_reg_ins;
6682
6683 case AARCH64_OPND_SYSREG_SR:
6684 inst.base.operands[i].sysins_op =
6685 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6686 goto sys_reg_ins;
6687
6688 case AARCH64_OPND_SYSREG_TLBI:
6689 inst.base.operands[i].sysins_op =
6690 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6691 sys_reg_ins:
6692 if (inst.base.operands[i].sysins_op == NULL)
6693 {
6694 set_fatal_syntax_error ( _("unknown or missing operation name"));
6695 goto failure;
6696 }
6697 break;
6698
6699 case AARCH64_OPND_BARRIER:
6700 case AARCH64_OPND_BARRIER_ISB:
6701 val = parse_barrier (&str);
6702 if (val != PARSE_FAIL
6703 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6704 {
6705 /* ISB only accepts options name 'sy'. */
6706 set_syntax_error
6707 (_("the specified option is not accepted in ISB"));
6708 /* Turn off backtrack as this optional operand is present. */
6709 backtrack_pos = 0;
6710 goto failure;
6711 }
6712 if (val != PARSE_FAIL
6713 && operands[i] == AARCH64_OPND_BARRIER)
6714 {
6715 /* Regular barriers accept options CRm (C0-C15).
6716 DSB nXS barrier variant accepts values > 15. */
6717 if (val < 0 || val > 15)
6718 {
6719 set_syntax_error (_("the specified option is not accepted in DSB"));
6720 goto failure;
6721 }
6722 }
6723 /* This is an extension to accept a 0..15 immediate. */
6724 if (val == PARSE_FAIL)
6725 po_imm_or_fail (0, 15);
6726 info->barrier = aarch64_barrier_options + val;
6727 break;
6728
6729 case AARCH64_OPND_BARRIER_DSB_NXS:
6730 val = parse_barrier (&str);
6731 if (val != PARSE_FAIL)
6732 {
6733 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
6734 if (!(val == 16 || val == 20 || val == 24 || val == 28))
6735 {
6736 set_syntax_error (_("the specified option is not accepted in DSB"));
6737 /* Turn off backtrack as this optional operand is present. */
6738 backtrack_pos = 0;
6739 goto failure;
6740 }
6741 }
6742 else
6743 {
6744 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
6745 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
6746 if (! parse_constant_immediate (&str, &val, imm_reg_type))
6747 goto failure;
6748 if (!(val == 16 || val == 20 || val == 24 || val == 28))
6749 {
6750 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
6751 goto failure;
6752 }
6753 }
6754 /* Option index is encoded as 2-bit value in val<3:2>. */
6755 val = (val >> 2) - 4;
6756 info->barrier = aarch64_barrier_dsb_nxs_options + val;
6757 break;
6758
6759 case AARCH64_OPND_PRFOP:
6760 val = parse_pldop (&str);
6761 /* This is an extension to accept a 0..31 immediate. */
6762 if (val == PARSE_FAIL)
6763 po_imm_or_fail (0, 31);
6764 inst.base.operands[i].prfop = aarch64_prfops + val;
6765 break;
6766
6767 case AARCH64_OPND_BARRIER_PSB:
6768 val = parse_barrier_psb (&str, &(info->hint_option));
6769 if (val == PARSE_FAIL)
6770 goto failure;
6771 break;
6772
6773 case AARCH64_OPND_BTI_TARGET:
6774 val = parse_bti_operand (&str, &(info->hint_option));
6775 if (val == PARSE_FAIL)
6776 goto failure;
6777 break;
6778
6779 case AARCH64_OPND_CSRE_CSR:
6780 val = parse_csr_operand (&str);
6781 if (val == PARSE_FAIL)
6782 goto failure;
6783 break;
6784
6785 default:
6786 as_fatal (_("unhandled operand code %d"), operands[i]);
6787 }
6788
6789 /* If we get here, this operand was successfully parsed. */
6790 inst.base.operands[i].present = 1;
6791 continue;
6792
6793 failure:
6794 /* The parse routine should already have set the error, but in case
6795 not, set a default one here. */
6796 if (! error_p ())
6797 set_default_error ();
6798
6799 if (! backtrack_pos)
6800 goto parse_operands_return;
6801
6802 {
6803 /* We reach here because this operand is marked as optional, and
6804 either no operand was supplied or the operand was supplied but it
6805 was syntactically incorrect. In the latter case we report an
6806 error. In the former case we perform a few more checks before
6807 dropping through to the code to insert the default operand. */
6808
6809 char *tmp = backtrack_pos;
6810 char endchar = END_OF_INSN;
6811
6812 if (i != (aarch64_num_of_operands (opcode) - 1))
6813 endchar = ',';
6814 skip_past_char (&tmp, ',');
6815
6816 if (*tmp != endchar)
6817 /* The user has supplied an operand in the wrong format. */
6818 goto parse_operands_return;
6819
6820 /* Make sure there is not a comma before the optional operand.
6821 For example the fifth operand of 'sys' is optional:
6822
6823 sys #0,c0,c0,#0, <--- wrong
6824 sys #0,c0,c0,#0 <--- correct. */
6825 if (comma_skipped_p && i && endchar == END_OF_INSN)
6826 {
6827 set_fatal_syntax_error
6828 (_("unexpected comma before the omitted optional operand"));
6829 goto parse_operands_return;
6830 }
6831 }
6832
6833 /* Reaching here means we are dealing with an optional operand that is
6834 omitted from the assembly line. */
6835 gas_assert (optional_operand_p (opcode, i));
6836 info->present = 0;
6837 process_omitted_operand (operands[i], opcode, i, info);
6838
6839 /* Try again, skipping the optional operand at backtrack_pos. */
6840 str = backtrack_pos;
6841 backtrack_pos = 0;
6842
6843 /* Clear any error record after the omitted optional operand has been
6844 successfully handled. */
6845 clear_error ();
6846 }
6847
6848 /* Check if we have parsed all the operands. */
6849 if (*str != '\0' && ! error_p ())
6850 {
6851 /* Set I to the index of the last present operand; this is
6852 for the purpose of diagnostics. */
6853 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6854 ;
6855 set_fatal_syntax_error
6856 (_("unexpected characters following instruction"));
6857 }
6858
6859 parse_operands_return:
6860
6861 if (error_p ())
6862 {
6863 DEBUG_TRACE ("parsing FAIL: %s - %s",
6864 operand_mismatch_kind_names[get_error_kind ()],
6865 get_error_message ());
6866 /* Record the operand error properly; this is useful when there
6867 are multiple instruction templates for a mnemonic name, so that
6868 later on, we can select the error that most closely describes
6869 the problem. */
6870 record_operand_error (opcode, i, get_error_kind (),
6871 get_error_message ());
6872 return FALSE;
6873 }
6874 else
6875 {
6876 DEBUG_TRACE ("parsing SUCCESS");
6877 return TRUE;
6878 }
6879 }
6880
6881 /* It does some fix-up to provide some programmer friendly feature while
6882 keeping the libopcodes happy, i.e. libopcodes only accepts
6883 the preferred architectural syntax.
6884 Return FALSE if there is any failure; otherwise return TRUE. */
6885
6886 static bfd_boolean
6887 programmer_friendly_fixup (aarch64_instruction *instr)
6888 {
6889 aarch64_inst *base = &instr->base;
6890 const aarch64_opcode *opcode = base->opcode;
6891 enum aarch64_op op = opcode->op;
6892 aarch64_opnd_info *operands = base->operands;
6893
6894 DEBUG_TRACE ("enter");
6895
6896 switch (opcode->iclass)
6897 {
6898 case testbranch:
6899 /* TBNZ Xn|Wn, #uimm6, label
6900 Test and Branch Not Zero: conditionally jumps to label if bit number
6901 uimm6 in register Xn is not zero. The bit number implies the width of
6902 the register, which may be written and should be disassembled as Wn if
6903 uimm is less than 32. */
6904 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
6905 {
6906 if (operands[1].imm.value >= 32)
6907 {
6908 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
6909 0, 31);
6910 return FALSE;
6911 }
6912 operands[0].qualifier = AARCH64_OPND_QLF_X;
6913 }
6914 break;
6915 case loadlit:
6916 /* LDR Wt, label | =value
6917 As a convenience assemblers will typically permit the notation
6918 "=value" in conjunction with the pc-relative literal load instructions
6919 to automatically place an immediate value or symbolic address in a
6920 nearby literal pool and generate a hidden label which references it.
6921 ISREG has been set to 0 in the case of =value. */
6922 if (instr->gen_lit_pool
6923 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
6924 {
6925 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
6926 if (op == OP_LDRSW_LIT)
6927 size = 4;
6928 if (instr->reloc.exp.X_op != O_constant
6929 && instr->reloc.exp.X_op != O_big
6930 && instr->reloc.exp.X_op != O_symbol)
6931 {
6932 record_operand_error (opcode, 1,
6933 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
6934 _("constant expression expected"));
6935 return FALSE;
6936 }
6937 if (! add_to_lit_pool (&instr->reloc.exp, size))
6938 {
6939 record_operand_error (opcode, 1,
6940 AARCH64_OPDE_OTHER_ERROR,
6941 _("literal pool insertion failed"));
6942 return FALSE;
6943 }
6944 }
6945 break;
6946 case log_shift:
6947 case bitfield:
6948 /* UXT[BHW] Wd, Wn
6949 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
6950 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
6951 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
6952 A programmer-friendly assembler should accept a destination Xd in
6953 place of Wd, however that is not the preferred form for disassembly.
6954 */
6955 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
6956 && operands[1].qualifier == AARCH64_OPND_QLF_W
6957 && operands[0].qualifier == AARCH64_OPND_QLF_X)
6958 operands[0].qualifier = AARCH64_OPND_QLF_W;
6959 break;
6960
6961 case addsub_ext:
6962 {
6963 /* In the 64-bit form, the final register operand is written as Wm
6964 for all but the (possibly omitted) UXTX/LSL and SXTX
6965 operators.
6966 As a programmer-friendly assembler, we accept e.g.
6967 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
6968 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
6969 int idx = aarch64_operand_index (opcode->operands,
6970 AARCH64_OPND_Rm_EXT);
6971 gas_assert (idx == 1 || idx == 2);
6972 if (operands[0].qualifier == AARCH64_OPND_QLF_X
6973 && operands[idx].qualifier == AARCH64_OPND_QLF_X
6974 && operands[idx].shifter.kind != AARCH64_MOD_LSL
6975 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
6976 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
6977 operands[idx].qualifier = AARCH64_OPND_QLF_W;
6978 }
6979 break;
6980
6981 default:
6982 break;
6983 }
6984
6985 DEBUG_TRACE ("exit with SUCCESS");
6986 return TRUE;
6987 }
6988
6989 /* Check for loads and stores that will cause unpredictable behavior. */
6990
6991 static void
6992 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6993 {
6994 aarch64_inst *base = &instr->base;
6995 const aarch64_opcode *opcode = base->opcode;
6996 const aarch64_opnd_info *opnds = base->operands;
6997 switch (opcode->iclass)
6998 {
6999 case ldst_pos:
7000 case ldst_imm9:
7001 case ldst_imm10:
7002 case ldst_unscaled:
7003 case ldst_unpriv:
7004 /* Loading/storing the base register is unpredictable if writeback. */
7005 if ((aarch64_get_operand_class (opnds[0].type)
7006 == AARCH64_OPND_CLASS_INT_REG)
7007 && opnds[0].reg.regno == opnds[1].addr.base_regno
7008 && opnds[1].addr.base_regno != REG_SP
7009 /* Exempt STG/STZG/ST2G/STZ2G. */
7010 && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
7011 && opnds[1].addr.writeback)
7012 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
7013 break;
7014
7015 case ldstpair_off:
7016 case ldstnapair_offs:
7017 case ldstpair_indexed:
7018 /* Loading/storing the base register is unpredictable if writeback. */
7019 if ((aarch64_get_operand_class (opnds[0].type)
7020 == AARCH64_OPND_CLASS_INT_REG)
7021 && (opnds[0].reg.regno == opnds[2].addr.base_regno
7022 || opnds[1].reg.regno == opnds[2].addr.base_regno)
7023 && opnds[2].addr.base_regno != REG_SP
7024 /* Exempt STGP. */
7025 && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
7026 && opnds[2].addr.writeback)
7027 as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
7028 /* Load operations must load different registers. */
7029 if ((opcode->opcode & (1 << 22))
7030 && opnds[0].reg.regno == opnds[1].reg.regno)
7031 as_warn (_("unpredictable load of register pair -- `%s'"), str);
7032 break;
7033
7034 case ldstexcl:
7035 /* It is unpredictable if the destination and status registers are the
7036 same. */
7037 if ((aarch64_get_operand_class (opnds[0].type)
7038 == AARCH64_OPND_CLASS_INT_REG)
7039 && (aarch64_get_operand_class (opnds[1].type)
7040 == AARCH64_OPND_CLASS_INT_REG)
7041 && (opnds[0].reg.regno == opnds[1].reg.regno
7042 || opnds[0].reg.regno == opnds[2].reg.regno))
7043 as_warn (_("unpredictable: identical transfer and status registers"
7044 " --`%s'"),
7045 str);
7046
7047 break;
7048
7049 default:
7050 break;
7051 }
7052 }
7053
7054 static void
7055 force_automatic_sequence_close (void)
7056 {
7057 if (now_instr_sequence.instr)
7058 {
7059 as_warn (_("previous `%s' sequence has not been closed"),
7060 now_instr_sequence.instr->opcode->name);
7061 init_insn_sequence (NULL, &now_instr_sequence);
7062 }
7063 }
7064
7065 /* A wrapper function to interface with libopcodes on encoding and
7066 record the error message if there is any.
7067
7068 Return TRUE on success; otherwise return FALSE. */
7069
7070 static bfd_boolean
7071 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7072 aarch64_insn *code)
7073 {
7074 aarch64_operand_error error_info;
7075 memset (&error_info, '\0', sizeof (error_info));
7076 error_info.kind = AARCH64_OPDE_NIL;
7077 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7078 && !error_info.non_fatal)
7079 return TRUE;
7080
7081 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7082 record_operand_error_info (opcode, &error_info);
7083 return error_info.non_fatal;
7084 }
7085
#ifdef DEBUG_AARCH64
/* Debug helper: print every operand of OPCODE, preferring the operand's
   short name and falling back to its description when no name exists.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  int i;

  for (i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *name = aarch64_get_operand_name (opcode->operands[i]);

      if (name[0] == '\0')
	name = aarch64_get_operand_desc (opcode->operands[i]);
      aarch64_verbose ("\t\t opnd%d: %s", i, name);
    }
}
#endif /* DEBUG_AARCH64 */
7101
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function is supposed to emit
   the frags/bytes it assembles to.

   It walks the chain of opcode templates sharing the mnemonic, trying to
   parse and encode against each in turn; the first template that parses,
   fixes up and encodes successfully is emitted.  If none succeeds, the
   accumulated operand errors are reported.  */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* P is advanced past the mnemonic; a NULL result means STR did not
     start with a known mnemonic.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* INST is global; preserve the condition parsed off the mnemonic
     (e.g. b.eq) across the reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; all three must succeed for this
	 template to be the one emitted.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
		 store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, TRUE);
	  return;
	}

      /* This template failed; reset INST before trying the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, FALSE);
}
7241
7242 /* Various frobbings of labels and their addresses. */
7243
/* Hook run at the start of each input line: forget any label seen on a
   previous line so md_assemble only aligns labels on the current one.  */

void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
7249
/* Hook run when a label SYM is defined: remember it (md_assemble will
   re-anchor it to the instruction frag) and emit DWARF line info.  */

void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
7257
/* Hook run when a section is changed/finished.  */

void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
7264
7265 int
7266 aarch64_data_in_code (void)
7267 {
7268 if (!strncmp (input_line_pointer + 1, "data:", 5))
7269 {
7270 *input_line_pointer = '/';
7271 input_line_pointer += 5;
7272 *input_line_pointer = 0;
7273 return 1;
7274 }
7275
7276 return 0;
7277 }
7278
/* Canonicalize NAME in place by stripping a trailing "/data" suffix
   (the counterpart of the marker handled by aarch64_data_in_code).
   Return NAME.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
7289 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF's final field is TRUE while REGDEF_ALIAS's is FALSE —
   presumably distinguishing canonical names from aliases in reg_entry;
   TODO confirm against the reg_entry declaration.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  x31/w31 are deliberately excluded here; number
     31 is entered separately below as sp/wsp and xzr/wzr.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers (only p0-p15 exist).  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7359
/* Table of the sixteen NZCV flag-combination operand spellings (as used
   by CCMP-style instructions): an uppercase letter means the flag bit is
   set.  B packs the four flags into the 4-bit immediate, N in bit 3 down
   to V in bit 0.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7397 \f
7398 /* MD interface: bits in the object file. */
7399
7400 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7401 for use in the a.out file, and stores them in the array pointed to by buf.
7402 This knows about the endian-ness of the target machine and does
7403 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7404 2 (short) and 4 (long) Floating numbers are put out as a series of
7405 LITTLENUMS (shorts, here at least). */
7406
7407 void
7408 md_number_to_chars (char *buf, valueT val, int n)
7409 {
7410 if (target_big_endian)
7411 number_to_chars_bigendian (buf, val, n);
7412 else
7413 number_to_chars_littleendian (buf, val, n);
7414 }
7415
7416 /* MD interface: Sections. */
7417
7418 /* Estimate the size of a frag before relaxing. Assume everything fits in
7419 4 bytes. */
7420
7421 int
7422 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
7423 {
7424 fragp->fr_var = 4;
7425 return 4;
7426 }
7427
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  /* No extra rounding is required for AArch64 sections.  */
  return size;
}
7435
7436 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7437 of an rs_align_code fragment.
7438
7439 Here we fill the frag with the appropriate info for padding the
7440 output stream. The resulting frag will consist of a fixed (fr_fix)
7441 and of a repeating (fr_var) part.
7442
7443 The fixed content is always emitted before the repeating content and
7444 these two parts are used as follows in constructing the output:
7445 - the fixed part will be used to align to a valid instruction word
7446 boundary, in case that we start at a misaligned address; as no
7447 executable instruction can live at the misaligned location, we
7448 simply fill with zeros;
7449 - the variable part will be used to cover the remaining padding and
7450 we fill using the AArch64 NOP instruction.
7451
7452 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7453 enough storage space for up to 3 bytes for padding the back to a valid
7454 instruction alignment and exactly 4 bytes to store the NOP pattern. */
7455
7456 void
7457 aarch64_handle_align (fragS * fragP)
7458 {
7459 /* NOP = d503201f */
7460 /* AArch64 instructions are always little-endian. */
7461 static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
7462
7463 int bytes, fix, noop_size;
7464 char *p;
7465
7466 if (fragP->fr_type != rs_align_code)
7467 return;
7468
7469 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
7470 p = fragP->fr_literal + fragP->fr_fix;
7471
7472 #ifdef OBJ_ELF
7473 gas_assert (fragP->tc_frag_data.recorded);
7474 #endif
7475
7476 noop_size = sizeof (aarch64_noop);
7477
7478 fix = bytes & (noop_size - 1);
7479 if (fix)
7480 {
7481 #ifdef OBJ_ELF
7482 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
7483 #endif
7484 memset (p, 0, fix);
7485 p += fix;
7486 fragP->fr_fix += fix;
7487 }
7488
7489 if (noop_size)
7490 memcpy (p, aarch64_noop, noop_size);
7491 fragP->fr_var = noop_size;
7492 }
7493
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

#ifndef OBJ_ELF
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
  /* Nothing to do for non-ELF targets.  */
}

#else /* OBJ_ELF is defined.  */
void
aarch64_init_frag (fragS * fragP, int max_chars)
{
  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  if (!fragP->tc_frag_data.recorded)
    fragP->tc_frag_data.recorded = 1;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* Choose the mapping state ($d data / $x code) to record for this
     kind of frag.  */
  switch (fragP->fr_type)
    {
    case rs_align_test:
    case rs_fill:
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align:
      /* PR 20364: We can get alignment frags in code sections,
	 so do not just assume that we should use the MAP_DATA state.  */
      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
      break;
    case rs_align_code:
      mapping_state_2 (MAP_INSN, max_chars);
      break;
    default:
      break;
    }
}

/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_aarch64_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
#endif /* OBJ_ELF */
7548
7549 /* Convert REGNAME to a DWARF-2 register number. */
7550
7551 int
7552 tc_aarch64_regname_to_dw2regnum (char *regname)
7553 {
7554 const reg_entry *reg = parse_reg (&regname);
7555 if (reg == NULL)
7556 return -1;
7557
7558 switch (reg->type)
7559 {
7560 case REG_TYPE_SP_32:
7561 case REG_TYPE_SP_64:
7562 case REG_TYPE_R_32:
7563 case REG_TYPE_R_64:
7564 return reg->number;
7565
7566 case REG_TYPE_FP_B:
7567 case REG_TYPE_FP_H:
7568 case REG_TYPE_FP_S:
7569 case REG_TYPE_FP_D:
7570 case REG_TYPE_FP_Q:
7571 return reg->number + 64;
7572
7573 default:
7574 break;
7575 }
7576 return -1;
7577 }
7578
/* Implement DWARF2_ADDR_SIZE.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* ILP32: 32-bit addresses despite the 64-bit ISA.  */
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7590
7591 /* MD interface: Symbol and relocation handling. */
7592
7593 /* Return the address within the segment that a PC-relative fixup is
7594 relative to. For AArch64 PC-relative fixups applied to instructions
7595 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7596
7597 long
7598 md_pcrel_from_section (fixS * fixP, segT seg)
7599 {
7600 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7601
7602 /* If this is pc-relative and we are going to emit a relocation
7603 then we just want to put out any pipeline compensation that the linker
7604 will need. Otherwise we want to use the calculated base. */
7605 if (fixP->fx_pcrel
7606 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7607 || aarch64_force_relocation (fixP)))
7608 base = 0;
7609
7610 /* AArch64 should be consistent for all pc-relative relocations. */
7611 return base + AARCH64_PCREL_OFFSET;
7612 }
7613
7614 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
7615 Otherwise we have no need to default values of symbols. */
7616
7617 symbolS *
7618 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
7619 {
7620 #ifdef OBJ_ELF
7621 if (name[0] == '_' && name[1] == 'G'
7622 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
7623 {
7624 if (!GOT_symbol)
7625 {
7626 if (symbol_find (name))
7627 as_bad (_("GOT already in the symbol table"));
7628
7629 GOT_symbol = symbol_new (name, undefined_section,
7630 &zero_address_frag, 0);
7631 }
7632
7633 return GOT_symbol;
7634 }
7635 #endif
7636
7637 return 0;
7638 }
7639
7640 /* Return non-zero if the indicated VALUE has overflowed the maximum
7641 range expressible by a unsigned number with the indicated number of
7642 BITS. */
7643
7644 static bfd_boolean
7645 unsigned_overflow (valueT value, unsigned bits)
7646 {
7647 valueT lim;
7648 if (bits >= sizeof (valueT) * 8)
7649 return FALSE;
7650 lim = (valueT) 1 << bits;
7651 return (value >= lim);
7652 }
7653
7654
7655 /* Return non-zero if the indicated VALUE has overflowed the maximum
7656 range expressible by an signed number with the indicated number of
7657 BITS. */
7658
7659 static bfd_boolean
7660 signed_overflow (offsetT value, unsigned bits)
7661 {
7662 offsetT lim;
7663 if (bits >= sizeof (offsetT) * 8)
7664 return FALSE;
7665 lim = (offsetT) 1 << (bits - 1);
7666 return (value < -lim || value >= lim);
7667 }
7668
7669 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7670 unsigned immediate offset load/store instruction, try to encode it as
7671 an unscaled, 9-bit, signed immediate offset load/store instruction.
7672 Return TRUE if it is successful; otherwise return FALSE.
7673
7674 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7675 in response to the standard LDR/STR mnemonics when the immediate offset is
7676 unambiguous, i.e. when it is negative or unaligned. */
7677
7678 static bfd_boolean
7679 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
7680 {
7681 int idx;
7682 enum aarch64_op new_op;
7683 const aarch64_opcode *new_opcode;
7684
7685 gas_assert (instr->opcode->iclass == ldst_pos);
7686
7687 switch (instr->opcode->op)
7688 {
7689 case OP_LDRB_POS:new_op = OP_LDURB; break;
7690 case OP_STRB_POS: new_op = OP_STURB; break;
7691 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
7692 case OP_LDRH_POS: new_op = OP_LDURH; break;
7693 case OP_STRH_POS: new_op = OP_STURH; break;
7694 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
7695 case OP_LDR_POS: new_op = OP_LDUR; break;
7696 case OP_STR_POS: new_op = OP_STUR; break;
7697 case OP_LDRF_POS: new_op = OP_LDURV; break;
7698 case OP_STRF_POS: new_op = OP_STURV; break;
7699 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
7700 case OP_PRFM_POS: new_op = OP_PRFUM; break;
7701 default: new_op = OP_NIL; break;
7702 }
7703
7704 if (new_op == OP_NIL)
7705 return FALSE;
7706
7707 new_opcode = aarch64_get_opcode (new_op);
7708 gas_assert (new_opcode != NULL);
7709
7710 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
7711 instr->opcode->op, new_opcode->op);
7712
7713 aarch64_replace_opcode (instr, new_opcode);
7714
7715 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
7716 qualifier matching may fail because the out-of-date qualifier will
7717 prevent the operand being updated with a new and correct qualifier. */
7718 idx = aarch64_operand_index (instr->opcode->operands,
7719 AARCH64_OPND_ADDR_SIMM9);
7720 gas_assert (idx == 1);
7721 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
7722
7723 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
7724
7725 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
7726 insn_sequence))
7727 return FALSE;
7728
7729 return TRUE;
7730 }
7731
/* Called by fix_insn to fix a MOV immediate alias instruction.

   Operand for a generic move immediate instruction, which is an alias
   instruction that generates a single MOVZ, MOVN or ORR instruction to load
   a 32-bit/64-bit immediate value into general register.  An assembler error
   shall result if the immediate cannot be created by a single one of these
   instructions.  If there is a choice, then to ensure reversibility an
   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */

static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias first, to keep the MOVZ-before-MOVN
	 preference described above.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVN alias (OP_MOV_IMM_WIDEN), for immediates whose
	 bitwise inverse fits a wide move.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias, i.e. MOV with a bitmask immediate; this is
	 the last resort per the preference order above.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* None of the single-instruction forms could encode VALUE.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
7792
/* An instruction operand which is immediate related may have symbol used
   in the assembly, e.g.

     mov w0, u32
     .set u32, 0x00ffff00

   At the time when the assembly instruction is parsed, a referenced symbol,
   like 'u32' in the above example may not have been seen; a fixS is created
   in such a case and is handled here after symbols have been resolved.
   Instruction is fixed up with VALUE using the information in *FIXP plus
   extra information in FLAGS.

   This function is called by md_apply_fix to fix up instructions that need
   a fix-up described above but does not involve any linker-time relocation.  */

static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  /* Address of the instruction bytes to patch within the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate; EXCEPTION operands are placed via
	 encode_svc_imm, UNDEFINED operands are OR'd in directly.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		  3  322|2222|2 2  2 21111 111111
		  1  098|7654|3 2  1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
	 ->
		  3  322|2222|2 2  221111111111
		  1  098|7654|3 2  109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  A negative immediate is handled by flipping
	     the operation and negating the value.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  Only possible
	     when the low 12 bits are zero and the value fits 24 bits.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  Re-encode the whole instruction so the
	 opcode machinery validates the bitmask form.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to load a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* Scaled encoding failed; the unscaled LDUR/STUR form
	     accepted the offset instead.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
7971
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.  */

void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  /* Address of the bytes to patch within the frag.  */
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  A fixup with no
     symbol and no pc-relativity is fully resolved here.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  Many cases below only patch the section
     contents when the fixup is resolved here or the target stores
     addends in the section (REL, i.e. !use_rela_p).  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* LDR (literal): 19-bit word-aligned pc-relative offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* ADR: 21-bit signed byte offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* B.cond: 19-bit word-aligned offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* TBZ/TBNZ: 14-bit word-aligned offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* BL/B: 26-bit word-aligned offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVW group: SCALE selects which 16-bit slice of the value the
       instruction materialises (G0 = bits 0-15, G1 = 16-31, ...).  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale. */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn. */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo reloc: resolve to the 32- or 64-bit variant per ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      /* Pseudo reloc: resolve to the 32- or 64-bit variant per ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated the struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually use
     this field, so the impact on the performance should be minimal .  */
  free (fixP->tc_fix_data.inst);

  return;
}
8358
8359 /* Translate internal representation of relocation info to BFD target
8360 format. */
8361
8362 arelent *
8363 tc_gen_reloc (asection * section, fixS * fixp)
8364 {
8365 arelent *reloc;
8366 bfd_reloc_code_real_type code;
8367
8368 reloc = XNEW (arelent);
8369
8370 reloc->sym_ptr_ptr = XNEW (asymbol *);
8371 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8372 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8373
8374 if (fixp->fx_pcrel)
8375 {
8376 if (section->use_rela_p)
8377 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8378 else
8379 fixp->fx_offset = reloc->address;
8380 }
8381 reloc->addend = fixp->fx_offset;
8382
8383 code = fixp->fx_r_type;
8384 switch (code)
8385 {
8386 case BFD_RELOC_16:
8387 if (fixp->fx_pcrel)
8388 code = BFD_RELOC_16_PCREL;
8389 break;
8390
8391 case BFD_RELOC_32:
8392 if (fixp->fx_pcrel)
8393 code = BFD_RELOC_32_PCREL;
8394 break;
8395
8396 case BFD_RELOC_64:
8397 if (fixp->fx_pcrel)
8398 code = BFD_RELOC_64_PCREL;
8399 break;
8400
8401 default:
8402 break;
8403 }
8404
8405 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8406 if (reloc->howto == NULL)
8407 {
8408 as_bad_where (fixp->fx_file, fixp->fx_line,
8409 _
8410 ("cannot represent %s relocation in this object file format"),
8411 bfd_get_reloc_code_name (code));
8412 return NULL;
8413 }
8414
8415 return reloc;
8416 }
8417
8418 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8419
8420 void
8421 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8422 {
8423 bfd_reloc_code_real_type type;
8424 int pcrel = 0;
8425
8426 /* Pick a reloc.
8427 FIXME: @@ Should look at CPU word size. */
8428 switch (size)
8429 {
8430 case 1:
8431 type = BFD_RELOC_8;
8432 break;
8433 case 2:
8434 type = BFD_RELOC_16;
8435 break;
8436 case 4:
8437 type = BFD_RELOC_32;
8438 break;
8439 case 8:
8440 type = BFD_RELOC_64;
8441 break;
8442 default:
8443 as_bad (_("cannot do %u-byte relocation"), size);
8444 type = BFD_RELOC_UNUSED;
8445 break;
8446 }
8447
8448 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8449 }
8450
/* Decide whether the fixup FIXP must be kept as a relocation for the
   linker instead of being resolved by the assembler.  Return non-zero
   to force the relocation.  */

int
aarch64_force_relocation (struct fix *fixp)
{
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
	 even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      break;
    }

  return generic_force_reloc (fixp);
}
8546
8547 #ifdef OBJ_ELF
8548
8549 /* Implement md_after_parse_args. This is the earliest time we need to decide
8550 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8551
8552 void
8553 aarch64_after_parse_args (void)
8554 {
8555 if (aarch64_abi != AARCH64_ABI_NONE)
8556 return;
8557
8558 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8559 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8560 aarch64_abi = AARCH64_ABI_ILP32;
8561 else
8562 aarch64_abi = AARCH64_ABI_LP64;
8563 }
8564
8565 const char *
8566 elf64_aarch64_target_format (void)
8567 {
8568 #ifdef TE_CLOUDABI
8569 /* FIXME: What to do for ilp32_p ? */
8570 if (target_big_endian)
8571 return "elf64-bigaarch64-cloudabi";
8572 else
8573 return "elf64-littleaarch64-cloudabi";
8574 #else
8575 if (target_big_endian)
8576 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8577 else
8578 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8579 #endif
8580 }
8581
/* Per-symbol frobbing hook; simply defer to the generic ELF
   symbol frobbing.  */

void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8587 #endif
8588
8589 /* MD interface: Finalization. */
8590
8591 /* A good place to do this, although this was probably not intended
8592 for this kind of use. We need to dump the literal pool before
8593 references are made to a null symbol pointer. */
8594
8595 void
8596 aarch64_cleanup (void)
8597 {
8598 literal_pool *pool;
8599
8600 for (pool = list_of_pools; pool; pool = pool->next)
8601 {
8602 /* Put it at the end of the relevant section. */
8603 subseg_set (pool->section, pool->sub_section);
8604 s_ltorg (0);
8605 }
8606 }
8607
8608 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded for this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward through empty frags to decide whether SYM is
	 redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
8672 #endif
8673
8674 /* Adjust the symbol table. */
8675
/* MD hook: final symbol-table adjustments before writing the object.
   For ELF, prune redundant mapping symbols and run the generic ELF
   adjustments.  Non-ELF configurations have nothing to do.  */
void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8686
/* Insert KEY -> VALUE into TABLE.  The final 0 argument means an
   existing entry for KEY is left in place rather than replaced.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
8692
/* Like checked_hash_insert, but assert that KEY fits within the fixed
   system-register name buffer used by the operand parser.  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
8699
8700 static void
8701 fill_instruction_hash_table (void)
8702 {
8703 aarch64_opcode *opcode = aarch64_opcode_table;
8704
8705 while (opcode->name != NULL)
8706 {
8707 templates *templ, *new_templ;
8708 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
8709
8710 new_templ = XNEW (templates);
8711 new_templ->opcode = opcode;
8712 new_templ->next = NULL;
8713
8714 if (!templ)
8715 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8716 else
8717 {
8718 new_templ->next = templ->next;
8719 templ->next = new_templ;
8720 }
8721 ++opcode;
8722 }
8723 }
8724
/* Copy at most NUM characters from SRC to DST, upper-casing each one
   and stopping early at SRC's NUL terminator.  DST is always
   NUL-terminated (it must have room for NUM + 1 bytes).  */
static inline void
convert_to_upper (char *dst, const char *src, size_t num)
{
  size_t n = 0;

  while (n < num && src[n] != '\0')
    {
      dst[n] = TOUPPER (src[n]);
      ++n;
    }
  dst[n] = '\0';
}
8733
8734 /* Assume STR point to a lower-case string, allocate, convert and return
8735 the corresponding upper-case string. */
8736 static inline const char*
8737 get_upper_str (const char *str)
8738 {
8739 char *ret;
8740 size_t len = strlen (str);
8741 ret = XNEWVEC (char, len + 1);
8742 convert_to_upper (ret, str, len);
8743 return ret;
8744 }
8745
8746 /* MD interface: Initialization. */
8747
/* MD hook: one-time assembler initialization.  Builds every lookup
   table used by the parser (mnemonics, conditions, shifts, system
   registers, barriers, prefetch and hint operands), resolves the CPU
   feature set from the command line, and records the BFD machine.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the (initially empty) string hash tables.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and PSTATE fields, keyed by lower-case name.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			 (void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			 aarch64_pstatefields[i].name,
			 (void *) (aarch64_pstatefields + i));

  /* Operands of the IC, DC, AT and TLBI system instructions, and
     speculation-restriction (SR) operations.  */
  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			 aarch64_sys_regs_ic[i].name,
			 (void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			 aarch64_sys_regs_dc[i].name,
			 (void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			 aarch64_sys_regs_at[i].name,
			 (void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			 aarch64_sys_regs_tlbi[i].name,
			 (void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			 aarch64_sys_regs_sr[i].name,
			 (void *) (aarch64_sys_regs_sr + i));

  /* Register names and NZCV flag combinations.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  /* Shift/extend modifiers; each is hashed in both cases so "LSL" and
     "lsl" resolve to the same entry.  */
  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  /* DSB nXS barrier variants share the barrier-option table.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.
     -mcpu takes precedence over -march; fall back to the build-time
     default when neither was given.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
8906
8907 /* Command line processing. */
8908
/* Short options: every AArch64-specific option is spelled -m<...>.  */
const char *md_shortopts = "m:";

/* -EB/-EL are only offered when the target can produce that
   endianness: both for a bi-endian build, otherwise only the one
   matching the configured default.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8933
/* A simple -m<option> that sets an int variable when matched.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Flag-style -m options; matched in md_parse_option before the
   argument-taking options in aarch64_long_opts.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
8956
/* Maps a -mcpu= name to the feature set it enables.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  Each entry pairs a base architecture with the
   optional extensions the part implements.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
				  "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
				    "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
                 AARCH64_FEATURE_F16
                 | AARCH64_FEATURE_RCPC
                 | AARCH64_FEATURE_DOTPROD
                 | AARCH64_FEATURE_SSBS
                 | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
                   AARCH64_FEATURE_F16
                   | AARCH64_FEATURE_RCPC
                   | AARCH64_FEATURE_DOTPROD
                   | AARCH64_FEATURE_SSBS
                   | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_PROFILE),
				  "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
				"Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
				  "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_PROFILE),
				  "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				 | AARCH64_FEATURE_I8MM
				 | AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_SVE
				 | AARCH64_FEATURE_SVE2
				 | AARCH64_FEATURE_SVE2_BITPERM
				 | AARCH64_FEATURE_MEMTAG
				 | AARCH64_FEATURE_RNG),
				 "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			    AARCH64_FEATURE_PROFILE
			  | AARCH64_FEATURE_CVADP
			  | AARCH64_FEATURE_SVE
			  | AARCH64_FEATURE_SSBS
			  | AARCH64_FEATURE_RNG
			  | AARCH64_FEATURE_F16
			  | AARCH64_FEATURE_BFLOAT16
			  | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
  "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
                AARCH64_FEATURE_F16
                | AARCH64_FEATURE_RCPC
                | AARCH64_FEATURE_DOTPROD
                | AARCH64_FEATURE_SSBS
                | AARCH64_FEATURE_PROFILE),
                "Cortex-X1"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9098
/* Maps a -march= name to its base feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8-r",	AARCH64_ARCH_V8_R},
  {NULL, AARCH64_ARCH_NONE}
};
9120
/* ISA extensions.  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

/* The +ext / +noext modifiers accepted after -mcpu=/-march= names.
   The REQUIRE field drives the transitive closures computed by
   aarch64_feature_enable_set / aarch64_feature_disable_set.  */
static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_ARCH_NONE},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_ARCH_NONE},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"csre",		AARCH64_FEATURE (AARCH64_FEATURE_CSRE, 0),
			AARCH64_ARCH_NONE},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
9213
/* An -m<option>=<arg> whose argument is decoded by FUNC; FUNC returns
   non-zero on success.  */
struct aarch64_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
9221
9222 /* Transitive closure of features depending on set. */
9223 static aarch64_feature_set
9224 aarch64_feature_disable_set (aarch64_feature_set set)
9225 {
9226 const struct aarch64_option_cpu_value_table *opt;
9227 aarch64_feature_set prev = 0;
9228
9229 while (prev != set) {
9230 prev = set;
9231 for (opt = aarch64_features; opt->name != NULL; opt++)
9232 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9233 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9234 }
9235 return set;
9236 }
9237
9238 /* Transitive closure of dependencies of set. */
9239 static aarch64_feature_set
9240 aarch64_feature_enable_set (aarch64_feature_set set)
9241 {
9242 const struct aarch64_option_cpu_value_table *opt;
9243 aarch64_feature_set prev = 0;
9244
9245 while (prev != set) {
9246 prev = set;
9247 for (opt = aarch64_features; opt->name != NULL; opt++)
9248 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9249 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9250 }
9251 return set;
9252 }
9253
9254 static int
9255 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9256 bfd_boolean ext_only)
9257 {
9258 /* We insist on extensions being added before being removed. We achieve
9259 this by using the ADDING_VALUE variable to indicate whether we are
9260 adding an extension (1) or removing it (0) and only allowing it to
9261 change in the order -1 -> 1 -> 0. */
9262 int adding_value = -1;
9263 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9264
9265 /* Copy the feature set, so that we can modify it. */
9266 *ext_set = **opt_p;
9267 *opt_p = ext_set;
9268
9269 while (str != NULL && *str != 0)
9270 {
9271 const struct aarch64_option_cpu_value_table *opt;
9272 const char *ext = NULL;
9273 int optlen;
9274
9275 if (!ext_only)
9276 {
9277 if (*str != '+')
9278 {
9279 as_bad (_("invalid architectural extension"));
9280 return 0;
9281 }
9282
9283 ext = strchr (++str, '+');
9284 }
9285
9286 if (ext != NULL)
9287 optlen = ext - str;
9288 else
9289 optlen = strlen (str);
9290
9291 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
9292 {
9293 if (adding_value != 0)
9294 adding_value = 0;
9295 optlen -= 2;
9296 str += 2;
9297 }
9298 else if (optlen > 0)
9299 {
9300 if (adding_value == -1)
9301 adding_value = 1;
9302 else if (adding_value != 1)
9303 {
9304 as_bad (_("must specify extensions to add before specifying "
9305 "those to remove"));
9306 return FALSE;
9307 }
9308 }
9309
9310 if (optlen == 0)
9311 {
9312 as_bad (_("missing architectural extension"));
9313 return 0;
9314 }
9315
9316 gas_assert (adding_value != -1);
9317
9318 for (opt = aarch64_features; opt->name != NULL; opt++)
9319 if (strncmp (opt->name, str, optlen) == 0)
9320 {
9321 aarch64_feature_set set;
9322
9323 /* Add or remove the extension. */
9324 if (adding_value)
9325 {
9326 set = aarch64_feature_enable_set (opt->value);
9327 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9328 }
9329 else
9330 {
9331 set = aarch64_feature_disable_set (opt->value);
9332 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9333 }
9334 break;
9335 }
9336
9337 if (opt->name == NULL)
9338 {
9339 as_bad (_("unknown architectural extension `%s'"), str);
9340 return 0;
9341 }
9342
9343 str = ext;
9344 };
9345
9346 return 1;
9347 }
9348
9349 static int
9350 aarch64_parse_cpu (const char *str)
9351 {
9352 const struct aarch64_cpu_option_table *opt;
9353 const char *ext = strchr (str, '+');
9354 size_t optlen;
9355
9356 if (ext != NULL)
9357 optlen = ext - str;
9358 else
9359 optlen = strlen (str);
9360
9361 if (optlen == 0)
9362 {
9363 as_bad (_("missing cpu name `%s'"), str);
9364 return 0;
9365 }
9366
9367 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9368 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9369 {
9370 mcpu_cpu_opt = &opt->value;
9371 if (ext != NULL)
9372 return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
9373
9374 return 1;
9375 }
9376
9377 as_bad (_("unknown cpu `%s'"), str);
9378 return 0;
9379 }
9380
9381 static int
9382 aarch64_parse_arch (const char *str)
9383 {
9384 const struct aarch64_arch_option_table *opt;
9385 const char *ext = strchr (str, '+');
9386 size_t optlen;
9387
9388 if (ext != NULL)
9389 optlen = ext - str;
9390 else
9391 optlen = strlen (str);
9392
9393 if (optlen == 0)
9394 {
9395 as_bad (_("missing architecture name `%s'"), str);
9396 return 0;
9397 }
9398
9399 for (opt = aarch64_archs; opt->name != NULL; opt++)
9400 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9401 {
9402 march_cpu_opt = &opt->value;
9403 if (ext != NULL)
9404 return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
9405
9406 return 1;
9407 }
9408
9409 as_bad (_("unknown architecture `%s'\n"), str);
9410 return 0;
9411 }
9412
/* ABIs.  Maps a -mabi= name to the corresponding data-model enum.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32",	AARCH64_ABI_ILP32},
  {"lp64",	AARCH64_ABI_LP64},
};
9424
9425 static int
9426 aarch64_parse_abi (const char *str)
9427 {
9428 unsigned int i;
9429
9430 if (str[0] == '\0')
9431 {
9432 as_bad (_("missing abi name `%s'"), str);
9433 return 0;
9434 }
9435
9436 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9437 if (strcmp (str, aarch64_abis[i].name) == 0)
9438 {
9439 aarch64_abi = aarch64_abis[i].value;
9440 return 1;
9441 }
9442
9443 as_bad (_("unknown abi `%s'\n"), str);
9444 return 0;
9445 }
9446
/* Argument-taking -m options; matched by prefix in md_parse_option and
   decoded by the per-option parser functions above.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
9458
/* MD hook: handle a command-line option.  C is the option character
   ('m' for all target options); ARG is its argument, or NULL.  Return
   non-zero if the option was recognized.  The flag-style table is
   tried first, then the argument-taking table by prefix match.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Exact match against the flag-style options ("-m" + rest).  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sup-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
9526
/* MD hook: print the target-specific options section of --help to FP.  */
void
md_show_usage (FILE * fp)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  fprintf (fp, _(" AArch64-specific assembler options:\n"));

  for (opt = aarch64_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif
}
9553
9554 /* Parse a .cpu directive. */
9555
9556 static void
9557 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9558 {
9559 const struct aarch64_cpu_option_table *opt;
9560 char saved_char;
9561 char *name;
9562 char *ext;
9563 size_t optlen;
9564
9565 name = input_line_pointer;
9566 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9567 input_line_pointer++;
9568 saved_char = *input_line_pointer;
9569 *input_line_pointer = 0;
9570
9571 ext = strchr (name, '+');
9572
9573 if (ext != NULL)
9574 optlen = ext - name;
9575 else
9576 optlen = strlen (name);
9577
9578 /* Skip the first "all" entry. */
9579 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9580 if (strlen (opt->name) == optlen
9581 && strncmp (name, opt->name, optlen) == 0)
9582 {
9583 mcpu_cpu_opt = &opt->value;
9584 if (ext != NULL)
9585 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9586 return;
9587
9588 cpu_variant = *mcpu_cpu_opt;
9589
9590 *input_line_pointer = saved_char;
9591 demand_empty_rest_of_line ();
9592 return;
9593 }
9594 as_bad (_("unknown cpu `%s'"), name);
9595 *input_line_pointer = saved_char;
9596 ignore_rest_of_line ();
9597 }
9598
9599
9600 /* Parse a .arch directive. */
9601
9602 static void
9603 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9604 {
9605 const struct aarch64_arch_option_table *opt;
9606 char saved_char;
9607 char *name;
9608 char *ext;
9609 size_t optlen;
9610
9611 name = input_line_pointer;
9612 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9613 input_line_pointer++;
9614 saved_char = *input_line_pointer;
9615 *input_line_pointer = 0;
9616
9617 ext = strchr (name, '+');
9618
9619 if (ext != NULL)
9620 optlen = ext - name;
9621 else
9622 optlen = strlen (name);
9623
9624 /* Skip the first "all" entry. */
9625 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9626 if (strlen (opt->name) == optlen
9627 && strncmp (name, opt->name, optlen) == 0)
9628 {
9629 mcpu_cpu_opt = &opt->value;
9630 if (ext != NULL)
9631 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9632 return;
9633
9634 cpu_variant = *mcpu_cpu_opt;
9635
9636 *input_line_pointer = saved_char;
9637 demand_empty_rest_of_line ();
9638 return;
9639 }
9640
9641 as_bad (_("unknown architecture `%s'\n"), name);
9642 *input_line_pointer = saved_char;
9643 ignore_rest_of_line ();
9644 }
9645
9646 /* Parse a .arch_extension directive. */
9647
9648 static void
9649 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9650 {
9651 char saved_char;
9652 char *ext = input_line_pointer;;
9653
9654 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9655 input_line_pointer++;
9656 saved_char = *input_line_pointer;
9657 *input_line_pointer = 0;
9658
9659 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9660 return;
9661
9662 cpu_variant = *mcpu_cpu_opt;
9663
9664 *input_line_pointer = saved_char;
9665 demand_empty_rest_of_line ();
9666 }
9667
/* Copy symbol information.  Propagates the AArch64-specific symbol
   flag word from SRC to DEST (used when one symbol is aliased to
   another).  */
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
9675
9676 #ifdef OBJ_ELF
9677 /* Same as elf_copy_symbol_attributes, but without copying st_other.
9678 This is needed so AArch64 specific st_other values can be independently
9679 specified for an IFUNC resolver (that is called by the dynamic linker)
9680 and the symbol it resolves (aliased to the resolver). In particular,
9681 if a function symbol has special st_other value set via directives,
9682 then attaching an IFUNC resolver to that symbol should not override
9683 the st_other setting. Requiring the directive on the IFUNC resolver
9684 symbol would be unexpected and problematic in C code, where the two
9685 symbols appear as two independent function declarations. */
9686
9687 void
9688 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
9689 {
9690 struct elf_obj_sy *srcelf = symbol_get_obj (src);
9691 struct elf_obj_sy *destelf = symbol_get_obj (dest);
9692 if (srcelf->size)
9693 {
9694 if (destelf->size == NULL)
9695 destelf->size = XNEW (expressionS);
9696 *destelf->size = *srcelf->size;
9697 }
9698 else
9699 {
9700 free (destelf->size);
9701 destelf->size = NULL;
9702 }
9703 S_SET_SIZE (dest, S_GET_SIZE (src));
9704 }
9705 #endif