/* Source: binutils-gdb, gas/config/tc-aarch64.c
   (commit: "AArch64: Fix Diagnostic messaging for LD/ST Exclusive.")  */
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2021 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include <stdint.h>
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 /* Currently active instruction sequence. */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_NONE = 0,
69 AARCH64_ABI_LP64 = 1,
70 AARCH64_ABI_ILP32 = 2
71 };
72
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt. */
78 static const char *default_arch = DEFAULT_ARCH;
79
80 /* AArch64 ABI for the output file. */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82
83 /* When non-zero, program to a 32-bit model, in which the C data types
84 int, long and all pointer types are 32-bit objects (ILP32); or to a
85 64-bit model, in which the C int type is 32-bits but the C long type
86 and all pointer types are 64-bit objects (LP64). */
87 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89
/* Classification of the element type that can follow a vector or SVE
   register name (e.g. the "s" in "v0.4s"), or of an SVE predication
   suffix ("/z" or "/m").  */
enum vector_el_type
{
  NT_invtype = -1,
  NT_b,			/* 8-bit elements.  */
  NT_h,			/* 16-bit elements.  */
  NT_s,			/* 32-bit elements.  */
  NT_d,			/* 64-bit elements.  */
  NT_q,			/* 128-bit elements.  */
  NT_zero,		/* SVE "/z" (zeroing) predication.  */
  NT_merge		/* SVE "/m" (merging) predication.  */
};

/* Bits for DEFINED field in vector_type_el.  */
#define NTA_HASTYPE     1
#define NTA_HASINDEX    2
#define NTA_HASVARWIDTH 4

/* Parsed shape/index information for a (possibly typed and/or indexed)
   vector register operand.  */
struct vector_type_el
{
  enum vector_el_type type;	/* Element type, NT_invtype if none.  */
  unsigned char defined;	/* Mask of NTA_* bits describing what was parsed.  */
  unsigned width;		/* Number of elements; 0 for variable width.  */
  int64_t index;		/* Element index when NTA_HASINDEX is set.  */
};

#define FIXUP_F_HAS_EXPLICIT_SHIFT	0x00000001

/* Relocation information attached to the instruction being assembled,
   including the GAS-internal fixup data.  */
struct reloc
{
  bfd_reloc_code_real_type type;
  expressionS exp;
  int pc_rel;
  enum aarch64_opnd opnd;	/* Operand the relocation applies to.  */
  uint32_t flags;		/* FIXUP_F_* bits.  */
  unsigned need_libopcodes_p : 1;
};

struct aarch64_instruction
{
  /* libopcodes structure for instruction intermediate representation.  */
  aarch64_inst base;
  /* Record assembly errors found during the parsing.  */
  struct
  {
    enum aarch64_operand_error_kind kind;
    const char *error;
  } parsing_error;
  /* The condition that appears in the assembly line.  */
  int cond;
  /* Relocation information (including the GAS internal fixup).  */
  struct reloc reloc;
  /* Need to generate an immediate in the literal pool.  */
  unsigned gen_lit_pool : 1;
};

typedef struct aarch64_instruction aarch64_instruction;

/* The single instruction currently being assembled.  */
static aarch64_instruction inst;
148
149 static bool parse_operands (char *, const aarch64_opcode *);
150 static bool programmer_friendly_fixup (aarch64_instruction *);
151
152 #ifdef OBJ_ELF
153 # define now_instr_sequence seg_info \
154 (now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158
159 /* Diagnostics inline function utilities.
160
161 These are lightweight utilities which should only be called by parse_operands
162 and other parsers. GAS processes each assembly line by parsing it against
163 instruction template(s), in the case of multiple templates (for the same
164 mnemonic name), those templates are tried one by one until one succeeds or
165 all fail. An assembly line may fail a few templates before being
166 successfully parsed; an error saved here in most cases is not a user error
167 but an error indicating the current template is not the right template.
168 Therefore it is very important that errors can be saved at a low cost during
169 the parsing; we don't want to slow down the whole parsing by recording
170 non-user errors in detail.
171
172 Remember that the objective is to help GAS pick up the most appropriate
173 error message in the case of multiple templates, e.g. FMOV which has 8
174 templates. */
175
176 static inline void
177 clear_error (void)
178 {
179 inst.parsing_error.kind = AARCH64_OPDE_NIL;
180 inst.parsing_error.error = NULL;
181 }
182
183 static inline bool
184 error_p (void)
185 {
186 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188
/* Return the message text of the recorded diagnostic; may be NULL when
   a default message is to be composed later from the operand's DESC.  */
static inline const char *
get_error_message (void)
{
  return inst.parsing_error.error;
}

/* Return the kind of the recorded diagnostic (AARCH64_OPDE_NIL if none).  */
static inline enum aarch64_operand_error_kind
get_error_kind (void)
{
  return inst.parsing_error.kind;
}
200
/* Record a parsing diagnostic of kind KIND with message ERROR for the
   instruction currently being assembled.  ERROR may be NULL, in which
   case the message is derived later from the operand's DESC field.  */
static inline void
set_error (enum aarch64_operand_error_kind kind, const char *error)
{
  inst.parsing_error.kind = kind;
  inst.parsing_error.error = error;
}

/* Record a recoverable error; parsing may continue against the current
   instruction template.  */
static inline void
set_recoverable_error (const char *error)
{
  set_error (AARCH64_OPDE_RECOVERABLE, error);
}

/* Use the DESC field of the corresponding aarch64_operand entry to compose
   the error message.  */
static inline void
set_default_error (void)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
}

/* Record a syntax error, unconditionally overwriting any earlier one.  */
static inline void
set_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}

/* Record a syntax error only if none has been recorded yet, so that the
   earliest (usually most meaningful) diagnostic is preserved.  */
static inline void
set_first_syntax_error (const char *error)
{
  if (! error_p ())
    set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
}

/* Record a fatal syntax error; no further instruction template should
   be tried after this.  */
static inline void
set_fatal_syntax_error (const char *error)
{
  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
}
240 \f
241 /* Return value for certain parsers when the parsing fails; those parsers
242 return the information of the parsed result, e.g. register number, on
243 success. */
244 #define PARSE_FAIL -1
245
246 /* This is an invalid condition code that means no conditional field is
247 present. */
248 #define COND_ALWAYS 0x10
249
250 typedef struct
251 {
252 const char *template;
253 uint32_t value;
254 } asm_nzcv;
255
256 struct reloc_entry
257 {
258 char *name;
259 bfd_reloc_code_real_type reloc;
260 };
261
262 /* Macros to define the register types and masks for the purpose
263 of parsing. */
264
265 #undef AARCH64_REG_TYPES
266 #define AARCH64_REG_TYPES \
267 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
268 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
269 BASIC_REG_TYPE(SP_32) /* wsp */ \
270 BASIC_REG_TYPE(SP_64) /* sp */ \
271 BASIC_REG_TYPE(Z_32) /* wzr */ \
272 BASIC_REG_TYPE(Z_64) /* xzr */ \
273 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
274 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
275 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
276 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
277 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
278 BASIC_REG_TYPE(VN) /* v[0-31] */ \
279 BASIC_REG_TYPE(ZN) /* z[0-31] */ \
280 BASIC_REG_TYPE(PN) /* p[0-15] */ \
281 /* Typecheck: any 64-bit int reg (inc SP exc XZR). */ \
282 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
283 /* Typecheck: same, plus SVE registers. */ \
284 MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64) \
285 | REG_TYPE(ZN)) \
286 /* Typecheck: x[0-30], w[0-30] or [xw]zr. */ \
287 MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64) \
288 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
289 /* Typecheck: same, plus SVE registers. */ \
290 MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64) \
291 | REG_TYPE(Z_32) | REG_TYPE(Z_64) \
292 | REG_TYPE(ZN)) \
293 /* Typecheck: x[0-30], w[0-30] or {w}sp. */ \
294 MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
295 | REG_TYPE(SP_32) | REG_TYPE(SP_64)) \
296 /* Typecheck: any int (inc {W}SP inc [WX]ZR). */ \
297 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
298 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
299 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
300 /* Typecheck: any [BHSDQ]P FP. */ \
301 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
302 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
303 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR). */ \
304 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
305 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
306 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
307 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
308 /* Typecheck: as above, but also Zn, Pn, and {W}SP. This should only \
309 be used for SVE instructions, since Zn and Pn are valid symbols \
310 in other contexts. */ \
311 MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64) \
312 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
313 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
314 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
315 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q) \
316 | REG_TYPE(ZN) | REG_TYPE(PN)) \
317 /* Any integer register; used for error messages only. */ \
318 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
319 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
320 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
321 /* Pseudo type to mark the end of the enumerator sequence. */ \
322 BASIC_REG_TYPE(MAX)
323
324 #undef BASIC_REG_TYPE
325 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
326 #undef MULTI_REG_TYPE
327 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
328
329 /* Register type enumerators. */
330 typedef enum aarch64_reg_type_
331 {
332 /* A list of REG_TYPE_*. */
333 AARCH64_REG_TYPES
334 } aarch64_reg_type;
335
336 #undef BASIC_REG_TYPE
337 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
338 #undef REG_TYPE
339 #define REG_TYPE(T) (1 << REG_TYPE_##T)
340 #undef MULTI_REG_TYPE
341 #define MULTI_REG_TYPE(T,V) V,
342
343 /* Structure for a hash table entry for a register. */
344 typedef struct
345 {
346 const char *name;
347 unsigned char number;
348 ENUM_BITFIELD (aarch64_reg_type_) type : 8;
349 unsigned char builtin;
350 } reg_entry;
351
352 /* Values indexed by aarch64_reg_type to assist the type checking. */
353 static const unsigned reg_type_masks[] =
354 {
355 AARCH64_REG_TYPES
356 };
357
358 #undef BASIC_REG_TYPE
359 #undef REG_TYPE
360 #undef MULTI_REG_TYPE
361 #undef AARCH64_REG_TYPES
362
/* Diagnostics used when we don't get a register of the expected type.
   Note: this has to be synchronized with the aarch64_reg_type definitions
   above.  Aborts via as_fatal on an unknown REG_TYPE.  */
static const char *
get_reg_expected_msg (aarch64_reg_type reg_type)
{
  const char *msg;

  switch (reg_type)
    {
    case REG_TYPE_R_32:
      msg = N_("integer 32-bit register expected");
      break;
    case REG_TYPE_R_64:
      msg = N_("integer 64-bit register expected");
      break;
    case REG_TYPE_R_N:
      msg = N_("integer register expected");
      break;
    case REG_TYPE_R64_SP:
      msg = N_("64-bit integer or SP register expected");
      break;
    case REG_TYPE_SVE_BASE:
      msg = N_("base register expected");
      break;
    case REG_TYPE_R_Z:
      msg = N_("integer or zero register expected");
      break;
    case REG_TYPE_SVE_OFFSET:
      msg = N_("offset register expected");
      break;
    case REG_TYPE_R_SP:
      msg = N_("integer or SP register expected");
      break;
    case REG_TYPE_R_Z_SP:
      msg = N_("integer, zero or SP register expected");
      break;
    case REG_TYPE_FP_B:
      msg = N_("8-bit SIMD scalar register expected");
      break;
    case REG_TYPE_FP_H:
      msg = N_("16-bit SIMD scalar or floating-point half precision "
	       "register expected");
      break;
    case REG_TYPE_FP_S:
      msg = N_("32-bit SIMD scalar or floating-point single precision "
	       "register expected");
      break;
    case REG_TYPE_FP_D:
      msg = N_("64-bit SIMD scalar or floating-point double precision "
	       "register expected");
      break;
    case REG_TYPE_FP_Q:
      msg = N_("128-bit SIMD scalar or floating-point quad precision "
	       "register expected");
      break;
    case REG_TYPE_R_Z_BHSDQ_V:
    case REG_TYPE_R_Z_SP_BHSDQ_VZP:
      msg = N_("register expected");
      break;
    case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
      msg = N_("SIMD scalar or floating-point register expected");
      break;
    case REG_TYPE_VN:		/* any V reg  */
      msg = N_("vector register expected");
      break;
    case REG_TYPE_ZN:
      msg = N_("SVE vector register expected");
      break;
    case REG_TYPE_PN:
      msg = N_("SVE predicate register expected");
      break;
    default:
      as_fatal (_("invalid register type %d"), reg_type);
    }
  return msg;
}
440
441 /* Some well known registers that we refer to directly elsewhere. */
442 #define REG_SP 31
443 #define REG_ZR 31
444
445 /* Instructions take 4 bytes in the object file. */
446 #define INSN_SIZE 4
447
448 static htab_t aarch64_ops_hsh;
449 static htab_t aarch64_cond_hsh;
450 static htab_t aarch64_shift_hsh;
451 static htab_t aarch64_sys_regs_hsh;
452 static htab_t aarch64_pstatefield_hsh;
453 static htab_t aarch64_sys_regs_ic_hsh;
454 static htab_t aarch64_sys_regs_dc_hsh;
455 static htab_t aarch64_sys_regs_at_hsh;
456 static htab_t aarch64_sys_regs_tlbi_hsh;
457 static htab_t aarch64_sys_regs_sr_hsh;
458 static htab_t aarch64_reg_hsh;
459 static htab_t aarch64_barrier_opt_hsh;
460 static htab_t aarch64_nzcv_hsh;
461 static htab_t aarch64_pldop_hsh;
462 static htab_t aarch64_hint_opt_hsh;
463
464 /* Stuff needed to resolve the label ambiguity
465 As:
466 ...
467 label: <insn>
468 may differ from:
469 ...
470 label:
471 <insn> */
472
473 static symbolS *last_label_seen;
474
475 /* Literal pool structure. Held on a per-section
476 and per-sub-section basis. */
477
478 #define MAX_LITERAL_POOL_SIZE 1024
479 typedef struct literal_expression
480 {
481 expressionS exp;
482 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
483 LITTLENUM_TYPE * bignum;
484 } literal_expression;
485
486 typedef struct literal_pool
487 {
488 literal_expression literals[MAX_LITERAL_POOL_SIZE];
489 unsigned int next_free_entry;
490 unsigned int id;
491 symbolS *symbol;
492 segT section;
493 subsegT sub_section;
494 int size;
495 struct literal_pool *next;
496 } literal_pool;
497
498 /* Pointer to a linked list of literal pools. */
499 static literal_pool *list_of_pools = NULL;
500 \f
501 /* Pure syntax. */
502
503 /* This array holds the chars that always start a comment. If the
504 pre-processor is disabled, these aren't very useful. */
505 const char comment_chars[] = "";
506
507 /* This array holds the chars that only start a comment at the beginning of
508 a line. If the line seems to have the form '# 123 filename'
509 .line and .file directives will appear in the pre-processed output. */
510 /* Note that input_file.c hand checks for '#' at the beginning of the
511 first line of the input file. This is because the compiler outputs
512 #NO_APP at the beginning of its output. */
513 /* Also note that comments like this one will always work. */
514 const char line_comment_chars[] = "#";
515
516 const char line_separator_chars[] = ";";
517
518 /* Chars that can be used to separate mant
519 from exp in floating point numbers. */
520 const char EXP_CHARS[] = "eE";
521
522 /* Chars that mean this number is a floating point constant. */
523 /* As in 0f12.456 */
524 /* or 0d1.2345e12 */
525
526 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhH";
527
528 /* Prefix character that indicates the start of an immediate value. */
529 #define is_immediate_prefix(C) ((C) == '#')
530
531 /* Separator character handling. */
532
533 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
534
/* If *STR currently points at the character C, consume it (advance *STR
   by one) and return TRUE; otherwise leave *STR untouched and return
   FALSE.  */
static inline bool
skip_past_char (char **str, char c)
{
  if (**str != c)
    return false;
  ++*str;
  return true;
}
546
547 #define skip_past_comma(str) skip_past_char (str, ',')
548
549 /* Arithmetic expressions (possibly involving symbols). */
550
551 static bool in_aarch64_get_expression = false;
552
553 /* Third argument to aarch64_get_expression. */
554 #define GE_NO_PREFIX false
555 #define GE_OPT_PREFIX true
556
557 /* Fourth argument to aarch64_get_expression. */
558 #define ALLOW_ABSENT false
559 #define REJECT_ABSENT true
560
561 /* Fifth argument to aarch64_get_expression. */
562 #define NORMAL_RESOLUTION false
563
/* Return TRUE if the string pointed by *STR is successfully parsed
   as an valid expression; *EP will be filled with the information of
   such an expression.  Otherwise return FALSE.

   If ALLOW_IMMEDIATE_PREFIX is true then skip a '#' at the start.
   If REJECT_ABSENT is true then treat missing expressions as an error.
   If DEFER_RESOLUTION is true, then do not resolve expressions against
   constant symbols.  Necessary if the expression is part of a fixup
   that uses a reloc that must be emitted.  */

static bool
aarch64_get_expression (expressionS * ep,
			char ** str,
			bool allow_immediate_prefix,
			bool reject_absent,
			bool defer_resolution)
{
  char *save_in;
  segT seg;
  bool prefix_present = false;

  if (allow_immediate_prefix)
    {
      if (is_immediate_prefix (**str))
	{
	  (*str)++;
	  prefix_present = true;
	}
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression parser reads from input_line_pointer, so
     temporarily redirect it to our string and restore it afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  /* Tell md_operand that a failure here should mark the expression as
     O_illegal rather than being silently ignored.  */
  in_aarch64_get_expression = true;
  if (defer_resolution)
    seg = deferred_expression (ep);
  else
    seg = expression (ep);
  in_aarch64_get_expression = false;

  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      /* A '#'-prefixed operand must be an expression, so a failure then
	 is fatal; otherwise record the error only if none exists yet.  */
      if (prefix_present && ! error_p ())
	set_fatal_syntax_error (_("bad expression"));
      else
	set_first_syntax_error (_("bad expression"));
      return false;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      set_syntax_error (_("bad segment"));
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return false;
    }
#else
  (void) seg;
#endif

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return true;
}
637
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* If this is a bfloat16 type, then parse it slightly differently -
     as it does not follow the IEEE standard exactly.  */
  if (type == 'b')
    {
      char * t;
      LITTLENUM_TYPE words[MAX_LITTLENUMS];
      FLONUM_TYPE generic_float;

      /* bfloat16: 1 sign bit, 8 exponent bits, 7 fraction bits.  */
      t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float);

      if (t)
	input_line_pointer = t;
      else
	return _("invalid floating point number");

      switch (generic_float.sign)
	{
	/* Is +Inf.  */
	case 'P':
	  words[0] = 0x7f80;
	  break;

	/* Is -Inf.  */
	case 'N':
	  words[0] = 0xff80;
	  break;

	/* Is NaN.  */
	/* bfloat16 has two types of NaN - quiet and signalling.
	   Quiet NaN has bit[6] == 1 && fraction != 0, whereas
	   signalling NaNs have bit[6] == 0 && fraction != 0.
	   Chose this specific encoding as it is the same form
	   as used by other IEEE 754 encodings in GAS.  */
	case 0:
	  words[0] = 0x7fff;
	  break;

	default:
	  break;
	}

      /* bfloat16 constants occupy one 16-bit littlenum.  */
      *sizeP = 2;

      md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));

      return NULL;
    }

  /* All other float types follow IEEE 754 and are handled generically.  */
  return ieee_md_atof (type, litP, sizeP, target_big_endian);
}
696
697 /* We handle all bad expressions here, so that we can report the faulty
698 instruction in the error message. */
699 void
700 md_operand (expressionS * exp)
701 {
702 if (in_aarch64_get_expression)
703 exp->X_op = O_illegal;
704 }
705
706 /* Immediate values. */
707
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is
   set will be the most meaningful.  Avoid overwriting it with later
   (cascading) errors by calling this function.  */

static void
first_error (const char *error)
{
  if (error_p ())
    return;
  set_syntax_error (error);
}
719
/* Similar to first_error, but this function accepts formatted error
   message.  Like first_error, it records the message only if no error
   has been recorded yet.  */
static void
first_error_fmt (const char *format, ...)
{
  va_list args;
  enum
  { size = 100 };
  /* N.B. this single buffer will not cause error messages for different
     instructions to pollute each other; this is because at the end of
     processing of each assembly line, error message if any will be
     collected by as_bad.  */
  static char buffer[size];

  if (! error_p ())
    {
      int ret ATTRIBUTE_UNUSED;
      va_start (args, format);
      ret = vsnprintf (buffer, size, format, args);
      /* Sanity-check that the formatted message fit in the buffer.  */
      know (ret <= size - 1 && ret >= 0);
      va_end (args);
      set_syntax_error (buffer);
    }
}
744
745 /* Register parsing. */
746
/* Generic register parser which is called by other specialized
   register parsers.
   CCP points to what should be the beginning of a register name.
   If it is indeed a valid register name, advance CCP over it and
   return the reg_entry structure; otherwise return NULL.
   It does not issue diagnostics.  */

static reg_entry *
parse_reg (char **ccp)
{
  char *start = *ccp;
  char *p;
  reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif

  /* Register names must start with a letter that is also a valid
     symbol-name starter.  */
  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  /* Scan over the rest of the candidate name.  */
  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the name up in the register hash table; only known register
     names (and user-defined aliases) are accepted.  */
  reg = (reg_entry *) str_hash_find_n (aarch64_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}
783
784 /* Return TRUE if REG->TYPE is a valid type of TYPE; otherwise
785 return FALSE. */
786 static bool
787 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
788 {
789 return (reg_type_masks[type] & (1 << reg->type)) != 0;
790 }
791
/* Try to parse a base or offset register.  Allow SVE base and offset
   registers if REG_TYPE includes SVE registers.  Return the register
   entry on success, setting *QUALIFIER to the register qualifier.
   Return null otherwise.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
			aarch64_opnd_qualifier_t *qualifier)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);

  if (reg == NULL)
    return NULL;

  switch (reg->type)
    {
    /* 32-bit integer registers (including WSP and WZR).  */
    case REG_TYPE_R_32:
    case REG_TYPE_SP_32:
    case REG_TYPE_Z_32:
      *qualifier = AARCH64_OPND_QLF_W;
      break;

    /* 64-bit integer registers (including SP and XZR).  */
    case REG_TYPE_R_64:
    case REG_TYPE_SP_64:
    case REG_TYPE_Z_64:
      *qualifier = AARCH64_OPND_QLF_X;
      break;

    case REG_TYPE_ZN:
      /* An SVE Zn register is only acceptable when REG_TYPE allows it,
	 and it must carry an explicit ".s" or ".d" size suffix.  */
      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
	  || str[0] != '.')
	return NULL;
      switch (TOLOWER (str[1]))
	{
	case 's':
	  *qualifier = AARCH64_OPND_QLF_S_S;
	  break;
	case 'd':
	  *qualifier = AARCH64_OPND_QLF_S_D;
	  break;
	default:
	  return NULL;
	}
      /* Consume the ".s" / ".d" suffix.  */
      str += 2;
      break;

    default:
      return NULL;
    }

  *ccp = str;

  return reg;
}
849
/* Try to parse a base or offset register.  Return the register entry
   on success, setting *QUALIFIER to the register qualifier.  Return null
   otherwise.

   Convenience wrapper around aarch64_addr_reg_parse that accepts any
   integer register (including {W}SP and [WX]ZR) but no SVE registers.

   Note that this function does not issue any diagnostics.  */

static const reg_entry *
aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
{
  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
}
861
/* Parse the qualifier of a vector register or vector element of type
   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
   succeeds; otherwise return FALSE.

   Accept only one occurrence of:
   4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
   b h s d q  */
static bool
parse_vector_type_for_operand (aarch64_reg_type reg_type,
			       struct vector_type_el *parsed_type, char **str)
{
  char *ptr = *str;
  unsigned width;
  unsigned element_size;
  enum vector_el_type type;

  /* skip '.' */
  gas_assert (*ptr == '.');
  ptr++;

  /* SVE vector/predicate registers never take an explicit element
     count; width 0 means "variable width".  */
  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
    {
      width = 0;
      goto elt_size;
    }
  width = strtoul (ptr, &ptr, 10);
  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
    {
      first_error_fmt (_("bad size %d in vector width specifier"), width);
      return false;
    }

 elt_size:
  switch (TOLOWER (*ptr))
    {
    case 'b':
      type = NT_b;
      element_size = 8;
      break;
    case 'h':
      type = NT_h;
      element_size = 16;
      break;
    case 's':
      type = NT_s;
      element_size = 32;
      break;
    case 'd':
      type = NT_d;
      element_size = 64;
      break;
    case 'q':
      /* 'q' elements are only valid for SVE registers or a single
	 128-bit element ("1q").  */
      if (reg_type == REG_TYPE_ZN || width == 1)
	{
	  type = NT_q;
	  element_size = 128;
	  break;
	}
      /* fall through.  */
    default:
      if (*ptr != '\0')
	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
      else
	first_error (_("missing element size"));
      return false;
    }
  /* A fixed-width arrangement must describe a 64- or 128-bit vector,
     with the additional half-width forms 2h and 4b.  */
  if (width != 0 && width * element_size != 64
      && width * element_size != 128
      && !(width == 2 && element_size == 16)
      && !(width == 4 && element_size == 8))
    {
      /* NOTE(review): this message labels WIDTH (the element count) as
	 the "element size" and prints the size letter as the vector
	 size; the wording looks swapped — confirm against upstream.  */
      first_error_fmt (_
		       ("invalid element size %d and vector size combination %c"),
		       width, *ptr);
      return false;
    }
  ptr++;

  parsed_type->type = type;
  parsed_type->width = width;

  *str = ptr;

  return true;
}
947
948 /* *STR contains an SVE zero/merge predication suffix. Parse it into
949 *PARSED_TYPE and point *STR at the end of the suffix. */
950
951 static bool
952 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
953 {
954 char *ptr = *str;
955
956 /* Skip '/'. */
957 gas_assert (*ptr == '/');
958 ptr++;
959 switch (TOLOWER (*ptr))
960 {
961 case 'z':
962 parsed_type->type = NT_zero;
963 break;
964 case 'm':
965 parsed_type->type = NT_merge;
966 break;
967 default:
968 if (*ptr != '\0' && *ptr != ',')
969 first_error_fmt (_("unexpected character `%c' in predication type"),
970 *ptr);
971 else
972 first_error (_("missing predication type"));
973 return false;
974 }
975 parsed_type->width = 0;
976 *str = ptr + 1;
977 return true;
978 }
979
/* Parse a register of the type TYPE.

   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
   name or the parsed register is not of TYPE.

   Otherwise return the register number, and optionally fill in the actual
   type of the register in *RTYPE when multiple alternatives were given, and
   return the register shape and element index information in *TYPEINFO.

   IN_REG_LIST should be set with TRUE if the caller is parsing a register
   list.  */

static int
parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
		 struct vector_type_el *typeinfo, bool in_reg_list)
{
  char *str = *ccp;
  const reg_entry *reg = parse_reg (&str);
  struct vector_type_el atype;
  struct vector_type_el parsetype;
  bool is_typed_vecreg = false;

  /* Start with "no shape information"; width (unsigned)-1 marks "no
     width parsed yet".  */
  atype.defined = 0;
  atype.type = NT_invtype;
  atype.width = -1;
  atype.index = 0;

  if (reg == NULL)
    {
      if (typeinfo)
	*typeinfo = atype;
      set_default_error ();
      return PARSE_FAIL;
    }

  if (! aarch64_check_reg_type (reg, type))
    {
      DEBUG_TRACE ("reg type check failed");
      set_default_error ();
      return PARSE_FAIL;
    }
  /* Narrow TYPE to the register's actual basic type.  */
  type = reg->type;

  /* Vector registers may carry a ".<T>" shape suffix; SVE predicate
     registers may alternatively carry a "/z" or "/m" suffix.  */
  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
    {
      if (*str == '.')
	{
	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
	    return PARSE_FAIL;
	}
      else
	{
	  if (!parse_predication_for_operand (&parsetype, &str))
	    return PARSE_FAIL;
	}

      /* Register is of the form Vn.[bhsdq].  */
      is_typed_vecreg = true;

      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
	{
	  /* The width is always variable; we don't allow an integer width
	     to be specified.  */
	  gas_assert (parsetype.width == 0);
	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
	}
      else if (parsetype.width == 0)
	/* Expect index.  In the new scheme we cannot have
	   Vn.[bhsdq] represent a scalar.  Therefore any
	   Vn.[bhsdq] should have an index following it.
	   Except in reglists of course.  */
	atype.defined |= NTA_HASINDEX;
      else
	atype.defined |= NTA_HASTYPE;

      atype.type = parsetype.type;
      atype.width = parsetype.width;
    }

  /* Optional "[<index>]" element index.  */
  if (skip_past_char (&str, '['))
    {
      expressionS exp;

      /* Reject Sn[index] syntax.  */
      if (!is_typed_vecreg)
	{
	  first_error (_("this type of register can't be indexed"));
	  return PARSE_FAIL;
	}

      if (in_reg_list)
	{
	  first_error (_("index not allowed inside register list"));
	  return PARSE_FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* On failure this records a diagnostic; the O_constant check
	 below rejects the unusable result.  */
      aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
			      NORMAL_RESOLUTION);

      if (exp.X_op != O_constant)
	{
	  first_error (_("constant expression required"));
	  return PARSE_FAIL;
	}

      if (! skip_past_char (&str, ']'))
	return PARSE_FAIL;

      atype.index = exp.X_add_number;
    }
  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
    {
      /* Indexed vector register expected.  */
      first_error (_("indexed vector register expected"));
      return PARSE_FAIL;
    }

  /* A vector reg Vn should be typed or indexed.  */
  if (type == REG_TYPE_VN && atype.defined == 0)
    {
      first_error (_("invalid use of vector register"));
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1116
/* Parse register.

   Return the register number on success; return PARSE_FAIL otherwise.

   If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
   the register (e.g. NEON double or quad reg when either has been requested).

   If this is a NEON vector register with additional type information, fill
   in the struct pointed to by VECTYPE (if non-NULL).

   This parser does not handle register list.  */

static int
aarch64_reg_parse (char **ccp, aarch64_reg_type type,
		   aarch64_reg_type *rtype, struct vector_type_el *vectype)
{
  struct vector_type_el atype;
  char *str = *ccp;
  int reg = parse_typed_reg (&str, type, rtype, &atype,
			     /*in_reg_list= */ false);

  if (reg == PARSE_FAIL)
    return PARSE_FAIL;

  if (vectype)
    *vectype = atype;

  /* Only consume the input on success.  */
  *ccp = str;

  return reg;
}
1148
1149 static inline bool
1150 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1151 {
1152 return
1153 e1.type == e2.type
1154 && e1.defined == e2.defined
1155 && e1.width == e2.width && e1.index == e2.index;
1156 }
1157
/* This function parses a list of vector registers of type TYPE.
   On success, it returns the parsed register list information in the
   following encoded format:

   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg

   The information of the register shape and/or index is returned in
   *VECTYPE.

   It returns PARSE_FAIL if the register list is invalid.

   The list contains one to four registers.
   Each register can be one of:
     <Vt>.<T>[<index>]
     <Vt>.<T>
   All <T> should be identical.
   All <index> should be identical.
   There are restrictions on <Vt> numbers which are checked later
   (by reg_list_valid_p).  */

static int
parse_vector_reg_list (char **ccp, aarch64_reg_type type,
		       struct vector_type_el *vectype)
{
  char *str = *ccp;
  int nb_regs;
  struct vector_type_el typeinfo, typeinfo_first;
  int val, val_range;
  int in_range;
  int ret_val;
  int i;
  bool error = false;
  bool expect_index = false;

  if (*str != '{')
    {
      set_syntax_error (_("expecting {"));
      return PARSE_FAIL;
    }
  str++;

  /* Initialise the accumulated state: no registers seen yet, and a
     deliberately invalid "first" shape that the first element will
     overwrite.  */
  nb_regs = 0;
  typeinfo_first.defined = 0;
  typeinfo_first.type = NT_invtype;
  typeinfo_first.width = -1;
  typeinfo_first.index = 0;
  ret_val = 0;
  val = -1;
  val_range = -1;
  in_range = 0;
  do
    {
      /* IN_RANGE is set at the bottom of the loop when the lookahead
	 saw '-'; VAL then holds the low end of the range.  */
      if (in_range)
	{
	  str++;		/* skip over '-' */
	  val_range = val;
	}
      val = parse_typed_reg (&str, type, NULL, &typeinfo,
			     /*in_reg_list= */ true);
      /* On a parse error, record the diagnostic but keep scanning so
	 that later elements can still be consumed.  */
      if (val == PARSE_FAIL)
	{
	  set_first_syntax_error (_("invalid vector register in list"));
	  error = true;
	  continue;
	}
      /* reject [bhsd]n */
      if (type == REG_TYPE_VN && typeinfo.defined == 0)
	{
	  set_first_syntax_error (_("invalid scalar register in list"));
	  error = true;
	  continue;
	}

      /* If any element carries an index, the whole list must be
	 followed by one (checked after the closing brace).  */
      if (typeinfo.defined & NTA_HASINDEX)
	expect_index = true;

      if (in_range)
	{
	  /* Ranges must be ascending, e.g. {v0.4s-v3.4s}.  */
	  if (val < val_range)
	    {
	      set_first_syntax_error
		(_("invalid range in vector register list"));
	      error = true;
	    }
	  val_range++;
	}
      else
	{
	  val_range = val;
	  if (nb_regs == 0)
	    typeinfo_first = typeinfo;
	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
	    {
	      set_first_syntax_error
		(_("type mismatch in vector register list"));
	      error = true;
	    }
	}
      /* Pack each register number of the (possibly one-element) range
	 into 5-bit fields of RET_VAL, low field first.  */
      if (! error)
	for (i = val_range; i <= val; i++)
	  {
	    ret_val |= i << (5 * nb_regs);
	    nb_regs++;
	  }
      in_range = 0;
    }
  /* Continue after a comma, or (without consuming) when a '-' starts a
     register range.  Note the comma-operator side effect setting
     IN_RANGE for the next iteration.  */
  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));

  skip_whitespace (str);
  if (*str != '}')
    {
      set_first_syntax_error (_("end of vector register list not found"));
      error = true;
    }
  str++;

  skip_whitespace (str);

  /* If elements were indexed (e.g. {v0.s, v1.s}[2] forms), a trailing
     constant index is mandatory.  */
  if (expect_index)
    {
      if (skip_past_char (&str, '['))
	{
	  expressionS exp;

	  aarch64_get_expression (&exp, &str, GE_NO_PREFIX, REJECT_ABSENT,
				  NORMAL_RESOLUTION);
	  if (exp.X_op != O_constant)
	    {
	      set_first_syntax_error (_("constant expression required."));
	      error = true;
	    }
	  if (! skip_past_char (&str, ']'))
	    error = true;
	  else
	    typeinfo_first.index = exp.X_add_number;
	}
      else
	{
	  set_first_syntax_error (_("expected index"));
	  error = true;
	}
    }

  if (nb_regs > 4)
    {
      set_first_syntax_error (_("too many registers in vector register list"));
      error = true;
    }
  else if (nb_regs == 0)
    {
      set_first_syntax_error (_("empty vector register list"));
      error = true;
    }

  *ccp = str;
  if (! error)
    *vectype = typeinfo_first;

  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
}
1319
1320 /* Directives: register aliases. */
1321
1322 static reg_entry *
1323 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1324 {
1325 reg_entry *new;
1326 const char *name;
1327
1328 if ((new = str_hash_find (aarch64_reg_hsh, str)) != 0)
1329 {
1330 if (new->builtin)
1331 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1332 str);
1333
1334 /* Only warn about a redefinition if it's not defined as the
1335 same register. */
1336 else if (new->number != number || new->type != type)
1337 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1338
1339 return NULL;
1340 }
1341
1342 name = xstrdup (str);
1343 new = XNEW (reg_entry);
1344
1345 new->name = name;
1346 new->number = number;
1347 new->type = type;
1348 new->builtin = false;
1349
1350 str_hash_insert (aarch64_reg_hsh, name, new, 0);
1351
1352 return new;
1353 }
1354
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.

   NEWNAME points at the candidate alias name; P points just past it,
   at the text that should read " .req <register>".  */

static bool
create_register_alias (char *newname, char *p)
{
  const reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  /* Skip over " .req " to the existing register name.  */
  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = str_hash_find (aarch64_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only insert the upper-case variant if it differs from the name
	 as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      /* Likewise for the lower-case variant.  */
      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
1434
/* Handler registered for '.req' in md_pseudo_table.  Should never be
   called, as .req goes between the alias and the register name, not at
   the beginning of the line (valid uses are picked up earlier by
   create_register_alias); reaching here is a syntax error.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
1442
1443 /* The .unreq directive deletes an alias which was previously defined
1444 by .req. For example:
1445
1446 my_alias .req r11
1447 .unreq my_alias */
1448
1449 static void
1450 s_unreq (int a ATTRIBUTE_UNUSED)
1451 {
1452 char *name;
1453 char saved_char;
1454
1455 name = input_line_pointer;
1456
1457 while (*input_line_pointer != 0
1458 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1459 ++input_line_pointer;
1460
1461 saved_char = *input_line_pointer;
1462 *input_line_pointer = 0;
1463
1464 if (!*name)
1465 as_bad (_("invalid syntax for .unreq directive"));
1466 else
1467 {
1468 reg_entry *reg = str_hash_find (aarch64_reg_hsh, name);
1469
1470 if (!reg)
1471 as_bad (_("unknown register alias '%s'"), name);
1472 else if (reg->builtin)
1473 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1474 name);
1475 else
1476 {
1477 char *p;
1478 char *nbuf;
1479
1480 str_hash_delete (aarch64_reg_hsh, name);
1481 free ((char *) reg->name);
1482 free (reg);
1483
1484 /* Also locate the all upper case and all lower case versions.
1485 Do not complain if we cannot find one or the other as it
1486 was probably deleted above. */
1487
1488 nbuf = strdup (name);
1489 for (p = nbuf; *p; p++)
1490 *p = TOUPPER (*p);
1491 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1492 if (reg)
1493 {
1494 str_hash_delete (aarch64_reg_hsh, nbuf);
1495 free ((char *) reg->name);
1496 free (reg);
1497 }
1498
1499 for (p = nbuf; *p; p++)
1500 *p = TOLOWER (*p);
1501 reg = str_hash_find (aarch64_reg_hsh, nbuf);
1502 if (reg)
1503 {
1504 str_hash_delete (aarch64_reg_hsh, nbuf);
1505 free ((char *) reg->name);
1506 free (reg);
1507 }
1508
1509 free (nbuf);
1510 }
1511 }
1512
1513 *input_line_pointer = saved_char;
1514 demand_empty_rest_of_line ();
1515 }
1516
1517 /* Directives: Instruction set selection. */
1518
1519 #ifdef OBJ_ELF
1520 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1521 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1522 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1523 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1524
/* Create a new mapping symbol for the transition to STATE, at offset
   VALUE within FRAG.  STATE selects the symbol name: "$d" for data,
   "$x" for A64 code (per the AAELF64 mapping-symbol convention).  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
{
  symbolS *symbolP;
  const char *symname;
  int type;

  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_INSN:
      symname = "$x";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  /* Mapping symbols are local and untyped.  */
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
			 &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
	    S_GET_VALUE (symbolP));
      /* Replace, rather than duplicate, a symbol at the same offset.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
		       &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
1580
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.

   Mark BYTES bytes of FRAG starting at offset VALUE as data ("$d"),
   then restore STATE at offset VALUE + BYTES.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS * frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
      frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Bracket the padding: data from VALUE, back to STATE after it.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
1608
static void mapping_state_2 (enum mstate state, int max_chars);

/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (state == MAP_INSN)
    /* AArch64 instructions require 4-byte alignment.  When emitting
       instructions into any section, record the appropriate section
       alignment.  */
    record_alignment (now_seg, 2);

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  /* Special-case the transitions out of the initial MAP_UNDEFINED
     state.  */
#define TRANSITION(from, to) (mapstate == (from) && state == (to))
  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
       evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first)
	|| (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }
#undef TRANSITION

  mapping_state_2 (state, 0);
}
1652
1653 /* Same as mapping_state, but MAX_CHARS bytes have already been
1654 allocated. Put the mapping symbol that far back. */
1655
1656 static void
1657 mapping_state_2 (enum mstate state, int max_chars)
1658 {
1659 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1660
1661 if (!SEG_NORMAL (now_seg))
1662 return;
1663
1664 if (mapstate == state)
1665 /* The mapping symbol has already been emitted.
1666 There is nothing else to do. */
1667 return;
1668
1669 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1670 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1671 }
1672 #else
1673 #define mapping_state(x) /* nothing */
1674 #define mapping_state_2(x, y) /* nothing */
1675 #endif
1676
1677 /* Directives: sectioning and alignment. */
1678
/* Handle the .bss directive: switch output to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);
}
1688
/* Handle the .even directive: align the current location to a 2-byte
   boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
1700
1701 /* Directives: Literal pools. */
1702
1703 static literal_pool *
1704 find_literal_pool (int size)
1705 {
1706 literal_pool *pool;
1707
1708 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1709 {
1710 if (pool->section == now_seg
1711 && pool->sub_section == now_subseg && pool->size == size)
1712 break;
1713 }
1714
1715 return pool;
1716 }
1717
1718 static literal_pool *
1719 find_or_make_literal_pool (int size)
1720 {
1721 /* Next literal pool ID number. */
1722 static unsigned int latest_pool_num = 1;
1723 literal_pool *pool;
1724
1725 pool = find_literal_pool (size);
1726
1727 if (pool == NULL)
1728 {
1729 /* Create a new pool. */
1730 pool = XNEW (literal_pool);
1731 if (!pool)
1732 return NULL;
1733
1734 /* Currently we always put the literal pool in the current text
1735 section. If we were generating "small" model code where we
1736 knew that all code and initialised data was within 1MB then
1737 we could output literals to mergeable, read-only data
1738 sections. */
1739
1740 pool->next_free_entry = 0;
1741 pool->section = now_seg;
1742 pool->sub_section = now_subseg;
1743 pool->size = size;
1744 pool->next = list_of_pools;
1745 pool->symbol = NULL;
1746
1747 /* Add it to the list. */
1748 list_of_pools = pool;
1749 }
1750
1751 /* New pools, and emptied pools, will have a NULL symbol. */
1752 if (pool->symbol == NULL)
1753 {
1754 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1755 &zero_address_frag, 0);
1756 pool->id = latest_pool_num++;
1757 }
1758
1759 /* Done. */
1760 return pool;
1761 }
1762
/* Add the literal of size SIZE in *EXP to the relevant literal pool.
   On success, rewrite *EXP in place to refer to the pool's marker
   symbol plus the entry's byte offset.
   Return TRUE on success, otherwise return FALSE.  */
static bool
add_to_lit_pool (expressionS *exp, int size)
{
  literal_pool *pool;
  unsigned int entry;

  pool = find_or_make_literal_pool (size);

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
    {
      expressionS * litexp = & pool->literals[entry].exp;

      /* Constants match on value and signedness.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_constant)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_unsigned == exp->X_unsigned))
	break;

      /* Symbolic expressions match on symbol(s) and addend.  */
      if ((litexp->X_op == exp->X_op)
	  && (exp->X_op == O_symbol)
	  && (litexp->X_add_number == exp->X_add_number)
	  && (litexp->X_add_symbol == exp->X_add_symbol)
	  && (litexp->X_op_symbol == exp->X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  set_syntax_error (_("literal pool overflow"));
	  return false;
	}

      pool->literals[entry].exp = *exp;
      pool->next_free_entry += 1;
      if (exp->X_op == O_big)
	{
	  /* PR 16688: Bignums are held in a single global array.  We must
	     copy and preserve that value now, before it is overwritten.  */
	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
						  exp->X_add_number);
	  memcpy (pool->literals[entry].bignum, generic_bignum,
		  CHARS_PER_LITTLENUM * exp->X_add_number);
	}
      else
	pool->literals[entry].bignum = NULL;
    }

  /* Replace the original expression with a reference into the pool.  */
  exp->X_op = O_symbol;
  exp->X_add_number = ((int) entry) * size;
  exp->X_add_symbol = pool->symbol;

  return true;
}
1822
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.

   Give the previously created SYMBOLP its NAME, SEGMENT, value VALU and
   owning FRAG, then link it onto the end of the symbol chain.  */

static void
symbol_locate (symbolS * symbolP,
	       const char *name,/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char *preserved_copy_of_name;

  /* Keep a private copy of NAME on the notes obstack.  */
  name_length = strlen (name) + 1;	/* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
1873
1874
/* Handle the .ltorg (and .pool) directive: dump every non-empty
   literal pool for the current section/subsection at the current
   location, then mark the pools empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool *pool;
  char sym_name[20];
  int align;

  /* Pools exist for 4-byte and 8-byte (and potentially 16-byte)
     literals: alignments 2..4.  */
  for (align = 2; align <= 4; align++)
    {
      int size = 1 << align;

      pool = find_literal_pool (size);
      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
	continue;

      /* Align pool as you have word accesses.
	 Only make a frag if we have to.  */
      if (!need_pass_2)
	frag_align (align, 0, 0);

      mapping_state (MAP_DATA);

      record_alignment (now_seg, align);

      /* The \002 in the name makes it unmatchable by user symbols.  */
      sprintf (sym_name, "$$lit_\002%x", pool->id);

      /* Pin the pool's marker symbol to the current location.  */
      symbol_locate (pool->symbol, sym_name, now_seg,
		     (valueT) frag_now_fix (), frag_now);
      symbol_table_insert (pool->symbol);

      for (entry = 0; entry < pool->next_free_entry; entry++)
	{
	  expressionS * exp = & pool->literals[entry].exp;

	  if (exp->X_op == O_big)
	    {
	      /* PR 16688: Restore the global bignum value.  */
	      gas_assert (pool->literals[entry].bignum != NULL);
	      memcpy (generic_bignum, pool->literals[entry].bignum,
		      CHARS_PER_LITTLENUM * exp->X_add_number);
	    }

	  /* First output the expression in the instruction to the pool.  */
	  emit_expr (exp, size);	/* .word|.xword  */

	  if (exp->X_op == O_big)
	    {
	      free (pool->literals[entry].bignum);
	      pool->literals[entry].bignum = NULL;
	    }
	}

      /* Mark the pool as empty.  */
      pool->next_free_entry = 0;
      pool->symbol = NULL;
    }
}
1933
1934 #ifdef OBJ_ELF
1935 /* Forward declarations for functions below, in the MD interface
1936 section. */
1937 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1938 static struct reloc_table_entry * find_reloc_table_entry (char **);
1939
1940 /* Directives: Data. */
/* N.B. the support for relocation suffix in this directive needs to be
   implemented properly.  */

/* Handle .word/.long (NBYTES == 4) and .xword/.dword (NBYTES == 8):
   emit a comma-separated list of NBYTES-sized data values, diagnosing
   (but not yet implementing) ":reloc:" suffixes on symbolic values.  */

static void
s_aarch64_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      struct reloc_table_entry *reloc;

      expression (&exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbolic value may carry a "#:reloc:" suffix; recognise
	     it so we can diagnose it, even though it is unsupported.  */
	  skip_past_char (&input_line_pointer, '#');
	  if (skip_past_char (&input_line_pointer, ':'))
	    {
	      reloc = find_reloc_table_entry (&input_line_pointer);
	      if (reloc == NULL)
		as_bad (_("unrecognized relocation suffix"));
	      else
		as_bad (_("unimplemented relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else
	    emit_expr (&exp, (unsigned int) nbytes);
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
1995
/* Mark symbol that it follows a variant PCS convention.  Handles the
   ".variant_pcs <symbol>" directive by setting STO_AARCH64_VARIANT_PCS
   in the symbol's ELF st_other field.  */

static void
s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
{
  char *name;
  char c;
  symbolS *sym;
  asymbol *bfdsym;
  elf_symbol_type *elfsym;

  c = get_symbol_name (&name);
  if (!*name)
    /* NOTE(review): after this diagnostic, processing continues with
       the empty name; confirm symbol_find_or_make copes with "".  */
    as_bad (_("Missing symbol name in directive"));
  sym = symbol_find_or_make (name);
  restore_line_pointer (c);
  demand_empty_rest_of_line ();
  bfdsym = symbol_get_bfdsym (sym);
  elfsym = elf_symbol_from (bfdsym);
  gas_assert (elfsym);
  elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
}
2018 #endif /* OBJ_ELF */
2019
/* Output a 32-bit word, but mark as an instruction.  Handles the
   ".inst" directive: a comma-separated list of constant expressions,
   each emitted as a 4-byte instruction word (byte-swapped on
   big-endian targets, since instructions are always little-endian).  */

static void
s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

#ifdef OBJ_ELF
  mapping_state (MAP_INSN);
#endif

  do
    {
      expression (&exp);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("constant expression required"));
	  ignore_rest_of_line ();
	  return;
	}

      /* Instruction words are little-endian even on big-endian
	 targets, so swap before emitting.  */
      if (target_big_endian)
	{
	  unsigned int val = exp.X_add_number;
	  exp.X_add_number = SWAP_32 (val);
	}
      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
2072
/* Handle the .cfi_b_key_frame directive: record in the current CFI FDE
   that return addresses in this frame are signed with the B
   pointer-authentication key.  */

static void
s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
  fde->pauth_key = AARCH64_PAUTH_KEY_B;
}
2080
2081 #ifdef OBJ_ELF
/* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.
   Handles ".tlsdescadd <expr>": attaches the relocation at the current
   location, where the following instruction will be assembled.  */

static void
s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands in the same frag as the following
     instruction.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_ADD);

  demand_empty_rest_of_line ();
}
2096
/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.
   Handles ".tlsdesccall <expr>".  */

static void
s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Make sure there is enough room in this frag for the following
     blr.  This trick only works if the blr follows immediately after
     the .tlsdesc directive.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_CALL);

  demand_empty_rest_of_line ();
}
2116
/* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.
   Handles ".tlsdescldr <expr>": attaches the relocation at the current
   location, where the following instruction will be assembled.  */

static void
s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  expression (&exp);
  /* Reserve room so the fix lands in the same frag as the following
     instruction.  */
  frag_grow (4);
  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
		   BFD_RELOC_AARCH64_TLSDESC_LDR);

  demand_empty_rest_of_line ();
}
2131 #endif /* OBJ_ELF */
2132
2133 static void s_aarch64_arch (int);
2134 static void s_aarch64_cpu (int);
2135 static void s_aarch64_arch_extension (int);
2136
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] = {
  /* Never called because '.req' does not start a line.  */
  {"req", s_req, 0},
  {"unreq", s_unreq, 0},
  {"bss", s_bss, 0},
  {"even", s_even, 0},
  /* '.pool' is an alias for '.ltorg'; both flush the literal pools.  */
  {"ltorg", s_ltorg, 0},
  {"pool", s_ltorg, 0},
  {"cpu", s_aarch64_cpu, 0},
  {"arch", s_aarch64_arch, 0},
  {"arch_extension", s_aarch64_arch_extension, 0},
  {"inst", s_aarch64_inst, 0},
  {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
#ifdef OBJ_ELF
  {"tlsdescadd", s_tlsdescadd, 0},
  {"tlsdesccall", s_tlsdesccall, 0},
  {"tlsdescldr", s_tlsdescldr, 0},
  /* For the data directives the argument is the size in bytes.  */
  {"word", s_aarch64_elf_cons, 4},
  {"long", s_aarch64_elf_cons, 4},
  {"xword", s_aarch64_elf_cons, 8},
  {"dword", s_aarch64_elf_cons, 8},
  {"variant_pcs", s_variant_pcs, 0},
#endif
  /* 16-bit floating-point data; the argument selects the format
     understood by float_cons.  */
  {"float16", float_cons, 'h'},
  {"bfloat16", float_cons, 'b'},
  {0, 0, 0}
};
2170 \f
2171
2172 /* Check whether STR points to a register name followed by a comma or the
2173 end of line; REG_TYPE indicates which register types are checked
2174 against. Return TRUE if STR is such a register name; otherwise return
2175 FALSE. The function does not intend to produce any diagnostics, but since
2176 the register parser aarch64_reg_parse, which is called by this function,
2177 does produce diagnostics, we call clear_error to clear any diagnostics
2178 that may be generated by aarch64_reg_parse.
2179 Also, the function returns FALSE directly if there is any user error
2180 present at the function entry. This prevents the existing diagnostics
2181 state from being spoiled.
2182 The function currently serves parse_constant_immediate and
2183 parse_big_immediate only. */
2184 static bool
2185 reg_name_p (char *str, aarch64_reg_type reg_type)
2186 {
2187 int reg;
2188
2189 /* Prevent the diagnostics state from being spoiled. */
2190 if (error_p ())
2191 return false;
2192
2193 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2194
2195 /* Clear the parsing error that may be set by the reg parser. */
2196 clear_error ();
2197
2198 if (reg == PARSE_FAIL)
2199 return false;
2200
2201 skip_whitespace (str);
2202 if (*str == ',' || is_end_of_line[(unsigned char) *str])
2203 return true;
2204
2205 return false;
2206 }
2207
2208 /* Parser functions used exclusively in instruction operands. */
2209
2210 /* Parse an immediate expression which may not be constant.
2211
2212 To prevent the expression parser from pushing a register name
2213 into the symbol table as an undefined symbol, firstly a check is
2214 done to find out whether STR is a register of type REG_TYPE followed
2215 by a comma or the end of line. Return FALSE if STR is such a string. */
2216
2217 static bool
2218 parse_immediate_expression (char **str, expressionS *exp,
2219 aarch64_reg_type reg_type)
2220 {
2221 if (reg_name_p (*str, reg_type))
2222 {
2223 set_recoverable_error (_("immediate operand required"));
2224 return false;
2225 }
2226
2227 aarch64_get_expression (exp, str, GE_OPT_PREFIX, REJECT_ABSENT,
2228 NORMAL_RESOLUTION);
2229
2230 if (exp->X_op == O_absent)
2231 {
2232 set_fatal_syntax_error (_("missing immediate expression"));
2233 return false;
2234 }
2235
2236 return true;
2237 }
2238
2239 /* Constant immediate-value read function for use in insn parsing.
2240 STR points to the beginning of the immediate (with the optional
2241 leading #); *VAL receives the value. REG_TYPE says which register
2242 names should be treated as registers rather than as symbolic immediates.
2243
2244 Return TRUE on success; otherwise return FALSE. */
2245
2246 static bool
2247 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2248 {
2249 expressionS exp;
2250
2251 if (! parse_immediate_expression (str, &exp, reg_type))
2252 return false;
2253
2254 if (exp.X_op != O_constant)
2255 {
2256 set_syntax_error (_("constant expression required"));
2257 return false;
2258 }
2259
2260 *val = exp.X_add_number;
2261 return true;
2262 }
2263
/* Squash an IEEE754 single-precision bit pattern IMM down to the
   AArch64 8-bit floating-point immediate encoding: sign, 3 exponent
   bits and 4 fraction bits.  */

static uint32_t
encode_imm_float_bits (uint32_t imm)
{
  uint32_t exp_and_frac = (imm >> 19) & 0x7f;	/* b[25:19] -> b[6:0].  */
  uint32_t sign = (imm >> 24) & 0x80;		/* b[31] -> b[7].  */

  return sign | exp_and_frac;
}
2270
/* Return TRUE if the single-precision floating-point value encoded in IMM
   can be expressed in the AArch64 8-bit signed floating-point format with
   3-bit exponent and normalized 4 bits of precision; in other words, the
   floating-point value must be expressable as
     (+/-) n / 16 * power (2, r)
   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.

   Such a value has the single-precision bit pattern

     3 32222222 2221111111111
     1 09876543 21098765432109876543210
     n Eeeeeexx xxxx0000000000000000000

   where n, e and each x are either 0 or 1 independently, with E == ~e.  */

static bool
aarch64_imm_float_p (uint32_t imm)
{
  /* Bits 25-29 must all equal the inverse of bit 30.  */
  uint32_t want_exp = (((imm >> 30) & 0x1) == 0
		       ? 0x3e000000 : 0x40000000);

  /* The lower 19 bits must be zero.  */
  if ((imm & 0x7ffff) != 0)
    return false;

  return (imm & 0x7e000000) == want_exp;
}
2303
/* Return TRUE if the IEEE double value encoded in IMM can be expressed
   as an IEEE float without any loss of precision.  Store the value in
   *FPWORD if so.

   A convertible double has the bit pattern

     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000

     -----------------------------> nEeeeeee esssssss ssssssss sssssSSS
	  if Eeee_eeee != 1111_1111

   where n, e, s and S are either 0 or 1 independently and where ~ is the
   inverse of E.  */

static bool
can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
{
  uint32_t hi = imm >> 32;
  uint32_t lo = (uint32_t) imm;
  uint32_t want_exp_top;

  /* The lowest 29 mantissa bits would be discarded; they must be 0.  */
  if ((imm & 0x1fffffff) != 0)
    return false;

  /* The three '~' bits must each invert E (bit 30 of the high word).  */
  want_exp_top = ((hi >> 30) & 0x1) == 0 ? 0x38000000 : 0x40000000;
  if ((hi & 0x78000000) != want_exp_top)
    return false;

  /* Eeee_eeee == 1111_1111 (infinity/NaN exponent) does not convert.  */
  if ((hi & 0x7ff00000) == 0x47f00000)
    return false;

  *fpword = (hi & 0xc0000000)		/* 1 n bit and 1 E bit.  */
	    | ((hi << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
	    | (lo >> 29);		/* 3 S bits.  */
  return true;
}
2351
2352 /* Return true if we should treat OPERAND as a double-precision
2353 floating-point operand rather than a single-precision one. */
2354 static bool
2355 double_precision_operand_p (const aarch64_opnd_info *operand)
2356 {
2357 /* Check for unsuffixed SVE registers, which are allowed
2358 for LDR and STR but not in instructions that require an
2359 immediate. We get better error messages if we arbitrarily
2360 pick one size, parse the immediate normally, and then
2361 report the match failure in the normal way. */
2362 return (operand->qualifier == AARCH64_OPND_QLF_NIL
2363 || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2364 }
2365
/* Parse a floating-point immediate.  Return TRUE on success and return the
   value in *IMMED in the format of IEEE754 single-precision encoding.
   *CCP points to the start of the string; DP_P is TRUE when the immediate
   is expected to be in double-precision (N.B. this only matters when
   hexadecimal representation is involved).  REG_TYPE says which register
   names should be treated as registers rather than as symbolic immediates.

   This routine accepts any IEEE float; it is up to the callers to reject
   invalid ones.  */

static bool
parse_aarch64_imm_float (char **ccp, int *immed, bool dp_p,
			 aarch64_reg_type reg_type)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int64_t val = 0;
  unsigned fpword = 0;
  bool hex_p = false;

  /* The '#' prefix before the immediate is optional.  */
  skip_past_char (&str, '#');

  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    {
      /* Support the hexadecimal representation of the IEEE754 encoding.
	 Double-precision is expected when DP_P is TRUE, otherwise the
	 representation should be in single-precision.  */
      if (! parse_constant_immediate (&str, &val, reg_type))
	goto invalid_fp;

      if (dp_p)
	{
	  /* A 64-bit pattern; it must round-trip to single precision.  */
	  if (!can_convert_double_to_float (val, &fpword))
	    goto invalid_fp;
	}
      else if ((uint64_t) val > 0xffffffff)
	/* Single-precision pattern must fit in 32 bits.  */
	goto invalid_fp;
      else
	fpword = val;

      hex_p = true;
    }
  else if (reg_name_p (str, reg_type))
    {
      /* Recoverable: the caller may retry the operand as a register.  */
      set_recoverable_error (_("immediate operand required"));
      return false;
    }

  if (! hex_p)
    {
      int i;

      /* Decimal notation: let the generic IEEE parser produce the
	 single-precision littlenums.  */
      if ((str = atof_ieee (str, 's', words)) == NULL)
	goto invalid_fp;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}
    }

  *immed = fpword;
  *ccp = str;
  return true;

 invalid_fp:
  set_fatal_syntax_error (_("invalid floating-point constant"));
  return false;
}
2441
2442 /* Less-generic immediate-value read function with the possibility of loading
2443 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2444 instructions.
2445
2446 To prevent the expression parser from pushing a register name into the
2447 symbol table as an undefined symbol, a check is firstly done to find
2448 out whether STR is a register of type REG_TYPE followed by a comma or
2449 the end of line. Return FALSE if STR is such a register. */
2450
2451 static bool
2452 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2453 {
2454 char *ptr = *str;
2455
2456 if (reg_name_p (ptr, reg_type))
2457 {
2458 set_syntax_error (_("immediate operand required"));
2459 return false;
2460 }
2461
2462 aarch64_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, REJECT_ABSENT,
2463 NORMAL_RESOLUTION);
2464
2465 if (inst.reloc.exp.X_op == O_constant)
2466 *imm = inst.reloc.exp.X_add_number;
2467
2468 *str = ptr;
2469
2470 return true;
2471 }
2472
2473 /* Set operand IDX of the *INSTR that needs a GAS internal fixup.
2474 if NEED_LIBOPCODES is non-zero, the fixup will need
2475 assistance from the libopcodes. */
2476
2477 static inline void
2478 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2479 const aarch64_opnd_info *operand,
2480 int need_libopcodes_p)
2481 {
2482 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2483 reloc->opnd = operand->type;
2484 if (need_libopcodes_p)
2485 reloc->need_libopcodes_p = 1;
2486 };
2487
2488 /* Return TRUE if the instruction needs to be fixed up later internally by
2489 the GAS; otherwise return FALSE. */
2490
2491 static inline bool
2492 aarch64_gas_internal_fixup_p (void)
2493 {
2494 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2495 }
2496
2497 /* Assign the immediate value to the relevant field in *OPERAND if
2498 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2499 needs an internal fixup in a later stage.
2500 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2501 IMM.VALUE that may get assigned with the constant. */
2502 static inline void
2503 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2504 aarch64_opnd_info *operand,
2505 int addr_off_p,
2506 int need_libopcodes_p,
2507 int skip_p)
2508 {
2509 if (reloc->exp.X_op == O_constant)
2510 {
2511 if (addr_off_p)
2512 operand->addr.offset.imm = reloc->exp.X_add_number;
2513 else
2514 operand->imm.value = reloc->exp.X_add_number;
2515 reloc->type = BFD_RELOC_UNUSED;
2516 }
2517 else
2518 {
2519 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2520 /* Tell libopcodes to ignore this operand or not. This is helpful
2521 when one of the operands needs to be fixed up later but we need
2522 libopcodes to check the other operands. */
2523 operand->skip = skip_p;
2524 }
2525 }
2526
/* Relocation modifiers.  Each entry in the table contains the textual
   name for the relocation which may be placed before a symbol used as
   a load/store offset, or add immediate.  It must be surrounded by a
   leading and trailing colon, for example:

	ldr	x0, [x1, #:rello:varsym]
	add	x0, x1, #:rello:varsym  */

struct reloc_table_entry
{
  const char *name;			/* Modifier text, without colons.  */
  int pc_rel;				/* Non-zero if PC-relative.  */
  bfd_reloc_code_real_type adr_type;	/* Reloc when used with ADR.  */
  bfd_reloc_code_real_type adrp_type;	/* Reloc when used with ADRP.  */
  bfd_reloc_code_real_type movw_type;	/* Reloc when used with MOVZ/N/K.  */
  bfd_reloc_code_real_type add_type;	/* Reloc when used with ADD (imm).  */
  bfd_reloc_code_real_type ldst_type;	/* Reloc when used with LDR/STR.  */
  bfd_reloc_code_real_type ld_literal_type; /* Reloc for LDR (literal).  */
};
2546
/* Entries are keyed by name; each gives the pc_rel flag followed by the
   adr, adrp, movw, add, ldst and ld_literal relocation codes, with 0
   meaning the modifier is invalid in that instruction context.  */
static struct reloc_table_entry reloc_table[] =
{
  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
  {"lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_ADD_LO12,
   BFD_RELOC_AARCH64_LDST_LO12,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP */
  {"pg_hi21", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
   0,
   0,
   0,
   0},

  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
  {"pg_hi21_nc", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
   0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
  {"abs_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
  {"abs_g0_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_S,
   0,
   0,
   0},

  /* Less significant bits 0-15 of address/value: MOVK, no check */
  {"abs_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
  {"abs_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
  {"abs_g1_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_S,
   0,
   0,
   0},

  /* Less significant bits 16-31 of address/value: MOVK, no check */
  {"abs_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
  {"abs_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
  {"abs_g2_s", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_S,
   0,
   0,
   0},

  /* Less significant bits 32-47 of address/value: MOVK, no check */
  {"abs_g2_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"abs_g3", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_G3,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
  {"prel_g0", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
  {"prel_g0_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
  {"prel_g1", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
  {"prel_g1_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
  {"prel_g2", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2,
   0,
   0,
   0},

  /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
  {"prel_g2_nc", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
   0,
   0,
   0},

  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
  {"prel_g3", 1,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_PREL_G3,
   0,
   0,
   0},

  /* Get to the page containing GOT entry for a symbol.  */
  {"got", 1,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_GOT_LD_PREL19},

  /* 12 bit offset into the page containing GOT entry for that symbol.  */
  {"got_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
   0},

  /* 0-15 bits of address/value: MOVk, no check.  */
  {"gotoff_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"gotoff_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
   0,
   0,
   0},

  /* 15 bit offset into the page containing GOT entry for that symbol.  */
  {"gotoff_lo15", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsgd", 0,
   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsgd_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
   0,
   0},

  /* Lower 16 bits address/value: MOVk.  */
  {"tlsgd_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tlsgd_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"tlsdesc", 0,
   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsdesc_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
   0},

  /* Get to the page containing GOT TLS entry for a symbol.
     The same as GD, we allocate two consecutive GOT slots
     for module index and module offset, the only difference
     with GD is the module offset should be initialized to
     zero without any outstanding runtime relocation.  */
  {"tlsldm", 0,
   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
   0,
   0,
   0,
   0},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"tlsldm_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
   0,
   0},

  /* 12 bit offset into the module TLS base address.  */
  {"dtprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
   0},

  /* Same as dtprel_lo12, no overflow check.  */
  {"dtprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
   0},

  /* bits[23:12] of offset to the module TLS base address.  */
  {"dtprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
   0,
   0},

  /* bits[15:0] of offset to the module TLS base address.  */
  {"dtprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
  {"dtprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
   0,
   0,
   0},

  /* bits[31:16] of offset to the module TLS base address.  */
  {"dtprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
   0,
   0,
   0},

  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
  {"dtprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
   0,
   0,
   0},

  /* bits[47:32] of offset to the module TLS base address.  */
  {"dtprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
   0,
   0,
   0},

  /* Lower 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
   0,
   0,
   0},

  /* Higher 16 bit offset into GOT entry for a symbol */
  {"tlsdesc_off_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
   0,
   0,
   0},

  /* Get to the page containing GOT TLS entry for a symbol */
  {"gottprel", 0,
   0,				/* adr_type */
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},

  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
  {"gottprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   0,
   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_hi12", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
   0,
   0},

  /* Get tp offset for a symbol.  */
  {"tprel_lo12_nc", 0,
   0,				/* adr_type */
   0,
   0,
   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
   0},

  /* Most significant bits 32-47 of address/value: MOVZ.  */
  {"tprel_g2", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ.  */
  {"tprel_g1", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
   0,
   0,
   0},

  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
  {"tprel_g1_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ.  */
  {"tprel_g0", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
   0,
   0,
   0},

  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
  {"tprel_g0_nc", 0,
   0,				/* adr_type */
   0,
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   0,
   0,
   0},

  /* 15bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo15", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
   0},

  /* 14bit offset from got entry to base address of GOT table.  */
  {"gotpage_lo14", 0,
   0,
   0,
   0,
   0,
   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
   0},
};
3075
3076 /* Given the address of a pointer pointing to the textual name of a
3077 relocation as may appear in assembler source, attempt to find its
3078 details in reloc_table. The pointer will be updated to the character
3079 after the trailing colon. On failure, NULL will be returned;
3080 otherwise return the reloc_table_entry. */
3081
3082 static struct reloc_table_entry *
3083 find_reloc_table_entry (char **str)
3084 {
3085 unsigned int i;
3086 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3087 {
3088 int length = strlen (reloc_table[i].name);
3089
3090 if (strncasecmp (reloc_table[i].name, *str, length) == 0
3091 && (*str)[length] == ':')
3092 {
3093 *str += (length + 1);
3094 return &reloc_table[i];
3095 }
3096 }
3097
3098 return NULL;
3099 }
3100
/* Returns 0 if the relocation should never be forced,
   1 if the relocation must be forced, and -1 if either
   result is OK.  */

static signed int
aarch64_force_reloc (unsigned int type)
{
  switch (type)
    {
    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* Perform these "immediate" internal relocations
         even if the symbol is extern or weak.  */
      return 0;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Pseudo relocs that need to be fixed up according to
	 ilp32_p.  */
      return 0;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* Always leave these relocations for the linker.  */
      return 1;

    default:
      /* Neither forced nor forbidden; let the caller decide
	 (see aarch64_force_relocation).  */
      return -1;
    }
}
3198
3199 int
3200 aarch64_force_relocation (struct fix *fixp)
3201 {
3202 int res = aarch64_force_reloc (fixp->fx_r_type);
3203
3204 if (res == -1)
3205 return generic_force_reloc (fixp);
3206 return res;
3207 }
3208
/* Mode argument to parse_shift and parser_shifter_operand.
   Selects which shift/extend operators are acceptable for the
   operand currently being parsed.  */
enum parse_shift_mode
{
  SHIFTED_NONE,			/* no shifter allowed  */
  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
				   "#imm{,lsl #n}"  */
  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
				   "#imm"  */
  SHIFTED_LSL,			/* bare "lsl #n"  */
  SHIFTED_MUL,			/* bare "mul #n"  */
  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
  SHIFTED_MUL_VL,		/* "mul vl"  */
  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
};
3223
3224 /* Parse a <shift> operator on an AArch64 data processing instruction.
3225 Return TRUE on success; otherwise return FALSE. */
3226 static bool
3227 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3228 {
3229 const struct aarch64_name_value_pair *shift_op;
3230 enum aarch64_modifier_kind kind;
3231 expressionS exp;
3232 int exp_has_prefix;
3233 char *s = *str;
3234 char *p = s;
3235
3236 for (p = *str; ISALPHA (*p); p++)
3237 ;
3238
3239 if (p == *str)
3240 {
3241 set_syntax_error (_("shift expression expected"));
3242 return false;
3243 }
3244
3245 shift_op = str_hash_find_n (aarch64_shift_hsh, *str, p - *str);
3246
3247 if (shift_op == NULL)
3248 {
3249 set_syntax_error (_("shift operator expected"));
3250 return false;
3251 }
3252
3253 kind = aarch64_get_operand_modifier (shift_op);
3254
3255 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3256 {
3257 set_syntax_error (_("invalid use of 'MSL'"));
3258 return false;
3259 }
3260
3261 if (kind == AARCH64_MOD_MUL
3262 && mode != SHIFTED_MUL
3263 && mode != SHIFTED_MUL_VL)
3264 {
3265 set_syntax_error (_("invalid use of 'MUL'"));
3266 return false;
3267 }
3268
3269 switch (mode)
3270 {
3271 case SHIFTED_LOGIC_IMM:
3272 if (aarch64_extend_operator_p (kind))
3273 {
3274 set_syntax_error (_("extending shift is not permitted"));
3275 return false;
3276 }
3277 break;
3278
3279 case SHIFTED_ARITH_IMM:
3280 if (kind == AARCH64_MOD_ROR)
3281 {
3282 set_syntax_error (_("'ROR' shift is not permitted"));
3283 return false;
3284 }
3285 break;
3286
3287 case SHIFTED_LSL:
3288 if (kind != AARCH64_MOD_LSL)
3289 {
3290 set_syntax_error (_("only 'LSL' shift is permitted"));
3291 return false;
3292 }
3293 break;
3294
3295 case SHIFTED_MUL:
3296 if (kind != AARCH64_MOD_MUL)
3297 {
3298 set_syntax_error (_("only 'MUL' is permitted"));
3299 return false;
3300 }
3301 break;
3302
3303 case SHIFTED_MUL_VL:
3304 /* "MUL VL" consists of two separate tokens. Require the first
3305 token to be "MUL" and look for a following "VL". */
3306 if (kind == AARCH64_MOD_MUL)
3307 {
3308 skip_whitespace (p);
3309 if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3310 {
3311 p += 2;
3312 kind = AARCH64_MOD_MUL_VL;
3313 break;
3314 }
3315 }
3316 set_syntax_error (_("only 'MUL VL' is permitted"));
3317 return false;
3318
3319 case SHIFTED_REG_OFFSET:
3320 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3321 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3322 {
3323 set_fatal_syntax_error
3324 (_("invalid shift for the register offset addressing mode"));
3325 return false;
3326 }
3327 break;
3328
3329 case SHIFTED_LSL_MSL:
3330 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3331 {
3332 set_syntax_error (_("invalid shift operator"));
3333 return false;
3334 }
3335 break;
3336
3337 default:
3338 abort ();
3339 }
3340
3341 /* Whitespace can appear here if the next thing is a bare digit. */
3342 skip_whitespace (p);
3343
3344 /* Parse shift amount. */
3345 exp_has_prefix = 0;
3346 if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3347 exp.X_op = O_absent;
3348 else
3349 {
3350 if (is_immediate_prefix (*p))
3351 {
3352 p++;
3353 exp_has_prefix = 1;
3354 }
3355 (void) aarch64_get_expression (&exp, &p, GE_NO_PREFIX, ALLOW_ABSENT,
3356 NORMAL_RESOLUTION);
3357 }
3358 if (kind == AARCH64_MOD_MUL_VL)
3359 /* For consistency, give MUL VL the same shift amount as an implicit
3360 MUL #1. */
3361 operand->shifter.amount = 1;
3362 else if (exp.X_op == O_absent)
3363 {
3364 if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3365 {
3366 set_syntax_error (_("missing shift amount"));
3367 return false;
3368 }
3369 operand->shifter.amount = 0;
3370 }
3371 else if (exp.X_op != O_constant)
3372 {
3373 set_syntax_error (_("constant shift amount required"));
3374 return false;
3375 }
3376 /* For parsing purposes, MUL #n has no inherent range. The range
3377 depends on the operand and will be checked by operand-specific
3378 routines. */
3379 else if (kind != AARCH64_MOD_MUL
3380 && (exp.X_add_number < 0 || exp.X_add_number > 63))
3381 {
3382 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3383 return false;
3384 }
3385 else
3386 {
3387 operand->shifter.amount = exp.X_add_number;
3388 operand->shifter.amount_present = 1;
3389 }
3390
3391 operand->shifter.operator_present = 1;
3392 operand->shifter.kind = kind;
3393
3394 *str = p;
3395 return true;
3396 }
3397
/* Parse a <shifter_operand> for a data processing instruction:

      #<immediate>
      #<immediate>, LSL #imm

   MODE selects which forms are accepted: SHIFTED_ARITH_IMM permits the
   optional LSL, SHIFTED_LOGIC_IMM permits no shifter at all.  The
   immediate expression is stored in inst.reloc.exp; validation of
   immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
                           enum parse_shift_mode mode)
{
  char *p;

  /* Only the two immediate modes are meaningful here.  */
  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
    return false;

  p = *str;

  /* Accept an immediate expression.  */
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX,
                                REJECT_ABSENT, NORMAL_RESOLUTION))
    return false;

  /* Accept optional LSL for arithmetic immediate values.  */
  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
    if (! parse_shift (&p, operand, SHIFTED_LSL))
      return false;

  /* Not accept any shifter for logical immediate values.  Note that a
     successful parse_shift here is the ERROR case.  */
  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
      && parse_shift (&p, operand, mode))
    {
      set_syntax_error (_("unexpected shift operator"));
      return false;
    }

  *str = p;
  return true;
}
3439
/* Parse a <shifter_operand> for a data processing instruction:

      <Rm>
      <Rm>, <shift>
      #<immediate>
      #<immediate>, LSL #imm

   where <shift> is handled by parse_shift above, and the last two
   cases are handled by the function above.

   Validation of immediate operands is deferred to md_apply_fix.

   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_shifter_operand (char **str, aarch64_opnd_info *operand,
                       enum parse_shift_mode mode)
{
  const reg_entry *reg;
  aarch64_opnd_qualifier_t qualifier;
  enum aarch64_operand_class opd_class
    = aarch64_get_operand_class (operand->type);

  /* First try the register form; fall through to the immediate form
     below if no register is found.  */
  reg = aarch64_reg_parse_32_64 (str, &qualifier);
  if (reg)
    {
      if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
        {
          set_syntax_error (_("unexpected register in the immediate operand"));
          return false;
        }

      /* Only a general-purpose register or ZR is valid here.  */
      if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
        {
          set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
          return false;
        }

      operand->reg.regno = reg->number;
      operand->qualifier = qualifier;

      /* Accept optional shift operation on register.  */
      if (! skip_past_comma (str))
        return true;

      if (! parse_shift (str, operand, mode))
        return false;

      return true;
    }
  else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
    {
      set_syntax_error
        (_("integer register expected in the extended/shifted operand "
           "register"));
      return false;
    }

  /* We have a shifted immediate variable.  */
  return parse_shifter_operand_imm (str, operand, mode);
}
3501
/* Parse a <shifter_operand> that may be prefixed with a relocation
   modifier, i.e. "#:rello:" or ":rello:".  If no modifier is present
   the whole operand is punted to parse_shifter_operand.

   Return TRUE on success; return FALSE otherwise.  */

static bool
parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
                             enum parse_shift_mode mode)
{
  char *p = *str;

  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a :rello: relocation
     modifier.  If we don't, punt the whole lot to
     parse_shifter_operand.  */

  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
    {
      struct reloc_table_entry *entry;

      /* Skip over "#:" or ":".  */
      if (p[0] == '#')
        p += 2;
      else
        p++;
      *str = p;

      /* Try to parse a relocation.  Anything else is an error.  */
      if (!(entry = find_reloc_table_entry (str)))
        {
          set_syntax_error (_("unknown relocation modifier"));
          return false;
        }

      /* add_type == 0 means this modifier has no ADD-class variant.  */
      if (entry->add_type == 0)
        {
          set_syntax_error
            (_("this relocation modifier is not allowed on this instruction"));
          return false;
        }

      /* Save str before we decompose it.  */
      p = *str;

      /* Next, we parse the expression.  */
      if (! aarch64_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX,
                                    REJECT_ABSENT,
                                    aarch64_force_reloc (entry->add_type) == 1))
        return false;

      /* Record the relocation type (use the ADD variant here).  */
      inst.reloc.type = entry->add_type;
      inst.reloc.pc_rel = entry->pc_rel;

      /* If str is empty, we've reached the end, stop here.  */
      if (**str == '\0')
        return true;

      /* Otherwise, we have a shifted reloc modifier, so rewind to
         recover the variable name and continue parsing for the shifter.  */
      *str = p;
      return parse_shifter_operand_imm (str, operand, mode);
    }

  return parse_shifter_operand (str, operand, mode);
}
3564
3565 /* Parse all forms of an address expression. Information is written
3566 to *OPERAND and/or inst.reloc.
3567
3568 The A64 instruction set has the following addressing modes:
3569
3570 Offset
3571 [base] // in SIMD ld/st structure
3572 [base{,#0}] // in ld/st exclusive
3573 [base{,#imm}]
3574 [base,Xm{,LSL #imm}]
3575 [base,Xm,SXTX {#imm}]
3576 [base,Wm,(S|U)XTW {#imm}]
3577 Pre-indexed
3578 [base]! // in ldraa/ldrab exclusive
3579 [base,#imm]!
3580 Post-indexed
3581 [base],#imm
3582 [base],Xm // in SIMD ld/st structure
3583 PC-relative (literal)
3584 label
3585 SVE:
3586 [base,#imm,MUL VL]
3587 [base,Zm.D{,LSL #imm}]
3588 [base,Zm.S,(S|U)XTW {#imm}]
3589 [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3590 [Zn.S,#imm]
3591 [Zn.D,#imm]
3592 [Zn.S{, Xm}]
3593 [Zn.S,Zm.S{,LSL #imm}] // in ADR
3594 [Zn.D,Zm.D{,LSL #imm}] // in ADR
3595 [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3596
3597 (As a convenience, the notation "=immediate" is permitted in conjunction
3598 with the pc-relative literal load instructions to automatically place an
3599 immediate value or symbolic address in a nearby literal pool and generate
3600 a hidden label which references it.)
3601
3602 Upon a successful parsing, the address structure in *OPERAND will be
3603 filled in the following way:
3604
3605 .base_regno = <base>
3606 .offset.is_reg // 1 if the offset is a register
3607 .offset.imm = <imm>
3608 .offset.regno = <Rm>
3609
3610 For different addressing modes defined in the A64 ISA:
3611
3612 Offset
3613 .pcrel=0; .preind=1; .postind=0; .writeback=0
3614 Pre-indexed
3615 .pcrel=0; .preind=1; .postind=0; .writeback=1
3616 Post-indexed
3617 .pcrel=0; .preind=0; .postind=1; .writeback=1
3618 PC-relative (literal)
3619 .pcrel=1; .preind=1; .postind=0; .writeback=0
3620
3621 The shift/extension information, if any, will be stored in .shifter.
3622 The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3623 *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3624 corresponding register.
3625
3626 BASE_TYPE says which types of base register should be accepted and
3627 OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE
3628 is the type of shifter that is allowed for immediate offsets,
3629 or SHIFTED_NONE if none.
3630
3631 In all other respects, it is the caller's responsibility to check
3632 for addressing modes not supported by the instruction, and to set
3633 inst.reloc.type. */
3634
static bool
parse_address_main (char **str, aarch64_opnd_info *operand,
                    aarch64_opnd_qualifier_t *base_qualifier,
                    aarch64_opnd_qualifier_t *offset_qualifier,
                    aarch64_reg_type base_type, aarch64_reg_type offset_type,
                    enum parse_shift_mode imm_shift_mode)
{
  char *p = *str;
  const reg_entry *reg;
  expressionS *exp = &inst.reloc.exp;

  *base_qualifier = AARCH64_OPND_QLF_NIL;
  *offset_qualifier = AARCH64_OPND_QLF_NIL;
  if (! skip_past_char (&p, '['))
    {
      /* No '[': the PC-relative form, "=immediate" or a label,
         optionally preceded by a relocation modifier.  */
      operand->addr.pcrel = 1;
      operand->addr.preind = 1;

      /* #:<reloc_op>:<symbol> */
      skip_past_char (&p, '#');
      if (skip_past_char (&p, ':'))
        {
          bfd_reloc_code_real_type ty;
          struct reloc_table_entry *entry;

          /* Try to parse a relocation modifier.  Anything else is
             an error.  */
          entry = find_reloc_table_entry (&p);
          if (! entry)
            {
              set_syntax_error (_("unknown relocation modifier"));
              return false;
            }

          /* Choose the relocation variant matching the operand.  */
          switch (operand->type)
            {
            case AARCH64_OPND_ADDR_PCREL21:
              /* adr */
              ty = entry->adr_type;
              break;

            default:
              ty = entry->ld_literal_type;
              break;
            }

          if (ty == 0)
            {
              set_syntax_error
                (_("this relocation modifier is not allowed on this "
                   "instruction"));
              return false;
            }

          /* #:<reloc_op>: */
          if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
                                        aarch64_force_reloc (entry->add_type) == 1))
            {
              set_syntax_error (_("invalid relocation expression"));
              return false;
            }
          /* #:<reloc_op>:<expr> */
          /* Record the relocation type.  */
          inst.reloc.type = ty;
          inst.reloc.pc_rel = entry->pc_rel;
        }
      else
        {
          if (skip_past_char (&p, '='))
            /* =immediate; need to generate the literal in the literal pool.  */
            inst.gen_lit_pool = 1;

          if (!aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
                                       NORMAL_RESOLUTION))
            {
              set_syntax_error (_("invalid address"));
              return false;
            }
        }

      *str = p;
      return true;
    }

  /* [ */

  /* Parse the base register; BASE_TYPE restricts which kinds are valid.  */
  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
  if (!reg || !aarch64_check_reg_type (reg, base_type))
    {
      set_syntax_error (_(get_reg_expected_msg (base_type)));
      return false;
    }
  operand->addr.base_regno = reg->number;

  /* [Xn */
  if (skip_past_comma (&p))
    {
      /* [Xn, */
      operand->addr.preind = 1;

      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
      if (reg)
        {
          if (!aarch64_check_reg_type (reg, offset_type))
            {
              set_syntax_error (_(get_reg_expected_msg (offset_type)));
              return false;
            }

          /* [Xn,Rm */
          operand->addr.offset.regno = reg->number;
          operand->addr.offset.is_reg = 1;
          /* Shifted index.  */
          if (skip_past_comma (&p))
            {
              /* [Xn,Rm, */
              if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
                /* Use the diagnostics set in parse_shift, so not set new
                   error message here.  */
                return false;
            }
          /* We only accept:
             [base,Xm]  # For vector plus scalar SVE2 indexing.
             [base,Xm{,LSL #imm}]
             [base,Xm,SXTX {#imm}]
             [base,Wm,(S|U)XTW {#imm}] */
          if (operand->shifter.kind == AARCH64_MOD_NONE
              || operand->shifter.kind == AARCH64_MOD_LSL
              || operand->shifter.kind == AARCH64_MOD_SXTX)
            {
              /* These forms require a 64-bit (X) offset register.  */
              if (*offset_qualifier == AARCH64_OPND_QLF_W)
                {
                  set_syntax_error (_("invalid use of 32-bit register offset"));
                  return false;
                }
              if (aarch64_get_qualifier_esize (*base_qualifier)
                  != aarch64_get_qualifier_esize (*offset_qualifier)
                  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
                      || *base_qualifier != AARCH64_OPND_QLF_S_S
                      || *offset_qualifier != AARCH64_OPND_QLF_X))
                {
                  set_syntax_error (_("offset has different size from base"));
                  return false;
                }
            }
          else if (*offset_qualifier == AARCH64_OPND_QLF_X)
            {
              /* The (S|U)XTW forms take a 32-bit (W) offset register.  */
              set_syntax_error (_("invalid use of 64-bit register offset"));
              return false;
            }
        }
      else
        {
          /* [Xn,#:<reloc_op>:<symbol> */
          skip_past_char (&p, '#');
          if (skip_past_char (&p, ':'))
            {
              struct reloc_table_entry *entry;

              /* Try to parse a relocation modifier.  Anything else is
                 an error.  */
              if (!(entry = find_reloc_table_entry (&p)))
                {
                  set_syntax_error (_("unknown relocation modifier"));
                  return false;
                }

              if (entry->ldst_type == 0)
                {
                  set_syntax_error
                    (_("this relocation modifier is not allowed on this "
                       "instruction"));
                  return false;
                }

              /* [Xn,#:<reloc_op>: */
              /* We now have the group relocation table entry corresponding to
                 the name in the assembler source.  Next, we parse the
                 expression.  */
              if (! aarch64_get_expression (exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
                                            aarch64_force_reloc (entry->add_type) == 1))
                {
                  set_syntax_error (_("invalid relocation expression"));
                  return false;
                }

              /* [Xn,#:<reloc_op>:<expr> */
              /* Record the load/store relocation type.  */
              inst.reloc.type = entry->ldst_type;
              inst.reloc.pc_rel = entry->pc_rel;
            }
          else
            {
              /* Plain immediate offset.  */
              if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
                                            NORMAL_RESOLUTION))
                {
                  set_syntax_error (_("invalid expression in the address"));
                  return false;
                }
              /* [Xn,<expr> */
              if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
                /* [Xn,<expr>,<shifter> */
                if (! parse_shift (&p, operand, imm_shift_mode))
                  return false;
            }
        }
    }

  if (! skip_past_char (&p, ']'))
    {
      set_syntax_error (_("']' expected"));
      return false;
    }

  if (skip_past_char (&p, '!'))
    {
      if (operand->addr.preind && operand->addr.offset.is_reg)
        {
          set_syntax_error (_("register offset not allowed in pre-indexed "
                              "addressing mode"));
          return false;
        }
      /* [Xn]! */
      operand->addr.writeback = 1;
    }
  else if (skip_past_comma (&p))
    {
      /* [Xn], */
      operand->addr.postind = 1;
      operand->addr.writeback = 1;

      if (operand->addr.preind)
        {
          set_syntax_error (_("cannot combine pre- and post-indexing"));
          return false;
        }

      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
      if (reg)
        {
          /* [Xn],Xm */
          if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
            {
              set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
              return false;
            }

          operand->addr.offset.regno = reg->number;
          operand->addr.offset.is_reg = 1;
        }
      else if (! aarch64_get_expression (exp, &p, GE_OPT_PREFIX, REJECT_ABSENT,
                                         NORMAL_RESOLUTION))
        {
          /* [Xn],#expr */
          set_syntax_error (_("invalid expression in the address"));
          return false;
        }
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
     ldrab, accept [Rn] as a shorthand for [Rn,#0].
     For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
     [Zn.<T>, xzr].  */
  if (operand->addr.preind == 0 && operand->addr.postind == 0)
    {
      if (operand->addr.writeback)
        {
          if (operand->type == AARCH64_OPND_ADDR_SIMM10)
            {
              /* Accept [Rn]! as a shorthand for [Rn,#0]!  */
              operand->addr.offset.is_reg = 0;
              operand->addr.offset.imm = 0;
              operand->addr.preind = 1;
            }
          else
            {
              /* Reject [Rn]!  */
              set_syntax_error (_("missing offset in the pre-indexed address"));
              return false;
            }
        }
      else
        {
          operand->addr.preind = 1;
          if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
            {
              /* [Zn.<T>] is shorthand for [Zn.<T>, xzr].  */
              operand->addr.offset.is_reg = 1;
              operand->addr.offset.regno = REG_ZR;
              *offset_qualifier = AARCH64_OPND_QLF_X;
            }
          else
            {
              /* [Rn] is shorthand for [Rn,#0].  */
              inst.reloc.exp.X_op = O_constant;
              inst.reloc.exp.X_add_number = 0;
            }
        }
    }

  *str = p;
  return true;
}
3938
3939 /* Parse a base AArch64 address (as opposed to an SVE one). Return TRUE
3940 on success. */
3941 static bool
3942 parse_address (char **str, aarch64_opnd_info *operand)
3943 {
3944 aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3945 return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3946 REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3947 }
3948
/* Parse an address in which SVE vector registers and MUL VL are allowed.
   The arguments have the same meaning as for parse_address_main.
   Return TRUE on success.  */
static bool
parse_sve_address (char **str, aarch64_opnd_info *operand,
                   aarch64_opnd_qualifier_t *base_qualifier,
                   aarch64_opnd_qualifier_t *offset_qualifier)
{
  /* Allow SVE base/offset register types and a trailing "MUL VL"
     multiplier on immediate offsets.  */
  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
                             REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
                             SHIFTED_MUL_VL);
}
3961
/* Parse an operand for a MOVZ, MOVN or MOVK instruction.

   Sets *INTERNAL_FIXUP_P to 1 when the immediate carries no relocation
   modifier and must be fixed up internally; records any parsed
   relocation type and expression in inst.reloc.

   Return TRUE on success; otherwise return FALSE.  */
static bool
parse_half (char **str, int *internal_fixup_p)
{
  char *p = *str;

  /* An optional leading '#' is allowed.  */
  skip_past_char (&p, '#');

  gas_assert (internal_fixup_p);
  *internal_fixup_p = 0;

  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;

      if (!(entry = find_reloc_table_entry (&p)))
        {
          set_syntax_error (_("unknown relocation modifier"));
          return false;
        }

      /* movw_type == 0 means this modifier has no MOVW-class variant.  */
      if (entry->movw_type == 0)
        {
          set_syntax_error
            (_("this relocation modifier is not allowed on this instruction"));
          return false;
        }

      inst.reloc.type = entry->movw_type;
    }
  else
    *internal_fixup_p = 1;

  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
                                aarch64_force_reloc (inst.reloc.type) == 1))
    return false;

  *str = p;
  return true;
}
4006
/* Parse an operand for an ADRP instruction:
     ADRP <Xd>, <label>
   The label may be preceded by a relocation modifier (":rello:");
   otherwise the default ADR_HI21 PC-relative relocation is used.
   Return TRUE on success; otherwise return FALSE.  */

static bool
parse_adrp (char **str)
{
  char *p;

  p = *str;
  if (*p == ':')
    {
      struct reloc_table_entry *entry;

      /* Try to parse a relocation.  Anything else is an error.  */
      ++p;
      if (!(entry = find_reloc_table_entry (&p)))
        {
          set_syntax_error (_("unknown relocation modifier"));
          return false;
        }

      /* adrp_type == 0 means this modifier has no ADRP variant.  */
      if (entry->adrp_type == 0)
        {
          set_syntax_error
            (_("this relocation modifier is not allowed on this instruction"));
          return false;
        }

      inst.reloc.type = entry->adrp_type;
    }
  else
    inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;

  /* ADRP is always PC-relative.  */
  inst.reloc.pc_rel = 1;
  if (! aarch64_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, REJECT_ABSENT,
                                aarch64_force_reloc (inst.reloc.type) == 1))
    return false;
  *str = p;
  return true;
}
4048
4049 /* Miscellaneous. */
4050
4051 /* Parse a symbolic operand such as "pow2" at *STR. ARRAY is an array
4052 of SIZE tokens in which index I gives the token for field value I,
4053 or is null if field value I is invalid. REG_TYPE says which register
4054 names should be treated as registers rather than as symbolic immediates.
4055
4056 Return true on success, moving *STR past the operand and storing the
4057 field value in *VAL. */
4058
4059 static int
4060 parse_enum_string (char **str, int64_t *val, const char *const *array,
4061 size_t size, aarch64_reg_type reg_type)
4062 {
4063 expressionS exp;
4064 char *p, *q;
4065 size_t i;
4066
4067 /* Match C-like tokens. */
4068 p = q = *str;
4069 while (ISALNUM (*q))
4070 q++;
4071
4072 for (i = 0; i < size; ++i)
4073 if (array[i]
4074 && strncasecmp (array[i], p, q - p) == 0
4075 && array[i][q - p] == 0)
4076 {
4077 *val = i;
4078 *str = q;
4079 return true;
4080 }
4081
4082 if (!parse_immediate_expression (&p, &exp, reg_type))
4083 return false;
4084
4085 if (exp.X_op == O_constant
4086 && (uint64_t) exp.X_add_number < size)
4087 {
4088 *val = exp.X_add_number;
4089 *str = p;
4090 return true;
4091 }
4092
4093 /* Use the default error for this operand. */
4094 return false;
4095 }
4096
4097 /* Parse an option for a preload instruction. Returns the encoding for the
4098 option, or PARSE_FAIL. */
4099
4100 static int
4101 parse_pldop (char **str)
4102 {
4103 char *p, *q;
4104 const struct aarch64_name_value_pair *o;
4105
4106 p = q = *str;
4107 while (ISALNUM (*q))
4108 q++;
4109
4110 o = str_hash_find_n (aarch64_pldop_hsh, p, q - p);
4111 if (!o)
4112 return PARSE_FAIL;
4113
4114 *str = q;
4115 return o->value;
4116 }
4117
4118 /* Parse an option for a barrier instruction. Returns the encoding for the
4119 option, or PARSE_FAIL. */
4120
4121 static int
4122 parse_barrier (char **str)
4123 {
4124 char *p, *q;
4125 const struct aarch64_name_value_pair *o;
4126
4127 p = q = *str;
4128 while (ISALPHA (*q))
4129 q++;
4130
4131 o = str_hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
4132 if (!o)
4133 return PARSE_FAIL;
4134
4135 *str = q;
4136 return o->value;
4137 }
4138
4139 /* Parse an operand for a PSB barrier. Set *HINT_OPT to the hint-option record
4140 return 0 if successful. Otherwise return PARSE_FAIL. */
4141
4142 static int
4143 parse_barrier_psb (char **str,
4144 const struct aarch64_name_value_pair ** hint_opt)
4145 {
4146 char *p, *q;
4147 const struct aarch64_name_value_pair *o;
4148
4149 p = q = *str;
4150 while (ISALPHA (*q))
4151 q++;
4152
4153 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4154 if (!o)
4155 {
4156 set_fatal_syntax_error
4157 ( _("unknown or missing option to PSB/TSB"));
4158 return PARSE_FAIL;
4159 }
4160
4161 if (o->value != 0x11)
4162 {
4163 /* PSB only accepts option name 'CSYNC'. */
4164 set_syntax_error
4165 (_("the specified option is not accepted for PSB/TSB"));
4166 return PARSE_FAIL;
4167 }
4168
4169 *str = q;
4170 *hint_opt = o;
4171 return 0;
4172 }
4173
4174 /* Parse an operand for BTI. Set *HINT_OPT to the hint-option record
4175 return 0 if successful. Otherwise return PARSE_FAIL. */
4176
4177 static int
4178 parse_bti_operand (char **str,
4179 const struct aarch64_name_value_pair ** hint_opt)
4180 {
4181 char *p, *q;
4182 const struct aarch64_name_value_pair *o;
4183
4184 p = q = *str;
4185 while (ISALPHA (*q))
4186 q++;
4187
4188 o = str_hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4189 if (!o)
4190 {
4191 set_fatal_syntax_error
4192 ( _("unknown option to BTI"));
4193 return PARSE_FAIL;
4194 }
4195
4196 switch (o->value)
4197 {
4198 /* Valid BTI operands. */
4199 case HINT_OPD_C:
4200 case HINT_OPD_J:
4201 case HINT_OPD_JC:
4202 break;
4203
4204 default:
4205 set_syntax_error
4206 (_("unknown option to BTI"));
4207 return PARSE_FAIL;
4208 }
4209
4210 *str = q;
4211 *hint_opt = o;
4212 return 0;
4213 }
4214
/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
   Returns the encoding for the option, or PARSE_FAIL.

   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.

   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
   field, otherwise as a system register.

   If FLAGS is non-null, *FLAGS receives the register's flag bits (0 for
   an implementation-defined register).  */

static int
parse_sys_reg (char **str, htab_t sys_regs,
               int imple_defined_p, int pstatefield_p,
               uint32_t* flags)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_reg *o;
  int value;

  /* Copy a lower-cased version of the name into BUF, stopping at the
     first character that cannot be part of a register name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return PARSE_FAIL;

  o = str_hash_find (sys_regs, buf);
  if (!o)
    {
      if (!imple_defined_p)
        return PARSE_FAIL;
      else
        {
          /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
          unsigned int op0, op1, cn, cm, op2;

          if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
              != 5)
            return PARSE_FAIL;
          /* Range-check each field of the encoding.  */
          if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
            return PARSE_FAIL;
          /* Pack the fields into the encoding used by MRS/MSR.  */
          value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
          if (flags)
            *flags = 0;
        }
    }
  else
    {
      /* Known register: diagnose uses not supported by the selected
         processor, but still return the encoding.  */
      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
        as_bad (_("selected processor does not support PSTATE field "
                  "name '%s'"), buf);
      if (!pstatefield_p
          && !aarch64_sys_ins_reg_supported_p (cpu_variant, o->name,
                                               o->value, o->flags, o->features))
        as_bad (_("selected processor does not support system register "
                  "name '%s'"), buf);
      if (aarch64_sys_reg_deprecated_p (o->flags))
        as_warn (_("system register name '%s' is deprecated and may be "
                   "removed in a future release"), buf);
      value = o->value;
      if (flags)
        *flags = o->flags;
    }

  *str = q;
  return value;
}
4288
/* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
   for the option, or NULL.  */

static const aarch64_sys_ins_reg *
parse_sys_ins_reg (char **str, htab_t sys_ins_regs)
{
  char *p, *q;
  char buf[AARCH64_MAX_SYSREG_NAME_LEN];
  const aarch64_sys_ins_reg *o;

  /* Copy a lower-cased version of the name into BUF, stopping at the
     first character that cannot be part of a register name.  */
  p = buf;
  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
    if (p < buf + (sizeof (buf) - 1))
      *p++ = TOLOWER (*q);
  *p = '\0';

  /* If the name is longer than AARCH64_MAX_SYSREG_NAME_LEN then it cannot be a
     valid system register.  This is enforced by construction of the hash
     table.  */
  if (p - buf != q - *str)
    return NULL;

  o = str_hash_find (sys_ins_regs, buf);
  if (!o)
    return NULL;

  /* Diagnose, but do not reject, names the selected processor lacks.  */
  if (!aarch64_sys_ins_reg_supported_p (cpu_variant,
                                        o->name, o->value, o->flags, 0))
    as_bad (_("selected processor does not support system register "
              "name '%s'"), buf);
  if (aarch64_sys_reg_deprecated_p (o->flags))
    as_warn (_("system register name '%s' is deprecated and may be "
               "removed in a future release"), buf);

  *str = q;
  return o;
}
4326 \f
/* Parse-helper macros used by the operand parser; each jumps to the
   enclosing function's "failure" label when the expected item is not
   found.  */

/* Consume the literal character CHR, or fail.  */
#define po_char_or_fail(chr) do {				\
    if (! skip_past_char (&str, chr))				\
      goto failure;						\
} while (0)

/* Parse a register of REGTYPE into VAL, or fail with the default
   diagnostic.  */
#define po_reg_or_fail(regtype) do {				\
    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
    if (val == PARSE_FAIL)					\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
  } while (0)

/* Parse a 32/64-bit integer register of REG_TYPE, storing its number
   and qualifier into INFO, or fail with the default diagnostic.  */
#define po_int_reg_or_fail(reg_type) do {			\
    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
      {								\
	set_default_error ();					\
	goto failure;						\
      }								\
    info->reg.regno = reg->number;				\
    info->qualifier = qualifier;				\
  } while (0)

/* Parse a constant immediate into VAL without range checking ("nc"),
   or fail.  */
#define po_imm_nc_or_fail() do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Parse a constant immediate into VAL and require MIN <= VAL <= MAX,
   or fail.  */
#define po_imm_or_fail(min, max) do {				\
    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
      goto failure;						\
    if (val < min || val > max)					\
      {								\
	set_fatal_syntax_error (_("immediate value out of range "\
				  #min " to "#max));		\
	goto failure;						\
      }								\
  } while (0)

/* Parse a symbolic enum value (see parse_enum_string) into VAL, or
   fail.  */
#define po_enum_or_fail(array) do {				\
    if (!parse_enum_string (&str, &val, array,			\
			    ARRAY_SIZE (array), imm_reg_type))	\
      goto failure;						\
  } while (0)

/* Evaluate EXPR and fail when it yields false.  */
#define po_misc_or_fail(expr) do {				\
    if (!expr)							\
      goto failure;						\
  } while (0)
4378 \f
/* Encode the 12-bit imm field of an add/sub immediate instruction;
   the field starts at bit 10.  */
static inline uint32_t
encode_addsub_imm (uint32_t imm)
{
  const unsigned int field_pos = 10;
  return imm << field_pos;
}
4385
/* Encode the shift-amount field of an add/sub immediate instruction;
   the field starts at bit 22.  */
static inline uint32_t
encode_addsub_imm_shift_amount (uint32_t cnt)
{
  const unsigned int field_pos = 22;
  return cnt << field_pos;
}
4392
4393
/* Encode the imm field of an Adr instruction: the low two immediate
   bits go to [30:29] (immlo) and bits [20:2] go to [23:5] (immhi).  */
static inline uint32_t
encode_adr_imm (uint32_t imm)
{
  uint32_t lo = imm & 0x3;             /* imm[1:0]  */
  uint32_t hi = imm & (0x7ffff << 2);  /* imm[20:2] */
  return (lo << 29) | (hi << 3);
}
4401
/* Encode the immediate field of a Move wide immediate instruction;
   the field starts at bit 5.  */
static inline uint32_t
encode_movw_imm (uint32_t imm)
{
  const unsigned int field_pos = 5;
  return imm << field_pos;
}
4408
/* Encode the 26-bit offset of an unconditional branch; the field
   occupies the low 26 bits of the instruction word.  */
static inline uint32_t
encode_branch_ofs_26 (uint32_t ofs)
{
  const uint32_t field_mask = (1 << 26) - 1;
  return ofs & field_mask;
}
4415
/* Encode the 19-bit offset of a conditional branch or compare & branch;
   the field starts at bit 5.  */
static inline uint32_t
encode_cond_branch_ofs_19 (uint32_t ofs)
{
  const uint32_t field_mask = (1 << 19) - 1;
  return (ofs & field_mask) << 5;
}
4422
/* Encode the 19-bit offset of a load-literal instruction; the field
   starts at bit 5.  */
static inline uint32_t
encode_ld_lit_ofs_19 (uint32_t ofs)
{
  const uint32_t field_mask = (1 << 19) - 1;
  return (ofs & field_mask) << 5;
}
4429
/* Encode the 14-bit offset of a test & branch instruction; the field
   starts at bit 5.  */
static inline uint32_t
encode_tst_branch_ofs_14 (uint32_t ofs)
{
  const uint32_t field_mask = (1 << 14) - 1;
  return (ofs & field_mask) << 5;
}
4436
/* Encode the 16-bit imm field of svc/hvc/smc; the field starts at
   bit 5.  */
static inline uint32_t
encode_svc_imm (uint32_t imm)
{
  const unsigned int field_pos = 5;
  return imm << field_pos;
}
4443
/* Reencode add(s) to sub(s), or sub(s) to add(s), by toggling the
   opcode bit at position 30 that distinguishes the two.  */
static inline uint32_t
reencode_addsub_switch_add_sub (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;
  return opcode ^ opc_bit;
}
4450
/* Reencode a MOVZ/MOVN-family opcode as MOVZ by forcing on the
   bit-30 opcode bit that distinguishes the two forms.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;
  return opcode | opc_bit;
}
4456
/* Reencode a MOVZ/MOVN-family opcode as MOVN by clearing the bit-30
   opcode bit that distinguishes the two forms.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  const uint32_t opc_bit = 1u << 30;
  return opcode & ~opc_bit;
}
4462
4463 /* Overall per-instruction processing. */
4464
/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.  */

static fixS *
fix_new_aarch64 (fragS * frag,
                 int where,
                 short int size,
                 expressionS * exp,
                 int pc_rel,
                 int reloc)
{
  fixS *new_fix;

  switch (exp->X_op)
    {
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      /* Simple expressions can be turned into a fix directly.  */
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
      break;

    default:
      /* Anything more complex is wrapped in an expr-section symbol and
         the fix is made against that symbol instead.  */
      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
                         pc_rel, reloc);
      break;
    }
  return new_fix;
}
4498 \f
/* Diagnostics on operands errors.  */

/* By default, output verbose error message.
   Disable the verbose error message by -mno-verbose-error.  */
static int verbose_error_p = 1;
4504
#ifdef DEBUG_AARCH64
/* N.B. this is only for the purpose of debugging.  The entries appear
   to be indexed by enum aarch64_operand_error_kind -- confirm the order
   against that enum when adding new kinds.  */
const char* operand_mismatch_kind_names[] =
{
  "AARCH64_OPDE_NIL",
  "AARCH64_OPDE_RECOVERABLE",
  "AARCH64_OPDE_SYNTAX_ERROR",
  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
  "AARCH64_OPDE_INVALID_VARIANT",
  "AARCH64_OPDE_OUT_OF_RANGE",
  "AARCH64_OPDE_UNALIGNED",
  "AARCH64_OPDE_REG_LIST",
  "AARCH64_OPDE_OTHER_ERROR",
};
#endif /* DEBUG_AARCH64 */
4520
/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.

   When multiple errors of different kinds are found in the same assembly
   line, only the error of the highest severity will be picked up for
   issuing the diagnostics.  */

static inline bool
operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
                                 enum aarch64_operand_error_kind rhs)
{
  /* The plain ">" comparison below relies on the enumerators being
     declared in ascending order of severity; assert that ordering.  */
  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
  return lhs > rhs;
}
4541
/* Helper routine to get the mnemonic name from the assembly instruction
   line; should only be called for the diagnosis purpose, as there is
   string copy operation involved, which may affect the runtime
   performance if used in elsewhere.  */

static const char*
get_mnemonic_name (const char *str)
{
  static char mnemonic[32];
  char *ptr;

  /* Get the first 31 bytes and assume that the full name is included.  */
  strncpy (mnemonic, str, 31);
  mnemonic[31] = '\0';

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  */
  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
    ;

  *ptr = '\0';

  /* Append '...' to the truncated long name.  */
  if (ptr - mnemonic == 31)
    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';

  return mnemonic;
}
4570
4571 static void
4572 reset_aarch64_instruction (aarch64_instruction *instruction)
4573 {
4574 memset (instruction, '\0', sizeof (aarch64_instruction));
4575 instruction->reloc.type = BFD_RELOC_UNUSED;
4576 }
4577
/* Data structures storing one user error in the assembly code related to
   operands.  */

struct operand_error_record
{
  const aarch64_opcode *opcode;		/* Opcode template the error was
					   recorded against.  */
  aarch64_operand_error detail;		/* Kind, operand index, message and
					   extra data of the error.  */
  struct operand_error_record *next;	/* Next record in the list.  */
};

typedef struct operand_error_record operand_error_record;

/* Singly linked list of error records; keeping both head and tail allows
   the whole list to be spliced onto the free list in O(1).  */

struct operand_errors
{
  operand_error_record *head;
  operand_error_record *tail;
};

typedef struct operand_errors operand_errors;
4597
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated to find a match to the assembly line.  In this data
   structure, each of the such opcodes will have one operand_error_record
   allocated and inserted.  In other words, excessive errors related with
   a single opcode are disregarded.  */
operand_errors operand_error_report;

/* Free record nodes, recycled by init_operand_error_report so that records
   are not repeatedly heap-allocated for every assembly line.  */
static operand_error_record *free_opnd_error_record_nodes = NULL;
4609
4610 /* Initialize the data structure that stores the operand mismatch
4611 information on assembling one line of the assembly code. */
4612 static void
4613 init_operand_error_report (void)
4614 {
4615 if (operand_error_report.head != NULL)
4616 {
4617 gas_assert (operand_error_report.tail != NULL);
4618 operand_error_report.tail->next = free_opnd_error_record_nodes;
4619 free_opnd_error_record_nodes = operand_error_report.head;
4620 operand_error_report.head = NULL;
4621 operand_error_report.tail = NULL;
4622 return;
4623 }
4624 gas_assert (operand_error_report.tail == NULL);
4625 }
4626
4627 /* Return TRUE if some operand error has been recorded during the
4628 parsing of the current assembly line using the opcode *OPCODE;
4629 otherwise return FALSE. */
4630 static inline bool
4631 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4632 {
4633 operand_error_record *record = operand_error_report.head;
4634 return record && record->opcode == opcode;
4635 }
4636
/* Add the error record *NEW_RECORD to operand_error_report.  The record's
   OPCODE field is initialized with OPCODE.
   N.B. only one record for each opcode, i.e. the maximum of one error is
   recorded for each instruction template.  */

static void
add_operand_error_record (const operand_error_record* new_record)
{
  const aarch64_opcode *opcode = new_record->opcode;
  /* If a record for OPCODE already exists it is the head record, since
     records are always inserted at the head.  */
  operand_error_record* record = operand_error_report.head;

  /* The record may have been created for this opcode.  If not, we need
     to prepare one.  */
  if (! opcode_has_operand_error_p (opcode))
    {
      /* Get one empty record.  */
      if (free_opnd_error_record_nodes == NULL)
	{
	  record = XNEW (operand_error_record);
	}
      else
	{
	  /* Reuse a node from the free list.  */
	  record = free_opnd_error_record_nodes;
	  free_opnd_error_record_nodes = record->next;
	}
      record->opcode = opcode;
      /* Insert at the head.  */
      record->next = operand_error_report.head;
      operand_error_report.head = record;
      if (operand_error_report.tail == NULL)
	operand_error_report.tail = record;
    }
  else if (record->detail.kind != AARCH64_OPDE_NIL
	   && record->detail.index <= new_record->detail.index
	   && operand_error_higher_severity_p (record->detail.kind,
					       new_record->detail.kind))
    {
      /* In the case of multiple errors found on operands related with a
	 single opcode, only record the error of the leftmost operand and
	 only if the error is of higher severity.  */
      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
		   " the existing error %s on operand %d",
		   operand_mismatch_kind_names[new_record->detail.kind],
		   new_record->detail.index,
		   operand_mismatch_kind_names[record->detail.kind],
		   record->detail.index);
      return;
    }

  /* Either a freshly prepared record, or an existing record whose error
     is superseded by the new one.  */
  record->detail = new_record->detail;
}
4688
4689 static inline void
4690 record_operand_error_info (const aarch64_opcode *opcode,
4691 aarch64_operand_error *error_info)
4692 {
4693 operand_error_record record;
4694 record.opcode = opcode;
4695 record.detail = *error_info;
4696 add_operand_error_record (&record);
4697 }
4698
4699 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4700 error message *ERROR, for operand IDX (count from 0). */
4701
4702 static void
4703 record_operand_error (const aarch64_opcode *opcode, int idx,
4704 enum aarch64_operand_error_kind kind,
4705 const char* error)
4706 {
4707 aarch64_operand_error info;
4708 memset(&info, 0, sizeof (info));
4709 info.index = idx;
4710 info.kind = kind;
4711 info.error = error;
4712 info.non_fatal = false;
4713 record_operand_error_info (opcode, &info);
4714 }
4715
4716 static void
4717 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4718 enum aarch64_operand_error_kind kind,
4719 const char* error, const int *extra_data)
4720 {
4721 aarch64_operand_error info;
4722 info.index = idx;
4723 info.kind = kind;
4724 info.error = error;
4725 info.data[0] = extra_data[0];
4726 info.data[1] = extra_data[1];
4727 info.data[2] = extra_data[2];
4728 info.non_fatal = false;
4729 record_operand_error_info (opcode, &info);
4730 }
4731
4732 static void
4733 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4734 const char* error, int lower_bound,
4735 int upper_bound)
4736 {
4737 int data[3] = {lower_bound, upper_bound, 0};
4738 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4739 error, data);
4740 }
4741
4742 /* Remove the operand error record for *OPCODE. */
4743 static void ATTRIBUTE_UNUSED
4744 remove_operand_error_record (const aarch64_opcode *opcode)
4745 {
4746 if (opcode_has_operand_error_p (opcode))
4747 {
4748 operand_error_record* record = operand_error_report.head;
4749 gas_assert (record != NULL && operand_error_report.tail != NULL);
4750 operand_error_report.head = record->next;
4751 record->next = free_opnd_error_record_nodes;
4752 free_opnd_error_record_nodes = record;
4753 if (operand_error_report.head == NULL)
4754 {
4755 gas_assert (operand_error_report.tail == record);
4756 operand_error_report.tail = NULL;
4757 }
4758 }
4759 }
4760
/* Given the instruction in *INSTR, return the index of the best matched
   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.

   Return -1 if there is no qualifier sequence; return the first match
   if there is multiple matches found.  */

static int
find_best_match (const aarch64_inst *instr,
		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
{
  int i, num_opnds, max_num_matched, idx;

  num_opnds = aarch64_num_of_operands (instr->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("no operand");
      return -1;
    }

  max_num_matched = 0;
  idx = 0;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j, num_matched;
      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;

      /* Most opcodes has much fewer patterns in the list.  */
      if (empty_qualifier_sequence_p (qualifiers))
	{
	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
	  break;
	}

      /* Count how many operand qualifiers of this sequence agree with
	 the instruction's.  */
      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
	if (*qualifiers == instr->operands[j].qualifier)
	  ++num_matched;

      /* Strictly-greater comparison keeps the earlier sequence on a tie,
	 giving the "first match" behavior documented above.  */
      if (num_matched > max_num_matched)
	{
	  max_num_matched = num_matched;
	  idx = i;
	}
    }

  DEBUG_TRACE ("return with %d", idx);
  return idx;
}
4810
4811 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4812 corresponding operands in *INSTR. */
4813
4814 static inline void
4815 assign_qualifier_sequence (aarch64_inst *instr,
4816 const aarch64_opnd_qualifier_t *qualifiers)
4817 {
4818 int i = 0;
4819 int num_opnds = aarch64_num_of_operands (instr->opcode);
4820 gas_assert (num_opnds);
4821 for (i = 0; i < num_opnds; ++i, ++qualifiers)
4822 instr->operands[i].qualifier = *qualifiers;
4823 }
4824
4825 /* Print operands for the diagnosis purpose. */
4826
4827 static void
4828 print_operands (char *buf, const aarch64_opcode *opcode,
4829 const aarch64_opnd_info *opnds)
4830 {
4831 int i;
4832
4833 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4834 {
4835 char str[128];
4836
4837 /* We regard the opcode operand info more, however we also look into
4838 the inst->operands to support the disassembling of the optional
4839 operand.
4840 The two operand code should be the same in all cases, apart from
4841 when the operand can be optional. */
4842 if (opcode->operands[i] == AARCH64_OPND_NIL
4843 || opnds[i].type == AARCH64_OPND_NIL)
4844 break;
4845
4846 /* Generate the operand string in STR. */
4847 aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4848 NULL, cpu_variant);
4849
4850 /* Delimiter. */
4851 if (str[0] != '\0')
4852 strcat (buf, i == 0 ? " " : ", ");
4853
4854 /* Append the operand string. */
4855 strcat (buf, str);
4856 }
4857 }
4858
/* Send to stderr a string as information, prefixed with the current
   file name and line number (when known) and "Info: ".  */

static void
output_info (const char *format, ...)
{
  unsigned int lineno;
  const char *filename;
  va_list args;

  filename = as_where (&lineno);
  if (filename != NULL)
    {
      if (lineno == 0)
	fprintf (stderr, "%s: ", filename);
      else
	fprintf (stderr, "%s:%u: ", filename, lineno);
    }
  fprintf (stderr, _("Info: "));
  va_start (args, format);
  vfprintf (stderr, format, args);
  va_end (args);
  (void) putc ('\n', stderr);
}
4882
/* Output one operand error record.  Non-fatal errors are reported with
   as_warn, fatal ones with as_bad.  For AARCH64_OPDE_INVALID_VARIANT in
   verbose mode this also re-parses the line into the global `inst' to
   suggest corrected variants.  */

static void
output_operand_error_record (const operand_error_record *record, char *str)
{
  const aarch64_operand_error *detail = &record->detail;
  int idx = detail->index;
  const aarch64_opcode *opcode = record->opcode;
  /* IDX may be -1 (unknown operand index); only fetch the operand code
     when the index is valid.  */
  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
				: AARCH64_OPND_NIL);

  typedef void (*handler_t)(const char *format, ...);
  handler_t handler = detail->non_fatal ? as_warn : as_bad;

  switch (detail->kind)
    {
    case AARCH64_OPDE_NIL:
      gas_assert (0);
      break;
    case AARCH64_OPDE_SYNTAX_ERROR:
    case AARCH64_OPDE_RECOVERABLE:
    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is, otherwise use the
	 operand description string to describe the error.  */
      if (detail->error != NULL)
	{
	  if (idx < 0)
	    handler (_("%s -- `%s'"), detail->error, str);
	  else
	    handler (_("%s at operand %d -- `%s'"),
		     detail->error, idx + 1, str);
	}
      else
	{
	  gas_assert (idx >= 0);
	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
		   aarch64_get_operand_desc (opd_code), str);
	}
      break;

    case AARCH64_OPDE_INVALID_VARIANT:
      handler (_("operand mismatch -- `%s'"), str);
      if (verbose_error_p)
	{
	  /* We will try to correct the erroneous instruction and also provide
	     more information e.g. all other valid variants.

	     The string representation of the corrected instruction and other
	     valid variants are generated by

	     1) obtaining the intermediate representation of the erroneous
	     instruction;
	     2) manipulating the IR, e.g. replacing the operand qualifier;
	     3) printing out the instruction by calling the printer functions
	     shared with the disassembler.

	     The limitation of this method is that the exact input assembly
	     line cannot be accurately reproduced in some cases, for example an
	     optional operand present in the actual assembly line will be
	     omitted in the output; likewise for the optional syntax rules,
	     e.g. the # before the immediate.  Another limitation is that the
	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but not
	     least, when there is other error(s) co-exist with this error, the
	     'corrected' instruction may be still incorrect, e.g. given
	       'ldnp h0,h1,[x0,#6]!'
	     this diagnosis will provide the version:
	       'ldnp s0,s1,[x0,#6]!'
	     which is still not right.  */
	  size_t len = strlen (get_mnemonic_name (str));
	  int i, qlf_idx;
	  bool result;
	  char buf[2048];
	  aarch64_inst *inst_base = &inst.base;
	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;

	  /* Init inst.  */
	  reset_aarch64_instruction (&inst);
	  inst_base->opcode = opcode;

	  /* Reset the error report so that there is no side effect on the
	     following operand parsing.  */
	  init_operand_error_report ();

	  /* Fill inst.  */
	  result = parse_operands (str + len, opcode)
	    && programmer_friendly_fixup (&inst);
	  gas_assert (result);
	  /* Encoding is expected to fail here: we are re-processing a line
	     already known to have an invalid variant.  */
	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
					  NULL, NULL, insn_sequence);
	  gas_assert (!result);

	  /* Find the most matched qualifier sequence.  */
	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
	  gas_assert (qlf_idx > -1);

	  /* Assign the qualifiers.  */
	  assign_qualifier_sequence (inst_base,
				     opcode->qualifiers_list[qlf_idx]);

	  /* Print the hint.  */
	  output_info (_("   did you mean this?"));
	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
	  print_operands (buf, opcode, inst_base->operands);
	  output_info (_("   %s"), buf);

	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0 ||
	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
	    output_info (_("   other valid variant(s):"));

	  /* For each pattern.  */
	  qualifiers_list = opcode->qualifiers_list;
	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
	    {
	      /* Most opcodes has much fewer patterns in the list.
		 First NIL qualifier indicates the end in the list.  */
	      if (empty_qualifier_sequence_p (*qualifiers_list))
		break;

	      if (i != qlf_idx)
		{
		  /* Mnemonics name.  */
		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));

		  /* Assign the qualifiers.  */
		  assign_qualifier_sequence (inst_base, *qualifiers_list);

		  /* Print instruction.  */
		  print_operands (buf, opcode, inst_base->operands);

		  output_info (_("   %s"), buf);
		}
	    }
	}
      break;

    case AARCH64_OPDE_UNTIED_OPERAND:
      handler (_("operand %d must be the same register as operand 1 -- `%s'"),
	       detail->index + 1, str);
      break;

    case AARCH64_OPDE_OUT_OF_RANGE:
      /* data[0]/data[1] hold the inclusive lower/upper bounds; equal
	 bounds mean a single permitted value.  */
      if (detail->data[0] != detail->data[1])
	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], detail->data[1], idx + 1, str);
      else
	handler (_("%s must be %d at operand %d -- `%s'"),
		 detail->error ? detail->error : _("immediate value"),
		 detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_REG_LIST:
      if (detail->data[0] == 1)
	handler (_("invalid number of registers in the list; "
		   "only 1 register is expected at operand %d -- `%s'"),
		 idx + 1, str);
      else
	handler (_("invalid number of registers in the list; "
		   "%d registers are expected at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    case AARCH64_OPDE_UNALIGNED:
      handler (_("immediate value must be a multiple of "
		 "%d at operand %d -- `%s'"),
	       detail->data[0], idx + 1, str);
      break;

    default:
      gas_assert (0);
      break;
    }
}
5059
/* Process and output the error message about the operand mismatching.

   When this function is called, the operand error information had
   been collected for an assembly line and there will be multiple
   errors in the case of multiple instruction templates; output the
   error message that most closely describes the problem.

   The errors to be printed can be filtered on printing all errors
   or only non-fatal errors.  This distinction has to be made because
   the error buffer may already be filled with fatal errors we don't want to
   print due to the different instruction templates.  */

static void
output_operand_error_report (char *str, bool non_fatal_only)
{
  int largest_error_pos;
  const char *msg = NULL;
  enum aarch64_operand_error_kind kind;
  operand_error_record *curr;
  operand_error_record *head = operand_error_report.head;
  operand_error_record *record = NULL;

  /* No error to report.  */
  if (head == NULL)
    return;

  gas_assert (head != NULL && operand_error_report.tail != NULL);

  /* Only one error.  */
  if (head == operand_error_report.tail)
    {
      /* If the only error is a non-fatal one and we don't want to print it,
	 just exit.  */
      if (!non_fatal_only || head->detail.non_fatal)
	{
	  DEBUG_TRACE ("single opcode entry with error kind: %s",
		       operand_mismatch_kind_names[head->detail.kind]);
	  output_operand_error_record (head, str);
	}
      return;
    }

  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
  kind = AARCH64_OPDE_NIL;
  for (curr = head; curr != NULL; curr = curr->next)
    {
      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
      if (operand_error_higher_severity_p (curr->detail.kind, kind)
	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
	kind = curr->detail.kind;
    }

  gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);

  /* Pick up one of errors of KIND to report.  */
  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
  for (curr = head; curr != NULL; curr = curr->next)
    {
      /* If we don't want to print non-fatal errors then don't consider them
	 at all.  */
      if (curr->detail.kind != kind
	  || (non_fatal_only && !curr->detail.non_fatal))
	continue;
      /* If there are multiple errors, pick up the one with the highest
	 mismatching operand index.  In the case of multiple errors with
	 the equally highest operand index, pick up the first one or the
	 first one with non-NULL error message.  */
      if (curr->detail.index > largest_error_pos
	  || (curr->detail.index == largest_error_pos && msg == NULL
	      && curr->detail.error != NULL))
	{
	  largest_error_pos = curr->detail.index;
	  record = curr;
	  msg = record->detail.error;
	}
    }

  /* The way errors are collected in the back-end is a bit non-intuitive.  But
     essentially, because each operand template is tried recursively you may
     always have errors collected from the previous tried OPND.  These are
     usually skipped if there is one successful match.  However now with the
     non-fatal errors we have to ignore those previously collected hard errors
     when we're only interested in printing the non-fatal ones.  This condition
     prevents us from printing errors that are not appropriate, since we did
     match a condition, but it also has warnings that it wants to print.  */
  if (non_fatal_only && !record)
    return;

  gas_assert (largest_error_pos != -2 && record != NULL);
  DEBUG_TRACE ("Pick up error kind %s to report",
	       operand_mismatch_kind_names[record->detail.kind]);

  /* Output.  */
  output_operand_error_record (record, str);
}
5157 \f
/* Write an AARCH64 instruction to buf - always little-endian.  */
static void
put_aarch64_insn (char *buf, uint32_t insn)
{
  unsigned char *p = (unsigned char *) buf;
  int i;

  /* Emit the four bytes least-significant first.  */
  for (i = 0; i < 4; i++)
    p[i] = (insn >> (8 * i)) & 0xff;
}
5168
/* Read a 32-bit AArch64 instruction from BUF, interpreted little-endian.  */
static uint32_t
get_aarch64_insn (char *buf)
{
  const unsigned char *p = (const unsigned char *) buf;

  return ((uint32_t) p[0]
	  | ((uint32_t) p[1] << 8)
	  | ((uint32_t) p[2] << 16)
	  | ((uint32_t) p[3] << 24));
}
5178
/* Emit the already-encoded instruction in inst.base.value into the current
   frag, attaching a relocation fixup when inst.reloc is pending.  NEW_INST,
   when non-NULL, is recorded on the fixup for later use during fixup
   processing.  */

static void
output_inst (struct aarch64_inst *new_inst)
{
  char *to = NULL;

  /* Reserve 4 bytes in the output frag.  */
  to = frag_more (INSN_SIZE);

  frag_now->tc_frag_data.recorded = 1;

  put_aarch64_insn (to, inst.base.value);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
				    INSN_SIZE, &inst.reloc.exp,
				    inst.reloc.pc_rel,
				    inst.reloc.type);
      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size,
         that's the job of our code in md_apply_fix().  */
      fixp->fx_no_overflow = 1;
      if (new_inst != NULL)
	fixp->tc_fix_data.inst = new_inst;
      if (aarch64_gas_internal_fixup_p ())
	{
	  /* Internal fixups carry the operand code and flags so they can
	     be resolved without a BFD relocation.  */
	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
	  fixp->fx_addnumber = inst.reloc.flags;
	}
    }

  /* Record the instruction for DWARF line-number information.  */
  dwarf2_emit_insn (INSN_SIZE);
}
5212
/* Link together opcodes of the same name.  */

struct templates
{
  aarch64_opcode *opcode;	/* One opcode entry for this mnemonic.  */
  struct templates *next;	/* Next entry sharing the mnemonic.  */
};

typedef struct templates templates;
5222
5223 static templates *
5224 lookup_mnemonic (const char *start, int len)
5225 {
5226 templates *templ = NULL;
5227
5228 templ = str_hash_find_n (aarch64_ops_hsh, start, len);
5229 return templ;
5230 }
5231
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.  On success *STR is advanced past the
   mnemonic (and any recognized condition suffix) and inst.cond is set;
   returns NULL/0 on failure.  */

static templates *
opcode_lookup (char **str)
{
  char *end, *base, *dot;
  const aarch64_cond *cond;
  char condname[16];
  int len;

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.', or end of string.  Remember the first '.' as a potential
     condition-suffix separator.  */
  dot = 0;
  for (base = end = *str; is_part_of_name(*end); end++)
    if (*end == '.' && !dot)
      dot = end;

  /* Empty mnemonic, or one that starts with '.': not a mnemonic.  */
  if (end == base || dot == base)
    return 0;

  inst.cond = COND_ALWAYS;

  /* Handle a possible condition.  */
  if (dot)
    {
      cond = str_hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
      if (cond)
	{
	  inst.cond = cond->value;
	  *str = end;
	}
      else
	{
	  /* Unknown suffix: leave *STR at the '.' and fail.  */
	  *str = dot;
	  return 0;
	}
      len = dot - base;
    }
  else
    {
      *str = end;
      len = end - base;
    }

  if (inst.cond == COND_ALWAYS)
    {
      /* Look for unaffixed mnemonic.  */
      return lookup_mnemonic (base, len);
    }
  else if (len <= 13)
    {
      /* append ".c" to mnemonic if conditional; the len <= 13 bound keeps
	 len + 2 within condname[16] (no terminating NUL is needed since
	 lookup_mnemonic takes an explicit length).  */
      memcpy (condname, base, len);
      memcpy (condname + len, ".c", 2);
      base = condname;
      len += 2;
      return lookup_mnemonic (base, len);
    }

  return NULL;
}
5295
/* Internal helper routine converting a vector_type_el structure *VECTYPE
   to a corresponding operand qualifier.  Returns AARCH64_OPND_QLF_NIL and
   records a diagnostic via first_error on failure.  */

static inline aarch64_opnd_qualifier_t
vectype_to_qualifier (const struct vector_type_el *vectype)
{
  /* Element size in bytes indexed by vector_el_type.  */
  const unsigned char ele_size[5]
    = {1, 2, 4, 8, 16};
  /* Base qualifier for each element type; the width-derived offset below
     is added to this to select the final V_* qualifier.  */
  const unsigned int ele_base [5] =
    {
      AARCH64_OPND_QLF_V_4B,
      AARCH64_OPND_QLF_V_2H,
      AARCH64_OPND_QLF_V_2S,
      AARCH64_OPND_QLF_V_1D,
      AARCH64_OPND_QLF_V_1Q
    };

  if (!vectype->defined || vectype->type == NT_invtype)
    goto vectype_conversion_fail;

  /* SVE predicate qualifiers.  */
  if (vectype->type == NT_zero)
    return AARCH64_OPND_QLF_P_Z;
  if (vectype->type == NT_merge)
    return AARCH64_OPND_QLF_P_M;

  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);

  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
    {
      /* Special case S_4B.  */
      if (vectype->type == NT_b && vectype->width == 4)
	return AARCH64_OPND_QLF_S_4B;

      /* Special case S_2H.  */
      if (vectype->type == NT_h && vectype->width == 2)
	return AARCH64_OPND_QLF_S_2H;

      /* Vector element register.  Relies on the S_* qualifiers being laid
	 out in the same order as the NT_* element types.  */
      return AARCH64_OPND_QLF_S_B + vectype->type;
    }
  else
    {
      /* Vector register.  */
      int reg_size = ele_size[vectype->type] * vectype->width;
      unsigned offset;
      unsigned shift;
      /* Only 32-, 64- and 128-bit vector registers exist.  */
      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
	goto vectype_conversion_fail;

      /* The conversion is by calculating the offset from the base operand
	 qualifier for the vector type.  The operand qualifiers are regular
	 enough that the offset can be established by shifting the vector
	 width by a vector-type dependent amount.  */
      shift = 0;
      if (vectype->type == NT_b)
	shift = 3;
      else if (vectype->type == NT_h || vectype->type == NT_s)
	shift = 2;
      else if (vectype->type >= NT_d)
	shift = 1;
      else
	gas_assert (0);

      offset = ele_base [vectype->type] + (vectype->width >> shift);
      gas_assert (AARCH64_OPND_QLF_V_4B <= offset
		  && offset <= AARCH64_OPND_QLF_V_1Q);
      return offset;
    }

 vectype_conversion_fail:
  first_error (_("bad vector arrangement type"));
  return AARCH64_OPND_QLF_NIL;
}
5370
/* Process an optional operand that is found omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted operand.
   */

static void
process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
			 int idx, aarch64_opnd_info *operand)
{
  /* The default encoding for the omitted operand comes from the opcode
     table entry.  */
  aarch64_insn default_value = get_optional_operand_default_value (opcode);
  gas_assert (optional_operand_p (opcode, idx));
  gas_assert (!operand->present);

  switch (type)
    {
    /* Plain register operands: the default is a register number.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rt_LS64:
    case AARCH64_OPND_Rt_SP:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_Rm_SP:
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_Va:
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      operand->reg.regno = default_value;
      break;

    /* Register-lane operands: the default selects the register number.  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
    case AARCH64_OPND_Em16:
    case AARCH64_OPND_SM3_IMM2:
      operand->reglane.regno = default_value;
      break;

    /* Immediate operands: the default is the immediate value itself.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_IMM_2:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_SVE_PATTERN:
    case AARCH64_OPND_SVE_PRFOP:
      operand->imm.value = default_value;
      break;

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      /* An omitted scaled pattern defaults to "MUL #1".  */
      operand->imm.value = default_value;
      operand->shifter.kind = AARCH64_MOD_MUL;
      operand->shifter.amount = 1;
      break;

    case AARCH64_OPND_EXCEPTION:
      /* No immediate was parsed, so make sure no relocation is pending.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* The default is an index into the barrier-option table.  */
      operand->barrier = aarch64_barrier_options + default_value;
      break;

    case AARCH64_OPND_BTI_TARGET:
      /* The default is an index into the hint-option table.  */
      operand->hint_option = aarch64_hint_options + default_value;
      break;

    default:
      break;
    }
}
5469
/* Process the relocation type for move wide instructions.
   Return TRUE on success; otherwise return FALSE.  On success the MOVW
   shift amount implied by the relocation type (0/16/32/48) is stored in
   operand 1's shifter.  */

static bool
process_movw_reloc_info (void)
{
  int is32;
  unsigned shift;

  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;

  /* MOVK must not zero the other halves, so the signed (_S) and
     group-relative relocations below are rejected for it.  */
  if (inst.base.opcode->op == OP_MOVK)
    switch (inst.reloc.type)
      {
      case BFD_RELOC_AARCH64_MOVW_G0_S:
      case BFD_RELOC_AARCH64_MOVW_G1_S:
      case BFD_RELOC_AARCH64_MOVW_G2_S:
      case BFD_RELOC_AARCH64_MOVW_PREL_G0:
      case BFD_RELOC_AARCH64_MOVW_PREL_G1:
      case BFD_RELOC_AARCH64_MOVW_PREL_G2:
      case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
	set_syntax_error
	  (_("the specified relocation type is not allowed for MOVK"));
	return false;
      default:
	break;
      }

  /* Map the relocation type to the 16-bit group it selects, i.e. the
     implicit LSL amount of the MOVZ/MOVN/MOVK instruction.  */
  switch (inst.reloc.type)
    {
    /* G0 relocations: bits [15:0], shift 0.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      shift = 0;
      break;
    /* G1 relocations: bits [31:16], shift 16.  */
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      shift = 16;
      break;
    /* G2 relocations: bits [47:32], shift 32 -- 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 32;
      break;
    /* G3 relocations: bits [63:48], shift 48 -- 64-bit registers only.  */
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      if (is32)
	{
	  set_fatal_syntax_error
	    (_("the specified relocation type is not allowed for 32-bit "
	       "register"));
	  return false;
	}
      shift = 48;
      break;
    default:
      /* More cases should be added when more MOVW-related relocation types
         are supported in GAS.  */
      gas_assert (aarch64_gas_internal_fixup_p ());
      /* The shift amount should have already been set by the parser.  */
      return true;
    }
  inst.base.operands[1].shifter.amount = shift;
  return true;
}
5571
5572 /* A primitive log calculator. */
5573
static inline unsigned int
get_logsz (unsigned int size)
{
  /* Log2 of the access sizes 1, 2, 4, 8 and 16 bytes; entries of
     (unsigned char) -1 mark sizes with no valid encoding.  */
  const unsigned char ls[16] =
    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
  /* Reject SIZE == 0 as well as SIZE > 16: a zero size would otherwise
     read ls[-1], which is out of bounds.  */
  if (size == 0 || size > 16)
    {
      gas_assert (0);
      return -1;
    }
  gas_assert (ls[size - 1] != (unsigned char)-1);
  return ls[size - 1];
}
5587
5588 /* Determine and return the real reloc type code for an instruction
5589 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
5590
static inline bfd_reloc_code_real_type
ldst_lo12_determine_real_reloc_type (void)
{
  unsigned logsz;
  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;

  /* Rows correspond to the five pseudo reloc kinds accepted below, in the
     same order as their BFD_RELOC_AARCH64_* codes (plain LO12, then the
     TLSLD DTPREL / DTPREL_NC and TLSLE TPREL / TPREL_NC variants); columns
     are indexed by log2 of the transfer size in bytes.  BFD_RELOC_AARCH64_NONE
     marks combinations with no real relocation (no 128-bit TLS variants).  */
  const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
    {
      BFD_RELOC_AARCH64_LDST8_LO12,
      BFD_RELOC_AARCH64_LDST16_LO12,
      BFD_RELOC_AARCH64_LDST32_LO12,
      BFD_RELOC_AARCH64_LDST64_LO12,
      BFD_RELOC_AARCH64_LDST128_LO12
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
      BFD_RELOC_AARCH64_NONE
    },
    {
      BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
      BFD_RELOC_AARCH64_NONE
    }
  };

  /* Only the five pseudo reloc types above may reach this function.  */
  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
	      || (inst.reloc.type
		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);

  /* If the address operand carried no explicit qualifier, deduce it from
     operand 0's qualifier via the opcode's qualifier list.  */
  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
    opd1_qlf =
      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
				      1, opd0_qlf, 0);
  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);

  /* The TLS variants have no 128-bit (logsz == 4) forms; see the
     BFD_RELOC_AARCH64_NONE entries in the table above.  */
  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
    gas_assert (logsz <= 3);
  else
    gas_assert (logsz <= 4);

  /* In reloc.c, these pseudo relocation types should be defined in similar
     order as above reloc_ldst_lo12 array. Because the array index calculation
     below relies on this.  */
  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
}
5666
5667 /* Check whether a register list REGINFO is valid. The registers must be
5668 numbered in increasing order (modulo 32), in increments of one or two.
5669
5670 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5671 increments of two.
5672
5673 Return FALSE if such a register list is invalid, otherwise return TRUE. */
5674
static bool
reg_list_valid_p (uint32_t reginfo, int accept_alternate)
{
  /* REGINFO packs the list as: low 2 bits hold (number of registers - 1),
     followed by one 5-bit field per register number, first register in the
     lowest field.  Registers must ascend by STEP, wrapping modulo 32.  */
  uint32_t remaining = 1 + (reginfo & 0x3);
  uint32_t step = accept_alternate ? 2 : 1;
  uint32_t expected;

  reginfo >>= 2;
  expected = reginfo & 0x1f;

  while (--remaining > 0)
    {
      reginfo >>= 5;
      expected = (expected + step) & 0x1f;
      if ((reginfo & 0x1f) != expected)
	return false;
    }

  return true;
}
5697
5698 /* Generic instruction operand parser. This does no encoding and no
5699 semantic validation; it merely squirrels values away in the inst
5700 structure. Returns TRUE or FALSE depending on whether the
5701 specified grammar matched. */
5702
5703 static bool
5704 parse_operands (char *str, const aarch64_opcode *opcode)
5705 {
5706 int i;
5707 char *backtrack_pos = 0;
5708 const enum aarch64_opnd *operands = opcode->operands;
5709 aarch64_reg_type imm_reg_type;
5710
5711 clear_error ();
5712 skip_whitespace (str);
5713
5714 if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5715 imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5716 else
5717 imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5718
5719 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5720 {
5721 int64_t val;
5722 const reg_entry *reg;
5723 int comma_skipped_p = 0;
5724 aarch64_reg_type rtype;
5725 struct vector_type_el vectype;
5726 aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5727 aarch64_opnd_info *info = &inst.base.operands[i];
5728 aarch64_reg_type reg_type;
5729
5730 DEBUG_TRACE ("parse operand %d", i);
5731
5732 /* Assign the operand code. */
5733 info->type = operands[i];
5734
5735 if (optional_operand_p (opcode, i))
5736 {
5737 /* Remember where we are in case we need to backtrack. */
5738 gas_assert (!backtrack_pos);
5739 backtrack_pos = str;
5740 }
5741
5742 /* Expect comma between operands; the backtrack mechanism will take
5743 care of cases of omitted optional operand. */
5744 if (i > 0 && ! skip_past_char (&str, ','))
5745 {
5746 set_syntax_error (_("comma expected between operands"));
5747 goto failure;
5748 }
5749 else
5750 comma_skipped_p = 1;
5751
5752 switch (operands[i])
5753 {
5754 case AARCH64_OPND_Rd:
5755 case AARCH64_OPND_Rn:
5756 case AARCH64_OPND_Rm:
5757 case AARCH64_OPND_Rt:
5758 case AARCH64_OPND_Rt2:
5759 case AARCH64_OPND_Rs:
5760 case AARCH64_OPND_Ra:
5761 case AARCH64_OPND_Rt_LS64:
5762 case AARCH64_OPND_Rt_SYS:
5763 case AARCH64_OPND_PAIRREG:
5764 case AARCH64_OPND_SVE_Rm:
5765 po_int_reg_or_fail (REG_TYPE_R_Z);
5766
5767 /* In LS64 load/store instructions Rt register number must be even
5768 and <=22. */
5769 if (operands[i] == AARCH64_OPND_Rt_LS64)
5770 {
5771 /* We've already checked if this is valid register.
5772 This will check if register number (Rt) is not undefined for LS64
5773 instructions:
5774 if Rt<4:3> == '11' || Rt<0> == '1' then UNDEFINED. */
5775 if ((info->reg.regno & 0x18) == 0x18 || (info->reg.regno & 0x01) == 0x01)
5776 {
5777 set_syntax_error (_("invalid Rt register number in 64-byte load/store"));
5778 goto failure;
5779 }
5780 }
5781 break;
5782
5783 case AARCH64_OPND_Rd_SP:
5784 case AARCH64_OPND_Rn_SP:
5785 case AARCH64_OPND_Rt_SP:
5786 case AARCH64_OPND_SVE_Rn_SP:
5787 case AARCH64_OPND_Rm_SP:
5788 po_int_reg_or_fail (REG_TYPE_R_SP);
5789 break;
5790
5791 case AARCH64_OPND_Rm_EXT:
5792 case AARCH64_OPND_Rm_SFT:
5793 po_misc_or_fail (parse_shifter_operand
5794 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5795 ? SHIFTED_ARITH_IMM
5796 : SHIFTED_LOGIC_IMM)));
5797 if (!info->shifter.operator_present)
5798 {
5799 /* Default to LSL if not present. Libopcodes prefers shifter
5800 kind to be explicit. */
5801 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5802 info->shifter.kind = AARCH64_MOD_LSL;
5803 /* For Rm_EXT, libopcodes will carry out further check on whether
5804 or not stack pointer is used in the instruction (Recall that
5805 "the extend operator is not optional unless at least one of
5806 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
5807 }
5808 break;
5809
5810 case AARCH64_OPND_Fd:
5811 case AARCH64_OPND_Fn:
5812 case AARCH64_OPND_Fm:
5813 case AARCH64_OPND_Fa:
5814 case AARCH64_OPND_Ft:
5815 case AARCH64_OPND_Ft2:
5816 case AARCH64_OPND_Sd:
5817 case AARCH64_OPND_Sn:
5818 case AARCH64_OPND_Sm:
5819 case AARCH64_OPND_SVE_VZn:
5820 case AARCH64_OPND_SVE_Vd:
5821 case AARCH64_OPND_SVE_Vm:
5822 case AARCH64_OPND_SVE_Vn:
5823 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5824 if (val == PARSE_FAIL)
5825 {
5826 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5827 goto failure;
5828 }
5829 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5830
5831 info->reg.regno = val;
5832 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5833 break;
5834
5835 case AARCH64_OPND_SVE_Pd:
5836 case AARCH64_OPND_SVE_Pg3:
5837 case AARCH64_OPND_SVE_Pg4_5:
5838 case AARCH64_OPND_SVE_Pg4_10:
5839 case AARCH64_OPND_SVE_Pg4_16:
5840 case AARCH64_OPND_SVE_Pm:
5841 case AARCH64_OPND_SVE_Pn:
5842 case AARCH64_OPND_SVE_Pt:
5843 reg_type = REG_TYPE_PN;
5844 goto vector_reg;
5845
5846 case AARCH64_OPND_SVE_Za_5:
5847 case AARCH64_OPND_SVE_Za_16:
5848 case AARCH64_OPND_SVE_Zd:
5849 case AARCH64_OPND_SVE_Zm_5:
5850 case AARCH64_OPND_SVE_Zm_16:
5851 case AARCH64_OPND_SVE_Zn:
5852 case AARCH64_OPND_SVE_Zt:
5853 reg_type = REG_TYPE_ZN;
5854 goto vector_reg;
5855
5856 case AARCH64_OPND_Va:
5857 case AARCH64_OPND_Vd:
5858 case AARCH64_OPND_Vn:
5859 case AARCH64_OPND_Vm:
5860 reg_type = REG_TYPE_VN;
5861 vector_reg:
5862 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5863 if (val == PARSE_FAIL)
5864 {
5865 first_error (_(get_reg_expected_msg (reg_type)));
5866 goto failure;
5867 }
5868 if (vectype.defined & NTA_HASINDEX)
5869 goto failure;
5870
5871 info->reg.regno = val;
5872 if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5873 && vectype.type == NT_invtype)
5874 /* Unqualified Pn and Zn registers are allowed in certain
5875 contexts. Rely on F_STRICT qualifier checking to catch
5876 invalid uses. */
5877 info->qualifier = AARCH64_OPND_QLF_NIL;
5878 else
5879 {
5880 info->qualifier = vectype_to_qualifier (&vectype);
5881 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5882 goto failure;
5883 }
5884 break;
5885
5886 case AARCH64_OPND_VdD1:
5887 case AARCH64_OPND_VnD1:
5888 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5889 if (val == PARSE_FAIL)
5890 {
5891 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5892 goto failure;
5893 }
5894 if (vectype.type != NT_d || vectype.index != 1)
5895 {
5896 set_fatal_syntax_error
5897 (_("the top half of a 128-bit FP/SIMD register is expected"));
5898 goto failure;
5899 }
5900 info->reg.regno = val;
5901 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
5902 here; it is correct for the purpose of encoding/decoding since
5903 only the register number is explicitly encoded in the related
5904 instructions, although this appears a bit hacky. */
5905 info->qualifier = AARCH64_OPND_QLF_S_D;
5906 break;
5907
5908 case AARCH64_OPND_SVE_Zm3_INDEX:
5909 case AARCH64_OPND_SVE_Zm3_22_INDEX:
5910 case AARCH64_OPND_SVE_Zm3_11_INDEX:
5911 case AARCH64_OPND_SVE_Zm4_11_INDEX:
5912 case AARCH64_OPND_SVE_Zm4_INDEX:
5913 case AARCH64_OPND_SVE_Zn_INDEX:
5914 reg_type = REG_TYPE_ZN;
5915 goto vector_reg_index;
5916
5917 case AARCH64_OPND_Ed:
5918 case AARCH64_OPND_En:
5919 case AARCH64_OPND_Em:
5920 case AARCH64_OPND_Em16:
5921 case AARCH64_OPND_SM3_IMM2:
5922 reg_type = REG_TYPE_VN;
5923 vector_reg_index:
5924 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5925 if (val == PARSE_FAIL)
5926 {
5927 first_error (_(get_reg_expected_msg (reg_type)));
5928 goto failure;
5929 }
5930 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5931 goto failure;
5932
5933 info->reglane.regno = val;
5934 info->reglane.index = vectype.index;
5935 info->qualifier = vectype_to_qualifier (&vectype);
5936 if (info->qualifier == AARCH64_OPND_QLF_NIL)
5937 goto failure;
5938 break;
5939
5940 case AARCH64_OPND_SVE_ZnxN:
5941 case AARCH64_OPND_SVE_ZtxN:
5942 reg_type = REG_TYPE_ZN;
5943 goto vector_reg_list;
5944
5945 case AARCH64_OPND_LVn:
5946 case AARCH64_OPND_LVt:
5947 case AARCH64_OPND_LVt_AL:
5948 case AARCH64_OPND_LEt:
5949 reg_type = REG_TYPE_VN;
5950 vector_reg_list:
5951 if (reg_type == REG_TYPE_ZN
5952 && get_opcode_dependent_value (opcode) == 1
5953 && *str != '{')
5954 {
5955 val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5956 if (val == PARSE_FAIL)
5957 {
5958 first_error (_(get_reg_expected_msg (reg_type)));
5959 goto failure;
5960 }
5961 info->reglist.first_regno = val;
5962 info->reglist.num_regs = 1;
5963 }
5964 else
5965 {
5966 val = parse_vector_reg_list (&str, reg_type, &vectype);
5967 if (val == PARSE_FAIL)
5968 goto failure;
5969
5970 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5971 {
5972 set_fatal_syntax_error (_("invalid register list"));
5973 goto failure;
5974 }
5975
5976 if (vectype.width != 0 && *str != ',')
5977 {
5978 set_fatal_syntax_error
5979 (_("expected element type rather than vector type"));
5980 goto failure;
5981 }
5982
5983 info->reglist.first_regno = (val >> 2) & 0x1f;
5984 info->reglist.num_regs = (val & 0x3) + 1;
5985 }
5986 if (operands[i] == AARCH64_OPND_LEt)
5987 {
5988 if (!(vectype.defined & NTA_HASINDEX))
5989 goto failure;
5990 info->reglist.has_index = 1;
5991 info->reglist.index = vectype.index;
5992 }
5993 else
5994 {
5995 if (vectype.defined & NTA_HASINDEX)
5996 goto failure;
5997 if (!(vectype.defined & NTA_HASTYPE))
5998 {
5999 if (reg_type == REG_TYPE_ZN)
6000 set_fatal_syntax_error (_("missing type suffix"));
6001 goto failure;
6002 }
6003 }
6004 info->qualifier = vectype_to_qualifier (&vectype);
6005 if (info->qualifier == AARCH64_OPND_QLF_NIL)
6006 goto failure;
6007 break;
6008
6009 case AARCH64_OPND_CRn:
6010 case AARCH64_OPND_CRm:
6011 {
6012 char prefix = *(str++);
6013 if (prefix != 'c' && prefix != 'C')
6014 goto failure;
6015
6016 po_imm_nc_or_fail ();
6017 if (val > 15)
6018 {
6019 set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
6020 goto failure;
6021 }
6022 info->qualifier = AARCH64_OPND_QLF_CR;
6023 info->imm.value = val;
6024 break;
6025 }
6026
6027 case AARCH64_OPND_SHLL_IMM:
6028 case AARCH64_OPND_IMM_VLSR:
6029 po_imm_or_fail (1, 64);
6030 info->imm.value = val;
6031 break;
6032
6033 case AARCH64_OPND_CCMP_IMM:
6034 case AARCH64_OPND_SIMM5:
6035 case AARCH64_OPND_FBITS:
6036 case AARCH64_OPND_TME_UIMM16:
6037 case AARCH64_OPND_UIMM4:
6038 case AARCH64_OPND_UIMM4_ADDG:
6039 case AARCH64_OPND_UIMM10:
6040 case AARCH64_OPND_UIMM3_OP1:
6041 case AARCH64_OPND_UIMM3_OP2:
6042 case AARCH64_OPND_IMM_VLSL:
6043 case AARCH64_OPND_IMM:
6044 case AARCH64_OPND_IMM_2:
6045 case AARCH64_OPND_WIDTH:
6046 case AARCH64_OPND_SVE_INV_LIMM:
6047 case AARCH64_OPND_SVE_LIMM:
6048 case AARCH64_OPND_SVE_LIMM_MOV:
6049 case AARCH64_OPND_SVE_SHLIMM_PRED:
6050 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
6051 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
6052 case AARCH64_OPND_SVE_SHRIMM_PRED:
6053 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
6054 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
6055 case AARCH64_OPND_SVE_SIMM5:
6056 case AARCH64_OPND_SVE_SIMM5B:
6057 case AARCH64_OPND_SVE_SIMM6:
6058 case AARCH64_OPND_SVE_SIMM8:
6059 case AARCH64_OPND_SVE_UIMM3:
6060 case AARCH64_OPND_SVE_UIMM7:
6061 case AARCH64_OPND_SVE_UIMM8:
6062 case AARCH64_OPND_SVE_UIMM8_53:
6063 case AARCH64_OPND_IMM_ROT1:
6064 case AARCH64_OPND_IMM_ROT2:
6065 case AARCH64_OPND_IMM_ROT3:
6066 case AARCH64_OPND_SVE_IMM_ROT1:
6067 case AARCH64_OPND_SVE_IMM_ROT2:
6068 case AARCH64_OPND_SVE_IMM_ROT3:
6069 po_imm_nc_or_fail ();
6070 info->imm.value = val;
6071 break;
6072
6073 case AARCH64_OPND_SVE_AIMM:
6074 case AARCH64_OPND_SVE_ASIMM:
6075 po_imm_nc_or_fail ();
6076 info->imm.value = val;
6077 skip_whitespace (str);
6078 if (skip_past_comma (&str))
6079 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6080 else
6081 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6082 break;
6083
6084 case AARCH64_OPND_SVE_PATTERN:
6085 po_enum_or_fail (aarch64_sve_pattern_array);
6086 info->imm.value = val;
6087 break;
6088
6089 case AARCH64_OPND_SVE_PATTERN_SCALED:
6090 po_enum_or_fail (aarch64_sve_pattern_array);
6091 info->imm.value = val;
6092 if (skip_past_comma (&str)
6093 && !parse_shift (&str, info, SHIFTED_MUL))
6094 goto failure;
6095 if (!info->shifter.operator_present)
6096 {
6097 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6098 info->shifter.kind = AARCH64_MOD_MUL;
6099 info->shifter.amount = 1;
6100 }
6101 break;
6102
6103 case AARCH64_OPND_SVE_PRFOP:
6104 po_enum_or_fail (aarch64_sve_prfop_array);
6105 info->imm.value = val;
6106 break;
6107
6108 case AARCH64_OPND_UIMM7:
6109 po_imm_or_fail (0, 127);
6110 info->imm.value = val;
6111 break;
6112
6113 case AARCH64_OPND_IDX:
6114 case AARCH64_OPND_MASK:
6115 case AARCH64_OPND_BIT_NUM:
6116 case AARCH64_OPND_IMMR:
6117 case AARCH64_OPND_IMMS:
6118 po_imm_or_fail (0, 63);
6119 info->imm.value = val;
6120 break;
6121
6122 case AARCH64_OPND_IMM0:
6123 po_imm_nc_or_fail ();
6124 if (val != 0)
6125 {
6126 set_fatal_syntax_error (_("immediate zero expected"));
6127 goto failure;
6128 }
6129 info->imm.value = 0;
6130 break;
6131
6132 case AARCH64_OPND_FPIMM0:
6133 {
6134 int qfloat;
6135 bool res1 = false, res2 = false;
6136 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
6137 it is probably not worth the effort to support it. */
6138 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, false,
6139 imm_reg_type))
6140 && (error_p ()
6141 || !(res2 = parse_constant_immediate (&str, &val,
6142 imm_reg_type))))
6143 goto failure;
6144 if ((res1 && qfloat == 0) || (res2 && val == 0))
6145 {
6146 info->imm.value = 0;
6147 info->imm.is_fp = 1;
6148 break;
6149 }
6150 set_fatal_syntax_error (_("immediate zero expected"));
6151 goto failure;
6152 }
6153
6154 case AARCH64_OPND_IMM_MOV:
6155 {
6156 char *saved = str;
6157 if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
6158 reg_name_p (str, REG_TYPE_VN))
6159 goto failure;
6160 str = saved;
6161 po_misc_or_fail (aarch64_get_expression (&inst.reloc.exp, &str,
6162 GE_OPT_PREFIX, REJECT_ABSENT,
6163 NORMAL_RESOLUTION));
6164 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
6165 later. fix_mov_imm_insn will try to determine a machine
6166 instruction (MOVZ, MOVN or ORR) for it and will issue an error
6167 message if the immediate cannot be moved by a single
6168 instruction. */
6169 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6170 inst.base.operands[i].skip = 1;
6171 }
6172 break;
6173
6174 case AARCH64_OPND_SIMD_IMM:
6175 case AARCH64_OPND_SIMD_IMM_SFT:
6176 if (! parse_big_immediate (&str, &val, imm_reg_type))
6177 goto failure;
6178 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6179 /* addr_off_p */ 0,
6180 /* need_libopcodes_p */ 1,
6181 /* skip_p */ 1);
6182 /* Parse shift.
6183 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6184 shift, we don't check it here; we leave the checking to
6185 the libopcodes (operand_general_constraint_met_p). By
6186 doing this, we achieve better diagnostics. */
6187 if (skip_past_comma (&str)
6188 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6189 goto failure;
6190 if (!info->shifter.operator_present
6191 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6192 {
6193 /* Default to LSL if not present. Libopcodes prefers shifter
6194 kind to be explicit. */
6195 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6196 info->shifter.kind = AARCH64_MOD_LSL;
6197 }
6198 break;
6199
6200 case AARCH64_OPND_FPIMM:
6201 case AARCH64_OPND_SIMD_FPIMM:
6202 case AARCH64_OPND_SVE_FPIMM8:
6203 {
6204 int qfloat;
6205 bool dp_p;
6206
6207 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6208 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6209 || !aarch64_imm_float_p (qfloat))
6210 {
6211 if (!error_p ())
6212 set_fatal_syntax_error (_("invalid floating-point"
6213 " constant"));
6214 goto failure;
6215 }
6216 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6217 inst.base.operands[i].imm.is_fp = 1;
6218 }
6219 break;
6220
6221 case AARCH64_OPND_SVE_I1_HALF_ONE:
6222 case AARCH64_OPND_SVE_I1_HALF_TWO:
6223 case AARCH64_OPND_SVE_I1_ZERO_ONE:
6224 {
6225 int qfloat;
6226 bool dp_p;
6227
6228 dp_p = double_precision_operand_p (&inst.base.operands[0]);
6229 if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6230 {
6231 if (!error_p ())
6232 set_fatal_syntax_error (_("invalid floating-point"
6233 " constant"));
6234 goto failure;
6235 }
6236 inst.base.operands[i].imm.value = qfloat;
6237 inst.base.operands[i].imm.is_fp = 1;
6238 }
6239 break;
6240
6241 case AARCH64_OPND_LIMM:
6242 po_misc_or_fail (parse_shifter_operand (&str, info,
6243 SHIFTED_LOGIC_IMM));
6244 if (info->shifter.operator_present)
6245 {
6246 set_fatal_syntax_error
6247 (_("shift not allowed for bitmask immediate"));
6248 goto failure;
6249 }
6250 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6251 /* addr_off_p */ 0,
6252 /* need_libopcodes_p */ 1,
6253 /* skip_p */ 1);
6254 break;
6255
6256 case AARCH64_OPND_AIMM:
6257 if (opcode->op == OP_ADD)
6258 /* ADD may have relocation types. */
6259 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6260 SHIFTED_ARITH_IMM));
6261 else
6262 po_misc_or_fail (parse_shifter_operand (&str, info,
6263 SHIFTED_ARITH_IMM));
6264 switch (inst.reloc.type)
6265 {
6266 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6267 info->shifter.amount = 12;
6268 break;
6269 case BFD_RELOC_UNUSED:
6270 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6271 if (info->shifter.kind != AARCH64_MOD_NONE)
6272 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6273 inst.reloc.pc_rel = 0;
6274 break;
6275 default:
6276 break;
6277 }
6278 info->imm.value = 0;
6279 if (!info->shifter.operator_present)
6280 {
6281 /* Default to LSL if not present. Libopcodes prefers shifter
6282 kind to be explicit. */
6283 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6284 info->shifter.kind = AARCH64_MOD_LSL;
6285 }
6286 break;
6287
6288 case AARCH64_OPND_HALF:
6289 {
6290 /* #<imm16> or relocation. */
6291 int internal_fixup_p;
6292 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6293 if (internal_fixup_p)
6294 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6295 skip_whitespace (str);
6296 if (skip_past_comma (&str))
6297 {
6298 /* {, LSL #<shift>} */
6299 if (! aarch64_gas_internal_fixup_p ())
6300 {
6301 set_fatal_syntax_error (_("can't mix relocation modifier "
6302 "with explicit shift"));
6303 goto failure;
6304 }
6305 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6306 }
6307 else
6308 inst.base.operands[i].shifter.amount = 0;
6309 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6310 inst.base.operands[i].imm.value = 0;
6311 if (! process_movw_reloc_info ())
6312 goto failure;
6313 }
6314 break;
6315
6316 case AARCH64_OPND_EXCEPTION:
6317 case AARCH64_OPND_UNDEFINED:
6318 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6319 imm_reg_type));
6320 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6321 /* addr_off_p */ 0,
6322 /* need_libopcodes_p */ 0,
6323 /* skip_p */ 1);
6324 break;
6325
6326 case AARCH64_OPND_NZCV:
6327 {
6328 const asm_nzcv *nzcv = str_hash_find_n (aarch64_nzcv_hsh, str, 4);
6329 if (nzcv != NULL)
6330 {
6331 str += 4;
6332 info->imm.value = nzcv->value;
6333 break;
6334 }
6335 po_imm_or_fail (0, 15);
6336 info->imm.value = val;
6337 }
6338 break;
6339
6340 case AARCH64_OPND_COND:
6341 case AARCH64_OPND_COND1:
6342 {
6343 char *start = str;
6344 do
6345 str++;
6346 while (ISALPHA (*str));
6347 info->cond = str_hash_find_n (aarch64_cond_hsh, start, str - start);
6348 if (info->cond == NULL)
6349 {
6350 set_syntax_error (_("invalid condition"));
6351 goto failure;
6352 }
6353 else if (operands[i] == AARCH64_OPND_COND1
6354 && (info->cond->value & 0xe) == 0xe)
6355 {
6356 /* Do not allow AL or NV. */
6357 set_default_error ();
6358 goto failure;
6359 }
6360 }
6361 break;
6362
6363 case AARCH64_OPND_ADDR_ADRP:
6364 po_misc_or_fail (parse_adrp (&str));
6365 /* Clear the value as operand needs to be relocated. */
6366 info->imm.value = 0;
6367 break;
6368
6369 case AARCH64_OPND_ADDR_PCREL14:
6370 case AARCH64_OPND_ADDR_PCREL19:
6371 case AARCH64_OPND_ADDR_PCREL21:
6372 case AARCH64_OPND_ADDR_PCREL26:
6373 po_misc_or_fail (parse_address (&str, info));
6374 if (!info->addr.pcrel)
6375 {
6376 set_syntax_error (_("invalid pc-relative address"));
6377 goto failure;
6378 }
6379 if (inst.gen_lit_pool
6380 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6381 {
6382 /* Only permit "=value" in the literal load instructions.
6383 The literal will be generated by programmer_friendly_fixup. */
6384 set_syntax_error (_("invalid use of \"=immediate\""));
6385 goto failure;
6386 }
6387 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6388 {
6389 set_syntax_error (_("unrecognized relocation suffix"));
6390 goto failure;
6391 }
6392 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6393 {
6394 info->imm.value = inst.reloc.exp.X_add_number;
6395 inst.reloc.type = BFD_RELOC_UNUSED;
6396 }
6397 else
6398 {
6399 info->imm.value = 0;
6400 if (inst.reloc.type == BFD_RELOC_UNUSED)
6401 switch (opcode->iclass)
6402 {
6403 case compbranch:
6404 case condbranch:
6405 /* e.g. CBZ or B.COND */
6406 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6407 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6408 break;
6409 case testbranch:
6410 /* e.g. TBZ */
6411 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6412 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6413 break;
6414 case branch_imm:
6415 /* e.g. B or BL */
6416 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6417 inst.reloc.type =
6418 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6419 : BFD_RELOC_AARCH64_JUMP26;
6420 break;
6421 case loadlit:
6422 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6423 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6424 break;
6425 case pcreladdr:
6426 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6427 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6428 break;
6429 default:
6430 gas_assert (0);
6431 abort ();
6432 }
6433 inst.reloc.pc_rel = 1;
6434 }
6435 break;
6436
6437 case AARCH64_OPND_ADDR_SIMPLE:
6438 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6439 {
6440 /* [<Xn|SP>{, #<simm>}] */
6441 char *start = str;
6442 /* First use the normal address-parsing routines, to get
6443 the usual syntax errors. */
6444 po_misc_or_fail (parse_address (&str, info));
6445 if (info->addr.pcrel || info->addr.offset.is_reg
6446 || !info->addr.preind || info->addr.postind
6447 || info->addr.writeback)
6448 {
6449 set_syntax_error (_("invalid addressing mode"));
6450 goto failure;
6451 }
6452
6453 /* Then retry, matching the specific syntax of these addresses. */
6454 str = start;
6455 po_char_or_fail ('[');
6456 po_reg_or_fail (REG_TYPE_R64_SP);
6457 /* Accept optional ", #0". */
6458 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6459 && skip_past_char (&str, ','))
6460 {
6461 skip_past_char (&str, '#');
6462 if (! skip_past_char (&str, '0'))
6463 {
6464 set_fatal_syntax_error
6465 (_("the optional immediate offset can only be 0"));
6466 goto failure;
6467 }
6468 }
6469 po_char_or_fail (']');
6470 break;
6471 }
6472
6473 case AARCH64_OPND_ADDR_REGOFF:
6474 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
6475 po_misc_or_fail (parse_address (&str, info));
6476 regoff_addr:
6477 if (info->addr.pcrel || !info->addr.offset.is_reg
6478 || !info->addr.preind || info->addr.postind
6479 || info->addr.writeback)
6480 {
6481 set_syntax_error (_("invalid addressing mode"));
6482 goto failure;
6483 }
6484 if (!info->shifter.operator_present)
6485 {
6486 /* Default to LSL if not present. Libopcodes prefers shifter
6487 kind to be explicit. */
6488 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6489 info->shifter.kind = AARCH64_MOD_LSL;
6490 }
6491 /* Qualifier to be deduced by libopcodes. */
6492 break;
6493
6494 case AARCH64_OPND_ADDR_SIMM7:
6495 po_misc_or_fail (parse_address (&str, info));
6496 if (info->addr.pcrel || info->addr.offset.is_reg
6497 || (!info->addr.preind && !info->addr.postind))
6498 {
6499 set_syntax_error (_("invalid addressing mode"));
6500 goto failure;
6501 }
6502 if (inst.reloc.type != BFD_RELOC_UNUSED)
6503 {
6504 set_syntax_error (_("relocation not allowed"));
6505 goto failure;
6506 }
6507 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6508 /* addr_off_p */ 1,
6509 /* need_libopcodes_p */ 1,
6510 /* skip_p */ 0);
6511 break;
6512
6513 case AARCH64_OPND_ADDR_SIMM9:
6514 case AARCH64_OPND_ADDR_SIMM9_2:
6515 case AARCH64_OPND_ADDR_SIMM11:
6516 case AARCH64_OPND_ADDR_SIMM13:
6517 po_misc_or_fail (parse_address (&str, info));
6518 if (info->addr.pcrel || info->addr.offset.is_reg
6519 || (!info->addr.preind && !info->addr.postind)
6520 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6521 && info->addr.writeback))
6522 {
6523 set_syntax_error (_("invalid addressing mode"));
6524 goto failure;
6525 }
6526 if (inst.reloc.type != BFD_RELOC_UNUSED)
6527 {
6528 set_syntax_error (_("relocation not allowed"));
6529 goto failure;
6530 }
6531 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6532 /* addr_off_p */ 1,
6533 /* need_libopcodes_p */ 1,
6534 /* skip_p */ 0);
6535 break;
6536
6537 case AARCH64_OPND_ADDR_SIMM10:
6538 case AARCH64_OPND_ADDR_OFFSET:
6539 po_misc_or_fail (parse_address (&str, info));
6540 if (info->addr.pcrel || info->addr.offset.is_reg
6541 || !info->addr.preind || info->addr.postind)
6542 {
6543 set_syntax_error (_("invalid addressing mode"));
6544 goto failure;
6545 }
6546 if (inst.reloc.type != BFD_RELOC_UNUSED)
6547 {
6548 set_syntax_error (_("relocation not allowed"));
6549 goto failure;
6550 }
6551 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6552 /* addr_off_p */ 1,
6553 /* need_libopcodes_p */ 1,
6554 /* skip_p */ 0);
6555 break;
6556
6557 case AARCH64_OPND_ADDR_UIMM12:
6558 po_misc_or_fail (parse_address (&str, info));
6559 if (info->addr.pcrel || info->addr.offset.is_reg
6560 || !info->addr.preind || info->addr.writeback)
6561 {
6562 set_syntax_error (_("invalid addressing mode"));
6563 goto failure;
6564 }
6565 if (inst.reloc.type == BFD_RELOC_UNUSED)
6566 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6567 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6568 || (inst.reloc.type
6569 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6570 || (inst.reloc.type
6571 == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6572 || (inst.reloc.type
6573 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6574 || (inst.reloc.type
6575 == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6576 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6577 /* Leave qualifier to be determined by libopcodes. */
6578 break;
6579
6580 case AARCH64_OPND_SIMD_ADDR_POST:
6581 /* [<Xn|SP>], <Xm|#<amount>> */
6582 po_misc_or_fail (parse_address (&str, info));
6583 if (!info->addr.postind || !info->addr.writeback)
6584 {
6585 set_syntax_error (_("invalid addressing mode"));
6586 goto failure;
6587 }
6588 if (!info->addr.offset.is_reg)
6589 {
6590 if (inst.reloc.exp.X_op == O_constant)
6591 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6592 else
6593 {
6594 set_fatal_syntax_error
6595 (_("writeback value must be an immediate constant"));
6596 goto failure;
6597 }
6598 }
6599 /* No qualifier. */
6600 break;
6601
6602 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6603 case AARCH64_OPND_SVE_ADDR_RI_S4x32:
6604 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6605 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6606 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6607 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6608 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6609 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6610 case AARCH64_OPND_SVE_ADDR_RI_U6:
6611 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6612 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6613 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6614 /* [X<n>{, #imm, MUL VL}]
6615 [X<n>{, #imm}]
6616 but recognizing SVE registers. */
6617 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6618 &offset_qualifier));
6619 if (base_qualifier != AARCH64_OPND_QLF_X)
6620 {
6621 set_syntax_error (_("invalid addressing mode"));
6622 goto failure;
6623 }
6624 sve_regimm:
6625 if (info->addr.pcrel || info->addr.offset.is_reg
6626 || !info->addr.preind || info->addr.writeback)
6627 {
6628 set_syntax_error (_("invalid addressing mode"));
6629 goto failure;
6630 }
6631 if (inst.reloc.type != BFD_RELOC_UNUSED
6632 || inst.reloc.exp.X_op != O_constant)
6633 {
6634 /* Make sure this has priority over
6635 "invalid addressing mode". */
6636 set_fatal_syntax_error (_("constant offset required"));
6637 goto failure;
6638 }
6639 info->addr.offset.imm = inst.reloc.exp.X_add_number;
6640 break;
6641
6642 case AARCH64_OPND_SVE_ADDR_R:
6643 /* [<Xn|SP>{, <R><m>}]
6644 but recognizing SVE registers. */
6645 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6646 &offset_qualifier));
6647 if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6648 {
6649 offset_qualifier = AARCH64_OPND_QLF_X;
6650 info->addr.offset.is_reg = 1;
6651 info->addr.offset.regno = 31;
6652 }
6653 else if (base_qualifier != AARCH64_OPND_QLF_X
6654 || offset_qualifier != AARCH64_OPND_QLF_X)
6655 {
6656 set_syntax_error (_("invalid addressing mode"));
6657 goto failure;
6658 }
6659 goto regoff_addr;
6660
6661 case AARCH64_OPND_SVE_ADDR_RR:
6662 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6663 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6664 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6665 case AARCH64_OPND_SVE_ADDR_RX:
6666 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6667 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6668 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6669 /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6670 but recognizing SVE registers. */
6671 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6672 &offset_qualifier));
6673 if (base_qualifier != AARCH64_OPND_QLF_X
6674 || offset_qualifier != AARCH64_OPND_QLF_X)
6675 {
6676 set_syntax_error (_("invalid addressing mode"));
6677 goto failure;
6678 }
6679 goto regoff_addr;
6680
6681 case AARCH64_OPND_SVE_ADDR_RZ:
6682 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6683 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6684 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6685 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6686 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6687 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6688 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6689 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6690 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6691 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6692 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6693 /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6694 [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
6695 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6696 &offset_qualifier));
6697 if (base_qualifier != AARCH64_OPND_QLF_X
6698 || (offset_qualifier != AARCH64_OPND_QLF_S_S
6699 && offset_qualifier != AARCH64_OPND_QLF_S_D))
6700 {
6701 set_syntax_error (_("invalid addressing mode"));
6702 goto failure;
6703 }
6704 info->qualifier = offset_qualifier;
6705 goto regoff_addr;
6706
6707 case AARCH64_OPND_SVE_ADDR_ZX:
6708 /* [Zn.<T>{, <Xm>}]. */
6709 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6710 &offset_qualifier));
6711 /* Things to check:
6712 base_qualifier either S_S or S_D
6713 offset_qualifier must be X
6714 */
6715 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6716 && base_qualifier != AARCH64_OPND_QLF_S_D)
6717 || offset_qualifier != AARCH64_OPND_QLF_X)
6718 {
6719 set_syntax_error (_("invalid addressing mode"));
6720 goto failure;
6721 }
6722 info->qualifier = base_qualifier;
6723 if (!info->addr.offset.is_reg || info->addr.pcrel
6724 || !info->addr.preind || info->addr.writeback
6725 || info->shifter.operator_present != 0)
6726 {
6727 set_syntax_error (_("invalid addressing mode"));
6728 goto failure;
6729 }
6730 info->shifter.kind = AARCH64_MOD_LSL;
6731 break;
6732
6733
6734 case AARCH64_OPND_SVE_ADDR_ZI_U5:
6735 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6736 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6737 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6738 /* [Z<n>.<T>{, #imm}] */
6739 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6740 &offset_qualifier));
6741 if (base_qualifier != AARCH64_OPND_QLF_S_S
6742 && base_qualifier != AARCH64_OPND_QLF_S_D)
6743 {
6744 set_syntax_error (_("invalid addressing mode"));
6745 goto failure;
6746 }
6747 info->qualifier = base_qualifier;
6748 goto sve_regimm;
6749
6750 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6751 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6752 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6753 /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6754 [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6755
6756 We don't reject:
6757
6758 [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6759
6760 here since we get better error messages by leaving it to
6761 the qualifier checking routines. */
6762 po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6763 &offset_qualifier));
6764 if ((base_qualifier != AARCH64_OPND_QLF_S_S
6765 && base_qualifier != AARCH64_OPND_QLF_S_D)
6766 || offset_qualifier != base_qualifier)
6767 {
6768 set_syntax_error (_("invalid addressing mode"));
6769 goto failure;
6770 }
6771 info->qualifier = base_qualifier;
6772 goto regoff_addr;
6773
6774 case AARCH64_OPND_SYSREG:
6775 {
6776 uint32_t sysreg_flags;
6777 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6778 &sysreg_flags)) == PARSE_FAIL)
6779 {
6780 set_syntax_error (_("unknown or missing system register name"));
6781 goto failure;
6782 }
6783 inst.base.operands[i].sysreg.value = val;
6784 inst.base.operands[i].sysreg.flags = sysreg_flags;
6785 break;
6786 }
6787
6788 case AARCH64_OPND_PSTATEFIELD:
6789 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6790 == PARSE_FAIL)
6791 {
6792 set_syntax_error (_("unknown or missing PSTATE field name"));
6793 goto failure;
6794 }
6795 inst.base.operands[i].pstatefield = val;
6796 break;
6797
6798 case AARCH64_OPND_SYSREG_IC:
6799 inst.base.operands[i].sysins_op =
6800 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6801 goto sys_reg_ins;
6802
6803 case AARCH64_OPND_SYSREG_DC:
6804 inst.base.operands[i].sysins_op =
6805 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6806 goto sys_reg_ins;
6807
6808 case AARCH64_OPND_SYSREG_AT:
6809 inst.base.operands[i].sysins_op =
6810 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6811 goto sys_reg_ins;
6812
6813 case AARCH64_OPND_SYSREG_SR:
6814 inst.base.operands[i].sysins_op =
6815 parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6816 goto sys_reg_ins;
6817
6818 case AARCH64_OPND_SYSREG_TLBI:
6819 inst.base.operands[i].sysins_op =
6820 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6821 sys_reg_ins:
6822 if (inst.base.operands[i].sysins_op == NULL)
6823 {
6824 set_fatal_syntax_error ( _("unknown or missing operation name"));
6825 goto failure;
6826 }
6827 break;
6828
6829 case AARCH64_OPND_BARRIER:
6830 case AARCH64_OPND_BARRIER_ISB:
6831 val = parse_barrier (&str);
6832 if (val != PARSE_FAIL
6833 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6834 {
6835 /* ISB only accepts options name 'sy'. */
6836 set_syntax_error
6837 (_("the specified option is not accepted in ISB"));
6838 /* Turn off backtrack as this optional operand is present. */
6839 backtrack_pos = 0;
6840 goto failure;
6841 }
6842 if (val != PARSE_FAIL
6843 && operands[i] == AARCH64_OPND_BARRIER)
6844 {
6845 /* Regular barriers accept options CRm (C0-C15).
6846 DSB nXS barrier variant accepts values > 15. */
6847 if (val < 0 || val > 15)
6848 {
6849 set_syntax_error (_("the specified option is not accepted in DSB"));
6850 goto failure;
6851 }
6852 }
6853 /* This is an extension to accept a 0..15 immediate. */
6854 if (val == PARSE_FAIL)
6855 po_imm_or_fail (0, 15);
6856 info->barrier = aarch64_barrier_options + val;
6857 break;
6858
6859 case AARCH64_OPND_BARRIER_DSB_NXS:
6860 val = parse_barrier (&str);
6861 if (val != PARSE_FAIL)
6862 {
6863 /* DSB nXS barrier variant accept only <option>nXS qualifiers. */
6864 if (!(val == 16 || val == 20 || val == 24 || val == 28))
6865 {
6866 set_syntax_error (_("the specified option is not accepted in DSB"));
6867 /* Turn off backtrack as this optional operand is present. */
6868 backtrack_pos = 0;
6869 goto failure;
6870 }
6871 }
6872 else
6873 {
6874 /* DSB nXS barrier variant accept 5-bit unsigned immediate, with
6875 possible values 16, 20, 24 or 28 , encoded as val<3:2>. */
6876 if (! parse_constant_immediate (&str, &val, imm_reg_type))
6877 goto failure;
6878 if (!(val == 16 || val == 20 || val == 24 || val == 28))
6879 {
6880 set_syntax_error (_("immediate value must be 16, 20, 24, 28"));
6881 goto failure;
6882 }
6883 }
6884 /* Option index is encoded as 2-bit value in val<3:2>. */
6885 val = (val >> 2) - 4;
6886 info->barrier = aarch64_barrier_dsb_nxs_options + val;
6887 break;
6888
6889 case AARCH64_OPND_PRFOP:
6890 val = parse_pldop (&str);
6891 /* This is an extension to accept a 0..31 immediate. */
6892 if (val == PARSE_FAIL)
6893 po_imm_or_fail (0, 31);
6894 inst.base.operands[i].prfop = aarch64_prfops + val;
6895 break;
6896
6897 case AARCH64_OPND_BARRIER_PSB:
6898 val = parse_barrier_psb (&str, &(info->hint_option));
6899 if (val == PARSE_FAIL)
6900 goto failure;
6901 break;
6902
6903 case AARCH64_OPND_BTI_TARGET:
6904 val = parse_bti_operand (&str, &(info->hint_option));
6905 if (val == PARSE_FAIL)
6906 goto failure;
6907 break;
6908
6909 default:
6910 as_fatal (_("unhandled operand code %d"), operands[i]);
6911 }
6912
6913 /* If we get here, this operand was successfully parsed. */
6914 inst.base.operands[i].present = 1;
6915 continue;
6916
6917 failure:
6918 /* The parse routine should already have set the error, but in case
6919 not, set a default one here. */
6920 if (! error_p ())
6921 set_default_error ();
6922
6923 if (! backtrack_pos)
6924 goto parse_operands_return;
6925
6926 {
6927 /* We reach here because this operand is marked as optional, and
6928 either no operand was supplied or the operand was supplied but it
6929 was syntactically incorrect. In the latter case we report an
6930 error. In the former case we perform a few more checks before
6931 dropping through to the code to insert the default operand. */
6932
6933 char *tmp = backtrack_pos;
6934 char endchar = END_OF_INSN;
6935
6936 if (i != (aarch64_num_of_operands (opcode) - 1))
6937 endchar = ',';
6938 skip_past_char (&tmp, ',');
6939
6940 if (*tmp != endchar)
6941 /* The user has supplied an operand in the wrong format. */
6942 goto parse_operands_return;
6943
6944 /* Make sure there is not a comma before the optional operand.
6945 For example the fifth operand of 'sys' is optional:
6946
6947 sys #0,c0,c0,#0, <--- wrong
6948 sys #0,c0,c0,#0 <--- correct. */
6949 if (comma_skipped_p && i && endchar == END_OF_INSN)
6950 {
6951 set_fatal_syntax_error
6952 (_("unexpected comma before the omitted optional operand"));
6953 goto parse_operands_return;
6954 }
6955 }
6956
6957 /* Reaching here means we are dealing with an optional operand that is
6958 omitted from the assembly line. */
6959 gas_assert (optional_operand_p (opcode, i));
6960 info->present = 0;
6961 process_omitted_operand (operands[i], opcode, i, info);
6962
6963 /* Try again, skipping the optional operand at backtrack_pos. */
6964 str = backtrack_pos;
6965 backtrack_pos = 0;
6966
6967 /* Clear any error record after the omitted optional operand has been
6968 successfully handled. */
6969 clear_error ();
6970 }
6971
6972 /* Check if we have parsed all the operands. */
6973 if (*str != '\0' && ! error_p ())
6974 {
6975 /* Set I to the index of the last present operand; this is
6976 for the purpose of diagnostics. */
6977 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6978 ;
6979 set_fatal_syntax_error
6980 (_("unexpected characters following instruction"));
6981 }
6982
6983 parse_operands_return:
6984
6985 if (error_p ())
6986 {
6987 DEBUG_TRACE ("parsing FAIL: %s - %s",
6988 operand_mismatch_kind_names[get_error_kind ()],
6989 get_error_message ());
6990 /* Record the operand error properly; this is useful when there
6991 are multiple instruction templates for a mnemonic name, so that
6992 later on, we can select the error that most closely describes
6993 the problem. */
6994 record_operand_error (opcode, i, get_error_kind (),
6995 get_error_message ());
6996 return false;
6997 }
6998 else
6999 {
7000 DEBUG_TRACE ("parsing SUCCESS");
7001 return true;
7002 }
7003 }
7004
/* Perform programmer-friendly fix-ups on INSTR: convert commonly accepted
   alternative spellings into the preferred architectural syntax, since
   libopcodes only accepts the preferred form, and handle the "=value"
   literal-pool convenience for literal loads.
   Return FALSE if there is any failure; otherwise return TRUE.  */

static bool
programmer_friendly_fixup (aarch64_instruction *instr)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  enum aarch64_op op = opcode->op;
  aarch64_opnd_info *operands = base->operands;

  DEBUG_TRACE ("enter");

  switch (opcode->iclass)
    {
    case testbranch:
      /* TBNZ Xn|Wn, #uimm6, label
	 Test and Branch Not Zero: conditionally jumps to label if bit number
	 uimm6 in register Xn is not zero.  The bit number implies the width of
	 the register, which may be written and should be disassembled as Wn if
	 uimm is less than 32.  */
      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
	{
	  /* With a W register spelling, only bit numbers 0..31 are valid.  */
	  if (operands[1].imm.value >= 32)
	    {
	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
						 0, 31);
	      return false;
	    }
	  /* The preferred (encodable) form always uses the X qualifier.  */
	  operands[0].qualifier = AARCH64_OPND_QLF_X;
	}
      break;
    case loadlit:
      /* LDR Wt, label | =value
	 As a convenience assemblers will typically permit the notation
	 "=value" in conjunction with the pc-relative literal load instructions
	 to automatically place an immediate value or symbolic address in a
	 nearby literal pool and generate a hidden label which references it.
	 ISREG has been set to 0 in the case of =value.  */
      if (instr->gen_lit_pool
	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
	{
	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
	  /* LDRSW always reads a 4-byte literal, whatever the Xt size.  */
	  if (op == OP_LDRSW_LIT)
	    size = 4;
	  /* Only constant-like expressions can live in the literal pool.  */
	  if (instr->reloc.exp.X_op != O_constant
	      && instr->reloc.exp.X_op != O_big
	      && instr->reloc.exp.X_op != O_symbol)
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
				    _("constant expression expected"));
	      return false;
	    }
	  if (! add_to_lit_pool (&instr->reloc.exp, size))
	    {
	      record_operand_error (opcode, 1,
				    AARCH64_OPDE_OTHER_ERROR,
				    _("literal pool insertion failed"));
	      return false;
	    }
	}
      break;
    case log_shift:
    case bitfield:
      /* UXT[BHW] Wd, Wn
	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is architectural alias
	 for UBFM Wd,Wn,#0,#7|15, while UXTW is pseudo instruction which is
	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
	 A programmer-friendly assembler should accept a destination Xd in
	 place of Wd, however that is not the preferred form for disassembly.
	 */
      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
	  && operands[1].qualifier == AARCH64_OPND_QLF_W
	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
	operands[0].qualifier = AARCH64_OPND_QLF_W;
      break;

    case addsub_ext:
      {
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   As a programmer-friendly assembler, we accept e.g.
	   ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
	   ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
	int idx = aarch64_operand_index (opcode->operands,
					 AARCH64_OPND_Rm_EXT);
	gas_assert (idx == 1 || idx == 2);
	if (operands[0].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].qualifier == AARCH64_OPND_QLF_X
	    && operands[idx].shifter.kind != AARCH64_MOD_LSL
	    && operands[idx].shifter.kind != AARCH64_MOD_UXTX
	    && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
	  operands[idx].qualifier = AARCH64_OPND_QLF_W;
      }
      break;

    default:
      break;
    }

  DEBUG_TRACE ("exit with SUCCESS");
  return true;
}
7112
/* Check for loads and stores that will cause unpredictable behavior
   (per the per-case comments below) and issue as_warn diagnostics.
   STR is the original assembly line, quoted in the warnings.  */

static void
warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
{
  aarch64_inst *base = &instr->base;
  const aarch64_opcode *opcode = base->opcode;
  const aarch64_opnd_info *opnds = base->operands;
  switch (opcode->iclass)
    {
    case ldst_pos:
    case ldst_imm9:
    case ldst_imm10:
    case ldst_unscaled:
    case ldst_unpriv:
      /* Loading/storing the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && opnds[0].reg.regno == opnds[1].addr.base_regno
	  && opnds[1].addr.base_regno != REG_SP
	  /* Exempt STG/STZG/ST2G/STZ2G.  */
	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
	  && opnds[1].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      break;

    case ldstpair_off:
    case ldstnapair_offs:
    case ldstpair_indexed:
      /* For pair instructions operand 2 is the address; loading/storing
	 the base register is unpredictable if writeback.  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
	      || opnds[1].reg.regno == opnds[2].addr.base_regno)
	  && opnds[2].addr.base_regno != REG_SP
	  /* Exempt STGP.  */
	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
	  && opnds[2].addr.writeback)
	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
      /* Load operations must load different registers.  Opcode bit 22
	 being set selects the load form of these pair instructions.  */
      if ((opcode->opcode & (1 << 22))
	  && opnds[0].reg.regno == opnds[1].reg.regno)
	as_warn (_("unpredictable load of register pair -- `%s'"), str);
      break;

    case ldstexcl:
      /* For the exclusive class, bit 22 set means a load form and bit 21
	 set means a pair form (see the per-check comments below).  */
      if ((aarch64_get_operand_class (opnds[0].type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && (aarch64_get_operand_class (opnds[1].type)
	      == AARCH64_OPND_CLASS_INT_REG))
	{
	  if ((opcode->opcode & (1 << 22)))
	    {
	      /* It is unpredictable if load-exclusive pair with Rt == Rt2.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn (_("unpredictable load of register pair -- `%s'"), str);
	    }
	  else
	    {
	      /* Store-Exclusive is unpredictable if Rt == Rs.  */
	      if (opnds[0].reg.regno == opnds[1].reg.regno)
		as_warn
		  (_("unpredictable: identical transfer and status registers"
		     " --`%s'"),str);

	      if (opnds[0].reg.regno == opnds[2].reg.regno)
		{
		  if (!(opcode->opcode & (1 << 21)))
		    /* Store-Exclusive is unpredictable if Rn == Rs.  */
		    as_warn
		      (_("unpredictable: identical base and status registers"
			 " --`%s'"),str);
		  else
		    /* Store-Exclusive pair is unpredictable if Rt2 == Rs.  */
		    as_warn
		      (_("unpredictable: "
			 "identical transfer and status registers"
			 " --`%s'"),str);
		}

	      /* Store-Exclusive pair is unpredictable if Rn == Rs.  */
	      if ((opcode->opcode & (1 << 21))
		  && opnds[0].reg.regno == opnds[3].reg.regno
		  && opnds[3].reg.regno != REG_SP)
		as_warn (_("unpredictable: identical base and status registers"
			   " --`%s'"),str);
	    }
	}
      break;

    default:
      break;
    }
}
7208
7209 static void
7210 force_automatic_sequence_close (void)
7211 {
7212 if (now_instr_sequence.instr)
7213 {
7214 as_warn (_("previous `%s' sequence has not been closed"),
7215 now_instr_sequence.instr->opcode->name);
7216 init_insn_sequence (NULL, &now_instr_sequence);
7217 }
7218 }
7219
7220 /* A wrapper function to interface with libopcodes on encoding and
7221 record the error message if there is any.
7222
7223 Return TRUE on success; otherwise return FALSE. */
7224
7225 static bool
7226 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
7227 aarch64_insn *code)
7228 {
7229 aarch64_operand_error error_info;
7230 memset (&error_info, '\0', sizeof (error_info));
7231 error_info.kind = AARCH64_OPDE_NIL;
7232 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
7233 && !error_info.non_fatal)
7234 return true;
7235
7236 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
7237 record_operand_error_info (opcode, &error_info);
7238 return error_info.non_fatal;
7239 }
7240
#ifdef DEBUG_AARCH64
/* Debug helper: print every operand of OPCODE, preferring the operand's
   name and falling back to its description when the name is empty.  */
static inline void
dump_opcode_operands (const aarch64_opcode *opcode)
{
  int i;

  for (i = 0; opcode->operands[i] != AARCH64_OPND_NIL; i++)
    {
      const char *opnd_name = aarch64_get_operand_name (opcode->operands[i]);

      aarch64_verbose ("\t\t opnd%d: %s", i,
		       opnd_name[0] != '\0'
		       ? opnd_name
		       : aarch64_get_operand_desc (opcode->operands[i]));
    }
}
#endif /* DEBUG_AARCH64 */
7256
/* This is the guts of the machine-dependent assembler.  STR points to a
   machine dependent instruction.  This function tries every opcode entry
   sharing the mnemonic, and emits the frags/bytes for the first entry
   that parses and encodes successfully (or reports the collected errors
   if none does).  */

void
md_assemble (char *str)
{
  char *p = str;
  templates *template;
  aarch64_opcode *opcode;
  aarch64_inst *inst_base;
  unsigned saved_cond;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Update the current insn_sequence from the segment.  */
  insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;

  inst.reloc.type = BFD_RELOC_UNUSED;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("==============================");
  DEBUG_TRACE ("Enter md_assemble with %s", str);

  /* P is advanced past the mnemonic; TEMPLATE heads the list of opcode
     entries sharing that mnemonic.  */
  template = opcode_lookup (&p);
  if (!template)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form "alias .req reg" directive.  */
      if (!create_register_alias (str, p))
	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
		str);
      return;
    }

  skip_whitespace (p);
  if (*p == ',')
    {
      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
	      get_mnemonic_name (str), str);
      return;
    }

  init_operand_error_report ();

  /* Sections are assumed to start aligned.  In executable section, there is no
     MAP_DATA symbol pending.  So we only align the address during
     MAP_DATA --> MAP_INSN transition.
     For other sections, this is not guaranteed.  */
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
    frag_align_code (2, 0);

  /* Preserve the condition parsed from the mnemonic across the per-template
     instruction reset.  */
  saved_cond = inst.cond;
  reset_aarch64_instruction (&inst);
  inst.cond = saved_cond;

  /* Iterate through all opcode entries with the same mnemonic name.  */
  do
    {
      opcode = template->opcode;

      DEBUG_TRACE ("opcode %s found", opcode->name);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_opcode_operands (opcode);
#endif /* DEBUG_AARCH64 */

      mapping_state (MAP_INSN);

      inst_base = &inst.base;
      inst_base->opcode = opcode;

      /* Truly conditionally executed instructions, e.g. b.cond.  */
      if (opcode->flags & F_COND)
	{
	  gas_assert (inst.cond != COND_ALWAYS);
	  inst_base->cond = get_cond_from_value (inst.cond);
	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
	}
      else if (inst.cond != COND_ALWAYS)
	{
	  /* It shouldn't arrive here, where the assembly looks like a
	     conditional instruction but the found opcode is unconditional.  */
	  gas_assert (0);
	  continue;
	}

      /* Parse, fix up and encode; on success, emit and return.  */
      if (parse_operands (p, opcode)
	  && programmer_friendly_fixup (&inst)
	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
	{
	  /* Check that this instruction is supported for this CPU.  */
	  if (!opcode->avariant
	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
	    {
	      as_bad (_("selected processor does not support `%s'"), str);
	      return;
	    }

	  warn_unpredictable_ldst (&inst, str);

	  if (inst.reloc.type == BFD_RELOC_UNUSED
	      || !inst.reloc.need_libopcodes_p)
	    output_inst (NULL);
	  else
	    {
	      /* If there is relocation generated for the instruction,
	         store the instruction information for the future fix-up.  */
	      struct aarch64_inst *copy;
	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
	      copy = XNEW (struct aarch64_inst);
	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
	      output_inst (copy);
	    }

	  /* Issue non-fatal messages if any.  */
	  output_operand_error_report (str, true);
	  return;
	}

      /* This template failed; reset and try the next one.  */
      template = template->next;
      if (template != NULL)
	{
	  reset_aarch64_instruction (&inst);
	  inst.cond = saved_cond;
	}
    }
  while (template != NULL);

  /* Issue the error messages if any.  */
  output_operand_error_report (str, false);
}
7396
7397 /* Various frobbings of labels and their addresses. */
7398
/* Start-of-line hook: forget the label recorded for the previous line so
   that md_assemble only re-anchors labels seen on the current line.  */

void
aarch64_start_line_hook (void)
{
  last_label_seen = NULL;
}
7404
/* Label hook: remember SYM (so md_assemble can re-anchor it after any
   alignment) and emit the corresponding DWARF2 label information.  */

void
aarch64_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  dwarf2_emit_label (sym);
}
7412
/* Section hook: close any instruction sequence left open in the section
   we are leaving (warning if one was not closed explicitly).  */

void
aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
{
  /* Check to see if we have a block to close.  */
  force_automatic_sequence_close ();
}
7419
7420 int
7421 aarch64_data_in_code (void)
7422 {
7423 if (startswith (input_line_pointer + 1, "data:"))
7424 {
7425 *input_line_pointer = '/';
7426 input_line_pointer += 5;
7427 *input_line_pointer = 0;
7428 return 1;
7429 }
7430
7431 return 0;
7432 }
7433
/* Canonicalize NAME for the symbol table by stripping a trailing "/data"
   suffix (presumably the marker produced via aarch64_data_in_code --
   confirm against callers): its leading '/' is overwritten with a NUL.
   NAME is modified in place and returned.  */

char *
aarch64_canonicalize_symbol_name (char *name)
{
  /* Use size_t for the length: strlen returns size_t, and assigning it
     to int could truncate on pathologically long names.  The original
     streq macro in this file is exactly strcmp () == 0.  */
  size_t len = strlen (name);

  if (len > 5 && strcmp (name + len - 5, "/data") == 0)
    name[len - 5] = '\0';

  return name;
}
7444 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF creates a canonical entry; REGDEF_ALIAS differs only in the
   final flag (false), marking the entry as an alias spelling.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true }
#define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, false}
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* Registers <p>0 .. <p>15 of type t.  */
#define REGSET16(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* Registers <p>0 .. <p>30 of type t -- number 31 deliberately excluded.  */
#define REGSET31(p,t) \
  REGSET16(p, t), \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
/* Registers <p>0 .. <p>31 of type t.  */
#define REGSET(p,t) \
  REGSET31(p,t), REGNUM(p,31,t)

/* These go into aarch64_reg_hsh hash-table.  */
static const reg_entry reg_names[] = {
  /* Integer registers.  REGSET31 omits x31/w31: register number 31 is
     context-dependent and is entered explicitly below as wsp/sp and
     wzr/xzr with their own register types.  */
  REGSET31 (x, R_64), REGSET31 (X, R_64),
  REGSET31 (w, R_32), REGSET31 (W, R_32),

  /* Alias spellings for particular X registers (x16/x17/x29/x30).  */
  REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
  REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
  REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
  REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),

  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),

  /* Floating-point single precision registers.  */
  REGSET (s, FP_S), REGSET (S, FP_S),

  /* Floating-point double precision registers.  */
  REGSET (d, FP_D), REGSET (D, FP_D),

  /* Floating-point half precision registers.  */
  REGSET (h, FP_H), REGSET (H, FP_H),

  /* Floating-point byte precision registers.  */
  REGSET (b, FP_B), REGSET (B, FP_B),

  /* Floating-point quad precision registers.  */
  REGSET (q, FP_Q), REGSET (Q, FP_Q),

  /* FP/SIMD registers.  */
  REGSET (v, VN), REGSET (V, VN),

  /* SVE vector registers.  */
  REGSET (z, ZN), REGSET (Z, ZN),

  /* SVE predicate registers -- only p0..p15 exist, hence REGSET16.  */
  REGSET16 (p, PN), REGSET16 (P, PN)
};

#undef REGDEF
#undef REGDEF_ALIAS
#undef REGNUM
#undef REGSET16
#undef REGSET31
#undef REGSET
7514
/* Helpers for building the nzcv_names table: an uppercase flag letter
   means "bit set" (1), lowercase means "bit clear" (0), matching the
   capitalisation in the option spellings below.  */
#define N 1
#define n 0
#define Z 1
#define z 0
#define C 1
#define c 0
#define V 1
#define v 0
/* Pack the flag values into a 4-bit immediate: N in bit 3, Z in bit 2,
   C in bit 1, V in bit 0.  */
#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
/* All sixteen spellings of the NZCV flags operand and their values.  */
static const asm_nzcv nzcv_names[] = {
  {"nzcv", B (n, z, c, v)},
  {"nzcV", B (n, z, c, V)},
  {"nzCv", B (n, z, C, v)},
  {"nzCV", B (n, z, C, V)},
  {"nZcv", B (n, Z, c, v)},
  {"nZcV", B (n, Z, c, V)},
  {"nZCv", B (n, Z, C, v)},
  {"nZCV", B (n, Z, C, V)},
  {"Nzcv", B (N, z, c, v)},
  {"NzcV", B (N, z, c, V)},
  {"NzCv", B (N, z, C, v)},
  {"NzCV", B (N, z, C, V)},
  {"NZcv", B (N, Z, c, v)},
  {"NZcV", B (N, Z, c, V)},
  {"NZCv", B (N, Z, C, v)},
  {"NZCV", B (N, Z, C, V)}
};

/* The single-letter macros would wreak havoc further down the file.  */
#undef N
#undef n
#undef Z
#undef z
#undef C
#undef c
#undef V
#undef v
#undef B
7552 \f
7553 /* MD interface: bits in the object file. */
7554
7555 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7556 for use in the a.out file, and stores them in the array pointed to by buf.
7557 This knows about the endian-ness of the target machine and does
7558 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
7559 2 (short) and 4 (long) Floating numbers are put out as a series of
7560 LITTLENUMS (shorts, here at least). */
7561
7562 void
7563 md_number_to_chars (char *buf, valueT val, int n)
7564 {
7565 if (target_big_endian)
7566 number_to_chars_bigendian (buf, val, n);
7567 else
7568 number_to_chars_littleendian (buf, val, n);
7569 }
7570
7571 /* MD interface: Sections. */
7572
/* Estimate the size of a frag before relaxing.  Assume everything fits in
   4 bytes (the fixed AArch64 instruction width).  */

int
md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
{
  fragp->fr_var = 4;
  return 4;
}
7582
/* Round up a section size to the appropriate boundary.  No extra
   target-specific rounding is needed here, so SIZE is returned as-is.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
  return size;
}
7590
7591 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
7592 of an rs_align_code fragment.
7593
7594 Here we fill the frag with the appropriate info for padding the
7595 output stream. The resulting frag will consist of a fixed (fr_fix)
7596 and of a repeating (fr_var) part.
7597
7598 The fixed content is always emitted before the repeating content and
7599 these two parts are used as follows in constructing the output:
7600 - the fixed part will be used to align to a valid instruction word
7601 boundary, in case that we start at a misaligned address; as no
7602 executable instruction can live at the misaligned location, we
7603 simply fill with zeros;
7604 - the variable part will be used to cover the remaining padding and
7605 we fill using the AArch64 NOP instruction.
7606
7607 Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7608 enough storage space for up to 3 bytes for padding the back to a valid
7609 instruction alignment and exactly 4 bytes to store the NOP pattern. */
7610
void
aarch64_handle_align (fragS * fragP)
{
  /* NOP = d503201f */
  /* AArch64 instructions are always little-endian.  */
  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };

  int bytes, fix, noop_size;
  char *p;

  /* Only rs_align_code frags get the NOP treatment described in the
     comment above this function.  */
  if (fragP->fr_type != rs_align_code)
    return;

  /* Total padding needed to reach the next frag, and the first byte of
     the frag's writable storage.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

#ifdef OBJ_ELF
  gas_assert (fragP->tc_frag_data.recorded);
#endif

  noop_size = sizeof (aarch64_noop);

  /* Bytes needed to reach 4-byte instruction alignment; they become
     part of the fixed portion and are filled with zeros, since no
     instruction can start at a misaligned address.  */
  fix = bytes & (noop_size - 1);
  if (fix)
    {
#ifdef OBJ_ELF
      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      fragP->fr_fix += fix;
    }

  /* The repeating (fr_var) part is a single NOP pattern.  */
  if (noop_size)
    memcpy (p, aarch64_noop, noop_size);
  fragP->fr_var = noop_size;
}
7648
7649 /* Perform target specific initialisation of a frag.
7650 Note - despite the name this initialisation is not done when the frag
7651 is created, but only when its type is assigned. A frag can be created
7652 and used a long time before its type is set, so beware of assuming that
7653 this initialisation is performed first. */
7654
7655 #ifndef OBJ_ELF
/* Non-ELF targets keep no per-frag mapping-symbol state, so there is
   nothing to initialise here.  */
void
aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
		   int max_chars ATTRIBUTE_UNUSED)
{
}
7661
7662 #else /* OBJ_ELF is defined. */
7663 void
7664 aarch64_init_frag (fragS * fragP, int max_chars)
7665 {
7666 /* Record a mapping symbol for alignment frags. We will delete this
7667 later if the alignment ends up empty. */
7668 if (!fragP->tc_frag_data.recorded)
7669 fragP->tc_frag_data.recorded = 1;
7670
7671 /* PR 21809: Do not set a mapping state for debug sections
7672 - it just confuses other tools. */
7673 if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
7674 return;
7675
7676 switch (fragP->fr_type)
7677 {
7678 case rs_align_test:
7679 case rs_fill:
7680 mapping_state_2 (MAP_DATA, max_chars);
7681 break;
7682 case rs_align:
7683 /* PR 20364: We can get alignment frags in code sections,
7684 so do not just assume that we should use the MAP_DATA state. */
7685 mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
7686 break;
7687 case rs_align_code:
7688 mapping_state_2 (MAP_INSN, max_chars);
7689 break;
7690 default:
7691 break;
7692 }
7693 }
7694 \f
7695 /* Initialize the DWARF-2 unwind information for this procedure. */
7696
void
tc_aarch64_frame_initial_instructions (void)
{
  /* On entry to a function the CFA is the stack pointer with a zero
     offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
7702 #endif /* OBJ_ELF */
7703
7704 /* Convert REGNAME to a DWARF-2 register number. */
7705
7706 int
7707 tc_aarch64_regname_to_dw2regnum (char *regname)
7708 {
7709 const reg_entry *reg = parse_reg (&regname);
7710 if (reg == NULL)
7711 return -1;
7712
7713 switch (reg->type)
7714 {
7715 case REG_TYPE_SP_32:
7716 case REG_TYPE_SP_64:
7717 case REG_TYPE_R_32:
7718 case REG_TYPE_R_64:
7719 return reg->number;
7720
7721 case REG_TYPE_FP_B:
7722 case REG_TYPE_FP_H:
7723 case REG_TYPE_FP_S:
7724 case REG_TYPE_FP_D:
7725 case REG_TYPE_FP_Q:
7726 return reg->number + 64;
7727
7728 default:
7729 break;
7730 }
7731 return -1;
7732 }
7733
/* Implement DWARF2_ADDR_SIZE.  Returns the byte size of an address for
   DWARF purposes: 4 under the ILP32 ABI on ELF targets, otherwise the
   BFD's native address width in bytes.  */

int
aarch64_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  if (ilp32_p)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
7745
7746 /* MD interface: Symbol and relocation handling. */
7747
7748 /* Return the address within the segment that a PC-relative fixup is
7749 relative to. For AArch64 PC-relative fixups applied to instructions
7750 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
7751
7752 long
7753 md_pcrel_from_section (fixS * fixP, segT seg)
7754 {
7755 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7756
7757 /* If this is pc-relative and we are going to emit a relocation
7758 then we just want to put out any pipeline compensation that the linker
7759 will need. Otherwise we want to use the calculated base. */
7760 if (fixP->fx_pcrel
7761 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7762 || aarch64_force_relocation (fixP)))
7763 base = 0;
7764
7765 /* AArch64 should be consistent for all pc-relative relocations. */
7766 return base + AARCH64_PCREL_OFFSET;
7767 }
7768
7769 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
7770 Otherwise we have no need to default values of symbols. */
7771
7772 symbolS *
7773 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
7774 {
7775 #ifdef OBJ_ELF
7776 if (name[0] == '_' && name[1] == 'G'
7777 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
7778 {
7779 if (!GOT_symbol)
7780 {
7781 if (symbol_find (name))
7782 as_bad (_("GOT already in the symbol table"));
7783
7784 GOT_symbol = symbol_new (name, undefined_section,
7785 &zero_address_frag, 0);
7786 }
7787
7788 return GOT_symbol;
7789 }
7790 #endif
7791
7792 return 0;
7793 }
7794
7795 /* Return non-zero if the indicated VALUE has overflowed the maximum
7796 range expressible by a unsigned number with the indicated number of
7797 BITS. */
7798
7799 static bool
7800 unsigned_overflow (valueT value, unsigned bits)
7801 {
7802 valueT lim;
7803 if (bits >= sizeof (valueT) * 8)
7804 return false;
7805 lim = (valueT) 1 << bits;
7806 return (value >= lim);
7807 }
7808
7809
7810 /* Return non-zero if the indicated VALUE has overflowed the maximum
7811 range expressible by an signed number with the indicated number of
7812 BITS. */
7813
7814 static bool
7815 signed_overflow (offsetT value, unsigned bits)
7816 {
7817 offsetT lim;
7818 if (bits >= sizeof (offsetT) * 8)
7819 return false;
7820 lim = (offsetT) 1 << (bits - 1);
7821 return (value < -lim || value >= lim);
7822 }
7823
7824 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7825 unsigned immediate offset load/store instruction, try to encode it as
7826 an unscaled, 9-bit, signed immediate offset load/store instruction.
7827 Return TRUE if it is successful; otherwise return FALSE.
7828
7829 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
7830 in response to the standard LDR/STR mnemonics when the immediate offset is
7831 unambiguous, i.e. when it is negative or unaligned. */
7832
static bool
try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
{
  int idx;
  enum aarch64_op new_op;
  const aarch64_opcode *new_opcode;

  gas_assert (instr->opcode->iclass == ldst_pos);

  /* Map each scaled-offset opcode to its unscaled (LDUR/STUR family)
     counterpart; anything without one gets OP_NIL.  */
  switch (instr->opcode->op)
    {
    case OP_LDRB_POS:new_op = OP_LDURB; break;
    case OP_STRB_POS: new_op = OP_STURB; break;
    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
    case OP_LDRH_POS: new_op = OP_LDURH; break;
    case OP_STRH_POS: new_op = OP_STURH; break;
    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
    case OP_LDR_POS: new_op = OP_LDUR; break;
    case OP_STR_POS: new_op = OP_STUR; break;
    case OP_LDRF_POS: new_op = OP_LDURV; break;
    case OP_STRF_POS: new_op = OP_STURV; break;
    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
    case OP_PRFM_POS: new_op = OP_PRFUM; break;
    default: new_op = OP_NIL; break;
    }

  if (new_op == OP_NIL)
    return false;

  new_opcode = aarch64_get_opcode (new_op);
  gas_assert (new_opcode != NULL);

  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
	       instr->opcode->op, new_opcode->op);

  aarch64_replace_opcode (instr, new_opcode);

  /* Clear up the ADDR_SIMM9's qualifier; otherwise the
     qualifier matching may fail because the out-of-date qualifier will
     prevent the operand being updated with a new and correct qualifier.  */
  idx = aarch64_operand_index (instr->opcode->operands,
			       AARCH64_OPND_ADDR_SIMM9);
  gas_assert (idx == 1);
  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");

  /* Re-encode with the replacement opcode; failure here means the
     offset does not fit the unscaled form either.  */
  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
			      insn_sequence))
    return false;

  return true;
}
7886
7887 /* Called by fix_insn to fix a MOV immediate alias instruction.
7888
7889 Operand for a generic move immediate instruction, which is an alias
7890 instruction that generates a single MOVZ, MOVN or ORR instruction to loads
7891 a 32-bit/64-bit immediate value into general register. An assembler error
7892 shall result if the immediate cannot be created by a single one of these
7893 instructions. If there is a choice, then to ensure reversability an
7894 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
7895
static void
fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
{
  const aarch64_opcode *opcode;

  /* Need to check if the destination is SP/ZR.  The check has to be done
     before any aarch64_replace_opcode.  */
  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);

  /* Install the now-resolved immediate and make sure it is not skipped
     during encoding.  */
  instr->operands[1].imm.value = value;
  instr->operands[1].skip = 0;

  if (try_mov_wide_p)
    {
      /* Try the MOVZ alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
      /* Try the MOVK alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  if (try_mov_bitmask_p)
    {
      /* Try the ORR alias.  */
      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
      aarch64_replace_opcode (instr, opcode);
      if (aarch64_opcode_encode (instr->opcode, instr,
				 &instr->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, instr->value);
	  return;
	}
    }

  /* No single MOVZ/MOVN/ORR encoding can materialise this value.  */
  as_bad_where (fixP->fx_file, fixP->fx_line,
		_("immediate cannot be moved by a single instruction"));
}
7947
7948 /* An instruction operand which is immediate related may have symbol used
7949 in the assembly, e.g.
7950
7951 mov w0, u32
7952 .set u32, 0x00ffff00
7953
7954 At the time when the assembly instruction is parsed, a referenced symbol,
7955 like 'u32' in the above example may not have been seen; a fixS is created
7956 in such a case and is handled here after symbols have been resolved.
7957 Instruction is fixed up with VALUE using the information in *FIXP plus
7958 extra information in FLAGS.
7959
7960 This function is called by md_apply_fix to fix up instructions that need
7961 a fix-up described above but does not involve any linker-time relocation. */
7962
static void
fix_insn (fixS *fixP, uint32_t flags, offsetT value)
{
  int idx;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
  aarch64_inst *new_inst = fixP->tc_fix_data.inst;

  if (new_inst)
    {
      /* Now the instruction is about to be fixed-up, so the operand that
	 was previously marked as 'ignored' needs to be unmarked in order
	 to get the encoding done properly.  */
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].skip = 0;
    }

  gas_assert (opnd != AARCH64_OPND_NIL);

  switch (opnd)
    {
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
      /* 16-bit immediate; EXCEPTION operands go through the SVC-imm
	 encoder, UNDEFINED immediates are OR'ed in directly.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= (opnd == AARCH64_OPND_EXCEPTION) ? encode_svc_imm (value) : value;
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_AIMM:
      /* ADD or SUB with immediate.
	 NOTE this assumes we come here with a add/sub shifted reg encoding
		   3  322|2222|2 2 2 21111 111111
		   1  098|7654|3 2 1 09876 543210 98765 43210
	 0b000000 sf 000|1011|shift 0 Rm    imm6  Rn    Rd    ADD
	 2b000000 sf 010|1011|shift 0 Rm    imm6  Rn    Rd    ADDS
	 4b000000 sf 100|1011|shift 0 Rm    imm6  Rn    Rd    SUB
	 6b000000 sf 110|1011|shift 0 Rm    imm6  Rn    Rd    SUBS
	 ->
		   3  322|2222|2 2  221111111111
		   1  098|7654|3 2  109876543210 98765 43210
	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
	 Fields sf Rn Rd are already set.  */
      insn = get_aarch64_insn (buf);
      if (value < 0)
	{
	  /* Add <-> sub.  A negative immediate is absorbed by flipping
	     the operation and negating the value.  */
	  insn = reencode_addsub_switch_add_sub (insn);
	  value = -value;
	}

      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
	  && unsigned_overflow (value, 12))
	{
	  /* Try to shift the value by 12 to make it fit.  */
	  if (((value >> 12) << 12) == value
	      && ! unsigned_overflow (value, 12 + 12))
	    {
	      value >>= 12;
	      insn |= encode_addsub_imm_shift_amount (1);
	    }
	}

      if (unsigned_overflow (value, 12))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));

      insn |= encode_addsub_imm (value);

      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
    case AARCH64_OPND_LIMM:
      /* Bit mask immediate.  */
      gas_assert (new_inst != NULL);
      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
      new_inst->operands[idx].imm.value = value;
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	put_aarch64_insn (buf, new_inst->value);
      else
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid immediate"));
      break;

    case AARCH64_OPND_HALF:
      /* 16-bit unsigned immediate.  */
      if (unsigned_overflow (value, 16))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("immediate out of range"));
      insn = get_aarch64_insn (buf);
      insn |= encode_movw_imm (value & 0xffff);
      put_aarch64_insn (buf, insn);
      break;

    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is
	 an alias instruction that generates a single MOVZ, MOVN or ORR
	 instruction to loads a 32-bit/64-bit immediate value into general
	 register.  An assembler error shall result if the immediate cannot be
	 created by a single one of these instructions.  If there is a choice,
	 then to ensure reversability an assembler must prefer a MOVZ to MOVN,
	 and MOVZ or MOVN to ORR.  */
      gas_assert (new_inst != NULL);
      fix_mov_imm_insn (fixP, buf, new_inst, value);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_ADDR_SIMM10:
    case AARCH64_OPND_ADDR_UIMM12:
    case AARCH64_OPND_ADDR_SIMM11:
    case AARCH64_OPND_ADDR_SIMM13:
      /* Immediate offset in an address.  */
      insn = get_aarch64_insn (buf);

      gas_assert (new_inst != NULL && new_inst->value == insn);
      gas_assert (new_inst->opcode->operands[1] == opnd
		  || new_inst->opcode->operands[2] == opnd);

      /* Get the index of the address operand.  */
      if (new_inst->opcode->operands[1] == opnd)
	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
	idx = 1;
      else
	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
	idx = 2;

      /* Update the resolved offset value.  */
      new_inst->operands[idx].addr.offset.imm = value;

      /* Encode/fix-up.  */
      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
				 &new_inst->value, NULL, NULL, insn_sequence))
	{
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}
      else if (new_inst->opcode->iclass == ldst_pos
	       && try_to_encode_as_unscaled_ldst (new_inst))
	{
	  /* Fallback: the scaled form failed but the offset fits the
	     programmer-friendly unscaled (LDUR/STUR) form.  */
	  put_aarch64_insn (buf, new_inst->value);
	  break;
	}

      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("immediate offset out of range"));
      break;

    default:
      gas_assert (0);
      as_fatal (_("unhandled operand code %d"), opnd);
    }
}
8126
8127 /* Apply a fixup (fixP) to segment data, once it has been determined
8128 by our caller that we have all the info we need to fix it up.
8129
8130 Parameter valP is the pointer to the value of the bits. */
8131
void
md_apply_fix (fixS * fixP, valueT * valP, segT seg)
{
  offsetT value = *valP;
  uint32_t insn;
  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
  int scale;
  unsigned flags = fixP->fx_addnumber;

  DEBUG_TRACE ("\n\n");
  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
  DEBUG_TRACE ("Enter md_apply_fix");

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */

  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* Process the relocations.  */
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    /* Plain data relocations: patch the bytes in place unless a RELA
       relocation will carry the addend.  */
    case BFD_RELOC_8:
    case BFD_RELOC_8_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
    case BFD_RELOC_16_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_32:
    case BFD_RELOC_32_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_64:
    case BFD_RELOC_64_PCREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 8);
      break;

    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
      /* We claim that these fixups have been processed here, even if
	 in fact we generate an error because we do not have a reloc
	 for them, so tc_gen_reloc() will reject them.  */
      fixP->fx_done = 1;
      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  goto apply_fix_return;
	}
      /* Patch the instruction's immediate field in place.  */
      fix_insn (fixP, flags, value);
      break;

    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
      /* LDR (literal): 19-bit word-aligned PC-relative offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative load offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_ld_lit_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
      /* ADR: 21-bit signed byte offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("pc-relative address offset out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_adr_imm (value);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      /* B.cond: 19-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 21))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_cond_branch_ofs_19 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      /* TBZ/TBNZ: 14-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch target not word aligned"));
	  if (signed_overflow (value, 16))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("conditional branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_tst_branch_ofs_14 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      /* BL/B: 26-bit word offset.  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch target not word aligned"));
	  if (signed_overflow (value, 28))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("branch out of range"));
	  insn = get_aarch64_insn (buf);
	  insn |= encode_branch_ofs_26 (value >> 2);
	  put_aarch64_insn (buf, insn);
	}
      break;

    /* MOVZ/MOVN/MOVK group relocations: SCALE selects which 16-bit
       slice of the value is inserted.  */
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0:
    case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
      scale = 0;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1:
    case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
      scale = 16;
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
      scale = 0;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
      scale = 16;
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2:
    case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
      scale = 32;
      goto movw_common;
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_PREL_G3:
      scale = 48;
    movw_common:
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  insn = get_aarch64_insn (buf);

	  if (!fixP->fx_done)
	    {
	      /* REL signed addend must fit in 16 bits */
	      if (signed_overflow (value, 16))
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("offset out of range"));
	    }
	  else
	    {
	      /* Check for overflow and scale.  */
	      switch (fixP->fx_r_type)
		{
		case BFD_RELOC_AARCH64_MOVW_G0:
		case BFD_RELOC_AARCH64_MOVW_G1:
		case BFD_RELOC_AARCH64_MOVW_G2:
		case BFD_RELOC_AARCH64_MOVW_G3:
		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
		  if (unsigned_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("unsigned value out of range"));
		  break;
		case BFD_RELOC_AARCH64_MOVW_G0_S:
		case BFD_RELOC_AARCH64_MOVW_G1_S:
		case BFD_RELOC_AARCH64_MOVW_G2_S:
		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
		  /* NOTE: We can only come here with movz or movn.  */
		  if (signed_overflow (value, scale + 16))
		    as_bad_where (fixP->fx_file, fixP->fx_line,
				  _("signed value out of range"));
		  if (value < 0)
		    {
		      /* Force use of MOVN.  */
		      value = ~value;
		      insn = reencode_movzn_to_movn (insn);
		    }
		  else
		    {
		      /* Force use of MOVZ.  */
		      insn = reencode_movzn_to_movz (insn);
		    }
		  break;
		default:
		  /* Unchecked relocations.  */
		  break;
		}
	      value >>= scale;
	    }

	  /* Insert value into MOVN/MOVZ/MOVK instruction.  */
	  insn |= encode_movw_imm (value & 0xffff);

	  put_aarch64_insn (buf, insn);
	}
      break;

    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
      /* Narrow the generic reloc to the 32- or 64-bit variant per ABI.  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    /* All remaining TLS relocations: mark the symbol thread-local and
       leave the fixup for the object file.  */
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      fixP->fx_r_type = (ilp32_p
			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
      /* Should always be exported to object file, see
	 aarch64_force_relocation().  */
      gas_assert (!fixP->fx_done);
      gas_assert (seg->use_rela_p);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
      break;

    case BFD_RELOC_UNUSED:
      /* An error will already have been reported.  */
      break;

    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
		    _("unexpected %s fixup"),
		    bfd_get_reloc_code_name (fixP->fx_r_type));
      break;
    }

 apply_fix_return:
  /* Free the allocated the struct aarch64_inst.
     N.B. currently there are very limited number of fix-up types actually use
     this field, so the impact on the performance should be minimal .  */
  free (fixP->tc_fix_data.inst);

  return;
}
8513
8514 /* Translate internal representation of relocation info to BFD target
8515 format. */
8516
8517 arelent *
8518 tc_gen_reloc (asection * section, fixS * fixp)
8519 {
8520 arelent *reloc;
8521 bfd_reloc_code_real_type code;
8522
8523 reloc = XNEW (arelent);
8524
8525 reloc->sym_ptr_ptr = XNEW (asymbol *);
8526 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8527 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8528
8529 if (fixp->fx_pcrel)
8530 {
8531 if (section->use_rela_p)
8532 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8533 else
8534 fixp->fx_offset = reloc->address;
8535 }
8536 reloc->addend = fixp->fx_offset;
8537
8538 code = fixp->fx_r_type;
8539 switch (code)
8540 {
8541 case BFD_RELOC_16:
8542 if (fixp->fx_pcrel)
8543 code = BFD_RELOC_16_PCREL;
8544 break;
8545
8546 case BFD_RELOC_32:
8547 if (fixp->fx_pcrel)
8548 code = BFD_RELOC_32_PCREL;
8549 break;
8550
8551 case BFD_RELOC_64:
8552 if (fixp->fx_pcrel)
8553 code = BFD_RELOC_64_PCREL;
8554 break;
8555
8556 default:
8557 break;
8558 }
8559
8560 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8561 if (reloc->howto == NULL)
8562 {
8563 as_bad_where (fixp->fx_file, fixp->fx_line,
8564 _
8565 ("cannot represent %s relocation in this object file format"),
8566 bfd_get_reloc_code_name (code));
8567 return NULL;
8568 }
8569
8570 return reloc;
8571 }
8572
8573 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
8574
8575 void
8576 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8577 {
8578 bfd_reloc_code_real_type type;
8579 int pcrel = 0;
8580
8581 /* Pick a reloc.
8582 FIXME: @@ Should look at CPU word size. */
8583 switch (size)
8584 {
8585 case 1:
8586 type = BFD_RELOC_8;
8587 break;
8588 case 2:
8589 type = BFD_RELOC_16;
8590 break;
8591 case 4:
8592 type = BFD_RELOC_32;
8593 break;
8594 case 8:
8595 type = BFD_RELOC_64;
8596 break;
8597 default:
8598 as_bad (_("cannot do %u-byte relocation"), size);
8599 type = BFD_RELOC_UNUSED;
8600 break;
8601 }
8602
8603 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8604 }
8605
8606 #ifdef OBJ_ELF
8607
8608 /* Implement md_after_parse_args. This is the earliest time we need to decide
8609 ABI. If no -mabi specified, the ABI will be decided by target triplet. */
8610
8611 void
8612 aarch64_after_parse_args (void)
8613 {
8614 if (aarch64_abi != AARCH64_ABI_NONE)
8615 return;
8616
8617 /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32. */
8618 if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8619 aarch64_abi = AARCH64_ABI_ILP32;
8620 else
8621 aarch64_abi = AARCH64_ABI_LP64;
8622 }
8623
8624 const char *
8625 elf64_aarch64_target_format (void)
8626 {
8627 #ifdef TE_CLOUDABI
8628 /* FIXME: What to do for ilp32_p ? */
8629 if (target_big_endian)
8630 return "elf64-bigaarch64-cloudabi";
8631 else
8632 return "elf64-littleaarch64-cloudabi";
8633 #else
8634 if (target_big_endian)
8635 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8636 else
8637 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8638 #endif
8639 }
8640
/* Per-symbol hook run at write-out time; defer entirely to the generic
   ELF symbol frobbing.  *PUNTP is set by the callee if the symbol should
   be punted.  */
void
aarch64elf_frob_symbol (symbolS * symp, int *puntp)
{
  elf_frob_symbol (symp, puntp);
}
8646 #endif
8647
8648 /* MD interface: Finalization. */
8649
8650 /* A good place to do this, although this was probably not intended
8651 for this kind of use. We need to dump the literal pool before
8652 references are made to a null symbol pointer. */
8653
8654 void
8655 aarch64_cleanup (void)
8656 {
8657 literal_pool *pool;
8658
8659 for (pool = list_of_pools; pool; pool = pool->next)
8660 {
8661 /* Put it at the end of the relevant section. */
8662 subseg_set (pool->section, pool->sub_section);
8663 s_ltorg (0);
8664 }
8665 }
8666
#ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections; ABFD and DUMMY are
   unused.  */
static void
check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections GAS never emitted frags into.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL; fragp = fragp->fr_next)
    {
      /* Candidate for removal: the last mapping symbol in this frag.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Walk forward over (possibly empty) following frags to decide
	 whether SYM is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
#endif
8732
/* Adjust the symbol table.  Implements md hook tc_adjust_symtab; for
   non-ELF targets this is a no-op.  */

void
aarch64_adjust_symtab (void)
{
#ifdef OBJ_ELF
  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
8745
/* Insert KEY -> VALUE into TABLE.  The final 0 argument asks
   str_hash_insert not to replace an existing entry for KEY.  */
static void
checked_hash_insert (htab_t table, const char *key, void *value)
{
  str_hash_insert (table, key, value, 0);
}
8751
/* Insert a system-register entry, first asserting that the register
   name fits within AARCH64_MAX_SYSREG_NAME_LEN (parsing buffers rely
   on this bound).  */
static void
sysreg_hash_insert (htab_t table, const char *key, void *value)
{
  gas_assert (strlen (key) < AARCH64_MAX_SYSREG_NAME_LEN);
  checked_hash_insert (table, key, value);
}
8758
8759 static void
8760 fill_instruction_hash_table (void)
8761 {
8762 aarch64_opcode *opcode = aarch64_opcode_table;
8763
8764 while (opcode->name != NULL)
8765 {
8766 templates *templ, *new_templ;
8767 templ = str_hash_find (aarch64_ops_hsh, opcode->name);
8768
8769 new_templ = XNEW (templates);
8770 new_templ->opcode = opcode;
8771 new_templ->next = NULL;
8772
8773 if (!templ)
8774 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8775 else
8776 {
8777 new_templ->next = templ->next;
8778 templ->next = new_templ;
8779 }
8780 ++opcode;
8781 }
8782 }
8783
8784 static inline void
8785 convert_to_upper (char *dst, const char *src, size_t num)
8786 {
8787 unsigned int i;
8788 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8789 *dst = TOUPPER (*src);
8790 *dst = '\0';
8791 }
8792
8793 /* Assume STR point to a lower-case string, allocate, convert and return
8794 the corresponding upper-case string. */
8795 static inline const char*
8796 get_upper_str (const char *str)
8797 {
8798 char *ret;
8799 size_t len = strlen (str);
8800 ret = XNEWVEC (char, len + 1);
8801 convert_to_upper (ret, str, len);
8802 return ret;
8803 }
8804
/* MD interface: Initialization.  */

/* Build every lookup table the parser needs (mnemonics, operand
   keywords, system registers, ...) and record the target machine.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* One hash table per operand namespace consulted while parsing.  */
  aarch64_ops_hsh = str_htab_create ();
  aarch64_cond_hsh = str_htab_create ();
  aarch64_shift_hsh = str_htab_create ();
  aarch64_sys_regs_hsh = str_htab_create ();
  aarch64_pstatefield_hsh = str_htab_create ();
  aarch64_sys_regs_ic_hsh = str_htab_create ();
  aarch64_sys_regs_dc_hsh = str_htab_create ();
  aarch64_sys_regs_at_hsh = str_htab_create ();
  aarch64_sys_regs_tlbi_hsh = str_htab_create ();
  aarch64_sys_regs_sr_hsh = str_htab_create ();
  aarch64_reg_hsh = str_htab_create ();
  aarch64_barrier_opt_hsh = str_htab_create ();
  aarch64_nzcv_hsh = str_htab_create ();
  aarch64_pldop_hsh = str_htab_create ();
  aarch64_hint_opt_hsh = str_htab_create ();

  fill_instruction_hash_table ();

  /* System registers and the IC/DC/AT/TLBI/SR operation namespaces,
     each keyed by name.  */
  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
			(void *) (aarch64_sys_regs + i));

  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    sysreg_hash_insert (aarch64_pstatefield_hsh,
			aarch64_pstatefields[i].name,
			(void *) (aarch64_pstatefields + i));

  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_ic_hsh,
			aarch64_sys_regs_ic[i].name,
			(void *) (aarch64_sys_regs_ic + i));

  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_dc_hsh,
			aarch64_sys_regs_dc[i].name,
			(void *) (aarch64_sys_regs_dc + i));

  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_at_hsh,
			aarch64_sys_regs_at[i].name,
			(void *) (aarch64_sys_regs_at + i));

  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_tlbi_hsh,
			aarch64_sys_regs_tlbi[i].name,
			(void *) (aarch64_sys_regs_tlbi + i));

  for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
    sysreg_hash_insert (aarch64_sys_regs_sr_hsh,
			aarch64_sys_regs_sr[i].name,
			(void *) (aarch64_sys_regs_sr + i));

  /* General/SIMD register names and NZCV flag-combination names.  */
  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
			 (void *) (reg_names + i));

  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
			 (void *) (nzcv_names + i));

  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
    {
      const char *name = aarch64_operand_modifiers[i].name;
      checked_hash_insert (aarch64_shift_hsh, name,
			   (void *) (aarch64_operand_modifiers + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
			   (void *) (aarch64_operand_modifiers + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
    {
      unsigned int j;
      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
	 the same condition code.  */
      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
	{
	  const char *name = aarch64_conds[i].names[j];
	  if (name == NULL)
	    break;
	  checked_hash_insert (aarch64_cond_hsh, name,
			       (void *) (aarch64_conds + i));
	  /* Also hash the name in the upper case.  */
	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
			       (void *) (aarch64_conds + i));
	}
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
    {
      const char *name = aarch64_barrier_options[i].name;
      /* Skip xx00 - the unallocated values of option.  */
      if ((i & 0x3) == 0)
	continue;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_dsb_nxs_options); i++)
    {
      const char *name = aarch64_barrier_dsb_nxs_options[i].name;
      checked_hash_insert (aarch64_barrier_opt_hsh, name,
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_barrier_dsb_nxs_options + i));
    }

  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
    {
      const char* name = aarch64_prfops[i].name;
      /* Skip the unallocated hint encodings.  */
      if (name == NULL)
	continue;
      checked_hash_insert (aarch64_pldop_hsh, name,
			   (void *) (aarch64_prfops + i));
      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
			   (void *) (aarch64_prfops + i));
    }

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      const char* name = aarch64_hint_options[i].name;
      const char* upper_name = get_upper_str(name);

      checked_hash_insert (aarch64_hint_opt_hsh, name,
			   (void *) (aarch64_hint_options + i));

      /* Also hash the name in the upper case if not the same.  */
      if (strcmp (name, upper_name) != 0)
	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
			     (void *) (aarch64_hint_options + i));
    }

  /* Set the cpu variant based on the command-line options.  */
  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (!mcpu_cpu_opt)
    mcpu_cpu_opt = &cpu_default;

  cpu_variant = *mcpu_cpu_opt;

  /* Record the CPU type.  */
  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
8965
/* Command line processing.  */

/* Short options all start with -m and take an argument.  */
const char *md_shortopts = "m:";

/* Long-option codes.  -EB/-EL are only made available for the
   endiannesses this build of the target supports.  */
#ifdef AARCH64_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] = {
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
8992
/* A simple -m<option> flag that sets an integer variable.  */
struct aarch64_option_table
{
  const char *option;		/* Option name to match.  */
  const char *help;		/* Help information.  */
  int *var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

/* Table of boolean-style -m options handled in md_parse_option.  */
static struct aarch64_option_table aarch64_opts[] = {
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},
#ifdef DEBUG_AARCH64
  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
#endif /* DEBUG_AARCH64 */
  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
   NULL},
  {"mno-verbose-error", N_("do not output verbose error messages"),
   &verbose_error_p, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
9015
/* A -mcpu= entry: CPU name, its full feature set, and a display name.  */
struct aarch64_cpu_option_table
{
  const char *name;
  const aarch64_feature_set value;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct aarch64_cpu_option_table aarch64_cpus[] = {
  {"all", AARCH64_ANY, NULL},
  {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A34"},
  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A35"},
  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A53"},
  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A57"},
  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A72"},
  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
				  AARCH64_FEATURE_CRC), "Cortex-A73"},
  {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A55"},
  {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A75"},
  {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
   "Cortex-A76"},
  {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A76AE"},
  {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A77"},
  {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS),
   "Cortex-A65"},
  {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS),
   "Cortex-A65AE"},
  {"cortex-a78", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				  AARCH64_FEATURE_F16
				  | AARCH64_FEATURE_RCPC
				  | AARCH64_FEATURE_DOTPROD
				  | AARCH64_FEATURE_SSBS
				  | AARCH64_FEATURE_PROFILE),
   "Cortex-A78"},
  {"cortex-a78ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				    AARCH64_FEATURE_F16
				    | AARCH64_FEATURE_RCPC
				    | AARCH64_FEATURE_DOTPROD
				    | AARCH64_FEATURE_SSBS
				    | AARCH64_FEATURE_PROFILE),
   "Cortex-A78AE"},
  {"cortex-a78c", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_FLAGM
				   | AARCH64_FEATURE_PAC
				   | AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_RCPC
				   | AARCH64_FEATURE_SSBS),
   "Cortex-A78C"},
  {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
			    AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
			    | AARCH64_FEATURE_DOTPROD
			    | AARCH64_FEATURE_PROFILE),
   "Ares"},
  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Samsung Exynos M1"},
  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			      | AARCH64_FEATURE_RDMA),
   "Qualcomm Falkor"},
  {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_SSBS),
   "Neoverse E1"},
  {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				   AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_DOTPROD
				   | AARCH64_FEATURE_PROFILE),
   "Neoverse N1"},
  {"neoverse-n2", AARCH64_FEATURE (AARCH64_ARCH_V8_5,
				   AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SVE2
				   | AARCH64_FEATURE_SVE2_BITPERM
				   | AARCH64_FEATURE_MEMTAG
				   | AARCH64_FEATURE_RNG),
   "Neoverse N2"},
  {"neoverse-v1", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
				   AARCH64_FEATURE_PROFILE
				   | AARCH64_FEATURE_CVADP
				   | AARCH64_FEATURE_SVE
				   | AARCH64_FEATURE_SSBS
				   | AARCH64_FEATURE_RNG
				   | AARCH64_FEATURE_F16
				   | AARCH64_FEATURE_BFLOAT16
				   | AARCH64_FEATURE_I8MM), "Neoverse V1"},
  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
			       | AARCH64_FEATURE_RDMA),
   "Qualcomm QDF24XX"},
  {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
   "Qualcomm Saphira"},
  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
   "Cavium ThunderX"},
  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
			      AARCH64_FEATURE_CRYPTO),
   "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older name for 'xgene1', which was used
     in earlier releases and is superseded by 'xgene1' in all
     tools.  */
  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
  {"cortex-r82", AARCH64_ARCH_V8_R, "Cortex-R82"},
  {"cortex-x1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
				 AARCH64_FEATURE_F16
				 | AARCH64_FEATURE_RCPC
				 | AARCH64_FEATURE_DOTPROD
				 | AARCH64_FEATURE_SSBS
				 | AARCH64_FEATURE_PROFILE),
   "Cortex-X1"},
  {"generic", AARCH64_ARCH_V8, NULL},

  {NULL, AARCH64_ARCH_NONE, NULL}
};
9166
/* A -march= entry: architecture name and its feature set.  */
struct aarch64_arch_option_table
{
  const char *name;
  const aarch64_feature_set value;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct aarch64_arch_option_table aarch64_archs[] = {
  {"all", AARCH64_ANY},
  {"armv8-a", AARCH64_ARCH_V8},
  {"armv8.1-a", AARCH64_ARCH_V8_1},
  {"armv8.2-a", AARCH64_ARCH_V8_2},
  {"armv8.3-a", AARCH64_ARCH_V8_3},
  {"armv8.4-a", AARCH64_ARCH_V8_4},
  {"armv8.5-a", AARCH64_ARCH_V8_5},
  {"armv8.6-a", AARCH64_ARCH_V8_6},
  {"armv8.7-a", AARCH64_ARCH_V8_7},
  {"armv8-r", AARCH64_ARCH_V8_R},
  {NULL, AARCH64_ARCH_NONE}
};
9188
/* ISA extensions.  */
/* A "+<name>" extension: the feature bits it turns on and the feature
   bits it depends on (used for transitive enable/disable closure).  */
struct aarch64_option_cpu_value_table
{
  const char *name;
  const aarch64_feature_set value;
  const aarch64_feature_set require; /* Feature dependencies.  */
};

static const struct aarch64_option_cpu_value_table aarch64_features[] = {
  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
			AARCH64_ARCH_NONE},
  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
			AARCH64_ARCH_NONE},
  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
			AARCH64_ARCH_NONE},
  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
			AARCH64_ARCH_NONE},
  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
			AARCH64_ARCH_NONE},
  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
			AARCH64_ARCH_NONE},
  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
  {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_FP
					 | AARCH64_FEATURE_F16, 0)},
  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
			AARCH64_ARCH_NONE},
  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD
					 | AARCH64_FEATURE_COMPNUM, 0)},
  {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
			AARCH64_ARCH_NONE},
  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_F16
					 | AARCH64_FEATURE_SIMD, 0)},
  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
			AARCH64_ARCH_NONE},
  {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
			AARCH64_ARCH_NONE},
  {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
			AARCH64_ARCH_NONE},
  {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
			AARCH64_ARCH_NONE},
  {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
			AARCH64_ARCH_NONE},
  {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
			AARCH64_ARCH_NONE},
  {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
			AARCH64_ARCH_NONE},
  {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
  {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
			AARCH64_ARCH_NONE},
  {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
			AARCH64_ARCH_NONE},
  {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
			AARCH64_ARCH_NONE},
  {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SM4, 0)},
  {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_AES, 0)},
  {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
					 | AARCH64_FEATURE_SHA3, 0)},
  {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
  {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
			AARCH64_ARCH_NONE},
  {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
			AARCH64_ARCH_NONE},
  {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
  {"ls64",		AARCH64_FEATURE (AARCH64_FEATURE_LS64, 0),
			AARCH64_ARCH_NONE},
  {"flagm",		AARCH64_FEATURE (AARCH64_FEATURE_FLAGM, 0),
			AARCH64_ARCH_NONE},
  {"pauth",		AARCH64_FEATURE (AARCH64_FEATURE_PAC, 0),
			AARCH64_ARCH_NONE},
  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
};
9283
/* A long option of the form -m<option><argument>, dispatched to a
   dedicated sub-option parser.  */
struct aarch64_long_option_table
{
  const char *option;		/* Substring to match.  */
  const char *help;		/* Help information.  */
  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
  char *deprecated;		/* If non-null, print this message.  */
};
9291
9292 /* Transitive closure of features depending on set. */
9293 static aarch64_feature_set
9294 aarch64_feature_disable_set (aarch64_feature_set set)
9295 {
9296 const struct aarch64_option_cpu_value_table *opt;
9297 aarch64_feature_set prev = 0;
9298
9299 while (prev != set) {
9300 prev = set;
9301 for (opt = aarch64_features; opt->name != NULL; opt++)
9302 if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9303 AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9304 }
9305 return set;
9306 }
9307
9308 /* Transitive closure of dependencies of set. */
9309 static aarch64_feature_set
9310 aarch64_feature_enable_set (aarch64_feature_set set)
9311 {
9312 const struct aarch64_option_cpu_value_table *opt;
9313 aarch64_feature_set prev = 0;
9314
9315 while (prev != set) {
9316 prev = set;
9317 for (opt = aarch64_features; opt->name != NULL; opt++)
9318 if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9319 AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9320 }
9321 return set;
9322 }
9323
9324 static int
9325 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9326 bool ext_only)
9327 {
9328 /* We insist on extensions being added before being removed. We achieve
9329 this by using the ADDING_VALUE variable to indicate whether we are
9330 adding an extension (1) or removing it (0) and only allowing it to
9331 change in the order -1 -> 1 -> 0. */
9332 int adding_value = -1;
9333 aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9334
9335 /* Copy the feature set, so that we can modify it. */
9336 *ext_set = **opt_p;
9337 *opt_p = ext_set;
9338
9339 while (str != NULL && *str != 0)
9340 {
9341 const struct aarch64_option_cpu_value_table *opt;
9342 const char *ext = NULL;
9343 int optlen;
9344
9345 if (!ext_only)
9346 {
9347 if (*str != '+')
9348 {
9349 as_bad (_("invalid architectural extension"));
9350 return 0;
9351 }
9352
9353 ext = strchr (++str, '+');
9354 }
9355
9356 if (ext != NULL)
9357 optlen = ext - str;
9358 else
9359 optlen = strlen (str);
9360
9361 if (optlen >= 2 && startswith (str, "no"))
9362 {
9363 if (adding_value != 0)
9364 adding_value = 0;
9365 optlen -= 2;
9366 str += 2;
9367 }
9368 else if (optlen > 0)
9369 {
9370 if (adding_value == -1)
9371 adding_value = 1;
9372 else if (adding_value != 1)
9373 {
9374 as_bad (_("must specify extensions to add before specifying "
9375 "those to remove"));
9376 return false;
9377 }
9378 }
9379
9380 if (optlen == 0)
9381 {
9382 as_bad (_("missing architectural extension"));
9383 return 0;
9384 }
9385
9386 gas_assert (adding_value != -1);
9387
9388 for (opt = aarch64_features; opt->name != NULL; opt++)
9389 if (strncmp (opt->name, str, optlen) == 0)
9390 {
9391 aarch64_feature_set set;
9392
9393 /* Add or remove the extension. */
9394 if (adding_value)
9395 {
9396 set = aarch64_feature_enable_set (opt->value);
9397 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9398 }
9399 else
9400 {
9401 set = aarch64_feature_disable_set (opt->value);
9402 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9403 }
9404 break;
9405 }
9406
9407 if (opt->name == NULL)
9408 {
9409 as_bad (_("unknown architectural extension `%s'"), str);
9410 return 0;
9411 }
9412
9413 str = ext;
9414 };
9415
9416 return 1;
9417 }
9418
9419 static int
9420 aarch64_parse_cpu (const char *str)
9421 {
9422 const struct aarch64_cpu_option_table *opt;
9423 const char *ext = strchr (str, '+');
9424 size_t optlen;
9425
9426 if (ext != NULL)
9427 optlen = ext - str;
9428 else
9429 optlen = strlen (str);
9430
9431 if (optlen == 0)
9432 {
9433 as_bad (_("missing cpu name `%s'"), str);
9434 return 0;
9435 }
9436
9437 for (opt = aarch64_cpus; opt->name != NULL; opt++)
9438 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9439 {
9440 mcpu_cpu_opt = &opt->value;
9441 if (ext != NULL)
9442 return aarch64_parse_features (ext, &mcpu_cpu_opt, false);
9443
9444 return 1;
9445 }
9446
9447 as_bad (_("unknown cpu `%s'"), str);
9448 return 0;
9449 }
9450
9451 static int
9452 aarch64_parse_arch (const char *str)
9453 {
9454 const struct aarch64_arch_option_table *opt;
9455 const char *ext = strchr (str, '+');
9456 size_t optlen;
9457
9458 if (ext != NULL)
9459 optlen = ext - str;
9460 else
9461 optlen = strlen (str);
9462
9463 if (optlen == 0)
9464 {
9465 as_bad (_("missing architecture name `%s'"), str);
9466 return 0;
9467 }
9468
9469 for (opt = aarch64_archs; opt->name != NULL; opt++)
9470 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9471 {
9472 march_cpu_opt = &opt->value;
9473 if (ext != NULL)
9474 return aarch64_parse_features (ext, &march_cpu_opt, false);
9475
9476 return 1;
9477 }
9478
9479 as_bad (_("unknown architecture `%s'\n"), str);
9480 return 0;
9481 }
9482
/* ABIs.  */
/* A -mabi= entry: ABI name and the enum value it selects.  */
struct aarch64_option_abi_value_table
{
  const char *name;
  enum aarch64_abi_type value;
};

static const struct aarch64_option_abi_value_table aarch64_abis[] = {
  {"ilp32", AARCH64_ABI_ILP32},
  {"lp64", AARCH64_ABI_LP64},
};
9494
9495 static int
9496 aarch64_parse_abi (const char *str)
9497 {
9498 unsigned int i;
9499
9500 if (str[0] == '\0')
9501 {
9502 as_bad (_("missing abi name `%s'"), str);
9503 return 0;
9504 }
9505
9506 for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9507 if (strcmp (str, aarch64_abis[i].name) == 0)
9508 {
9509 aarch64_abi = aarch64_abis[i].value;
9510 return 1;
9511 }
9512
9513 as_bad (_("unknown abi `%s'\n"), str);
9514 return 0;
9515 }
9516
/* Long -m options that carry an argument, each dispatched to its
   sub-option parser by md_parse_option.  */
static struct aarch64_long_option_table aarch64_long_opts[] = {
#ifdef OBJ_ELF
  {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
   aarch64_parse_abi, NULL},
#endif /* OBJ_ELF */
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   aarch64_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   aarch64_parse_arch, NULL},
  {NULL, NULL, 0, NULL}
};
9528
/* Handle command-line option C with argument ARG.  Returns 1 if the
   option was recognized (and consumed), 0 otherwise.  */
int
md_parse_option (int c, const char *arg)
{
  struct aarch64_option_table *opt;
  struct aarch64_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Try the simple flag options first, then the long options that
	 take an argument.  */
      for (opt = aarch64_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && startswith (arg, lopt->option + 1))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
9595
9596 void
9597 md_show_usage (FILE * fp)
9598 {
9599 struct aarch64_option_table *opt;
9600 struct aarch64_long_option_table *lopt;
9601
9602 fprintf (fp, _(" AArch64-specific assembler options:\n"));
9603
9604 for (opt = aarch64_opts; opt->option != NULL; opt++)
9605 if (opt->help != NULL)
9606 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
9607
9608 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9609 if (lopt->help != NULL)
9610 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
9611
9612 #ifdef OPTION_EB
9613 fprintf (fp, _("\
9614 -EB assemble code for a big-endian cpu\n"));
9615 #endif
9616
9617 #ifdef OPTION_EL
9618 fprintf (fp, _("\
9619 -EL assemble code for a little-endian cpu\n"));
9620 #endif
9621 }
9622
/* Parse a .cpu directive.  Reads a CPU name (optionally followed by
   "+ext" extensions) from the input line and switches CPU_VARIANT to
   it.  The NUL temporarily written over the input line is restored on
   every exit path.  */

static void
s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct aarch64_cpu_option_table *opt;
  char saved_char;
  char *name;
  char *ext;
  size_t optlen;

  /* Terminate the operand in place so it can be used as a C string.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  ext = strchr (name, '+');

  if (ext != NULL)
    optlen = ext - name;
  else
    optlen = strlen (name);

  /* Skip the first "all" entry.  */
  for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
    if (strlen (opt->name) == optlen
	&& strncmp (name, opt->name, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	if (ext != NULL)
	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
	    return;

	cpu_variant = *mcpu_cpu_opt;

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
9667
9668
9669 /* Parse a .arch directive. */
9670
9671 static void
9672 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9673 {
9674 const struct aarch64_arch_option_table *opt;
9675 char saved_char;
9676 char *name;
9677 char *ext;
9678 size_t optlen;
9679
9680 name = input_line_pointer;
9681 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9682 input_line_pointer++;
9683 saved_char = *input_line_pointer;
9684 *input_line_pointer = 0;
9685
9686 ext = strchr (name, '+');
9687
9688 if (ext != NULL)
9689 optlen = ext - name;
9690 else
9691 optlen = strlen (name);
9692
9693 /* Skip the first "all" entry. */
9694 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9695 if (strlen (opt->name) == optlen
9696 && strncmp (name, opt->name, optlen) == 0)
9697 {
9698 mcpu_cpu_opt = &opt->value;
9699 if (ext != NULL)
9700 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, false))
9701 return;
9702
9703 cpu_variant = *mcpu_cpu_opt;
9704
9705 *input_line_pointer = saved_char;
9706 demand_empty_rest_of_line ();
9707 return;
9708 }
9709
9710 as_bad (_("unknown architecture `%s'\n"), name);
9711 *input_line_pointer = saved_char;
9712 ignore_rest_of_line ();
9713 }
9714
9715 /* Parse a .arch_extension directive. */
9716
9717 static void
9718 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9719 {
9720 char saved_char;
9721 char *ext = input_line_pointer;;
9722
9723 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9724 input_line_pointer++;
9725 saved_char = *input_line_pointer;
9726 *input_line_pointer = 0;
9727
9728 if (!aarch64_parse_features (ext, &mcpu_cpu_opt, true))
9729 return;
9730
9731 cpu_variant = *mcpu_cpu_opt;
9732
9733 *input_line_pointer = saved_char;
9734 demand_empty_rest_of_line ();
9735 }
9736
9737 /* Copy symbol information. */
9738
void
aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
{
  /* Propagate the AArch64-specific per-symbol flag word from SRC to
     DEST; no other symbol attributes are touched here.  */
  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
}
9744
9745 #ifdef OBJ_ELF
9746 /* Same as elf_copy_symbol_attributes, but without copying st_other.
9747 This is needed so AArch64 specific st_other values can be independently
9748 specified for an IFUNC resolver (that is called by the dynamic linker)
9749 and the symbol it resolves (aliased to the resolver). In particular,
9750 if a function symbol has special st_other value set via directives,
9751 then attaching an IFUNC resolver to that symbol should not override
9752 the st_other setting. Requiring the directive on the IFUNC resolver
9753 symbol would be unexpected and problematic in C code, where the two
9754 symbols appear as two independent function declarations. */
9755
9756 void
9757 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
9758 {
9759 struct elf_obj_sy *srcelf = symbol_get_obj (src);
9760 struct elf_obj_sy *destelf = symbol_get_obj (dest);
9761 if (srcelf->size)
9762 {
9763 if (destelf->size == NULL)
9764 destelf->size = XNEW (expressionS);
9765 *destelf->size = *srcelf->size;
9766 }
9767 else
9768 {
9769 free (destelf->size);
9770 destelf->size = NULL;
9771 }
9772 S_SET_SIZE (dest, S_GET_SIZE (src));
9773 }
9774 #endif