1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2014 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 #define END_OF_INSN '\0'
46
47 static aarch64_feature_set cpu_variant;
48
49 /* Variables that we set while parsing command-line options. Once all
50 options have been read we re-process these values to set the real
51 assembly flags. */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54
55 /* Constants for known architecture features. */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58 static const aarch64_feature_set aarch64_arch_any = AARCH64_ANY;
59 static const aarch64_feature_set aarch64_arch_none = AARCH64_ARCH_NONE;
60
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
63 static symbolS *GOT_symbol;
64
65 /* Which ABI to use. */
66 enum aarch64_abi_type
67 {
68 AARCH64_ABI_LP64 = 0,
69 AARCH64_ABI_ILP32 = 1
70 };
71
72 /* AArch64 ABI for the output file. */
73 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
74
75 /* When non-zero, program to a 32-bit model, in which the C data types
76 int, long and all pointer types are 32-bit objects (ILP32); or to a
77 64-bit model, in which the C int type is 32-bits but the C long type
78 and all pointer types are 64-bit objects (LP64). */
79 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
80 #endif
81
82 enum neon_el_type
83 {
84 NT_invtype = -1,
85 NT_b,
86 NT_h,
87 NT_s,
88 NT_d,
89 NT_q
90 };
91
92 /* Bits for DEFINED field in neon_type_el. */
93 #define NTA_HASTYPE 1
94 #define NTA_HASINDEX 2
95
96 struct neon_type_el
97 {
98 enum neon_el_type type;
99 unsigned char defined;
100 unsigned width;
101 int64_t index;
102 };
103
104 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
105
106 struct reloc
107 {
108 bfd_reloc_code_real_type type;
109 expressionS exp;
110 int pc_rel;
111 enum aarch64_opnd opnd;
112 uint32_t flags;
113 unsigned need_libopcodes_p : 1;
114 };
115
116 struct aarch64_instruction
117 {
118 /* libopcodes structure for instruction intermediate representation. */
119 aarch64_inst base;
120 /* Record assembly errors found during the parsing. */
121 struct
122 {
123 enum aarch64_operand_error_kind kind;
124 const char *error;
125 } parsing_error;
126 /* The condition that appears in the assembly line. */
127 int cond;
128 /* Relocation information (including the GAS internal fixup). */
129 struct reloc reloc;
130 /* Need to generate an immediate in the literal pool. */
131 unsigned gen_lit_pool : 1;
132 };
133
134 typedef struct aarch64_instruction aarch64_instruction;
135
136 static aarch64_instruction inst;
137
138 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
139 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
140
141 /* Diagnostics inline function utilities.
142
143 These are lightweight utilities which should only be called by parse_operands
144 and other parsers. GAS processes each assembly line by parsing it against
145 instruction template(s); in the case of multiple templates (for the same
146 mnemonic name), those templates are tried one by one until one succeeds or
147 all fail. An assembly line may fail a few templates before being
148 successfully parsed; an error saved here in most cases is not a user error
149 but an indication that the current template is not the right one.
150 Therefore it is very important that errors can be saved at a low cost during
151 the parsing; we don't want to slow down the whole parsing by recording
152 non-user errors in detail.
153
154 Remember that the objective is to help GAS pick the most appropriate
155 error message in the case of multiple templates, e.g. FMOV which has 8
156 templates. */
157
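/* Illustrative sketch (added here for exposition; not upstream code): a
   typical operand parser records at most one cheap error and bails out,
   e.g.

     if (! aarch64_check_reg_type (reg, type))
       {
         set_default_error ();
         return PARSE_FAIL;
       }

   The error is only remembered at this point; if every template for the
   mnemonic fails, GAS turns the saved error into a user-visible
   diagnostic via as_bad.  */
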
158 static inline void
159 clear_error (void)
160 {
161 inst.parsing_error.kind = AARCH64_OPDE_NIL;
162 inst.parsing_error.error = NULL;
163 }
164
165 static inline bfd_boolean
166 error_p (void)
167 {
168 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
169 }
170
171 static inline const char *
172 get_error_message (void)
173 {
174 return inst.parsing_error.error;
175 }
176
177 static inline void
178 set_error_message (const char *error)
179 {
180 inst.parsing_error.error = error;
181 }
182
183 static inline enum aarch64_operand_error_kind
184 get_error_kind (void)
185 {
186 return inst.parsing_error.kind;
187 }
188
189 static inline void
190 set_error_kind (enum aarch64_operand_error_kind kind)
191 {
192 inst.parsing_error.kind = kind;
193 }
194
195 static inline void
196 set_error (enum aarch64_operand_error_kind kind, const char *error)
197 {
198 inst.parsing_error.kind = kind;
199 inst.parsing_error.error = error;
200 }
201
202 static inline void
203 set_recoverable_error (const char *error)
204 {
205 set_error (AARCH64_OPDE_RECOVERABLE, error);
206 }
207
208 /* Use the DESC field of the corresponding aarch64_operand entry to compose
209 the error message. */
210 static inline void
211 set_default_error (void)
212 {
213 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
214 }
215
216 static inline void
217 set_syntax_error (const char *error)
218 {
219 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
220 }
221
222 static inline void
223 set_first_syntax_error (const char *error)
224 {
225 if (! error_p ())
226 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
227 }
228
229 static inline void
230 set_fatal_syntax_error (const char *error)
231 {
232 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
233 }
234 \f
235 /* Number of littlenums required to hold an extended precision number. */
236 #define MAX_LITTLENUMS 6
237
238 /* Return value for certain parsers when the parsing fails; those parsers
239 return the information of the parsed result, e.g. register number, on
240 success. */
241 #define PARSE_FAIL -1
242
243 /* This is an invalid condition code that means no conditional field is
244 present. */
245 #define COND_ALWAYS 0x10
246
247 typedef struct
248 {
249 const char *template;
250 unsigned long value;
251 } asm_barrier_opt;
252
253 typedef struct
254 {
255 const char *template;
256 uint32_t value;
257 } asm_nzcv;
258
259 struct reloc_entry
260 {
261 char *name;
262 bfd_reloc_code_real_type reloc;
263 };
264
265 /* Structure for a hash table entry for a register. */
266 typedef struct
267 {
268 const char *name;
269 unsigned char number;
270 unsigned char type;
271 unsigned char builtin;
272 } reg_entry;
273
274 /* Macros to define the register types and masks for the purpose
275 of parsing. */
276
277 #undef AARCH64_REG_TYPES
278 #define AARCH64_REG_TYPES \
279 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
280 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
281 BASIC_REG_TYPE(SP_32) /* wsp */ \
282 BASIC_REG_TYPE(SP_64) /* sp */ \
283 BASIC_REG_TYPE(Z_32) /* wzr */ \
284 BASIC_REG_TYPE(Z_64) /* xzr */ \
285 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
286 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
287 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
288 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
289 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
290 BASIC_REG_TYPE(CN) /* c[0-7] */ \
291 BASIC_REG_TYPE(VN) /* v[0-31] */ \
292 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
293 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
294 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
295 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
296 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
297 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
298 /* Typecheck: any [BHSDQ]P FP. */ \
299 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
300 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
301 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
302 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
303 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
304 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
305 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
306 /* Any integer register; used for error messages only. */ \
307 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
308 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
309 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
310 /* Pseudo type to mark the end of the enumerator sequence. */ \
311 BASIC_REG_TYPE(MAX)
312
313 #undef BASIC_REG_TYPE
314 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
315 #undef MULTI_REG_TYPE
316 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
317
318 /* Register type enumerators. */
319 typedef enum
320 {
321 /* A list of REG_TYPE_*. */
322 AARCH64_REG_TYPES
323 } aarch64_reg_type;
324
325 #undef BASIC_REG_TYPE
326 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
327 #undef REG_TYPE
328 #define REG_TYPE(T) (1 << REG_TYPE_##T)
329 #undef MULTI_REG_TYPE
330 #define MULTI_REG_TYPE(T,V) V,
331
332 /* Values indexed by aarch64_reg_type to assist the type checking. */
333 static const unsigned reg_type_masks[] =
334 {
335 AARCH64_REG_TYPES
336 };
337
338 #undef BASIC_REG_TYPE
339 #undef REG_TYPE
340 #undef MULTI_REG_TYPE
341 #undef AARCH64_REG_TYPES
342
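/* Illustrative expansion (a sketch added for clarity, not generated
   output): with the X-macro definitions above, the enumerators come out
   as REG_TYPE_R_32 = 0, REG_TYPE_R_64, ..., REG_TYPE_MAX, and the masks
   as, for example,

     reg_type_masks[REG_TYPE_R_64]   == (1 << REG_TYPE_R_64)
     reg_type_masks[REG_TYPE_R64_SP] == (1 << REG_TYPE_R_64)
                                        | (1 << REG_TYPE_SP_64)

   i.e. a MULTI_REG_TYPE entry is simply the OR of the masks of the
   basic register types it accepts.  */
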
343 /* Diagnostics used when we don't get a register of the expected type.
344 Note: this has to be kept synchronized with the aarch64_reg_type
345 definitions above. */
346 static const char *
347 get_reg_expected_msg (aarch64_reg_type reg_type)
348 {
349 const char *msg;
350
351 switch (reg_type)
352 {
353 case REG_TYPE_R_32:
354 msg = N_("integer 32-bit register expected");
355 break;
356 case REG_TYPE_R_64:
357 msg = N_("integer 64-bit register expected");
358 break;
359 case REG_TYPE_R_N:
360 msg = N_("integer register expected");
361 break;
362 case REG_TYPE_R_Z_SP:
363 msg = N_("integer, zero or SP register expected");
364 break;
365 case REG_TYPE_FP_B:
366 msg = N_("8-bit SIMD scalar register expected");
367 break;
368 case REG_TYPE_FP_H:
369 msg = N_("16-bit SIMD scalar or floating-point half precision "
370 "register expected");
371 break;
372 case REG_TYPE_FP_S:
373 msg = N_("32-bit SIMD scalar or floating-point single precision "
374 "register expected");
375 break;
376 case REG_TYPE_FP_D:
377 msg = N_("64-bit SIMD scalar or floating-point double precision "
378 "register expected");
379 break;
380 case REG_TYPE_FP_Q:
381 msg = N_("128-bit SIMD scalar or floating-point quad precision "
382 "register expected");
383 break;
384 case REG_TYPE_CN:
385 msg = N_("C0 - C15 expected");
386 break;
387 case REG_TYPE_R_Z_BHSDQ_V:
388 msg = N_("register expected");
389 break;
390 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
391 msg = N_("SIMD scalar or floating-point register expected");
392 break;
393 case REG_TYPE_VN: /* any V reg */
394 msg = N_("vector register expected");
395 break;
396 default:
397 as_fatal (_("invalid register type %d"), reg_type);
398 }
399 return msg;
400 }
401
402 /* Some well known registers that we refer to directly elsewhere. */
403 #define REG_SP 31
404
405 /* Instructions take 4 bytes in the object file. */
406 #define INSN_SIZE 4
407
408 /* Define some common error messages. */
409 #define BAD_SP _("SP not allowed here")
410
411 static struct hash_control *aarch64_ops_hsh;
412 static struct hash_control *aarch64_cond_hsh;
413 static struct hash_control *aarch64_shift_hsh;
414 static struct hash_control *aarch64_sys_regs_hsh;
415 static struct hash_control *aarch64_pstatefield_hsh;
416 static struct hash_control *aarch64_sys_regs_ic_hsh;
417 static struct hash_control *aarch64_sys_regs_dc_hsh;
418 static struct hash_control *aarch64_sys_regs_at_hsh;
419 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
420 static struct hash_control *aarch64_reg_hsh;
421 static struct hash_control *aarch64_barrier_opt_hsh;
422 static struct hash_control *aarch64_nzcv_hsh;
423 static struct hash_control *aarch64_pldop_hsh;
424
425 /* Stuff needed to resolve the label ambiguity
426 As:
427 ...
428 label: <insn>
429 may differ from:
430 ...
431 label:
432 <insn> */
433
434 static symbolS *last_label_seen;
435
436 /* Literal pool structure. Held on a per-section
437 and per-sub-section basis. */
438
439 #define MAX_LITERAL_POOL_SIZE 1024
440 typedef struct literal_expression
441 {
442 expressionS exp;
443 /* If exp.op == O_big then this bignum holds a copy of the global bignum value. */
444 LITTLENUM_TYPE * bignum;
445 } literal_expression;
446
447 typedef struct literal_pool
448 {
449 literal_expression literals[MAX_LITERAL_POOL_SIZE];
450 unsigned int next_free_entry;
451 unsigned int id;
452 symbolS *symbol;
453 segT section;
454 subsegT sub_section;
455 int size;
456 struct literal_pool *next;
457 } literal_pool;
458
459 /* Pointer to a linked list of literal pools. */
460 static literal_pool *list_of_pools = NULL;
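
/* Usage sketch (added for illustration; the "=immediate" handling itself
   lives elsewhere in this file): literals that cannot be encoded inline,
   e.g. from

     ldr x0, =0x1234567890abcdef   // deposits the constant in a pool
     ...
     .ltorg                        // dump and empty the pool here

   are recorded by add_to_lit_pool in the pool for the current
   (sub)section and are emitted by s_ltorg, which then resets
   next_free_entry to 0.  */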
461 \f
462 /* Pure syntax. */
463
464 /* This array holds the chars that always start a comment. If the
465 pre-processor is disabled, these aren't very useful. */
466 const char comment_chars[] = "";
467
468 /* This array holds the chars that only start a comment at the beginning of
469 a line. If the line seems to have the form '# 123 filename'
470 .line and .file directives will appear in the pre-processed output. */
471 /* Note that input_file.c hand checks for '#' at the beginning of the
472 first line of the input file. This is because the compiler outputs
473 #NO_APP at the beginning of its output. */
474 /* Also note that comments like this one will always work. */
475 const char line_comment_chars[] = "#";
476
477 const char line_separator_chars[] = ";";
478
479 /* Chars that can be used to separate the mantissa
480 from the exponent in floating point numbers. */
481 const char EXP_CHARS[] = "eE";
482
483 /* Chars that mean this number is a floating point constant. */
484 /* As in 0f12.456 */
485 /* or 0d1.2345e12 */
486
487 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
488
489 /* Prefix character that indicates the start of an immediate value. */
490 #define is_immediate_prefix(C) ((C) == '#')
491
492 /* Separator character handling. */
493
494 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
495
496 static inline bfd_boolean
497 skip_past_char (char **str, char c)
498 {
499 if (**str == c)
500 {
501 (*str)++;
502 return TRUE;
503 }
504 else
505 return FALSE;
506 }
507
508 #define skip_past_comma(str) skip_past_char (str, ',')
509
510 /* Arithmetic expressions (possibly involving symbols). */
511
512 static bfd_boolean in_my_get_expression_p = FALSE;
513
514 /* Third argument to my_get_expression. */
515 #define GE_NO_PREFIX 0
516 #define GE_OPT_PREFIX 1
517
518 /* Return TRUE if the string pointed to by *STR is successfully parsed
519 as a valid expression; *EP will be filled with the information of
520 such an expression. Otherwise return FALSE. */
521
522 static bfd_boolean
523 my_get_expression (expressionS * ep, char **str, int prefix_mode,
524 int reject_absent)
525 {
526 char *save_in;
527 segT seg;
528 int prefix_present_p = 0;
529
530 switch (prefix_mode)
531 {
532 case GE_NO_PREFIX:
533 break;
534 case GE_OPT_PREFIX:
535 if (is_immediate_prefix (**str))
536 {
537 (*str)++;
538 prefix_present_p = 1;
539 }
540 break;
541 default:
542 abort ();
543 }
544
545 memset (ep, 0, sizeof (expressionS));
546
547 save_in = input_line_pointer;
548 input_line_pointer = *str;
549 in_my_get_expression_p = TRUE;
550 seg = expression (ep);
551 in_my_get_expression_p = FALSE;
552
553 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
554 {
555 /* We found a bad expression in md_operand(). */
556 *str = input_line_pointer;
557 input_line_pointer = save_in;
558 if (prefix_present_p && ! error_p ())
559 set_fatal_syntax_error (_("bad expression"));
560 else
561 set_first_syntax_error (_("bad expression"));
562 return FALSE;
563 }
564
565 #ifdef OBJ_AOUT
566 if (seg != absolute_section
567 && seg != text_section
568 && seg != data_section
569 && seg != bss_section && seg != undefined_section)
570 {
571 set_syntax_error (_("bad segment"));
572 *str = input_line_pointer;
573 input_line_pointer = save_in;
574 return FALSE;
575 }
576 #else
577 (void) seg;
578 #endif
579
580 *str = input_line_pointer;
581 input_line_pointer = save_in;
582 return TRUE;
583 }
584
585 /* Turn a string in input_line_pointer into a floating point constant
586 of type TYPE, and store the appropriate bytes in *LITP. The number
587 of LITTLENUMS emitted is stored in *SIZEP. An error message is
588 returned, or NULL on OK. */
589
590 char *
591 md_atof (int type, char *litP, int *sizeP)
592 {
593 return ieee_md_atof (type, litP, sizeP, target_big_endian);
594 }
595
596 /* We handle all bad expressions here, so that we can report the faulty
597 instruction in the error message. */
598 void
599 md_operand (expressionS * exp)
600 {
601 if (in_my_get_expression_p)
602 exp->X_op = O_illegal;
603 }
604
605 /* Immediate values. */
606
607 /* Errors may be set multiple times during parsing or bit encoding
608 (particularly in the Neon bits), but usually the earliest error which is set
609 will be the most meaningful. Avoid overwriting it with later (cascading)
610 errors by calling this function. */
611
612 static void
613 first_error (const char *error)
614 {
615 if (! error_p ())
616 set_syntax_error (error);
617 }
618
619 /* Similar to first_error, but this function accepts a formatted error
620 message. */
621 static void
622 first_error_fmt (const char *format, ...)
623 {
624 va_list args;
625 enum
626 { size = 100 };
627 /* N.B. this single buffer will not cause error messages for different
628 instructions to pollute each other; this is because at the end of
629 processing of each assembly line, the error message, if any, will be
630 collected by as_bad. */
631 static char buffer[size];
632
633 if (! error_p ())
634 {
635 int ret ATTRIBUTE_UNUSED;
636 va_start (args, format);
637 ret = vsnprintf (buffer, size, format, args);
638 know (ret <= size - 1 && ret >= 0);
639 va_end (args);
640 set_syntax_error (buffer);
641 }
642 }
643
644 /* Register parsing. */
645
646 /* Generic register parser which is called by other specialized
647 register parsers.
648 CCP points to what should be the beginning of a register name.
649 If it is indeed a valid register name, advance CCP over it and
650 return the reg_entry structure; otherwise return NULL.
651 It does not issue diagnostics. */
652
653 static reg_entry *
654 parse_reg (char **ccp)
655 {
656 char *start = *ccp;
657 char *p;
658 reg_entry *reg;
659
660 #ifdef REGISTER_PREFIX
661 if (*start != REGISTER_PREFIX)
662 return NULL;
663 start++;
664 #endif
665
666 p = start;
667 if (!ISALPHA (*p) || !is_name_beginner (*p))
668 return NULL;
669
670 do
671 p++;
672 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
673
674 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
675
676 if (!reg)
677 return NULL;
678
679 *ccp = p;
680 return reg;
681 }
682
683 /* Return TRUE if the type of the register REG is compatible with the
684 requested type TYPE; otherwise return FALSE. */
685 static bfd_boolean
686 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
687 {
688 if (reg->type == type)
689 return TRUE;
690
691 switch (type)
692 {
693 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
694 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
695 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
696 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
697 case REG_TYPE_VN: /* Vector register. */
698 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
699 return ((reg_type_masks[reg->type] & reg_type_masks[type])
700 == reg_type_masks[reg->type]);
701 default:
702 as_fatal ("unhandled type %d", type);
703 abort ();
704 }
705 }
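
/* Worked example (added for clarity): checking an X register
   (REG_TYPE_R_64) against the requested type REG_TYPE_R_Z_SP takes the
   multi-type branch above, and

     reg_type_masks[REG_TYPE_R_64] & reg_type_masks[REG_TYPE_R_Z_SP]
       == reg_type_masks[REG_TYPE_R_64]

   so the check succeeds, whereas e.g. a Cn register fails because its
   bit is not part of the R_Z_SP mask.  */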
706
707 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
708 Return the register number otherwise. *ISREG32 is set to one if the
709 register is 32-bit wide; *ISREGZERO is set to one if the register is
710 of type Z_32 or Z_64.
711 Note that this function does not issue any diagnostics. */
712
713 static int
714 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
715 int *isreg32, int *isregzero)
716 {
717 char *str = *ccp;
718 const reg_entry *reg = parse_reg (&str);
719
720 if (reg == NULL)
721 return PARSE_FAIL;
722
723 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
724 return PARSE_FAIL;
725
726 switch (reg->type)
727 {
728 case REG_TYPE_SP_32:
729 case REG_TYPE_SP_64:
730 if (reject_sp)
731 return PARSE_FAIL;
732 *isreg32 = reg->type == REG_TYPE_SP_32;
733 *isregzero = 0;
734 break;
735 case REG_TYPE_R_32:
736 case REG_TYPE_R_64:
737 *isreg32 = reg->type == REG_TYPE_R_32;
738 *isregzero = 0;
739 break;
740 case REG_TYPE_Z_32:
741 case REG_TYPE_Z_64:
742 if (reject_rz)
743 return PARSE_FAIL;
744 *isreg32 = reg->type == REG_TYPE_Z_32;
745 *isregzero = 1;
746 break;
747 default:
748 return PARSE_FAIL;
749 }
750
751 *ccp = str;
752
753 return reg->number;
754 }
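
/* Examples (illustrative only; the register numbers come from the
   built-in register table set up elsewhere in this file): parsing "x5"
   returns 5 with *ISREG32 == 0 and *ISREGZERO == 0; "wzr" returns 31
   with *ISREG32 == 1 and *ISREGZERO == 1; "sp" returns 31 with
   *ISREG32 == 0 and *ISREGZERO == 0, unless REJECT_SP is set, in which
   case PARSE_FAIL is returned.  */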
755
756 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
757 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
758 otherwise return FALSE.
759
760 Accept only one occurrence of:
761 8b 16b 4h 8h 2s 4s 1d 2d
762 b h s d q */
763 static bfd_boolean
764 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
765 {
766 char *ptr = *str;
767 unsigned width;
768 unsigned element_size;
769 enum neon_el_type type;
770
771 /* skip '.' */
772 ptr++;
773
774 if (!ISDIGIT (*ptr))
775 {
776 width = 0;
777 goto elt_size;
778 }
779 width = strtoul (ptr, &ptr, 10);
780 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
781 {
782 first_error_fmt (_("bad size %d in vector width specifier"), width);
783 return FALSE;
784 }
785
786 elt_size:
787 switch (TOLOWER (*ptr))
788 {
789 case 'b':
790 type = NT_b;
791 element_size = 8;
792 break;
793 case 'h':
794 type = NT_h;
795 element_size = 16;
796 break;
797 case 's':
798 type = NT_s;
799 element_size = 32;
800 break;
801 case 'd':
802 type = NT_d;
803 element_size = 64;
804 break;
805 case 'q':
806 if (width == 1)
807 {
808 type = NT_q;
809 element_size = 128;
810 break;
811 }
812 /* fall through. */
813 default:
814 if (*ptr != '\0')
815 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
816 else
817 first_error (_("missing element size"));
818 return FALSE;
819 }
820 if (width != 0 && width * element_size != 64 && width * element_size != 128)
821 {
822 first_error_fmt (_
823 ("invalid element size %d and vector size combination %c"),
824 width, *ptr);
825 return FALSE;
826 }
827 ptr++;
828
829 parsed_type->type = type;
830 parsed_type->width = width;
831
832 *str = ptr;
833
834 return TRUE;
835 }
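
/* Examples (added for illustration): ".4s" parses to type NT_s with
   width 4 (4 x 32 == 128 bits, so it passes the total-size check);
   ".2d" parses to NT_d with width 2; a bare ".s" parses to NT_s with
   width 0, for which parse_typed_reg then expects an element index
   (e.g. "v1.s[2]"), except inside register lists.  */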
836
837 /* Parse a single type, e.g. ".8b", leading period included.
838 Only applicable to Vn registers.
839
840 Return TRUE on success; otherwise return FALSE. */
841 static bfd_boolean
842 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
843 {
844 char *str = *ccp;
845
846 if (*str == '.')
847 {
848 if (! parse_neon_type_for_operand (vectype, &str))
849 {
850 first_error (_("vector type expected"));
851 return FALSE;
852 }
853 }
854 else
855 return FALSE;
856
857 *ccp = str;
858
859 return TRUE;
860 }
861
862 /* Parse a register of the type TYPE.
863
864 Return PARSE_FAIL if the string pointed to by *CCP is not a valid register
865 name or the parsed register is not of TYPE.
866
867 Otherwise return the register number, and optionally fill in the actual
868 type of the register in *RTYPE when multiple alternatives were given, and
869 return the register shape and element index information in *TYPEINFO.
870
871 IN_REG_LIST should be set with TRUE if the caller is parsing a register
872 list. */
873
874 static int
875 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
876 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
877 {
878 char *str = *ccp;
879 const reg_entry *reg = parse_reg (&str);
880 struct neon_type_el atype;
881 struct neon_type_el parsetype;
882 bfd_boolean is_typed_vecreg = FALSE;
883
884 atype.defined = 0;
885 atype.type = NT_invtype;
886 atype.width = -1;
887 atype.index = 0;
888
889 if (reg == NULL)
890 {
891 if (typeinfo)
892 *typeinfo = atype;
893 set_default_error ();
894 return PARSE_FAIL;
895 }
896
897 if (! aarch64_check_reg_type (reg, type))
898 {
899 DEBUG_TRACE ("reg type check failed");
900 set_default_error ();
901 return PARSE_FAIL;
902 }
903 type = reg->type;
904
905 if (type == REG_TYPE_VN
906 && parse_neon_operand_type (&parsetype, &str))
907 {
908 /* Register is of the form Vn.[bhsdq]. */
909 is_typed_vecreg = TRUE;
910
911 if (parsetype.width == 0)
912 /* Expect index. In the new scheme we cannot have
913 Vn.[bhsdq] represent a scalar. Therefore any
914 Vn.[bhsdq] should have an index following it.
915 Except in reglists, of course. */
916 atype.defined |= NTA_HASINDEX;
917 else
918 atype.defined |= NTA_HASTYPE;
919
920 atype.type = parsetype.type;
921 atype.width = parsetype.width;
922 }
923
924 if (skip_past_char (&str, '['))
925 {
926 expressionS exp;
927
928 /* Reject Sn[index] syntax. */
929 if (!is_typed_vecreg)
930 {
931 first_error (_("this type of register can't be indexed"));
932 return PARSE_FAIL;
933 }
934
935 if (in_reg_list == TRUE)
936 {
937 first_error (_("index not allowed inside register list"));
938 return PARSE_FAIL;
939 }
940
941 atype.defined |= NTA_HASINDEX;
942
943 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
944
945 if (exp.X_op != O_constant)
946 {
947 first_error (_("constant expression required"));
948 return PARSE_FAIL;
949 }
950
951 if (! skip_past_char (&str, ']'))
952 return PARSE_FAIL;
953
954 atype.index = exp.X_add_number;
955 }
956 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
957 {
958 /* Indexed vector register expected. */
959 first_error (_("indexed vector register expected"));
960 return PARSE_FAIL;
961 }
962
963 /* A vector reg Vn should be typed or indexed. */
964 if (type == REG_TYPE_VN && atype.defined == 0)
965 {
966 first_error (_("invalid use of vector register"));
967 }
968
969 if (typeinfo)
970 *typeinfo = atype;
971
972 if (rtype)
973 *rtype = type;
974
975 *ccp = str;
976
977 return reg->number;
978 }
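
/* Example (illustrative): for the operand string "v2.4s[1]" this
   function returns 2, sets *RTYPE to REG_TYPE_VN and fills *TYPEINFO
   with type == NT_s, width == 4, index == 1 and
   defined == (NTA_HASTYPE | NTA_HASINDEX); for a bare "v2.4s" inside a
   register list only NTA_HASTYPE is set.  */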
979
980 /* Parse register.
981
982 Return the register number on success; return PARSE_FAIL otherwise.
983
984 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
985 the register (e.g. NEON double or quad reg when either has been requested).
986
987 If this is a NEON vector register with additional type information, fill
988 in the struct pointed to by VECTYPE (if non-NULL).
989
990 This parser does not handle register lists. */
991
992 static int
993 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
994 aarch64_reg_type *rtype, struct neon_type_el *vectype)
995 {
996 struct neon_type_el atype;
997 char *str = *ccp;
998 int reg = parse_typed_reg (&str, type, rtype, &atype,
999 /*in_reg_list= */ FALSE);
1000
1001 if (reg == PARSE_FAIL)
1002 return PARSE_FAIL;
1003
1004 if (vectype)
1005 *vectype = atype;
1006
1007 *ccp = str;
1008
1009 return reg;
1010 }
1011
1012 static inline bfd_boolean
1013 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
1014 {
1015 return
1016 e1.type == e2.type
1017 && e1.defined == e2.defined
1018 && e1.width == e2.width && e1.index == e2.index;
1019 }
1020
1021 /* This function parses the NEON register list. On success, it returns
1022 the parsed register list information in the following encoded format:
1023
1024 bit 17-21 | 12-16 | 7-11 | 2-6 | 0-1
1025 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1026
1027 The information of the register shape and/or index is returned in
1028 *VECTYPE.
1029
1030 It returns PARSE_FAIL if the register list is invalid.
1031
1032 The list contains one to four registers.
1033 Each register can be one of:
1034 <Vt>.<T>[<index>]
1035 <Vt>.<T>
1036 All <T> should be identical.
1037 All <index> should be identical.
1038 There are restrictions on <Vt> numbers which are checked later
1039 (by reg_list_valid_p). */
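
/* Worked example (added for illustration): "{v4.4s, v5.4s, v6.4s}"
   parses to the three registers 4, 5 and 6, so

     ret_val == 4 | (5 << 5) | (6 << 10)

   and the function returns (ret_val << 2) | (3 - 1) == 0x6292, i.e.
   num_of_reg - 1 == 2 in bits 0-1, 4 in bits 2-6, 5 in bits 7-11 and
   6 in bits 12-16; *VECTYPE records NT_s with width 4.  */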
1040
1041 static int
1042 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1043 {
1044 char *str = *ccp;
1045 int nb_regs;
1046 struct neon_type_el typeinfo, typeinfo_first;
1047 int val, val_range;
1048 int in_range;
1049 int ret_val;
1050 int i;
1051 bfd_boolean error = FALSE;
1052 bfd_boolean expect_index = FALSE;
1053
1054 if (*str != '{')
1055 {
1056 set_syntax_error (_("expecting {"));
1057 return PARSE_FAIL;
1058 }
1059 str++;
1060
1061 nb_regs = 0;
1062 typeinfo_first.defined = 0;
1063 typeinfo_first.type = NT_invtype;
1064 typeinfo_first.width = -1;
1065 typeinfo_first.index = 0;
1066 ret_val = 0;
1067 val = -1;
1068 val_range = -1;
1069 in_range = 0;
1070 do
1071 {
1072 if (in_range)
1073 {
1074 str++; /* skip over '-' */
1075 val_range = val;
1076 }
1077 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1078 /*in_reg_list= */ TRUE);
1079 if (val == PARSE_FAIL)
1080 {
1081 set_first_syntax_error (_("invalid vector register in list"));
1082 error = TRUE;
1083 continue;
1084 }
1085 /* reject [bhsd]n */
1086 if (typeinfo.defined == 0)
1087 {
1088 set_first_syntax_error (_("invalid scalar register in list"));
1089 error = TRUE;
1090 continue;
1091 }
1092
1093 if (typeinfo.defined & NTA_HASINDEX)
1094 expect_index = TRUE;
1095
1096 if (in_range)
1097 {
1098 if (val < val_range)
1099 {
1100 set_first_syntax_error
1101 (_("invalid range in vector register list"));
1102 error = TRUE;
1103 }
1104 val_range++;
1105 }
1106 else
1107 {
1108 val_range = val;
1109 if (nb_regs == 0)
1110 typeinfo_first = typeinfo;
1111 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1112 {
1113 set_first_syntax_error
1114 (_("type mismatch in vector register list"));
1115 error = TRUE;
1116 }
1117 }
1118 if (! error)
1119 for (i = val_range; i <= val; i++)
1120 {
1121 ret_val |= i << (5 * nb_regs);
1122 nb_regs++;
1123 }
1124 in_range = 0;
1125 }
1126 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1127
1128 skip_whitespace (str);
1129 if (*str != '}')
1130 {
1131 set_first_syntax_error (_("end of vector register list not found"));
1132 error = TRUE;
1133 }
1134 str++;
1135
1136 skip_whitespace (str);
1137
1138 if (expect_index)
1139 {
1140 if (skip_past_char (&str, '['))
1141 {
1142 expressionS exp;
1143
1144 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1145 if (exp.X_op != O_constant)
1146 {
1147 set_first_syntax_error (_("constant expression required."));
1148 error = TRUE;
1149 }
1150 if (! skip_past_char (&str, ']'))
1151 error = TRUE;
1152 else
1153 typeinfo_first.index = exp.X_add_number;
1154 }
1155 else
1156 {
1157 set_first_syntax_error (_("expected index"));
1158 error = TRUE;
1159 }
1160 }
1161
1162 if (nb_regs > 4)
1163 {
1164 set_first_syntax_error (_("too many registers in vector register list"));
1165 error = TRUE;
1166 }
1167 else if (nb_regs == 0)
1168 {
1169 set_first_syntax_error (_("empty vector register list"));
1170 error = TRUE;
1171 }
1172
1173 *ccp = str;
1174 if (! error)
1175 *vectype = typeinfo_first;
1176
1177 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1178 }
1179
1180 /* Directives: register aliases. */
1181
1182 static reg_entry *
1183 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1184 {
1185 reg_entry *new;
1186 const char *name;
1187
1188 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1189 {
1190 if (new->builtin)
1191 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1192 str);
1193
1194 /* Only warn about a redefinition if it's not defined as the
1195 same register. */
1196 else if (new->number != number || new->type != type)
1197 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1198
1199 return NULL;
1200 }
1201
1202 name = xstrdup (str);
1203 new = xmalloc (sizeof (reg_entry));
1204
1205 new->name = name;
1206 new->number = number;
1207 new->type = type;
1208 new->builtin = FALSE;
1209
1210 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1211 abort ();
1212
1213 return new;
1214 }
1215
1216 /* Look for the .req directive. This is of the form:
1217
1218 new_register_name .req existing_register_name
1219
1220 If we find one, or if it looks sufficiently like one that we want to
1221 handle any error here, return TRUE. Otherwise return FALSE. */
1222
1223 static bfd_boolean
1224 create_register_alias (char *newname, char *p)
1225 {
1226 const reg_entry *old;
1227 char *oldname, *nbuf;
1228 size_t nlen;
1229
1230 /* The input scrubber ensures that whitespace after the mnemonic is
1231 collapsed to single spaces. */
1232 oldname = p;
1233 if (strncmp (oldname, " .req ", 6) != 0)
1234 return FALSE;
1235
1236 oldname += 6;
1237 if (*oldname == '\0')
1238 return FALSE;
1239
1240 old = hash_find (aarch64_reg_hsh, oldname);
1241 if (!old)
1242 {
1243 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1244 return TRUE;
1245 }
1246
1247 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1248 the desired alias name, and p points to its end. If not, then
1249 the desired alias name is in the global original_case_string. */
1250 #ifdef TC_CASE_SENSITIVE
1251 nlen = p - newname;
1252 #else
1253 newname = original_case_string;
1254 nlen = strlen (newname);
1255 #endif
1256
1257 nbuf = alloca (nlen + 1);
1258 memcpy (nbuf, newname, nlen);
1259 nbuf[nlen] = '\0';
1260
1261 /* Create aliases under the new name as stated; an all-lowercase
1262 version of the new name; and an all-uppercase version of the new
1263 name. */
1264 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1265 {
1266 for (p = nbuf; *p; p++)
1267 *p = TOUPPER (*p);
1268
1269 if (strncmp (nbuf, newname, nlen))
1270 {
1271 /* If this attempt to create an additional alias fails, do not bother
1272 trying to create the all-lower case alias. We will fail and issue
1273 a second, duplicate error message. This situation arises when the
1274 programmer does something like:
1275 foo .req r0
1276 Foo .req r1
1277 The second .req creates the "Foo" alias but then fails to create
1278 the artificial FOO alias because it has already been created by the
1279 first .req. */
1280 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1281 return TRUE;
1282 }
1283
1284 for (p = nbuf; *p; p++)
1285 *p = TOLOWER (*p);
1286
1287 if (strncmp (nbuf, newname, nlen))
1288 insert_reg_alias (nbuf, old->number, old->type);
1289 }
1290
1291 return TRUE;
1292 }
1293
1294 /* Should never be called, as .req goes between the alias and the
1295 register name, not at the beginning of the line. */
1296 static void
1297 s_req (int a ATTRIBUTE_UNUSED)
1298 {
1299 as_bad (_("invalid syntax for .req directive"));
1300 }
1301
1302 /* The .unreq directive deletes an alias which was previously defined
1303 by .req. For example:
1304
1305 my_alias .req r11
1306 .unreq my_alias */
1307
1308 static void
1309 s_unreq (int a ATTRIBUTE_UNUSED)
1310 {
1311 char *name;
1312 char saved_char;
1313
1314 name = input_line_pointer;
1315
1316 while (*input_line_pointer != 0
1317 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1318 ++input_line_pointer;
1319
1320 saved_char = *input_line_pointer;
1321 *input_line_pointer = 0;
1322
1323 if (!*name)
1324 as_bad (_("invalid syntax for .unreq directive"));
1325 else
1326 {
1327 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1328
1329 if (!reg)
1330 as_bad (_("unknown register alias '%s'"), name);
1331 else if (reg->builtin)
1332 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1333 name);
1334 else
1335 {
1336 char *p;
1337 char *nbuf;
1338
1339 hash_delete (aarch64_reg_hsh, name, FALSE);
1340 free ((char *) reg->name);
1341 free (reg);
1342
1343 /* Also locate the all upper case and all lower case versions.
1344 Do not complain if we cannot find one or the other as it
1345 was probably deleted above. */
1346
1347 nbuf = strdup (name);
1348 for (p = nbuf; *p; p++)
1349 *p = TOUPPER (*p);
1350 reg = hash_find (aarch64_reg_hsh, nbuf);
1351 if (reg)
1352 {
1353 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1354 free ((char *) reg->name);
1355 free (reg);
1356 }
1357
1358 for (p = nbuf; *p; p++)
1359 *p = TOLOWER (*p);
1360 reg = hash_find (aarch64_reg_hsh, nbuf);
1361 if (reg)
1362 {
1363 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1364 free ((char *) reg->name);
1365 free (reg);
1366 }
1367
1368 free (nbuf);
1369 }
1370 }
1371
1372 *input_line_pointer = saved_char;
1373 demand_empty_rest_of_line ();
1374 }
1375
1376 /* Directives: Instruction set selection. */
1377
1378 #ifdef OBJ_ELF
1379 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1380 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1381 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1382 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
1383
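/* Example (illustrative): assembling

     .text
     add x0, x0, #1
     .word 0x12345678

   emits a "$x" mapping symbol at the start of the instruction bytes and
   a "$d" mapping symbol at the start of the literal word; this is what
   lets disassemblers switch between code and data views.  */
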
1384 /* Create a new mapping symbol for the transition to STATE. */
1385
1386 static void
1387 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1388 {
1389 symbolS *symbolP;
1390 const char *symname;
1391 int type;
1392
1393 switch (state)
1394 {
1395 case MAP_DATA:
1396 symname = "$d";
1397 type = BSF_NO_FLAGS;
1398 break;
1399 case MAP_INSN:
1400 symname = "$x";
1401 type = BSF_NO_FLAGS;
1402 break;
1403 default:
1404 abort ();
1405 }
1406
1407 symbolP = symbol_new (symname, now_seg, value, frag);
1408 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1409
1410 /* Save the mapping symbols for future reference. Also check that
1411 we do not place two mapping symbols at the same offset within a
1412 frag. We'll handle overlap between frags in
1413 check_mapping_symbols.
1414
1415 If .fill or other data filling directive generates zero sized data,
1416 the mapping symbol for the following code will have the same value
1417 as the one generated for the data filling directive. In this case,
1418 we replace the old symbol with the new one at the same address. */
1419 if (value == 0)
1420 {
1421 if (frag->tc_frag_data.first_map != NULL)
1422 {
1423 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1424 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1425 &symbol_lastP);
1426 }
1427 frag->tc_frag_data.first_map = symbolP;
1428 }
1429 if (frag->tc_frag_data.last_map != NULL)
1430 {
1431 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1432 S_GET_VALUE (symbolP));
1433 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1434 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1435 &symbol_lastP);
1436 }
1437 frag->tc_frag_data.last_map = symbolP;
1438 }
1439
1440 /* We must sometimes convert a region marked as code to data during
1441 code alignment, if an odd number of bytes have to be padded. The
1442 code mapping symbol is pushed to an aligned address. */
1443
1444 static void
1445 insert_data_mapping_symbol (enum mstate state,
1446 valueT value, fragS * frag, offsetT bytes)
1447 {
1448 /* If there was already a mapping symbol, remove it. */
1449 if (frag->tc_frag_data.last_map != NULL
1450 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1451 frag->fr_address + value)
1452 {
1453 symbolS *symp = frag->tc_frag_data.last_map;
1454
1455 if (value == 0)
1456 {
1457 know (frag->tc_frag_data.first_map == symp);
1458 frag->tc_frag_data.first_map = NULL;
1459 }
1460 frag->tc_frag_data.last_map = NULL;
1461 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1462 }
1463
1464 make_mapping_symbol (MAP_DATA, value, frag);
1465 make_mapping_symbol (state, value + bytes, frag);
1466 }
1467
1468 static void mapping_state_2 (enum mstate state, int max_chars);
1469
1470 /* Set the mapping state to STATE. Only call this when about to
1471 emit some STATE bytes to the file. */
1472
1473 void
1474 mapping_state (enum mstate state)
1475 {
1476 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1477
1478 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1479
1480 if (mapstate == state)
1481 /* The mapping symbol has already been emitted.
1482 There is nothing else to do. */
1483 return;
1484 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
1485 /* This case will be evaluated later in the next else. */
1486 return;
1487 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1488 {
1489 /* Only add the symbol if the offset is > 0:
1490 if we're at the first frag, check its size > 0;
1491 if we're not at the first frag, then for sure
1492 the offset is > 0. */
1493 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1494 const int add_symbol = (frag_now != frag_first)
1495 || (frag_now_fix () > 0);
1496
1497 if (add_symbol)
1498 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1499 }
1500
1501 mapping_state_2 (state, 0);
1502 #undef TRANSITION
1503 }
1504
1505 /* Same as mapping_state, but MAX_CHARS bytes have already been
1506 allocated. Put the mapping symbol that far back. */
1507
1508 static void
1509 mapping_state_2 (enum mstate state, int max_chars)
1510 {
1511 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1512
1513 if (!SEG_NORMAL (now_seg))
1514 return;
1515
1516 if (mapstate == state)
1517 /* The mapping symbol has already been emitted.
1518 There is nothing else to do. */
1519 return;
1520
1521 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1522 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1523 }
1524 #else
1525 #define mapping_state(x) /* nothing */
1526 #define mapping_state_2(x, y) /* nothing */
1527 #endif
1528
1529 /* Directives: sectioning and alignment. */
1530
1531 static void
1532 s_bss (int ignore ATTRIBUTE_UNUSED)
1533 {
1534 /* We don't support putting frags in the BSS segment; we fake it by
1535 marking in_bss, then looking at s_skip for clues. */
1536 subseg_set (bss_section, 0);
1537 demand_empty_rest_of_line ();
1538 mapping_state (MAP_DATA);
1539 }
1540
1541 static void
1542 s_even (int ignore ATTRIBUTE_UNUSED)
1543 {
1544 /* Never make frag if expect extra pass. */
1545 if (!need_pass_2)
1546 frag_align (1, 0, 0);
1547
1548 record_alignment (now_seg, 1);
1549
1550 demand_empty_rest_of_line ();
1551 }
1552
1553 /* Directives: Literal pools. */
1554
1555 static literal_pool *
1556 find_literal_pool (int size)
1557 {
1558 literal_pool *pool;
1559
1560 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1561 {
1562 if (pool->section == now_seg
1563 && pool->sub_section == now_subseg && pool->size == size)
1564 break;
1565 }
1566
1567 return pool;
1568 }
1569
1570 static literal_pool *
1571 find_or_make_literal_pool (int size)
1572 {
1573 /* Next literal pool ID number. */
1574 static unsigned int latest_pool_num = 1;
1575 literal_pool *pool;
1576
1577 pool = find_literal_pool (size);
1578
1579 if (pool == NULL)
1580 {
1581 /* Create a new pool. */
1582 pool = xmalloc (sizeof (*pool));
1583 if (!pool)
1584 return NULL;
1585
1586 /* Currently we always put the literal pool in the current text
1587 section. If we were generating "small" model code where we
1588 knew that all code and initialised data was within 1MB then
1589 we could output literals to mergeable, read-only data
1590 sections. */
1591
1592 pool->next_free_entry = 0;
1593 pool->section = now_seg;
1594 pool->sub_section = now_subseg;
1595 pool->size = size;
1596 pool->next = list_of_pools;
1597 pool->symbol = NULL;
1598
1599 /* Add it to the list. */
1600 list_of_pools = pool;
1601 }
1602
1603 /* New pools, and emptied pools, will have a NULL symbol. */
1604 if (pool->symbol == NULL)
1605 {
1606 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1607 (valueT) 0, &zero_address_frag);
1608 pool->id = latest_pool_num++;
1609 }
1610
1611 /* Done. */
1612 return pool;
1613 }
1614
1615 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1616 Return TRUE on success, otherwise return FALSE. */
1617 static bfd_boolean
1618 add_to_lit_pool (expressionS *exp, int size)
1619 {
1620 literal_pool *pool;
1621 unsigned int entry;
1622
1623 pool = find_or_make_literal_pool (size);
1624
1625 /* Check if this literal value is already in the pool. */
1626 for (entry = 0; entry < pool->next_free_entry; entry++)
1627 {
1628 expressionS * litexp = & pool->literals[entry].exp;
1629
1630 if ((litexp->X_op == exp->X_op)
1631 && (exp->X_op == O_constant)
1632 && (litexp->X_add_number == exp->X_add_number)
1633 && (litexp->X_unsigned == exp->X_unsigned))
1634 break;
1635
1636 if ((litexp->X_op == exp->X_op)
1637 && (exp->X_op == O_symbol)
1638 && (litexp->X_add_number == exp->X_add_number)
1639 && (litexp->X_add_symbol == exp->X_add_symbol)
1640 && (litexp->X_op_symbol == exp->X_op_symbol))
1641 break;
1642 }
1643
1644 /* Do we need to create a new entry? */
1645 if (entry == pool->next_free_entry)
1646 {
1647 if (entry >= MAX_LITERAL_POOL_SIZE)
1648 {
1649 set_syntax_error (_("literal pool overflow"));
1650 return FALSE;
1651 }
1652
1653 pool->literals[entry].exp = *exp;
1654 pool->next_free_entry += 1;
1655 if (exp->X_op == O_big)
1656 {
1657 /* PR 16688: Bignums are held in a single global array. We must
1658 copy and preserve that value now, before it is overwritten. */
1659 pool->literals[entry].bignum = xmalloc (CHARS_PER_LITTLENUM * exp->X_add_number);
1660 memcpy (pool->literals[entry].bignum, generic_bignum,
1661 CHARS_PER_LITTLENUM * exp->X_add_number);
1662 }
1663 else
1664 pool->literals[entry].bignum = NULL;
1665 }
1666
1667 exp->X_op = O_symbol;
1668 exp->X_add_number = ((int) entry) * size;
1669 exp->X_add_symbol = pool->symbol;
1670
1671 return TRUE;
1672 }
1673
1674 /* Can't use symbol_new here, so have to create a symbol and then at
1675 a later date assign it a value. That's what these functions do. */
1676
1677 static void
1678 symbol_locate (symbolS * symbolP,
1679 const char *name,/* It is copied, the caller can modify. */
1680 segT segment, /* Segment identifier (SEG_<something>). */
1681 valueT valu, /* Symbol value. */
1682 fragS * frag) /* Associated fragment. */
1683 {
1684 size_t name_length;
1685 char *preserved_copy_of_name;
1686
1687 name_length = strlen (name) + 1; /* +1 for \0. */
1688 obstack_grow (&notes, name, name_length);
1689 preserved_copy_of_name = obstack_finish (&notes);
1690
1691 #ifdef tc_canonicalize_symbol_name
1692 preserved_copy_of_name =
1693 tc_canonicalize_symbol_name (preserved_copy_of_name);
1694 #endif
1695
1696 S_SET_NAME (symbolP, preserved_copy_of_name);
1697
1698 S_SET_SEGMENT (symbolP, segment);
1699 S_SET_VALUE (symbolP, valu);
1700 symbol_clear_list_pointers (symbolP);
1701
1702 symbol_set_frag (symbolP, frag);
1703
1704 /* Link to end of symbol chain. */
1705 {
1706 extern int symbol_table_frozen;
1707
1708 if (symbol_table_frozen)
1709 abort ();
1710 }
1711
1712 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1713
1714 obj_symbol_new_hook (symbolP);
1715
1716 #ifdef tc_symbol_new_hook
1717 tc_symbol_new_hook (symbolP);
1718 #endif
1719
1720 #ifdef DEBUG_SYMS
1721 verify_symbol_chain (symbol_rootP, symbol_lastP);
1722 #endif /* DEBUG_SYMS */
1723 }
1724
1725
1726 static void
1727 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1728 {
1729 unsigned int entry;
1730 literal_pool *pool;
1731 char sym_name[20];
1732 int align;
1733
1734 for (align = 2; align <= 4; align++)
1735 {
1736 int size = 1 << align;
1737
1738 pool = find_literal_pool (size);
1739 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1740 continue;
1741
1742 mapping_state (MAP_DATA);
1743
1744 /* Align the pool to the size of its entries.
1745 Only make a frag if we have to. */
1746 if (!need_pass_2)
1747 frag_align (align, 0, 0);
1748
1749 record_alignment (now_seg, align);
1750
1751 sprintf (sym_name, "$$lit_\002%x", pool->id);
1752
1753 symbol_locate (pool->symbol, sym_name, now_seg,
1754 (valueT) frag_now_fix (), frag_now);
1755 symbol_table_insert (pool->symbol);
1756
1757 for (entry = 0; entry < pool->next_free_entry; entry++)
1758 {
1759 expressionS * exp = & pool->literals[entry].exp;
1760
1761 if (exp->X_op == O_big)
1762 {
1763 /* PR 16688: Restore the global bignum value. */
1764 gas_assert (pool->literals[entry].bignum != NULL);
1765 memcpy (generic_bignum, pool->literals[entry].bignum,
1766 CHARS_PER_LITTLENUM * exp->X_add_number);
1767 }
1768
1769 /* First output the expression in the instruction to the pool. */
1770 emit_expr (exp, size); /* .word|.xword */
1771
1772 if (exp->X_op == O_big)
1773 {
1774 free (pool->literals[entry].bignum);
1775 pool->literals[entry].bignum = NULL;
1776 }
1777 }
1778
1779 /* Mark the pool as empty. */
1780 pool->next_free_entry = 0;
1781 pool->symbol = NULL;
1782 }
1783 }
1784
1785 #ifdef OBJ_ELF
1786 /* Forward declarations for functions below, in the MD interface
1787 section. */
1788 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1789 static struct reloc_table_entry * find_reloc_table_entry (char **);
1790
1791 /* Directives: Data. */
1792 /* N.B. the support for relocation suffix in this directive needs to be
1793 implemented properly. */
1794
1795 static void
1796 s_aarch64_elf_cons (int nbytes)
1797 {
1798 expressionS exp;
1799
1800 #ifdef md_flush_pending_output
1801 md_flush_pending_output ();
1802 #endif
1803
1804 if (is_it_end_of_statement ())
1805 {
1806 demand_empty_rest_of_line ();
1807 return;
1808 }
1809
1810 #ifdef md_cons_align
1811 md_cons_align (nbytes);
1812 #endif
1813
1814 mapping_state (MAP_DATA);
1815 do
1816 {
1817 struct reloc_table_entry *reloc;
1818
1819 expression (&exp);
1820
1821 if (exp.X_op != O_symbol)
1822 emit_expr (&exp, (unsigned int) nbytes);
1823 else
1824 {
1825 skip_past_char (&input_line_pointer, '#');
1826 if (skip_past_char (&input_line_pointer, ':'))
1827 {
1828 reloc = find_reloc_table_entry (&input_line_pointer);
1829 if (reloc == NULL)
1830 as_bad (_("unrecognized relocation suffix"));
1831 else
1832 as_bad (_("unimplemented relocation suffix"));
1833 ignore_rest_of_line ();
1834 return;
1835 }
1836 else
1837 emit_expr (&exp, (unsigned int) nbytes);
1838 }
1839 }
1840 while (*input_line_pointer++ == ',');
1841
1842 /* Put terminator back into stream. */
1843 input_line_pointer--;
1844 demand_empty_rest_of_line ();
1845 }
1846
1847 #endif /* OBJ_ELF */
1848
1849 /* Output a 32-bit word, but mark as an instruction. */
1850
1851 static void
1852 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1853 {
1854 expressionS exp;
1855
1856 #ifdef md_flush_pending_output
1857 md_flush_pending_output ();
1858 #endif
1859
1860 if (is_it_end_of_statement ())
1861 {
1862 demand_empty_rest_of_line ();
1863 return;
1864 }
1865
1866 if (!need_pass_2)
1867 frag_align_code (2, 0);
1868 #ifdef OBJ_ELF
1869 mapping_state (MAP_INSN);
1870 #endif
1871
1872 do
1873 {
1874 expression (&exp);
1875 if (exp.X_op != O_constant)
1876 {
1877 as_bad (_("constant expression required"));
1878 ignore_rest_of_line ();
1879 return;
1880 }
1881
1882 if (target_big_endian)
1883 {
1884 unsigned int val = exp.X_add_number;
1885 exp.X_add_number = SWAP_32 (val);
1886 }
1887 emit_expr (&exp, 4);
1888 }
1889 while (*input_line_pointer++ == ',');
1890
1891 /* Put terminator back into stream. */
1892 input_line_pointer--;
1893 demand_empty_rest_of_line ();
1894 }
1895
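/* Usage example (illustrative): the directive emits its operands as
   instruction words rather than data, so

     .inst 0xd503201f        // encoding of NOP

   produces four bytes marked with an "$x" mapping symbol on ELF targets
   and byte-swapped as needed when assembling for big-endian.  */
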
1896 #ifdef OBJ_ELF
1897 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1898
1899 static void
1900 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1901 {
1902 expressionS exp;
1903
1904 /* Since we're just labelling the code, there's no need to define a
1905 mapping symbol. */
1906 expression (&exp);
1907 /* Make sure there is enough room in this frag for the following
1908 blr. This trick only works if the blr follows immediately after
1909 the .tlsdesccall directive. */
1910 frag_grow (4);
1911 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1912 BFD_RELOC_AARCH64_TLSDESC_CALL);
1913
1914 demand_empty_rest_of_line ();
1915 }
1916 #endif /* OBJ_ELF */
1917
1918 static void s_aarch64_arch (int);
1919 static void s_aarch64_cpu (int);
1920
1921 /* This table describes all the machine specific pseudo-ops the assembler
1922 has to support. The fields are:
1923 pseudo-op name without dot
1924 function to call to execute this pseudo-op
1925 Integer arg to pass to the function. */
1926
1927 const pseudo_typeS md_pseudo_table[] = {
1928 /* Never called because '.req' does not start a line. */
1929 {"req", s_req, 0},
1930 {"unreq", s_unreq, 0},
1931 {"bss", s_bss, 0},
1932 {"even", s_even, 0},
1933 {"ltorg", s_ltorg, 0},
1934 {"pool", s_ltorg, 0},
1935 {"cpu", s_aarch64_cpu, 0},
1936 {"arch", s_aarch64_arch, 0},
1937 {"inst", s_aarch64_inst, 0},
1938 #ifdef OBJ_ELF
1939 {"tlsdesccall", s_tlsdesccall, 0},
1940 {"word", s_aarch64_elf_cons, 4},
1941 {"long", s_aarch64_elf_cons, 4},
1942 {"xword", s_aarch64_elf_cons, 8},
1943 {"dword", s_aarch64_elf_cons, 8},
1944 #endif
1945 {0, 0, 0}
1946 };
1947 \f
1948
1949 /* Check whether STR points to a register name followed by a comma or the
1950 end of line; REG_TYPE indicates which register types are checked
1951 against. Return TRUE if STR is such a register name; otherwise return
1952 FALSE. The function does not intend to produce any diagnostics, but since
1953 the register parser aarch64_reg_parse, which is called by this function,
1954 does produce diagnostics, we call clear_error to clear any diagnostics
1955 that may be generated by aarch64_reg_parse.
1956 Also, the function returns FALSE directly if there is any user error
1957 present at the function entry. This prevents the existing diagnostics
1958 state from being spoiled.
1959 The function currently serves parse_constant_immediate and
1960 parse_big_immediate only. */
1961 static bfd_boolean
1962 reg_name_p (char *str, aarch64_reg_type reg_type)
1963 {
1964 int reg;
1965
1966 /* Prevent the diagnostics state from being spoiled. */
1967 if (error_p ())
1968 return FALSE;
1969
1970 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1971
1972 /* Clear the parsing error that may be set by the reg parser. */
1973 clear_error ();
1974
1975 if (reg == PARSE_FAIL)
1976 return FALSE;
1977
1978 skip_whitespace (str);
1979 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1980 return TRUE;
1981
1982 return FALSE;
1983 }
1984
1985 /* Parser functions used exclusively in instruction operands. */
1986
1987 /* Parse an immediate expression which may not be constant.
1988
1989 To prevent the expression parser from pushing a register name
1990 into the symbol table as an undefined symbol, firstly a check is
1991 done to find out whether STR is a valid register name followed
1992 by a comma or the end of line. Return FALSE if STR is such a
1993 string. */
1994
1995 static bfd_boolean
1996 parse_immediate_expression (char **str, expressionS *exp)
1997 {
1998 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1999 {
2000 set_recoverable_error (_("immediate operand required"));
2001 return FALSE;
2002 }
2003
2004 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2005
2006 if (exp->X_op == O_absent)
2007 {
2008 set_fatal_syntax_error (_("missing immediate expression"));
2009 return FALSE;
2010 }
2011
2012 return TRUE;
2013 }
2014
2015 /* Constant immediate-value read function for use in insn parsing.
2016 STR points to the beginning of the immediate (with the optional
2017 leading #); *VAL receives the value.
2018
2019 Return TRUE on success; otherwise return FALSE. */
2020
2021 static bfd_boolean
2022 parse_constant_immediate (char **str, int64_t * val)
2023 {
2024 expressionS exp;
2025
2026 if (! parse_immediate_expression (str, &exp))
2027 return FALSE;
2028
2029 if (exp.X_op != O_constant)
2030 {
2031 set_syntax_error (_("constant expression required"));
2032 return FALSE;
2033 }
2034
2035 *val = exp.X_add_number;
2036 return TRUE;
2037 }
2038
2039 static uint32_t
2040 encode_imm_float_bits (uint32_t imm)
2041 {
2042 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2043 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2044 }
2045
2046 /* Return TRUE if the single-precision floating-point value encoded in IMM
2047 can be expressed in the AArch64 8-bit signed floating-point format with
2048 3-bit exponent and normalized 4 bits of precision; in other words, the
2049 floating-point value must be expressible as
2050 (+/-) n / 16 * power (2, r)
2051 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
2052
2053 static bfd_boolean
2054 aarch64_imm_float_p (uint32_t imm)
2055 {
2056 /* If a single-precision floating-point value has the following bit
2057 pattern, it can be expressed in the AArch64 8-bit floating-point
2058 format:
2059
2060 3 32222222 2221111111111
2061 1 09876543 21098765432109876543210
2062 n Eeeeeexx xxxx0000000000000000000
2063
2064 where n, e and each x are either 0 or 1 independently, with
2065 E == ~ e. */
2066
2067 uint32_t pattern;
2068
2069 /* Prepare the pattern for 'Eeeeee'. */
2070 if (((imm >> 30) & 0x1) == 0)
2071 pattern = 0x3e000000;
2072 else
2073 pattern = 0x40000000;
2074
2075 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2076 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2077 }
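/* A worked example for illustration (the value 2.0f is an arbitrary
   choice, not taken from the original source): 2.0f is 0x40000000 in
   IEEE754 single precision.  Bit 30 is set, so the expected pattern is
   0x40000000; the low 19 bits are zero and bits 25-30 match the pattern,
   so aarch64_imm_float_p accepts it, and encode_imm_float_bits
   (0x40000000) produces the 8-bit immediate 0x00.  */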
2078
2079 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2080
2081 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2082 8-bit signed floating-point format with 3-bit exponent and normalized 4
2083 bits of precision (i.e. can be used in an FMOV instruction); return the
2084 equivalent single-precision encoding in *FPWORD.
2085
2086 Otherwise return FALSE. */
2087
2088 static bfd_boolean
2089 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2090 {
2091 /* If a double-precision floating-point value has the following bit
2092 pattern, it can be expressed in the AArch64 8-bit floating-point
2093 format:
2094
2095 6 66655555555 554444444...21111111111
2096 3 21098765432 109876543...098765432109876543210
2097 n Eeeeeeeeexx xxxx00000...000000000000000000000
2098
2099 where n, e and each x are either 0 or 1 independently, with
2100 E == ~ e. */
2101
2102 uint32_t pattern;
2103 uint32_t high32 = imm >> 32;
2104
2105 /* Lower 32 bits need to be 0s. */
2106 if ((imm & 0xffffffff) != 0)
2107 return FALSE;
2108
2109 /* Prepare the pattern for 'Eeeeeeeee'. */
2110 if (((high32 >> 30) & 0x1) == 0)
2111 pattern = 0x3fc00000;
2112 else
2113 pattern = 0x40000000;
2114
2115 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2116 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2117 {
2118 /* Convert to the single-precision encoding.
2119 i.e. convert
2120 n Eeeeeeeeexx xxxx00000...000000000000000000000
2121 to
2122 n Eeeeeexx xxxx0000000000000000000. */
2123 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2124 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2125 return TRUE;
2126 }
2127 else
2128 return FALSE;
2129 }
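/* A worked example for illustration (2.0 is an arbitrary choice): 2.0 in
   IEEE754 double precision is 0x4000000000000000.  The low 32 bits are
   zero and the high word 0x40000000 matches the expected pattern, so the
   value is FMOV-able; *FPWORD receives the equivalent single-precision
   encoding 0x40000000.  */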
2130
2131 /* Parse a floating-point immediate. Return TRUE on success and return the
2132 value in *IMMED in the format of IEEE754 single-precision encoding.
2133 *CCP points to the start of the string; DP_P is TRUE when the immediate
2134 is expected to be in double-precision (N.B. this only matters when
2135 hexadecimal representation is involved).
2136
2137 N.B. 0.0 is accepted by this function. */
2138
2139 static bfd_boolean
2140 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2141 {
2142 char *str = *ccp;
2143 char *fpnum;
2144 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2145 int found_fpchar = 0;
2146 int64_t val = 0;
2147 unsigned fpword = 0;
2148 bfd_boolean hex_p = FALSE;
2149
2150 skip_past_char (&str, '#');
2151
2152 fpnum = str;
2153 skip_whitespace (fpnum);
2154
2155 if (strncmp (fpnum, "0x", 2) == 0)
2156 {
2157 /* Support the hexadecimal representation of the IEEE754 encoding.
2158 Double-precision is expected when DP_P is TRUE, otherwise the
2159 representation should be in single-precision. */
2160 if (! parse_constant_immediate (&str, &val))
2161 goto invalid_fp;
2162
2163 if (dp_p)
2164 {
2165 if (! aarch64_double_precision_fmovable (val, &fpword))
2166 goto invalid_fp;
2167 }
2168 else if ((uint64_t) val > 0xffffffff)
2169 goto invalid_fp;
2170 else
2171 fpword = val;
2172
2173 hex_p = TRUE;
2174 }
2175 else
2176 {
2177 /* We must not accidentally parse an integer as a floating-point number.
2178 Make sure that the value we parse is not an integer by checking for
2179 special characters '.' or 'e'. */
2180 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2181 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2182 {
2183 found_fpchar = 1;
2184 break;
2185 }
2186
2187 if (!found_fpchar)
2188 return FALSE;
2189 }
2190
2191 if (! hex_p)
2192 {
2193 int i;
2194
2195 if ((str = atof_ieee (str, 's', words)) == NULL)
2196 goto invalid_fp;
2197
2198 /* Our FP word must be 32 bits (single-precision FP). */
2199 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2200 {
2201 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2202 fpword |= words[i];
2203 }
2204 }
2205
2206 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2207 {
2208 *immed = fpword;
2209 *ccp = str;
2210 return TRUE;
2211 }
2212
2213 invalid_fp:
2214 set_fatal_syntax_error (_("invalid floating-point constant"));
2215 return FALSE;
2216 }
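/* Illustrative inputs accepted by parse_aarch64_imm_float (the particular
   values are arbitrary examples):
     #2.0                    decimal form, converted via atof_ieee
     #0x40000000             hex IEEE754 single-precision encoding
     #0x4000000000000000     hex double-precision encoding (DP_P is TRUE)
   Each of these leaves the single-precision word 0x40000000 in *IMMED.  */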
2217
2218 /* Less-generic immediate-value read function with the possibility of loading
2219 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2220 instructions.
2221
2222 To prevent the expression parser from pushing a register name into the
2223 symbol table as an undefined symbol, a check is firstly done to find
2224 out whether STR is a valid register name followed by a comma or the end
2225 of line. Return FALSE if STR is such a register. */
2226
2227 static bfd_boolean
2228 parse_big_immediate (char **str, int64_t *imm)
2229 {
2230 char *ptr = *str;
2231
2232 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2233 {
2234 set_syntax_error (_("immediate operand required"));
2235 return FALSE;
2236 }
2237
2238 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2239
2240 if (inst.reloc.exp.X_op == O_constant)
2241 *imm = inst.reloc.exp.X_add_number;
2242
2243 *str = ptr;
2244
2245 return TRUE;
2246 }
2247
2248 /* Record in RELOC that the operand described by *OPERAND needs a GAS
2249 internal fixup; if NEED_LIBOPCODES_P is non-zero, the fixup will need
2250 assistance from libopcodes. */
2251
2252 static inline void
2253 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2254 const aarch64_opnd_info *operand,
2255 int need_libopcodes_p)
2256 {
2257 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2258 reloc->opnd = operand->type;
2259 if (need_libopcodes_p)
2260 reloc->need_libopcodes_p = 1;
2261 }
2262
2263 /* Return TRUE if the instruction needs to be fixed up later internally by
2264 GAS; otherwise return FALSE. */
2265
2266 static inline bfd_boolean
2267 aarch64_gas_internal_fixup_p (void)
2268 {
2269 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2270 }
2271
2272 /* Assign the immediate value to the relevant field in *OPERAND if
2273 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2274 needs an internal fixup in a later stage.
2275 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2276 IMM.VALUE that may get assigned with the constant. */
2277 static inline void
2278 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2279 aarch64_opnd_info *operand,
2280 int addr_off_p,
2281 int need_libopcodes_p,
2282 int skip_p)
2283 {
2284 if (reloc->exp.X_op == O_constant)
2285 {
2286 if (addr_off_p)
2287 operand->addr.offset.imm = reloc->exp.X_add_number;
2288 else
2289 operand->imm.value = reloc->exp.X_add_number;
2290 reloc->type = BFD_RELOC_UNUSED;
2291 }
2292 else
2293 {
2294 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2295 /* Tell libopcodes whether or not to ignore this operand. This is helpful
2296 when one of the operands needs to be fixed up later but we need
2297 libopcodes to check the other operands. */
2298 operand->skip = skip_p;
2299 }
2300 }
2301
2302 /* Relocation modifiers. Each entry in the table contains the textual
2303 name for the relocation which may be placed before a symbol used as
2304 a load/store offset, or add immediate. It must be surrounded by a
2305 leading and trailing colon, for example:
2306
2307 ldr x0, [x1, #:rello:varsym]
2308 add x0, x1, #:rello:varsym */
2309
2310 struct reloc_table_entry
2311 {
2312 const char *name;
2313 int pc_rel;
2314 bfd_reloc_code_real_type adrp_type;
2315 bfd_reloc_code_real_type movw_type;
2316 bfd_reloc_code_real_type add_type;
2317 bfd_reloc_code_real_type ldst_type;
2318 };
2319
2320 static struct reloc_table_entry reloc_table[] = {
2321 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2322 {"lo12", 0,
2323 0,
2324 0,
2325 BFD_RELOC_AARCH64_ADD_LO12,
2326 BFD_RELOC_AARCH64_LDST_LO12},
2327
2328 /* Higher 21 bits of pc-relative page offset: ADRP */
2329 {"pg_hi21", 1,
2330 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2331 0,
2332 0,
2333 0},
2334
2335 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2336 {"pg_hi21_nc", 1,
2337 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2338 0,
2339 0,
2340 0},
2341
2342 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2343 {"abs_g0", 0,
2344 0,
2345 BFD_RELOC_AARCH64_MOVW_G0,
2346 0,
2347 0},
2348
2349 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2350 {"abs_g0_s", 0,
2351 0,
2352 BFD_RELOC_AARCH64_MOVW_G0_S,
2353 0,
2354 0},
2355
2356 /* Less significant bits 0-15 of address/value: MOVK, no check */
2357 {"abs_g0_nc", 0,
2358 0,
2359 BFD_RELOC_AARCH64_MOVW_G0_NC,
2360 0,
2361 0},
2362
2363 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2364 {"abs_g1", 0,
2365 0,
2366 BFD_RELOC_AARCH64_MOVW_G1,
2367 0,
2368 0},
2369
2370 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2371 {"abs_g1_s", 0,
2372 0,
2373 BFD_RELOC_AARCH64_MOVW_G1_S,
2374 0,
2375 0},
2376
2377 /* Less significant bits 16-31 of address/value: MOVK, no check */
2378 {"abs_g1_nc", 0,
2379 0,
2380 BFD_RELOC_AARCH64_MOVW_G1_NC,
2381 0,
2382 0},
2383
2384 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2385 {"abs_g2", 0,
2386 0,
2387 BFD_RELOC_AARCH64_MOVW_G2,
2388 0,
2389 0},
2390
2391 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2392 {"abs_g2_s", 0,
2393 0,
2394 BFD_RELOC_AARCH64_MOVW_G2_S,
2395 0,
2396 0},
2397
2398 /* Less significant bits 32-47 of address/value: MOVK, no check */
2399 {"abs_g2_nc", 0,
2400 0,
2401 BFD_RELOC_AARCH64_MOVW_G2_NC,
2402 0,
2403 0},
2404
2405 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2406 {"abs_g3", 0,
2407 0,
2408 BFD_RELOC_AARCH64_MOVW_G3,
2409 0,
2410 0},
2411
2412 /* Get to the page containing GOT entry for a symbol. */
2413 {"got", 1,
2414 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2415 0,
2416 0,
2417 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2418
2419 /* 12 bit offset into the page containing GOT entry for that symbol. */
2420 {"got_lo12", 0,
2421 0,
2422 0,
2423 0,
2424 BFD_RELOC_AARCH64_LD_GOT_LO12_NC},
2425
2426 /* Get to the page containing GOT TLS entry for a symbol */
2427 {"tlsgd", 0,
2428 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2429 0,
2430 0,
2431 0},
2432
2433 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2434 {"tlsgd_lo12", 0,
2435 0,
2436 0,
2437 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2438 0},
2439
2440 /* Get to the page containing GOT TLS entry for a symbol */
2441 {"tlsdesc", 0,
2442 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2443 0,
2444 0,
2445 0},
2446
2447 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2448 {"tlsdesc_lo12", 0,
2449 0,
2450 0,
2451 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2452 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC},
2453
2454 /* Get to the page containing GOT TLS entry for a symbol */
2455 {"gottprel", 0,
2456 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2457 0,
2458 0,
2459 0},
2460
2461 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2462 {"gottprel_lo12", 0,
2463 0,
2464 0,
2465 0,
2466 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC},
2467
2468 /* Get tp offset for a symbol. */
2469 {"tprel", 0,
2470 0,
2471 0,
2472 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2473 0},
2474
2475 /* Get tp offset for a symbol. */
2476 {"tprel_lo12", 0,
2477 0,
2478 0,
2479 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2480 0},
2481
2482 /* Get tp offset for a symbol. */
2483 {"tprel_hi12", 0,
2484 0,
2485 0,
2486 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2487 0},
2488
2489 /* Get tp offset for a symbol. */
2490 {"tprel_lo12_nc", 0,
2491 0,
2492 0,
2493 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2494 0},
2495
2496 /* Most significant bits 32-47 of address/value: MOVZ. */
2497 {"tprel_g2", 0,
2498 0,
2499 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2500 0,
2501 0},
2502
2503 /* Most significant bits 16-31 of address/value: MOVZ. */
2504 {"tprel_g1", 0,
2505 0,
2506 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2507 0,
2508 0},
2509
2510 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2511 {"tprel_g1_nc", 0,
2512 0,
2513 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2514 0,
2515 0},
2516
2517 /* Most significant bits 0-15 of address/value: MOVZ. */
2518 {"tprel_g0", 0,
2519 0,
2520 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2521 0,
2522 0},
2523
2524 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2525 {"tprel_g0_nc", 0,
2526 0,
2527 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2528 0,
2529 0},
2530 };
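/* Illustrative uses of the modifiers above (the symbol name and register
   choices are arbitrary examples):

     adrp x0, :got:var              // page of the GOT entry for var
     ldr  x0, [x0, #:got_lo12:var]  // 12-bit offset within that page

     movz x1, #:abs_g1:var          // bits 16-31 of the address
     movk x1, #:abs_g0_nc:var       // bits 0-15, no overflow check  */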
2531
2532 /* Given the address of a pointer pointing to the textual name of a
2533 relocation as may appear in assembler source, attempt to find its
2534 details in reloc_table. The pointer will be updated to the character
2535 after the trailing colon. On failure, NULL will be returned;
2536 otherwise return the reloc_table_entry. */
2537
2538 static struct reloc_table_entry *
2539 find_reloc_table_entry (char **str)
2540 {
2541 unsigned int i;
2542 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2543 {
2544 int length = strlen (reloc_table[i].name);
2545
2546 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2547 && (*str)[length] == ':')
2548 {
2549 *str += (length + 1);
2550 return &reloc_table[i];
2551 }
2552 }
2553
2554 return NULL;
2555 }
2556
2557 /* Mode argument to parse_shift and parse_shifter_operand. */
2558 enum parse_shift_mode
2559 {
2560 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2561 "#imm{,lsl #n}" */
2562 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2563 "#imm" */
2564 SHIFTED_LSL, /* bare "lsl #n" */
2565 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2566 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2567 };
2568
2569 /* Parse a <shift> operator on an AArch64 data processing instruction.
2570 Return TRUE on success; otherwise return FALSE. */
2571 static bfd_boolean
2572 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2573 {
2574 const struct aarch64_name_value_pair *shift_op;
2575 enum aarch64_modifier_kind kind;
2576 expressionS exp;
2577 int exp_has_prefix;
2578 char *s = *str;
2579 char *p = s;
2580
2581 for (p = *str; ISALPHA (*p); p++)
2582 ;
2583
2584 if (p == *str)
2585 {
2586 set_syntax_error (_("shift expression expected"));
2587 return FALSE;
2588 }
2589
2590 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2591
2592 if (shift_op == NULL)
2593 {
2594 set_syntax_error (_("shift operator expected"));
2595 return FALSE;
2596 }
2597
2598 kind = aarch64_get_operand_modifier (shift_op);
2599
2600 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2601 {
2602 set_syntax_error (_("invalid use of 'MSL'"));
2603 return FALSE;
2604 }
2605
2606 switch (mode)
2607 {
2608 case SHIFTED_LOGIC_IMM:
2609 if (aarch64_extend_operator_p (kind) == TRUE)
2610 {
2611 set_syntax_error (_("extending shift is not permitted"));
2612 return FALSE;
2613 }
2614 break;
2615
2616 case SHIFTED_ARITH_IMM:
2617 if (kind == AARCH64_MOD_ROR)
2618 {
2619 set_syntax_error (_("'ROR' shift is not permitted"));
2620 return FALSE;
2621 }
2622 break;
2623
2624 case SHIFTED_LSL:
2625 if (kind != AARCH64_MOD_LSL)
2626 {
2627 set_syntax_error (_("only 'LSL' shift is permitted"));
2628 return FALSE;
2629 }
2630 break;
2631
2632 case SHIFTED_REG_OFFSET:
2633 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2634 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2635 {
2636 set_fatal_syntax_error
2637 (_("invalid shift for the register offset addressing mode"));
2638 return FALSE;
2639 }
2640 break;
2641
2642 case SHIFTED_LSL_MSL:
2643 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2644 {
2645 set_syntax_error (_("invalid shift operator"));
2646 return FALSE;
2647 }
2648 break;
2649
2650 default:
2651 abort ();
2652 }
2653
2654 /* Whitespace can appear here if the next thing is a bare digit. */
2655 skip_whitespace (p);
2656
2657 /* Parse shift amount. */
2658 exp_has_prefix = 0;
2659 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2660 exp.X_op = O_absent;
2661 else
2662 {
2663 if (is_immediate_prefix (*p))
2664 {
2665 p++;
2666 exp_has_prefix = 1;
2667 }
2668 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2669 }
2670 if (exp.X_op == O_absent)
2671 {
2672 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2673 {
2674 set_syntax_error (_("missing shift amount"));
2675 return FALSE;
2676 }
2677 operand->shifter.amount = 0;
2678 }
2679 else if (exp.X_op != O_constant)
2680 {
2681 set_syntax_error (_("constant shift amount required"));
2682 return FALSE;
2683 }
2684 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2685 {
2686 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2687 return FALSE;
2688 }
2689 else
2690 {
2691 operand->shifter.amount = exp.X_add_number;
2692 operand->shifter.amount_present = 1;
2693 }
2694
2695 operand->shifter.operator_present = 1;
2696 operand->shifter.kind = kind;
2697
2698 *str = p;
2699 return TRUE;
2700 }
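/* Illustrative shift strings for the different parse_shift_mode values
   (arbitrary examples):
     SHIFTED_ARITH_IMM    "lsl #12"
     SHIFTED_LOGIC_IMM    "ror #3"
     SHIFTED_LSL          "lsl #16"
     SHIFTED_LSL_MSL      "msl #8"
     SHIFTED_REG_OFFSET   "sxtw #2", "lsl #3"  */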
2701
2702 /* Parse a <shifter_operand> for a data processing instruction:
2703
2704 #<immediate>
2705 #<immediate>, LSL #imm
2706
2707 Validation of immediate operands is deferred to md_apply_fix.
2708
2709 Return TRUE on success; otherwise return FALSE. */
2710
2711 static bfd_boolean
2712 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2713 enum parse_shift_mode mode)
2714 {
2715 char *p;
2716
2717 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2718 return FALSE;
2719
2720 p = *str;
2721
2722 /* Accept an immediate expression. */
2723 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2724 return FALSE;
2725
2726 /* Accept optional LSL for arithmetic immediate values. */
2727 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2728 if (! parse_shift (&p, operand, SHIFTED_LSL))
2729 return FALSE;
2730
2731 /* Do not accept any shifter for logical immediate values. */
2732 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2733 && parse_shift (&p, operand, mode))
2734 {
2735 set_syntax_error (_("unexpected shift operator"));
2736 return FALSE;
2737 }
2738
2739 *str = p;
2740 return TRUE;
2741 }
2742
2743 /* Parse a <shifter_operand> for a data processing instruction:
2744
2745 <Rm>
2746 <Rm>, <shift>
2747 #<immediate>
2748 #<immediate>, LSL #imm
2749
2750 where <shift> is handled by parse_shift above, and the last two
2751 cases are handled by the function above.
2752
2753 Validation of immediate operands is deferred to md_apply_fix.
2754
2755 Return TRUE on success; otherwise return FALSE. */
2756
2757 static bfd_boolean
2758 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2759 enum parse_shift_mode mode)
2760 {
2761 int reg;
2762 int isreg32, isregzero;
2763 enum aarch64_operand_class opd_class
2764 = aarch64_get_operand_class (operand->type);
2765
2766 if ((reg =
2767 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2768 {
2769 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2770 {
2771 set_syntax_error (_("unexpected register in the immediate operand"));
2772 return FALSE;
2773 }
2774
2775 if (!isregzero && reg == REG_SP)
2776 {
2777 set_syntax_error (BAD_SP);
2778 return FALSE;
2779 }
2780
2781 operand->reg.regno = reg;
2782 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2783
2784 /* Accept optional shift operation on register. */
2785 if (! skip_past_comma (str))
2786 return TRUE;
2787
2788 if (! parse_shift (str, operand, mode))
2789 return FALSE;
2790
2791 return TRUE;
2792 }
2793 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2794 {
2795 set_syntax_error
2796 (_("integer register expected in the extended/shifted operand "
2797 "register"));
2798 return FALSE;
2799 }
2800
2801 /* We have a shifted immediate variable. */
2802 return parse_shifter_operand_imm (str, operand, mode);
2803 }
2804
2805 /* Return TRUE on success; return FALSE otherwise. */
2806
2807 static bfd_boolean
2808 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2809 enum parse_shift_mode mode)
2810 {
2811 char *p = *str;
2812
2813 /* Determine if we have the sequence of characters #: or just :
2814 coming next. If we do, then we check for a :rello: relocation
2815 modifier. If we don't, punt the whole lot to
2816 parse_shifter_operand. */
2817
2818 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2819 {
2820 struct reloc_table_entry *entry;
2821
2822 if (p[0] == '#')
2823 p += 2;
2824 else
2825 p++;
2826 *str = p;
2827
2828 /* Try to parse a relocation. Anything else is an error. */
2829 if (!(entry = find_reloc_table_entry (str)))
2830 {
2831 set_syntax_error (_("unknown relocation modifier"));
2832 return FALSE;
2833 }
2834
2835 if (entry->add_type == 0)
2836 {
2837 set_syntax_error
2838 (_("this relocation modifier is not allowed on this instruction"));
2839 return FALSE;
2840 }
2841
2842 /* Save str before we decompose it. */
2843 p = *str;
2844
2845 /* Next, we parse the expression. */
2846 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2847 return FALSE;
2848
2849 /* Record the relocation type (use the ADD variant here). */
2850 inst.reloc.type = entry->add_type;
2851 inst.reloc.pc_rel = entry->pc_rel;
2852
2853 /* If str is empty, we've reached the end, stop here. */
2854 if (**str == '\0')
2855 return TRUE;
2856
2857 /* Otherwise, we have a shifted reloc modifier, so rewind to
2858 recover the variable name and continue parsing for the shifter. */
2859 *str = p;
2860 return parse_shifter_operand_imm (str, operand, mode);
2861 }
2862
2863 return parse_shifter_operand (str, operand, mode);
2864 }
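/* An illustrative use of the relocation path above (the symbol name is an
   arbitrary example):

     add x0, x1, #:lo12:var

   The "lo12" entry is looked up in reloc_table, its ADD variant
   BFD_RELOC_AARCH64_ADD_LO12 is recorded in inst.reloc, and the symbol
   expression is then parsed as usual.  */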
2865
2866 /* Parse all forms of an address expression. Information is written
2867 to *OPERAND and/or inst.reloc.
2868
2869 The A64 instruction set has the following addressing modes:
2870
2871 Offset
2872 [base] // in SIMD ld/st structure
2873 [base{,#0}] // in ld/st exclusive
2874 [base{,#imm}]
2875 [base,Xm{,LSL #imm}]
2876 [base,Xm,SXTX {#imm}]
2877 [base,Wm,(S|U)XTW {#imm}]
2878 Pre-indexed
2879 [base,#imm]!
2880 Post-indexed
2881 [base],#imm
2882 [base],Xm // in SIMD ld/st structure
2883 PC-relative (literal)
2884 label
2885 =immediate
2886
2887 (As a convenience, the notation "=immediate" is permitted in conjunction
2888 with the pc-relative literal load instructions to automatically place an
2889 immediate value or symbolic address in a nearby literal pool and generate
2890 a hidden label which references it.)
2891
2892 Upon a successful parsing, the address structure in *OPERAND will be
2893 filled in the following way:
2894
2895 .base_regno = <base>
2896 .offset.is_reg // 1 if the offset is a register
2897 .offset.imm = <imm>
2898 .offset.regno = <Rm>
2899
2900 For different addressing modes defined in the A64 ISA:
2901
2902 Offset
2903 .pcrel=0; .preind=1; .postind=0; .writeback=0
2904 Pre-indexed
2905 .pcrel=0; .preind=1; .postind=0; .writeback=1
2906 Post-indexed
2907 .pcrel=0; .preind=0; .postind=1; .writeback=1
2908 PC-relative (literal)
2909 .pcrel=1; .preind=1; .postind=0; .writeback=0
2910
2911 The shift/extension information, if any, will be stored in .shifter.
2912
2913 It is the caller's responsibility to check for addressing modes not
2914 supported by the instruction, and to set inst.reloc.type. */
2915
2916 static bfd_boolean
2917 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2918 int accept_reg_post_index)
2919 {
2920 char *p = *str;
2921 int reg;
2922 int isreg32, isregzero;
2923 expressionS *exp = &inst.reloc.exp;
2924
2925 if (! skip_past_char (&p, '['))
2926 {
2927 /* =immediate or label. */
2928 operand->addr.pcrel = 1;
2929 operand->addr.preind = 1;
2930
2931 /* #:<reloc_op>:<symbol> */
2932 skip_past_char (&p, '#');
2933 if (reloc && skip_past_char (&p, ':'))
2934 {
2935 struct reloc_table_entry *entry;
2936
2937 /* Try to parse a relocation modifier. Anything else is
2938 an error. */
2939 entry = find_reloc_table_entry (&p);
2940 if (! entry)
2941 {
2942 set_syntax_error (_("unknown relocation modifier"));
2943 return FALSE;
2944 }
2945
2946 if (entry->ldst_type == 0)
2947 {
2948 set_syntax_error
2949 (_("this relocation modifier is not allowed on this "
2950 "instruction"));
2951 return FALSE;
2952 }
2953
2954 /* #:<reloc_op>: */
2955 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2956 {
2957 set_syntax_error (_("invalid relocation expression"));
2958 return FALSE;
2959 }
2960
2961 /* #:<reloc_op>:<expr> */
2962 /* Record the load/store relocation type. */
2963 inst.reloc.type = entry->ldst_type;
2964 inst.reloc.pc_rel = entry->pc_rel;
2965 }
2966 else
2967 {
2968
2969 if (skip_past_char (&p, '='))
2970 /* =immediate; need to generate the literal in the literal pool. */
2971 inst.gen_lit_pool = 1;
2972
2973 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2974 {
2975 set_syntax_error (_("invalid address"));
2976 return FALSE;
2977 }
2978 }
2979
2980 *str = p;
2981 return TRUE;
2982 }
2983
2984 /* [ */
2985
2986 /* Accept SP and reject ZR */
2987 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
2988 if (reg == PARSE_FAIL || isreg32)
2989 {
2990 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
2991 return FALSE;
2992 }
2993 operand->addr.base_regno = reg;
2994
2995 /* [Xn */
2996 if (skip_past_comma (&p))
2997 {
2998 /* [Xn, */
2999 operand->addr.preind = 1;
3000
3001 /* Reject SP and accept ZR */
3002 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
3003 if (reg != PARSE_FAIL)
3004 {
3005 /* [Xn,Rm */
3006 operand->addr.offset.regno = reg;
3007 operand->addr.offset.is_reg = 1;
3008 /* Shifted index. */
3009 if (skip_past_comma (&p))
3010 {
3011 /* [Xn,Rm, */
3012 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3013 /* Use the diagnostics set in parse_shift, so do not set a new
3014 error message here. */
3015 return FALSE;
3016 }
3017 /* We only accept:
3018 [base,Xm{,LSL #imm}]
3019 [base,Xm,SXTX {#imm}]
3020 [base,Wm,(S|U)XTW {#imm}] */
3021 if (operand->shifter.kind == AARCH64_MOD_NONE
3022 || operand->shifter.kind == AARCH64_MOD_LSL
3023 || operand->shifter.kind == AARCH64_MOD_SXTX)
3024 {
3025 if (isreg32)
3026 {
3027 set_syntax_error (_("invalid use of 32-bit register offset"));
3028 return FALSE;
3029 }
3030 }
3031 else if (!isreg32)
3032 {
3033 set_syntax_error (_("invalid use of 64-bit register offset"));
3034 return FALSE;
3035 }
3036 }
3037 else
3038 {
3039 /* [Xn,#:<reloc_op>:<symbol> */
3040 skip_past_char (&p, '#');
3041 if (reloc && skip_past_char (&p, ':'))
3042 {
3043 struct reloc_table_entry *entry;
3044
3045 /* Try to parse a relocation modifier. Anything else is
3046 an error. */
3047 if (!(entry = find_reloc_table_entry (&p)))
3048 {
3049 set_syntax_error (_("unknown relocation modifier"));
3050 return FALSE;
3051 }
3052
3053 if (entry->ldst_type == 0)
3054 {
3055 set_syntax_error
3056 (_("this relocation modifier is not allowed on this "
3057 "instruction"));
3058 return FALSE;
3059 }
3060
3061 /* [Xn,#:<reloc_op>: */
3062 /* We now have the group relocation table entry corresponding to
3063 the name in the assembler source. Next, we parse the
3064 expression. */
3065 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3066 {
3067 set_syntax_error (_("invalid relocation expression"));
3068 return FALSE;
3069 }
3070
3071 /* [Xn,#:<reloc_op>:<expr> */
3072 /* Record the load/store relocation type. */
3073 inst.reloc.type = entry->ldst_type;
3074 inst.reloc.pc_rel = entry->pc_rel;
3075 }
3076 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3077 {
3078 set_syntax_error (_("invalid expression in the address"));
3079 return FALSE;
3080 }
3081 /* [Xn,<expr> */
3082 }
3083 }
3084
3085 if (! skip_past_char (&p, ']'))
3086 {
3087 set_syntax_error (_("']' expected"));
3088 return FALSE;
3089 }
3090
3091 if (skip_past_char (&p, '!'))
3092 {
3093 if (operand->addr.preind && operand->addr.offset.is_reg)
3094 {
3095 set_syntax_error (_("register offset not allowed in pre-indexed "
3096 "addressing mode"));
3097 return FALSE;
3098 }
3099 /* [Xn]! */
3100 operand->addr.writeback = 1;
3101 }
3102 else if (skip_past_comma (&p))
3103 {
3104 /* [Xn], */
3105 operand->addr.postind = 1;
3106 operand->addr.writeback = 1;
3107
3108 if (operand->addr.preind)
3109 {
3110 set_syntax_error (_("cannot combine pre- and post-indexing"));
3111 return FALSE;
3112 }
3113
3114 if (accept_reg_post_index
3115 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3116 &isregzero)) != PARSE_FAIL)
3117 {
3118 /* [Xn],Xm */
3119 if (isreg32)
3120 {
3121 set_syntax_error (_("invalid 32-bit register offset"));
3122 return FALSE;
3123 }
3124 operand->addr.offset.regno = reg;
3125 operand->addr.offset.is_reg = 1;
3126 }
3127 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3128 {
3129 /* [Xn],#expr */
3130 set_syntax_error (_("invalid expression in the address"));
3131 return FALSE;
3132 }
3133 }
3134
3135 /* If at this point neither .preind nor .postind is set, we have a
3136 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3137 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3138 {
3139 if (operand->addr.writeback)
3140 {
3141 /* Reject [Rn]! */
3142 set_syntax_error (_("missing offset in the pre-indexed address"));
3143 return FALSE;
3144 }
3145 operand->addr.preind = 1;
3146 inst.reloc.exp.X_op = O_constant;
3147 inst.reloc.exp.X_add_number = 0;
3148 }
3149
3150 *str = p;
3151 return TRUE;
3152 }
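/* Illustrative address strings and the resulting *OPERAND settings
   (register numbers are arbitrary examples):
     [x1]              preind=1, base_regno=1, immediate offset 0
     [x2, #16]!        preind=1, writeback=1, base_regno=2
     [x3], #32         postind=1, writeback=1, base_regno=3
     [x4, x5, lsl #3]  preind=1, base_regno=4, offset.regno=5,
                       offset.is_reg=1, shifter LSL #3
   Constant immediate offsets are left in inst.reloc.exp for the caller to
   assign or fix up later.  */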
3153
3154 /* Return TRUE on success; otherwise return FALSE. */
3155 static bfd_boolean
3156 parse_address (char **str, aarch64_opnd_info *operand,
3157 int accept_reg_post_index)
3158 {
3159 return parse_address_main (str, operand, 0, accept_reg_post_index);
3160 }
3161
3162 /* Return TRUE on success; otherwise return FALSE. */
3163 static bfd_boolean
3164 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3165 {
3166 return parse_address_main (str, operand, 1, 0);
3167 }
3168
3169 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3170 Return TRUE on success; otherwise return FALSE. */
3171 static bfd_boolean
3172 parse_half (char **str, int *internal_fixup_p)
3173 {
3174 char *p, *saved;
3175 int dummy;
3176
3177 p = *str;
3178 skip_past_char (&p, '#');
3179
3180 gas_assert (internal_fixup_p);
3181 *internal_fixup_p = 0;
3182
3183 if (*p == ':')
3184 {
3185 struct reloc_table_entry *entry;
3186
3187 /* Try to parse a relocation. Anything else is an error. */
3188 ++p;
3189 if (!(entry = find_reloc_table_entry (&p)))
3190 {
3191 set_syntax_error (_("unknown relocation modifier"));
3192 return FALSE;
3193 }
3194
3195 if (entry->movw_type == 0)
3196 {
3197 set_syntax_error
3198 (_("this relocation modifier is not allowed on this instruction"));
3199 return FALSE;
3200 }
3201
3202 inst.reloc.type = entry->movw_type;
3203 }
3204 else
3205 *internal_fixup_p = 1;
3206
3207 /* Avoid parsing a register as a general symbol. */
3208 saved = p;
3209 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3210 return FALSE;
3211 p = saved;
3212
3213 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3214 return FALSE;
3215
3216 *str = p;
3217 return TRUE;
3218 }
3219
3220 /* Parse an operand for an ADRP instruction:
3221 ADRP <Xd>, <label>
3222 Return TRUE on success; otherwise return FALSE. */
3223
3224 static bfd_boolean
3225 parse_adrp (char **str)
3226 {
3227 char *p;
3228
3229 p = *str;
3230 if (*p == ':')
3231 {
3232 struct reloc_table_entry *entry;
3233
3234 /* Try to parse a relocation. Anything else is an error. */
3235 ++p;
3236 if (!(entry = find_reloc_table_entry (&p)))
3237 {
3238 set_syntax_error (_("unknown relocation modifier"));
3239 return FALSE;
3240 }
3241
3242 if (entry->adrp_type == 0)
3243 {
3244 set_syntax_error
3245 (_("this relocation modifier is not allowed on this instruction"));
3246 return FALSE;
3247 }
3248
3249 inst.reloc.type = entry->adrp_type;
3250 }
3251 else
3252 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3253
3254 inst.reloc.pc_rel = 1;
3255
3256 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3257 return FALSE;
3258
3259 *str = p;
3260 return TRUE;
3261 }
3262
3263 /* Miscellaneous. */
3264
3265 /* Parse an option for a preload instruction. Returns the encoding for the
3266 option, or PARSE_FAIL. */
3267
3268 static int
3269 parse_pldop (char **str)
3270 {
3271 char *p, *q;
3272 const struct aarch64_name_value_pair *o;
3273
3274 p = q = *str;
3275 while (ISALNUM (*q))
3276 q++;
3277
3278 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3279 if (!o)
3280 return PARSE_FAIL;
3281
3282 *str = q;
3283 return o->value;
3284 }
3285
3286 /* Parse an option for a barrier instruction. Returns the encoding for the
3287 option, or PARSE_FAIL. */
3288
3289 static int
3290 parse_barrier (char **str)
3291 {
3292 char *p, *q;
3293 const asm_barrier_opt *o;
3294
3295 p = q = *str;
3296 while (ISALPHA (*q))
3297 q++;
3298
3299 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3300 if (!o)
3301 return PARSE_FAIL;
3302
3303 *str = q;
3304 return o->value;
3305 }
3306
3307 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3308 Returns the encoding for the option, or PARSE_FAIL.
3309
3310 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3311 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
3312
3313 static int
3314 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3315 {
3316 char *p, *q;
3317 char buf[32];
3318 const aarch64_sys_reg *o;
3319 int value;
3320
3321 p = buf;
3322 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3323 if (p < buf + 31)
3324 *p++ = TOLOWER (*q);
3325 *p = '\0';
3326 /* Assert that BUF was large enough (i.e. nothing was truncated). */
3327 gas_assert (p - buf == q - *str);
3328
3329 o = hash_find (sys_regs, buf);
3330 if (!o)
3331 {
3332 if (!imple_defined_p)
3333 return PARSE_FAIL;
3334 else
3335 {
3336 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>, the implementation defined
3337 registers. */
3338 unsigned int op0, op1, cn, cm, op2;
3339 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2) != 5)
3340 return PARSE_FAIL;
3341 /* The architecture specifies the encoding space for implementation
3342 defined registers as:
3343 op0 op1 CRn CRm op2
3344 1x xxx 1x11 xxxx xxx
3345 For convenience GAS accepts a wider encoding space, as follows:
3346 op0 op1 CRn CRm op2
3347 1x xxx xxxx xxxx xxx */
3348 if ((op0 != 2 && op0 != 3) || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3349 return PARSE_FAIL;
3350 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3351 }
3352 }
3353 else
3354 {
3355 if (aarch64_sys_reg_deprecated_p (o))
3356 as_warn (_("system register name '%s' is deprecated and may be "
3357 "removed in a future release"), buf);
3358 value = o->value;
3359 }
3360
3361 *str = q;
3362 return value;
3363 }
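/* A worked example of the implementation-defined encoding above (the
   register name is an arbitrary example): "s3_1_c2_c3_4" gives op0=3,
   op1=1, CRn=2, CRm=3, op2=4, so the returned value is
   (3 << 14) | (1 << 11) | (2 << 7) | (3 << 3) | 4 == 0xc91c.  */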
3364
3365 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3366 for the option, or NULL. */
3367
3368 static const aarch64_sys_ins_reg *
3369 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3370 {
3371 char *p, *q;
3372 char buf[32];
3373 const aarch64_sys_ins_reg *o;
3374
3375 p = buf;
3376 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3377 if (p < buf + 31)
3378 *p++ = TOLOWER (*q);
3379 *p = '\0';
3380
3381 o = hash_find (sys_ins_regs, buf);
3382 if (!o)
3383 return NULL;
3384
3385 *str = q;
3386 return o;
3387 }
3388 \f
3389 #define po_char_or_fail(chr) do { \
3390 if (! skip_past_char (&str, chr)) \
3391 goto failure; \
3392 } while (0)
3393
3394 #define po_reg_or_fail(regtype) do { \
3395 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3396 if (val == PARSE_FAIL) \
3397 { \
3398 set_default_error (); \
3399 goto failure; \
3400 } \
3401 } while (0)
3402
3403 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3404 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3405 &isreg32, &isregzero); \
3406 if (val == PARSE_FAIL) \
3407 { \
3408 set_default_error (); \
3409 goto failure; \
3410 } \
3411 info->reg.regno = val; \
3412 if (isreg32) \
3413 info->qualifier = AARCH64_OPND_QLF_W; \
3414 else \
3415 info->qualifier = AARCH64_OPND_QLF_X; \
3416 } while (0)
3417
3418 #define po_imm_nc_or_fail() do { \
3419 if (! parse_constant_immediate (&str, &val)) \
3420 goto failure; \
3421 } while (0)
3422
3423 #define po_imm_or_fail(min, max) do { \
3424 if (! parse_constant_immediate (&str, &val)) \
3425 goto failure; \
3426 if (val < min || val > max) \
3427 { \
3428 set_fatal_syntax_error (_("immediate value out of range "\
3429 #min " to "#max)); \
3430 goto failure; \
3431 } \
3432 } while (0)
3433
3434 #define po_misc_or_fail(expr) do { \
3435 if (!expr) \
3436 goto failure; \
3437 } while (0)
3438 \f
3439 /* encode the 12-bit imm field of Add/sub immediate */
3440 static inline uint32_t
3441 encode_addsub_imm (uint32_t imm)
3442 {
3443 return imm << 10;
3444 }
3445
3446 /* encode the shift amount field of Add/sub immediate */
3447 static inline uint32_t
3448 encode_addsub_imm_shift_amount (uint32_t cnt)
3449 {
3450 return cnt << 22;
3451 }
3452
3453
3454 /* encode the imm field of Adr instruction */
3455 static inline uint32_t
3456 encode_adr_imm (uint32_t imm)
3457 {
3458 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3459 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3460 }
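/* A worked example for encode_adr_imm (the immediate 0x5 is an arbitrary
   choice): for imm == 0x5 (binary 101), bits [1:0] == 0b01 land at bits
   [30:29] and bit 2 lands at bit 5, giving
   (0x1 << 29) | (0x1 << 5) == 0x20000020.  */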
3461
3462 /* encode the immediate field of Move wide immediate */
3463 static inline uint32_t
3464 encode_movw_imm (uint32_t imm)
3465 {
3466 return imm << 5;
3467 }
3468
3469 /* encode the 26-bit offset of unconditional branch */
3470 static inline uint32_t
3471 encode_branch_ofs_26 (uint32_t ofs)
3472 {
3473 return ofs & ((1 << 26) - 1);
3474 }
3475
3476 /* encode the 19-bit offset of conditional branch and compare & branch */
3477 static inline uint32_t
3478 encode_cond_branch_ofs_19 (uint32_t ofs)
3479 {
3480 return (ofs & ((1 << 19) - 1)) << 5;
3481 }
3482
3483 /* encode the 19-bit offset of ld literal */
3484 static inline uint32_t
3485 encode_ld_lit_ofs_19 (uint32_t ofs)
3486 {
3487 return (ofs & ((1 << 19) - 1)) << 5;
3488 }
3489
3490 /* Encode the 14-bit offset of test & branch. */
3491 static inline uint32_t
3492 encode_tst_branch_ofs_14 (uint32_t ofs)
3493 {
3494 return (ofs & ((1 << 14) - 1)) << 5;
3495 }
3496
3497 /* Encode the 16-bit imm field of svc/hvc/smc. */
3498 static inline uint32_t
3499 encode_svc_imm (uint32_t imm)
3500 {
3501 return imm << 5;
3502 }
3503
3504 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3505 static inline uint32_t
3506 reencode_addsub_switch_add_sub (uint32_t opcode)
3507 {
3508 return opcode ^ (1 << 30);
3509 }
3510
3511 static inline uint32_t
3512 reencode_movzn_to_movz (uint32_t opcode)
3513 {
3514 return opcode | (1 << 30);
3515 }
3516
3517 static inline uint32_t
3518 reencode_movzn_to_movn (uint32_t opcode)
3519 {
3520 return opcode & ~(1 << 30);
3521 }
3522
3523 /* Overall per-instruction processing. */
3524
3525 /* We need to be able to fix up arbitrary expressions in some statements.
3526 This is so that we can handle symbols that are an arbitrary distance from
3527 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3528 which returns part of an address in a form which will be valid for
3529 a data instruction. We do this by pushing the expression into a symbol
3530 in the expr_section, and creating a fix for that. */
3531
3532 static fixS *
3533 fix_new_aarch64 (fragS * frag,
3534 int where,
3535 short int size, expressionS * exp, int pc_rel, int reloc)
3536 {
3537 fixS *new_fix;
3538
3539 switch (exp->X_op)
3540 {
3541 case O_constant:
3542 case O_symbol:
3543 case O_add:
3544 case O_subtract:
3545 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3546 break;
3547
3548 default:
3549 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3550 pc_rel, reloc);
3551 break;
3552 }
3553 return new_fix;
3554 }
3555 \f
3556 /* Diagnostics on operand errors. */
3557
3558 /* By default, output a verbose error message.
3559 Verbose error messages can be disabled with -mno-verbose-error. */
3560 static int verbose_error_p = 1;
3561
3562 #ifdef DEBUG_AARCH64
3563 /* N.B. this is only for the purpose of debugging. */
3564 const char* operand_mismatch_kind_names[] =
3565 {
3566 "AARCH64_OPDE_NIL",
3567 "AARCH64_OPDE_RECOVERABLE",
3568 "AARCH64_OPDE_SYNTAX_ERROR",
3569 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3570 "AARCH64_OPDE_INVALID_VARIANT",
3571 "AARCH64_OPDE_OUT_OF_RANGE",
3572 "AARCH64_OPDE_UNALIGNED",
3573 "AARCH64_OPDE_REG_LIST",
3574 "AARCH64_OPDE_OTHER_ERROR",
3575 };
3576 #endif /* DEBUG_AARCH64 */
3577
3578 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3579
3580 When multiple errors of different kinds are found in the same assembly
3581 line, only the error of the highest severity will be picked up for
3582 issuing the diagnostics. */
3583
3584 static inline bfd_boolean
3585 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3586 enum aarch64_operand_error_kind rhs)
3587 {
3588 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3589 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3590 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3591 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3592 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3593 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3594 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3595 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3596 return lhs > rhs;
3597 }
3598
3599 /* Helper routine to get the mnemonic name from the assembly instruction
3600 line; it should only be called for diagnostic purposes, as a string
3601 copy operation is involved, which may affect runtime performance if
3602 used elsewhere. */
3603
3604 static const char*
3605 get_mnemonic_name (const char *str)
3606 {
3607 static char mnemonic[32];
3608 char *ptr;
3609
3610 /* Copy up to the first 31 bytes and assume that the full mnemonic name is included. */
3611 strncpy (mnemonic, str, 31);
3612 mnemonic[31] = '\0';
3613
3614 /* Scan up to the end of the mnemonic, which must end in white space,
3615 '.', or end of string. */
3616 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3617 ;
3618
3619 *ptr = '\0';
3620
3621 /* Append '...' to the truncated long name. */
3622 if (ptr - mnemonic == 31)
3623 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3624
3625 return mnemonic;
3626 }
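/* An illustrative call (the instruction string is an arbitrary example):
   get_mnemonic_name ("add x0, x1, x2") returns "add", since the scan
   stops at the first character that is not part of a name.  */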
3627
3628 static void
3629 reset_aarch64_instruction (aarch64_instruction *instruction)
3630 {
3631 memset (instruction, '\0', sizeof (aarch64_instruction));
3632 instruction->reloc.type = BFD_RELOC_UNUSED;
3633 }
3634
3635 /* Data structures storing one user error related to operands in the
3636 assembly code. */
3637
3638 struct operand_error_record
3639 {
3640 const aarch64_opcode *opcode;
3641 aarch64_operand_error detail;
3642 struct operand_error_record *next;
3643 };
3644
3645 typedef struct operand_error_record operand_error_record;
3646
3647 struct operand_errors
3648 {
3649 operand_error_record *head;
3650 operand_error_record *tail;
3651 };
3652
3653 typedef struct operand_errors operand_errors;
3654
3655 /* Top-level data structure reporting user errors for the current line of
3656 the assembly code.
3657 The way md_assemble works is that all opcodes sharing the same mnemonic
3658 name are iterated to find a match to the assembly line. In this data
3659 structure, each such opcode will have one operand_error_record
3660 allocated and inserted. In other words, excessive errors related to
3661 a single opcode are disregarded.
3662 operand_errors operand_error_report;
3663
3664 /* Free record nodes. */
3665 static operand_error_record *free_opnd_error_record_nodes = NULL;
3666
3667 /* Initialize the data structure that stores the operand mismatch
3668 information on assembling one line of the assembly code. */
3669 static void
3670 init_operand_error_report (void)
3671 {
3672 if (operand_error_report.head != NULL)
3673 {
3674 gas_assert (operand_error_report.tail != NULL);
3675 operand_error_report.tail->next = free_opnd_error_record_nodes;
3676 free_opnd_error_record_nodes = operand_error_report.head;
3677 operand_error_report.head = NULL;
3678 operand_error_report.tail = NULL;
3679 return;
3680 }
3681 gas_assert (operand_error_report.tail == NULL);
3682 }
3683
3684 /* Return TRUE if some operand error has been recorded during the
3685 parsing of the current assembly line using the opcode *OPCODE;
3686 otherwise return FALSE. */
3687 static inline bfd_boolean
3688 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3689 {
3690 operand_error_record *record = operand_error_report.head;
3691 return record && record->opcode == opcode;
3692 }
3693
3694 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3695 OPCODE field is initialized with OPCODE.
3696 N.B. only one record for each opcode, i.e. at most one error is
3697 recorded for each instruction template. */
3698
3699 static void
3700 add_operand_error_record (const operand_error_record* new_record)
3701 {
3702 const aarch64_opcode *opcode = new_record->opcode;
3703 operand_error_record* record = operand_error_report.head;
3704
3705 /* The record may have been created for this opcode. If not, we need
3706 to prepare one. */
3707 if (! opcode_has_operand_error_p (opcode))
3708 {
3709 /* Get one empty record. */
3710 if (free_opnd_error_record_nodes == NULL)
3711 {
3712 record = xmalloc (sizeof (operand_error_record));
3713 if (record == NULL)
3714 abort ();
3715 }
3716 else
3717 {
3718 record = free_opnd_error_record_nodes;
3719 free_opnd_error_record_nodes = record->next;
3720 }
3721 record->opcode = opcode;
3722 /* Insert at the head. */
3723 record->next = operand_error_report.head;
3724 operand_error_report.head = record;
3725 if (operand_error_report.tail == NULL)
3726 operand_error_report.tail = record;
3727 }
3728 else if (record->detail.kind != AARCH64_OPDE_NIL
3729 && record->detail.index <= new_record->detail.index
3730 && operand_error_higher_severity_p (record->detail.kind,
3731 new_record->detail.kind))
3732 {
3733 /* In the case of multiple errors found on operands related to a
3734 single opcode, only record the error of the leftmost operand and
3735 only if the error is of higher severity. */
3736 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3737 " the existing error %s on operand %d",
3738 operand_mismatch_kind_names[new_record->detail.kind],
3739 new_record->detail.index,
3740 operand_mismatch_kind_names[record->detail.kind],
3741 record->detail.index);
3742 return;
3743 }
3744
3745 record->detail = new_record->detail;
3746 }
3747
3748 static inline void
3749 record_operand_error_info (const aarch64_opcode *opcode,
3750 aarch64_operand_error *error_info)
3751 {
3752 operand_error_record record;
3753 record.opcode = opcode;
3754 record.detail = *error_info;
3755 add_operand_error_record (&record);
3756 }
3757
3758 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3759 error message *ERROR, for operand IDX (count from 0). */
3760
3761 static void
3762 record_operand_error (const aarch64_opcode *opcode, int idx,
3763 enum aarch64_operand_error_kind kind,
3764 const char* error)
3765 {
3766 aarch64_operand_error info;
3767 memset(&info, 0, sizeof (info));
3768 info.index = idx;
3769 info.kind = kind;
3770 info.error = error;
3771 record_operand_error_info (opcode, &info);
3772 }
3773
3774 static void
3775 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3776 enum aarch64_operand_error_kind kind,
3777 const char* error, const int *extra_data)
3778 {
3779 aarch64_operand_error info;
3780 info.index = idx;
3781 info.kind = kind;
3782 info.error = error;
3783 info.data[0] = extra_data[0];
3784 info.data[1] = extra_data[1];
3785 info.data[2] = extra_data[2];
3786 record_operand_error_info (opcode, &info);
3787 }
3788
3789 static void
3790 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3791 const char* error, int lower_bound,
3792 int upper_bound)
3793 {
3794 int data[3] = {lower_bound, upper_bound, 0};
3795 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3796 error, data);
3797 }
3798
3799 /* Remove the operand error record for *OPCODE. */
3800 static void ATTRIBUTE_UNUSED
3801 remove_operand_error_record (const aarch64_opcode *opcode)
3802 {
3803 if (opcode_has_operand_error_p (opcode))
3804 {
3805 operand_error_record* record = operand_error_report.head;
3806 gas_assert (record != NULL && operand_error_report.tail != NULL);
3807 operand_error_report.head = record->next;
3808 record->next = free_opnd_error_record_nodes;
3809 free_opnd_error_record_nodes = record;
3810 if (operand_error_report.head == NULL)
3811 {
3812 gas_assert (operand_error_report.tail == record);
3813 operand_error_report.tail = NULL;
3814 }
3815 }
3816 }
3817
3818 /* Given the instruction in *INSTR, return the index of the best matched
3819 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3820
3821 Return -1 if there is no qualifier sequence; return the first match
3822 if multiple matches are found. */
3823
3824 static int
3825 find_best_match (const aarch64_inst *instr,
3826 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3827 {
3828 int i, num_opnds, max_num_matched, idx;
3829
3830 num_opnds = aarch64_num_of_operands (instr->opcode);
3831 if (num_opnds == 0)
3832 {
3833 DEBUG_TRACE ("no operand");
3834 return -1;
3835 }
3836
3837 max_num_matched = 0;
3838 idx = -1;
3839
3840 /* For each pattern. */
3841 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3842 {
3843 int j, num_matched;
3844 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3845
3846 /* Most opcodes have far fewer patterns in the list. */
3847 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3848 {
3849 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3850 if (i != 0 && idx == -1)
3851 /* If nothing has been matched, return the 1st sequence. */
3852 idx = 0;
3853 break;
3854 }
3855
3856 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3857 if (*qualifiers == instr->operands[j].qualifier)
3858 ++num_matched;
3859
3860 if (num_matched > max_num_matched)
3861 {
3862 max_num_matched = num_matched;
3863 idx = i;
3864 }
3865 }
3866
3867 DEBUG_TRACE ("return with %d", idx);
3868 return idx;
3869 }
3870
3871 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3872 corresponding operands in *INSTR. */
3873
3874 static inline void
3875 assign_qualifier_sequence (aarch64_inst *instr,
3876 const aarch64_opnd_qualifier_t *qualifiers)
3877 {
3878 int i = 0;
3879 int num_opnds = aarch64_num_of_operands (instr->opcode);
3880 gas_assert (num_opnds);
3881 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3882 instr->operands[i].qualifier = *qualifiers;
3883 }
3884
3885 /* Print operands for the diagnosis purpose. */
3886
3887 static void
3888 print_operands (char *buf, const aarch64_opcode *opcode,
3889 const aarch64_opnd_info *opnds)
3890 {
3891 int i;
3892
3893 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3894 {
3895 const size_t size = 128;
3896 char str[size];
3897
3898 /* We primarily rely on the operand info in the opcode table; however, we
3899 also look into inst->operands to support printing any optional
3900 operand.
3901 The two operand codes should be the same in all cases, except when
3902 the operand is optional. */
3903 if (opcode->operands[i] == AARCH64_OPND_NIL
3904 || opnds[i].type == AARCH64_OPND_NIL)
3905 break;
3906
3907 /* Generate the operand string in STR. */
3908 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3909
3910 /* Delimiter. */
3911 if (str[0] != '\0')
3912 strcat (buf, i == 0 ? " " : ",");
3913
3914 /* Append the operand string. */
3915 strcat (buf, str);
3916 }
3917 }
3918
3919 /* Send to stderr a string as information. */
3920
3921 static void
3922 output_info (const char *format, ...)
3923 {
3924 char *file;
3925 unsigned int line;
3926 va_list args;
3927
3928 as_where (&file, &line);
3929 if (file)
3930 {
3931 if (line != 0)
3932 fprintf (stderr, "%s:%u: ", file, line);
3933 else
3934 fprintf (stderr, "%s: ", file);
3935 }
3936 fprintf (stderr, _("Info: "));
3937 va_start (args, format);
3938 vfprintf (stderr, format, args);
3939 va_end (args);
3940 (void) putc ('\n', stderr);
3941 }
3942
3943 /* Output one operand error record. */
3944
3945 static void
3946 output_operand_error_record (const operand_error_record *record, char *str)
3947 {
3948 int idx = record->detail.index;
3949 const aarch64_opcode *opcode = record->opcode;
3950 enum aarch64_opnd opd_code = (idx != -1 ? opcode->operands[idx]
3951 : AARCH64_OPND_NIL);
3952 const aarch64_operand_error *detail = &record->detail;
3953
3954 switch (detail->kind)
3955 {
3956 case AARCH64_OPDE_NIL:
3957 gas_assert (0);
3958 break;
3959
3960 case AARCH64_OPDE_SYNTAX_ERROR:
3961 case AARCH64_OPDE_RECOVERABLE:
3962 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
3963 case AARCH64_OPDE_OTHER_ERROR:
3964 gas_assert (idx >= 0);
3965 /* Use the prepared error message if there is one, otherwise use the
3966 operand description string to describe the error. */
3967 if (detail->error != NULL)
3968 {
3969 if (detail->index == -1)
3970 as_bad (_("%s -- `%s'"), detail->error, str);
3971 else
3972 as_bad (_("%s at operand %d -- `%s'"),
3973 detail->error, detail->index + 1, str);
3974 }
3975 else
3976 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
3977 aarch64_get_operand_desc (opd_code), str);
3978 break;
3979
3980 case AARCH64_OPDE_INVALID_VARIANT:
3981 as_bad (_("operand mismatch -- `%s'"), str);
3982 if (verbose_error_p)
3983 {
3984 /* We will try to correct the erroneous instruction and also provide
3985 more information e.g. all other valid variants.
3986
3987 The string representation of the corrected instruction and other
3988 valid variants are generated by
3989
3990 1) obtaining the intermediate representation of the erroneous
3991 instruction;
3992 2) manipulating the IR, e.g. replacing the operand qualifier;
3993 3) printing out the instruction by calling the printer functions
3994 shared with the disassembler.
3995
3996 The limitation of this method is that the exact input assembly
3997 line cannot be accurately reproduced in some cases, for example an
3998 optional operand present in the actual assembly line will be
3999 omitted in the output; likewise for the optional syntax rules,
4000 e.g. the # before the immediate. Another limitation is that the
4001 assembly symbols and relocation operations in the assembly line
4002 currently cannot be printed out in the error report. Last but not
4003 least, when other errors co-exist with this error, the
4004 'corrected' instruction may still be incorrect, e.g. given
4005 'ldnp h0,h1,[x0,#6]!'
4006 this diagnostic will suggest the version:
4007 'ldnp s0,s1,[x0,#6]!'
4008 which is still not right. */
4009 size_t len = strlen (get_mnemonic_name (str));
4010 int i, qlf_idx;
4011 bfd_boolean result;
4012 const size_t size = 2048;
4013 char buf[size];
4014 aarch64_inst *inst_base = &inst.base;
4015 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4016
4017 /* Init inst. */
4018 reset_aarch64_instruction (&inst);
4019 inst_base->opcode = opcode;
4020
4021 /* Reset the error report so that there is no side effect on the
4022 following operand parsing. */
4023 init_operand_error_report ();
4024
4025 /* Fill inst. */
4026 result = parse_operands (str + len, opcode)
4027 && programmer_friendly_fixup (&inst);
4028 gas_assert (result);
4029 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4030 NULL, NULL);
4031 gas_assert (!result);
4032
4033 /* Find the best-matching qualifier sequence. */
4034 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4035 gas_assert (qlf_idx > -1);
4036
4037 /* Assign the qualifiers. */
4038 assign_qualifier_sequence (inst_base,
4039 opcode->qualifiers_list[qlf_idx]);
4040
4041 /* Print the hint. */
4042 output_info (_(" did you mean this?"));
4043 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4044 print_operands (buf, opcode, inst_base->operands);
4045 output_info (_(" %s"), buf);
4046
4047 /* Print out other valid variant(s) if there are any. */
4048 if (qlf_idx != 0
4049 || !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4050 output_info (_(" other valid variant(s):"));
4051
4052 /* For each pattern. */
4053 qualifiers_list = opcode->qualifiers_list;
4054 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4055 {
4056 /* Most opcodes have far fewer patterns in the list; the
4057 first NIL qualifier indicates the end of the list. */
4058 if (empty_qualifier_sequence_p (*qualifiers_list))
4059 break;
4060
4061 if (i != qlf_idx)
4062 {
4063 /* Mnemonics name. */
4064 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4065
4066 /* Assign the qualifiers. */
4067 assign_qualifier_sequence (inst_base, *qualifiers_list);
4068
4069 /* Print instruction. */
4070 print_operands (buf, opcode, inst_base->operands);
4071
4072 output_info (_(" %s"), buf);
4073 }
4074 }
4075 }
4076 break;
4077
4078 case AARCH64_OPDE_OUT_OF_RANGE:
4079 if (detail->data[0] != detail->data[1])
4080 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4081 detail->error ? detail->error : _("immediate value"),
4082 detail->data[0], detail->data[1], detail->index + 1, str);
4083 else
4084 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4085 detail->error ? detail->error : _("immediate value"),
4086 detail->data[0], detail->index + 1, str);
4087 break;
4088
4089 case AARCH64_OPDE_REG_LIST:
4090 if (detail->data[0] == 1)
4091 as_bad (_("invalid number of registers in the list; "
4092 "only 1 register is expected at operand %d -- `%s'"),
4093 detail->index + 1, str);
4094 else
4095 as_bad (_("invalid number of registers in the list; "
4096 "%d registers are expected at operand %d -- `%s'"),
4097 detail->data[0], detail->index + 1, str);
4098 break;
4099
4100 case AARCH64_OPDE_UNALIGNED:
4101 as_bad (_("immediate value should be a multiple of "
4102 "%d at operand %d -- `%s'"),
4103 detail->data[0], detail->index + 1, str);
4104 break;
4105
4106 default:
4107 gas_assert (0);
4108 break;
4109 }
4110 }
4111
4112 /* Process and output the error message about the operand mismatching.
4113
4114 When this function is called, the operand error information has
4115 been collected for an assembly line and there will be multiple
4116 errors in the case of multiple instruction templates; output the
4117 error message that most closely describes the problem. */
4118
4119 static void
4120 output_operand_error_report (char *str)
4121 {
4122 int largest_error_pos;
4123 const char *msg = NULL;
4124 enum aarch64_operand_error_kind kind;
4125 operand_error_record *curr;
4126 operand_error_record *head = operand_error_report.head;
4127 operand_error_record *record = NULL;
4128
4129 /* No error to report. */
4130 if (head == NULL)
4131 return;
4132
4133 gas_assert (head != NULL && operand_error_report.tail != NULL);
4134
4135 /* Only one error. */
4136 if (head == operand_error_report.tail)
4137 {
4138 DEBUG_TRACE ("single opcode entry with error kind: %s",
4139 operand_mismatch_kind_names[head->detail.kind]);
4140 output_operand_error_record (head, str);
4141 return;
4142 }
4143
4144 /* Find the error kind of the highest severity. */
4145 DEBUG_TRACE ("multiple opcode entres with error kind");
4146 kind = AARCH64_OPDE_NIL;
4147 for (curr = head; curr != NULL; curr = curr->next)
4148 {
4149 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4150 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4151 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4152 kind = curr->detail.kind;
4153 }
4154 gas_assert (kind != AARCH64_OPDE_NIL);
4155
4156 /* Pick one of the errors of KIND to report. */
4157 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4158 for (curr = head; curr != NULL; curr = curr->next)
4159 {
4160 if (curr->detail.kind != kind)
4161 continue;
4162 /* If there are multiple errors, pick the one with the highest
4163 mismatching operand index. In the case of multiple errors with
4164 the same highest operand index, pick the first one, or the
4165 first one with a non-NULL error message. */
4166 if (curr->detail.index > largest_error_pos
4167 || (curr->detail.index == largest_error_pos && msg == NULL
4168 && curr->detail.error != NULL))
4169 {
4170 largest_error_pos = curr->detail.index;
4171 record = curr;
4172 msg = record->detail.error;
4173 }
4174 }
4175
4176 gas_assert (largest_error_pos != -2 && record != NULL);
4177 DEBUG_TRACE ("Pick up error kind %s to report",
4178 operand_mismatch_kind_names[record->detail.kind]);
4179
4180 /* Output. */
4181 output_operand_error_record (record, str);
4182 }
4183 \f
4184 /* Write an AARCH64 instruction to buf - always little-endian. */
4185 static void
4186 put_aarch64_insn (char *buf, uint32_t insn)
4187 {
4188 unsigned char *where = (unsigned char *) buf;
4189 where[0] = insn;
4190 where[1] = insn >> 8;
4191 where[2] = insn >> 16;
4192 where[3] = insn >> 24;
4193 }
4194
4195 static uint32_t
4196 get_aarch64_insn (char *buf)
4197 {
4198 unsigned char *where = (unsigned char *) buf;
4199 uint32_t result;
4200 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4201 return result;
4202 }
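
/* A minimal usage sketch of the two helpers above, kept under "#if 0";
   the helper name and the example encoding (the NOP pattern also used
   for alignment padding later in this file) are purely illustrative.
   Packing a word and reading it back must round-trip, with the bytes
   always emitted least-significant first.  */
#if 0
static void
aarch64_insn_roundtrip_sketch (void)
{
  char buf[INSN_SIZE];
  uint32_t insn = 0xd503201f;	/* NOP.  */

  put_aarch64_insn (buf, insn);
  /* buf now holds 0x1f 0x20 0x03 0xd5.  */
  gas_assert (get_aarch64_insn (buf) == insn);
}
#endif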
4203
4204 static void
4205 output_inst (struct aarch64_inst *new_inst)
4206 {
4207 char *to = NULL;
4208
4209 to = frag_more (INSN_SIZE);
4210
4211 frag_now->tc_frag_data.recorded = 1;
4212
4213 put_aarch64_insn (to, inst.base.value);
4214
4215 if (inst.reloc.type != BFD_RELOC_UNUSED)
4216 {
4217 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4218 INSN_SIZE, &inst.reloc.exp,
4219 inst.reloc.pc_rel,
4220 inst.reloc.type);
4221 DEBUG_TRACE ("Prepared relocation fix up");
4222 /* Don't check the addend value against the instruction size,
4223 that's the job of our code in md_apply_fix(). */
4224 fixp->fx_no_overflow = 1;
4225 if (new_inst != NULL)
4226 fixp->tc_fix_data.inst = new_inst;
4227 if (aarch64_gas_internal_fixup_p ())
4228 {
4229 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4230 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4231 fixp->fx_addnumber = inst.reloc.flags;
4232 }
4233 }
4234
4235 dwarf2_emit_insn (INSN_SIZE);
4236 }
4237
4238 /* Link together opcodes of the same name. */
4239
4240 struct templates
4241 {
4242 aarch64_opcode *opcode;
4243 struct templates *next;
4244 };
4245
4246 typedef struct templates templates;
4247
4248 static templates *
4249 lookup_mnemonic (const char *start, int len)
4250 {
4251 templates *templ = NULL;
4252
4253 templ = hash_find_n (aarch64_ops_hsh, start, len);
4254 return templ;
4255 }
4256
4257 /* Subroutine of md_assemble, responsible for looking up the primary
4258 opcode from the mnemonic the user wrote. STR points to the
4259 beginning of the mnemonic. */
4260
4261 static templates *
4262 opcode_lookup (char **str)
4263 {
4264 char *end, *base;
4265 const aarch64_cond *cond;
4266 char condname[16];
4267 int len;
4268
4269 /* Scan up to the end of the mnemonic, which must end in white space,
4270 '.', or end of string. */
4271 for (base = end = *str; is_part_of_name (*end); end++)
4272 if (*end == '.')
4273 break;
4274
4275 if (end == base)
4276 return 0;
4277
4278 inst.cond = COND_ALWAYS;
4279
4280 /* Handle a possible condition. */
4281 if (end[0] == '.')
4282 {
4283 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4284 if (cond)
4285 {
4286 inst.cond = cond->value;
4287 *str = end + 3;
4288 }
4289 else
4290 {
4291 *str = end;
4292 return 0;
4293 }
4294 }
4295 else
4296 *str = end;
4297
4298 len = end - base;
4299
4300 if (inst.cond == COND_ALWAYS)
4301 {
4302 /* Look for unaffixed mnemonic. */
4303 return lookup_mnemonic (base, len);
4304 }
4305 else if (len <= 13)
4306 {
4307 /* Append ".c" to the mnemonic if conditional. */
4308 memcpy (condname, base, len);
4309 memcpy (condname + len, ".c", 2);
4310 base = condname;
4311 len += 2;
4312 return lookup_mnemonic (base, len);
4313 }
4314
4315 return NULL;
4316 }
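
/* A worked example of the lookup above: given "b.eq", END stops at the
   '.', so BASE..END is just "b"; the two characters "eq" are found in
   aarch64_cond_hsh and recorded in INST.COND; the hash lookup is then
   performed on "b.c", i.e. the conditional templates are stored under a
   ".c"-suffixed mnemonic name.  */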
4317
4318 /* Internal helper routine converting a vector neon_type_el structure
4319 *VECTYPE to a corresponding operand qualifier. */
4320
4321 static inline aarch64_opnd_qualifier_t
4322 vectype_to_qualifier (const struct neon_type_el *vectype)
4323 {
4324 /* Element size in bytes indexed by neon_el_type. */
4325 const unsigned char ele_size[5]
4326 = {1, 2, 4, 8, 16};
4327
4328 if (!vectype->defined || vectype->type == NT_invtype)
4329 goto vectype_conversion_fail;
4330
4331 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4332
4333 if (vectype->defined & NTA_HASINDEX)
4334 /* Vector element register. */
4335 return AARCH64_OPND_QLF_S_B + vectype->type;
4336 else
4337 {
4338 /* Vector register. */
4339 int reg_size = ele_size[vectype->type] * vectype->width;
4340 unsigned offset;
4341 if (reg_size != 16 && reg_size != 8)
4342 goto vectype_conversion_fail;
4343 /* The conversion is calculated based on the relation of the order of
4344 qualifiers to the vector element size and vector register size. */
4345 offset = (vectype->type == NT_q)
4346 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4347 gas_assert (offset <= 8);
4348 return AARCH64_OPND_QLF_V_8B + offset;
4349 }
4350
4351 vectype_conversion_fail:
4352 first_error (_("bad vector arrangement type"));
4353 return AARCH64_OPND_QLF_NIL;
4354 }
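
/* Worked examples of the offset calculation above, assuming the qualifier
   enumeration order 8B, 16B, 4H, 8H, 2S, 4S, 1D, 2D, 1Q that it relies on:
     ".4h": type NT_h (1), width 4, element size 2 => reg_size 8,
            offset = (1 << 1) + (8 >> 4) = 2  => AARCH64_OPND_QLF_V_4H;
     ".2d": type NT_d (3), width 2, element size 8 => reg_size 16,
            offset = (3 << 1) + (16 >> 4) = 7 => AARCH64_OPND_QLF_V_2D.  */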
4355
4356 /* Process an optional operand that has been omitted from the assembly line.
4357 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4358 instruction's opcode entry while IDX is the index of this omitted operand.
4359 */
4360
4361 static void
4362 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4363 int idx, aarch64_opnd_info *operand)
4364 {
4365 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4366 gas_assert (optional_operand_p (opcode, idx));
4367 gas_assert (!operand->present);
4368
4369 switch (type)
4370 {
4371 case AARCH64_OPND_Rd:
4372 case AARCH64_OPND_Rn:
4373 case AARCH64_OPND_Rm:
4374 case AARCH64_OPND_Rt:
4375 case AARCH64_OPND_Rt2:
4376 case AARCH64_OPND_Rs:
4377 case AARCH64_OPND_Ra:
4378 case AARCH64_OPND_Rt_SYS:
4379 case AARCH64_OPND_Rd_SP:
4380 case AARCH64_OPND_Rn_SP:
4381 case AARCH64_OPND_Fd:
4382 case AARCH64_OPND_Fn:
4383 case AARCH64_OPND_Fm:
4384 case AARCH64_OPND_Fa:
4385 case AARCH64_OPND_Ft:
4386 case AARCH64_OPND_Ft2:
4387 case AARCH64_OPND_Sd:
4388 case AARCH64_OPND_Sn:
4389 case AARCH64_OPND_Sm:
4390 case AARCH64_OPND_Vd:
4391 case AARCH64_OPND_Vn:
4392 case AARCH64_OPND_Vm:
4393 case AARCH64_OPND_VdD1:
4394 case AARCH64_OPND_VnD1:
4395 operand->reg.regno = default_value;
4396 break;
4397
4398 case AARCH64_OPND_Ed:
4399 case AARCH64_OPND_En:
4400 case AARCH64_OPND_Em:
4401 operand->reglane.regno = default_value;
4402 break;
4403
4404 case AARCH64_OPND_IDX:
4405 case AARCH64_OPND_BIT_NUM:
4406 case AARCH64_OPND_IMMR:
4407 case AARCH64_OPND_IMMS:
4408 case AARCH64_OPND_SHLL_IMM:
4409 case AARCH64_OPND_IMM_VLSL:
4410 case AARCH64_OPND_IMM_VLSR:
4411 case AARCH64_OPND_CCMP_IMM:
4412 case AARCH64_OPND_FBITS:
4413 case AARCH64_OPND_UIMM4:
4414 case AARCH64_OPND_UIMM3_OP1:
4415 case AARCH64_OPND_UIMM3_OP2:
4416 case AARCH64_OPND_IMM:
4417 case AARCH64_OPND_WIDTH:
4418 case AARCH64_OPND_UIMM7:
4419 case AARCH64_OPND_NZCV:
4420 operand->imm.value = default_value;
4421 break;
4422
4423 case AARCH64_OPND_EXCEPTION:
4424 inst.reloc.type = BFD_RELOC_UNUSED;
4425 break;
4426
4427 case AARCH64_OPND_BARRIER_ISB:
4428 operand->barrier = aarch64_barrier_options + default_value;
4429
4430 default:
4431 break;
4432 }
4433 }
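
/* For example, "ret" written without its optional Rn operand ends up here;
   the default value recorded in the opcode table is copied into the operand,
   giving the architectural default of X30 (the link register).  */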
4434
4435 /* Process the relocation type for move wide instructions.
4436 Return TRUE on success; otherwise return FALSE. */
4437
4438 static bfd_boolean
4439 process_movw_reloc_info (void)
4440 {
4441 int is32;
4442 unsigned shift;
4443
4444 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4445
4446 if (inst.base.opcode->op == OP_MOVK)
4447 switch (inst.reloc.type)
4448 {
4449 case BFD_RELOC_AARCH64_MOVW_G0_S:
4450 case BFD_RELOC_AARCH64_MOVW_G1_S:
4451 case BFD_RELOC_AARCH64_MOVW_G2_S:
4452 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4453 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4454 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4455 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4456 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4457 set_syntax_error
4458 (_("the specified relocation type is not allowed for MOVK"));
4459 return FALSE;
4460 default:
4461 break;
4462 }
4463
4464 switch (inst.reloc.type)
4465 {
4466 case BFD_RELOC_AARCH64_MOVW_G0:
4467 case BFD_RELOC_AARCH64_MOVW_G0_S:
4468 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4469 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4470 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4471 shift = 0;
4472 break;
4473 case BFD_RELOC_AARCH64_MOVW_G1:
4474 case BFD_RELOC_AARCH64_MOVW_G1_S:
4475 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4476 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4477 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4478 shift = 16;
4479 break;
4480 case BFD_RELOC_AARCH64_MOVW_G2:
4481 case BFD_RELOC_AARCH64_MOVW_G2_S:
4482 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4483 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4484 if (is32)
4485 {
4486 set_fatal_syntax_error
4487 (_("the specified relocation type is not allowed for 32-bit "
4488 "register"));
4489 return FALSE;
4490 }
4491 shift = 32;
4492 break;
4493 case BFD_RELOC_AARCH64_MOVW_G3:
4494 if (is32)
4495 {
4496 set_fatal_syntax_error
4497 (_("the specified relocation type is not allowed for 32-bit "
4498 "register"));
4499 return FALSE;
4500 }
4501 shift = 48;
4502 break;
4503 default:
4504 /* More cases should be added when more MOVW-related relocation types
4505 are supported in GAS. */
4506 gas_assert (aarch64_gas_internal_fixup_p ());
4507 /* The shift amount should have already been set by the parser. */
4508 return TRUE;
4509 }
4510 inst.base.operands[1].shifter.amount = shift;
4511 return TRUE;
4512 }
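
/* For example, assuming the usual relocation modifier mapping set up by
   the relocation table elsewhere in this file, an input of
     movz x0, #:abs_g1:sym
   arrives here with BFD_RELOC_AARCH64_MOVW_G1 and therefore has an
   implicit LSL #16 recorded on operand 1.  */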
4513
4514 /* A primitive base-2 log calculator. */
4515
4516 static inline unsigned int
4517 get_logsz (unsigned int size)
4518 {
4519 const unsigned char ls[16] =
4520 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4521 if (size > 16)
4522 {
4523 gas_assert (0);
4524 return -1;
4525 }
4526 gas_assert (ls[size - 1] != (unsigned char)-1);
4527 return ls[size - 1];
4528 }
4529
4530 /* Determine and return the real reloc type code for an instruction
4531 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
4532
4533 static inline bfd_reloc_code_real_type
4534 ldst_lo12_determine_real_reloc_type (void)
4535 {
4536 int logsz;
4537 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4538 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4539
4540 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4541 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4542 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4543 BFD_RELOC_AARCH64_LDST128_LO12
4544 };
4545
4546 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4547 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4548
4549 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4550 opd1_qlf =
4551 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4552 1, opd0_qlf, 0);
4553 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4554
4555 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4556 gas_assert (logsz >= 0 && logsz <= 4);
4557
4558 return reloc_ldst_lo12[logsz];
4559 }
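
/* For example, assuming the usual ":lo12:" modifier handling, for
     ldr w1, [x0, #:lo12:sym]
   operand 0 has qualifier W, so the expected access size is 4 bytes,
   get_logsz returns 2 and the pseudo reloc above is narrowed to
   BFD_RELOC_AARCH64_LDST32_LO12.  */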
4560
4561 /* Check whether a register list REGINFO is valid. The registers must be
4562 numbered in increasing order (modulo 32), in increments of one or two.
4563
4564 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4565 increments of two.
4566
4567 Return FALSE if such a register list is invalid, otherwise return TRUE. */
4568
4569 static bfd_boolean
4570 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4571 {
4572 uint32_t i, nb_regs, prev_regno, incr;
4573
4574 nb_regs = 1 + (reginfo & 0x3);
4575 reginfo >>= 2;
4576 prev_regno = reginfo & 0x1f;
4577 incr = accept_alternate ? 2 : 1;
4578
4579 for (i = 1; i < nb_regs; ++i)
4580 {
4581 uint32_t curr_regno;
4582 reginfo >>= 5;
4583 curr_regno = reginfo & 0x1f;
4584 if (curr_regno != ((prev_regno + incr) & 0x1f))
4585 return FALSE;
4586 prev_regno = curr_regno;
4587 }
4588
4589 return TRUE;
4590 }
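
/* A sketch, kept under "#if 0", of the packed REGINFO layout the function
   above expects (it matches how the value is unpacked in parse_operands):
   bits [1:0] hold the register count minus one and each subsequent 5-bit
   field holds a register number, first register in the lowest field.  The
   helper name is purely illustrative.  */
#if 0
static void
reg_list_valid_p_sketch (void)
{
  /* { v1, v2, v3, v4 }: count-1 = 3, register numbers 1, 2, 3, 4.  */
  uint32_t reginfo = 3 | (1 << 2) | (2 << 7) | (3 << 12) | (4 << 17);

  gas_assert (reg_list_valid_p (reginfo, /* accept_alternate */ 0));

  /* { v1, v3 } is only valid when an increment of two is accepted.  */
  gas_assert (!reg_list_valid_p (1 | (1 << 2) | (3 << 7), 0));
  gas_assert (reg_list_valid_p (1 | (1 << 2) | (3 << 7), 1));
}
#endif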
4591
4592 /* Generic instruction operand parser. This does no encoding and no
4593 semantic validation; it merely squirrels values away in the inst
4594 structure. Returns TRUE or FALSE depending on whether the
4595 specified grammar matched. */
4596
4597 static bfd_boolean
4598 parse_operands (char *str, const aarch64_opcode *opcode)
4599 {
4600 int i;
4601 char *backtrack_pos = 0;
4602 const enum aarch64_opnd *operands = opcode->operands;
4603
4604 clear_error ();
4605 skip_whitespace (str);
4606
4607 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4608 {
4609 int64_t val;
4610 int isreg32, isregzero;
4611 int comma_skipped_p = 0;
4612 aarch64_reg_type rtype;
4613 struct neon_type_el vectype;
4614 aarch64_opnd_info *info = &inst.base.operands[i];
4615
4616 DEBUG_TRACE ("parse operand %d", i);
4617
4618 /* Assign the operand code. */
4619 info->type = operands[i];
4620
4621 if (optional_operand_p (opcode, i))
4622 {
4623 /* Remember where we are in case we need to backtrack. */
4624 gas_assert (!backtrack_pos);
4625 backtrack_pos = str;
4626 }
4627
4628 /* Expect a comma between operands; the backtrack mechanism will take
4629 care of the case of an omitted optional operand. */
4630 if (i > 0 && ! skip_past_char (&str, ','))
4631 {
4632 set_syntax_error (_("comma expected between operands"));
4633 goto failure;
4634 }
4635 else
4636 comma_skipped_p = 1;
4637
4638 switch (operands[i])
4639 {
4640 case AARCH64_OPND_Rd:
4641 case AARCH64_OPND_Rn:
4642 case AARCH64_OPND_Rm:
4643 case AARCH64_OPND_Rt:
4644 case AARCH64_OPND_Rt2:
4645 case AARCH64_OPND_Rs:
4646 case AARCH64_OPND_Ra:
4647 case AARCH64_OPND_Rt_SYS:
4648 po_int_reg_or_fail (1, 0);
4649 break;
4650
4651 case AARCH64_OPND_Rd_SP:
4652 case AARCH64_OPND_Rn_SP:
4653 po_int_reg_or_fail (0, 1);
4654 break;
4655
4656 case AARCH64_OPND_Rm_EXT:
4657 case AARCH64_OPND_Rm_SFT:
4658 po_misc_or_fail (parse_shifter_operand
4659 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4660 ? SHIFTED_ARITH_IMM
4661 : SHIFTED_LOGIC_IMM)));
4662 if (!info->shifter.operator_present)
4663 {
4664 /* Default to LSL if not present. Libopcodes prefers shifter
4665 kind to be explicit. */
4666 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4667 info->shifter.kind = AARCH64_MOD_LSL;
4668 /* For Rm_EXT, libopcodes will carry out further check on whether
4669 or not stack pointer is used in the instruction (Recall that
4670 "the extend operator is not optional unless at least one of
4671 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4672 }
4673 break;
4674
4675 case AARCH64_OPND_Fd:
4676 case AARCH64_OPND_Fn:
4677 case AARCH64_OPND_Fm:
4678 case AARCH64_OPND_Fa:
4679 case AARCH64_OPND_Ft:
4680 case AARCH64_OPND_Ft2:
4681 case AARCH64_OPND_Sd:
4682 case AARCH64_OPND_Sn:
4683 case AARCH64_OPND_Sm:
4684 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4685 if (val == PARSE_FAIL)
4686 {
4687 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4688 goto failure;
4689 }
4690 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4691
4692 info->reg.regno = val;
4693 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4694 break;
4695
4696 case AARCH64_OPND_Vd:
4697 case AARCH64_OPND_Vn:
4698 case AARCH64_OPND_Vm:
4699 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4700 if (val == PARSE_FAIL)
4701 {
4702 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4703 goto failure;
4704 }
4705 if (vectype.defined & NTA_HASINDEX)
4706 goto failure;
4707
4708 info->reg.regno = val;
4709 info->qualifier = vectype_to_qualifier (&vectype);
4710 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4711 goto failure;
4712 break;
4713
4714 case AARCH64_OPND_VdD1:
4715 case AARCH64_OPND_VnD1:
4716 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4717 if (val == PARSE_FAIL)
4718 {
4719 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4720 goto failure;
4721 }
4722 if (vectype.type != NT_d || vectype.index != 1)
4723 {
4724 set_fatal_syntax_error
4725 (_("the top half of a 128-bit FP/SIMD register is expected"));
4726 goto failure;
4727 }
4728 info->reg.regno = val;
4729 /* N.B: VdD1 and VnD1 are treated as an fp or advsimd scalar register
4730 here; it is correct for the purpose of encoding/decoding since
4731 only the register number is explicitly encoded in the related
4732 instructions, although this appears a bit hacky. */
4733 info->qualifier = AARCH64_OPND_QLF_S_D;
4734 break;
4735
4736 case AARCH64_OPND_Ed:
4737 case AARCH64_OPND_En:
4738 case AARCH64_OPND_Em:
4739 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4740 if (val == PARSE_FAIL)
4741 {
4742 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4743 goto failure;
4744 }
4745 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4746 goto failure;
4747
4748 info->reglane.regno = val;
4749 info->reglane.index = vectype.index;
4750 info->qualifier = vectype_to_qualifier (&vectype);
4751 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4752 goto failure;
4753 break;
4754
4755 case AARCH64_OPND_LVn:
4756 case AARCH64_OPND_LVt:
4757 case AARCH64_OPND_LVt_AL:
4758 case AARCH64_OPND_LEt:
4759 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4760 goto failure;
4761 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4762 {
4763 set_fatal_syntax_error (_("invalid register list"));
4764 goto failure;
4765 }
4766 info->reglist.first_regno = (val >> 2) & 0x1f;
4767 info->reglist.num_regs = (val & 0x3) + 1;
4768 if (operands[i] == AARCH64_OPND_LEt)
4769 {
4770 if (!(vectype.defined & NTA_HASINDEX))
4771 goto failure;
4772 info->reglist.has_index = 1;
4773 info->reglist.index = vectype.index;
4774 }
4775 else if (!(vectype.defined & NTA_HASTYPE))
4776 goto failure;
4777 info->qualifier = vectype_to_qualifier (&vectype);
4778 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4779 goto failure;
4780 break;
4781
4782 case AARCH64_OPND_Cn:
4783 case AARCH64_OPND_Cm:
4784 po_reg_or_fail (REG_TYPE_CN);
4785 if (val > 15)
4786 {
4787 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4788 goto failure;
4789 }
4790 inst.base.operands[i].reg.regno = val;
4791 break;
4792
4793 case AARCH64_OPND_SHLL_IMM:
4794 case AARCH64_OPND_IMM_VLSR:
4795 po_imm_or_fail (1, 64);
4796 info->imm.value = val;
4797 break;
4798
4799 case AARCH64_OPND_CCMP_IMM:
4800 case AARCH64_OPND_FBITS:
4801 case AARCH64_OPND_UIMM4:
4802 case AARCH64_OPND_UIMM3_OP1:
4803 case AARCH64_OPND_UIMM3_OP2:
4804 case AARCH64_OPND_IMM_VLSL:
4805 case AARCH64_OPND_IMM:
4806 case AARCH64_OPND_WIDTH:
4807 po_imm_nc_or_fail ();
4808 info->imm.value = val;
4809 break;
4810
4811 case AARCH64_OPND_UIMM7:
4812 po_imm_or_fail (0, 127);
4813 info->imm.value = val;
4814 break;
4815
4816 case AARCH64_OPND_IDX:
4817 case AARCH64_OPND_BIT_NUM:
4818 case AARCH64_OPND_IMMR:
4819 case AARCH64_OPND_IMMS:
4820 po_imm_or_fail (0, 63);
4821 info->imm.value = val;
4822 break;
4823
4824 case AARCH64_OPND_IMM0:
4825 po_imm_nc_or_fail ();
4826 if (val != 0)
4827 {
4828 set_fatal_syntax_error (_("immediate zero expected"));
4829 goto failure;
4830 }
4831 info->imm.value = 0;
4832 break;
4833
4834 case AARCH64_OPND_FPIMM0:
4835 {
4836 int qfloat;
4837 bfd_boolean res1 = FALSE, res2 = FALSE;
4838 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4839 it is probably not worth the effort to support it. */
4840 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4841 && !(res2 = parse_constant_immediate (&str, &val)))
4842 goto failure;
4843 if ((res1 && qfloat == 0) || (res2 && val == 0))
4844 {
4845 info->imm.value = 0;
4846 info->imm.is_fp = 1;
4847 break;
4848 }
4849 set_fatal_syntax_error (_("immediate zero expected"));
4850 goto failure;
4851 }
4852
4853 case AARCH64_OPND_IMM_MOV:
4854 {
4855 char *saved = str;
4856 if (reg_name_p (str, REG_TYPE_R_Z_SP)
4857 || reg_name_p (str, REG_TYPE_VN))
4858 goto failure;
4859 str = saved;
4860 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4861 GE_OPT_PREFIX, 1));
4862 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4863 later. fix_mov_imm_insn will try to determine a machine
4864 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4865 message if the immediate cannot be moved by a single
4866 instruction. */
4867 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4868 inst.base.operands[i].skip = 1;
4869 }
4870 break;
4871
4872 case AARCH64_OPND_SIMD_IMM:
4873 case AARCH64_OPND_SIMD_IMM_SFT:
4874 if (! parse_big_immediate (&str, &val))
4875 goto failure;
4876 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4877 /* addr_off_p */ 0,
4878 /* need_libopcodes_p */ 1,
4879 /* skip_p */ 1);
4880 /* Parse shift.
4881 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4882 shift, we don't check it here; we leave the checking to
4883 the libopcodes (operand_general_constraint_met_p). By
4884 doing this, we achieve better diagnostics. */
4885 if (skip_past_comma (&str)
4886 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4887 goto failure;
4888 if (!info->shifter.operator_present
4889 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4890 {
4891 /* Default to LSL if not present. Libopcodes prefers shifter
4892 kind to be explicit. */
4893 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4894 info->shifter.kind = AARCH64_MOD_LSL;
4895 }
4896 break;
4897
4898 case AARCH64_OPND_FPIMM:
4899 case AARCH64_OPND_SIMD_FPIMM:
4900 {
4901 int qfloat;
4902 bfd_boolean dp_p
4903 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
4904 == 8);
4905 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
4906 goto failure;
4907 if (qfloat == 0)
4908 {
4909 set_fatal_syntax_error (_("invalid floating-point constant"));
4910 goto failure;
4911 }
4912 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4913 inst.base.operands[i].imm.is_fp = 1;
4914 }
4915 break;
4916
4917 case AARCH64_OPND_LIMM:
4918 po_misc_or_fail (parse_shifter_operand (&str, info,
4919 SHIFTED_LOGIC_IMM));
4920 if (info->shifter.operator_present)
4921 {
4922 set_fatal_syntax_error
4923 (_("shift not allowed for bitmask immediate"));
4924 goto failure;
4925 }
4926 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4927 /* addr_off_p */ 0,
4928 /* need_libopcodes_p */ 1,
4929 /* skip_p */ 1);
4930 break;
4931
4932 case AARCH64_OPND_AIMM:
4933 if (opcode->op == OP_ADD)
4934 /* ADD may have relocation types. */
4935 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
4936 SHIFTED_ARITH_IMM));
4937 else
4938 po_misc_or_fail (parse_shifter_operand (&str, info,
4939 SHIFTED_ARITH_IMM));
4940 switch (inst.reloc.type)
4941 {
4942 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4943 info->shifter.amount = 12;
4944 break;
4945 case BFD_RELOC_UNUSED:
4946 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4947 if (info->shifter.kind != AARCH64_MOD_NONE)
4948 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
4949 inst.reloc.pc_rel = 0;
4950 break;
4951 default:
4952 break;
4953 }
4954 info->imm.value = 0;
4955 if (!info->shifter.operator_present)
4956 {
4957 /* Default to LSL if not present. Libopcodes prefers shifter
4958 kind to be explicit. */
4959 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4960 info->shifter.kind = AARCH64_MOD_LSL;
4961 }
4962 break;
4963
4964 case AARCH64_OPND_HALF:
4965 {
4966 /* #<imm16> or relocation. */
4967 int internal_fixup_p;
4968 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
4969 if (internal_fixup_p)
4970 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4971 skip_whitespace (str);
4972 if (skip_past_comma (&str))
4973 {
4974 /* {, LSL #<shift>} */
4975 if (! aarch64_gas_internal_fixup_p ())
4976 {
4977 set_fatal_syntax_error (_("can't mix relocation modifier "
4978 "with explicit shift"));
4979 goto failure;
4980 }
4981 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
4982 }
4983 else
4984 inst.base.operands[i].shifter.amount = 0;
4985 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
4986 inst.base.operands[i].imm.value = 0;
4987 if (! process_movw_reloc_info ())
4988 goto failure;
4989 }
4990 break;
4991
4992 case AARCH64_OPND_EXCEPTION:
4993 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
4994 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4995 /* addr_off_p */ 0,
4996 /* need_libopcodes_p */ 0,
4997 /* skip_p */ 1);
4998 break;
4999
5000 case AARCH64_OPND_NZCV:
5001 {
5002 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5003 if (nzcv != NULL)
5004 {
5005 str += 4;
5006 info->imm.value = nzcv->value;
5007 break;
5008 }
5009 po_imm_or_fail (0, 15);
5010 info->imm.value = val;
5011 }
5012 break;
5013
5014 case AARCH64_OPND_COND:
5015 case AARCH64_OPND_COND1:
5016 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
5017 str += 2;
5018 if (info->cond == NULL)
5019 {
5020 set_syntax_error (_("invalid condition"));
5021 goto failure;
5022 }
5023 else if (operands[i] == AARCH64_OPND_COND1
5024 && (info->cond->value & 0xe) == 0xe)
5025 {
5026 /* Do not allow AL or NV. */
5027 set_default_error ();
5028 goto failure;
5029 }
5030 break;
5031
5032 case AARCH64_OPND_ADDR_ADRP:
5033 po_misc_or_fail (parse_adrp (&str));
5034 /* Clear the value as operand needs to be relocated. */
5035 info->imm.value = 0;
5036 break;
5037
5038 case AARCH64_OPND_ADDR_PCREL14:
5039 case AARCH64_OPND_ADDR_PCREL19:
5040 case AARCH64_OPND_ADDR_PCREL21:
5041 case AARCH64_OPND_ADDR_PCREL26:
5042 po_misc_or_fail (parse_address_reloc (&str, info));
5043 if (!info->addr.pcrel)
5044 {
5045 set_syntax_error (_("invalid pc-relative address"));
5046 goto failure;
5047 }
5048 if (inst.gen_lit_pool
5049 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5050 {
5051 /* Only permit "=value" in the literal load instructions.
5052 The literal will be generated by programmer_friendly_fixup. */
5053 set_syntax_error (_("invalid use of \"=immediate\""));
5054 goto failure;
5055 }
5056 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5057 {
5058 set_syntax_error (_("unrecognized relocation suffix"));
5059 goto failure;
5060 }
5061 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5062 {
5063 info->imm.value = inst.reloc.exp.X_add_number;
5064 inst.reloc.type = BFD_RELOC_UNUSED;
5065 }
5066 else
5067 {
5068 info->imm.value = 0;
5069 if (inst.reloc.type == BFD_RELOC_UNUSED)
5070 switch (opcode->iclass)
5071 {
5072 case compbranch:
5073 case condbranch:
5074 /* e.g. CBZ or B.COND */
5075 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5076 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5077 break;
5078 case testbranch:
5079 /* e.g. TBZ */
5080 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5081 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5082 break;
5083 case branch_imm:
5084 /* e.g. B or BL */
5085 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5086 inst.reloc.type =
5087 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5088 : BFD_RELOC_AARCH64_JUMP26;
5089 break;
5090 case loadlit:
5091 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5092 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5093 break;
5094 case pcreladdr:
5095 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5096 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5097 break;
5098 default:
5099 gas_assert (0);
5100 abort ();
5101 }
5102 inst.reloc.pc_rel = 1;
5103 }
5104 break;
5105
5106 case AARCH64_OPND_ADDR_SIMPLE:
5107 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5108 /* [<Xn|SP>{, #<simm>}] */
5109 po_char_or_fail ('[');
5110 po_reg_or_fail (REG_TYPE_R64_SP);
5111 /* Accept optional ", #0". */
5112 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5113 && skip_past_char (&str, ','))
5114 {
5115 skip_past_char (&str, '#');
5116 if (! skip_past_char (&str, '0'))
5117 {
5118 set_fatal_syntax_error
5119 (_("the optional immediate offset can only be 0"));
5120 goto failure;
5121 }
5122 }
5123 po_char_or_fail (']');
5124 info->addr.base_regno = val;
5125 break;
5126
5127 case AARCH64_OPND_ADDR_REGOFF:
5128 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5129 po_misc_or_fail (parse_address (&str, info, 0));
5130 if (info->addr.pcrel || !info->addr.offset.is_reg
5131 || !info->addr.preind || info->addr.postind
5132 || info->addr.writeback)
5133 {
5134 set_syntax_error (_("invalid addressing mode"));
5135 goto failure;
5136 }
5137 if (!info->shifter.operator_present)
5138 {
5139 /* Default to LSL if not present. Libopcodes prefers shifter
5140 kind to be explicit. */
5141 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5142 info->shifter.kind = AARCH64_MOD_LSL;
5143 }
5144 /* Qualifier to be deduced by libopcodes. */
5145 break;
5146
5147 case AARCH64_OPND_ADDR_SIMM7:
5148 po_misc_or_fail (parse_address (&str, info, 0));
5149 if (info->addr.pcrel || info->addr.offset.is_reg
5150 || (!info->addr.preind && !info->addr.postind))
5151 {
5152 set_syntax_error (_("invalid addressing mode"));
5153 goto failure;
5154 }
5155 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5156 /* addr_off_p */ 1,
5157 /* need_libopcodes_p */ 1,
5158 /* skip_p */ 0);
5159 break;
5160
5161 case AARCH64_OPND_ADDR_SIMM9:
5162 case AARCH64_OPND_ADDR_SIMM9_2:
5163 po_misc_or_fail (parse_address_reloc (&str, info));
5164 if (info->addr.pcrel || info->addr.offset.is_reg
5165 || (!info->addr.preind && !info->addr.postind)
5166 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5167 && info->addr.writeback))
5168 {
5169 set_syntax_error (_("invalid addressing mode"));
5170 goto failure;
5171 }
5172 if (inst.reloc.type != BFD_RELOC_UNUSED)
5173 {
5174 set_syntax_error (_("relocation not allowed"));
5175 goto failure;
5176 }
5177 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5178 /* addr_off_p */ 1,
5179 /* need_libopcodes_p */ 1,
5180 /* skip_p */ 0);
5181 break;
5182
5183 case AARCH64_OPND_ADDR_UIMM12:
5184 po_misc_or_fail (parse_address_reloc (&str, info));
5185 if (info->addr.pcrel || info->addr.offset.is_reg
5186 || !info->addr.preind || info->addr.writeback)
5187 {
5188 set_syntax_error (_("invalid addressing mode"));
5189 goto failure;
5190 }
5191 if (inst.reloc.type == BFD_RELOC_UNUSED)
5192 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5193 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5194 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5195 /* Leave qualifier to be determined by libopcodes. */
5196 break;
5197
5198 case AARCH64_OPND_SIMD_ADDR_POST:
5199 /* [<Xn|SP>], <Xm|#<amount>> */
5200 po_misc_or_fail (parse_address (&str, info, 1));
5201 if (!info->addr.postind || !info->addr.writeback)
5202 {
5203 set_syntax_error (_("invalid addressing mode"));
5204 goto failure;
5205 }
5206 if (!info->addr.offset.is_reg)
5207 {
5208 if (inst.reloc.exp.X_op == O_constant)
5209 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5210 else
5211 {
5212 set_fatal_syntax_error
5213 (_("writeback value should be an immediate constant"));
5214 goto failure;
5215 }
5216 }
5217 /* No qualifier. */
5218 break;
5219
5220 case AARCH64_OPND_SYSREG:
5221 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
5222 == PARSE_FAIL)
5223 {
5224 set_syntax_error (_("unknown or missing system register name"));
5225 goto failure;
5226 }
5227 inst.base.operands[i].sysreg = val;
5228 break;
5229
5230 case AARCH64_OPND_PSTATEFIELD:
5231 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
5232 == PARSE_FAIL)
5233 {
5234 set_syntax_error (_("unknown or missing PSTATE field name"));
5235 goto failure;
5236 }
5237 inst.base.operands[i].pstatefield = val;
5238 break;
5239
5240 case AARCH64_OPND_SYSREG_IC:
5241 inst.base.operands[i].sysins_op =
5242 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5243 goto sys_reg_ins;
5244 case AARCH64_OPND_SYSREG_DC:
5245 inst.base.operands[i].sysins_op =
5246 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5247 goto sys_reg_ins;
5248 case AARCH64_OPND_SYSREG_AT:
5249 inst.base.operands[i].sysins_op =
5250 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5251 goto sys_reg_ins;
5252 case AARCH64_OPND_SYSREG_TLBI:
5253 inst.base.operands[i].sysins_op =
5254 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5255 sys_reg_ins:
5256 if (inst.base.operands[i].sysins_op == NULL)
5257 {
5258 set_fatal_syntax_error ( _("unknown or missing operation name"));
5259 goto failure;
5260 }
5261 break;
5262
5263 case AARCH64_OPND_BARRIER:
5264 case AARCH64_OPND_BARRIER_ISB:
5265 val = parse_barrier (&str);
5266 if (val != PARSE_FAIL
5267 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5268 {
5269 /* ISB only accepts the option name 'sy'. */
5270 set_syntax_error
5271 (_("the specified option is not accepted in ISB"));
5272 /* Turn off backtrack as this optional operand is present. */
5273 backtrack_pos = 0;
5274 goto failure;
5275 }
5276 /* This is an extension to accept a 0..15 immediate. */
5277 if (val == PARSE_FAIL)
5278 po_imm_or_fail (0, 15);
5279 info->barrier = aarch64_barrier_options + val;
5280 break;
5281
5282 case AARCH64_OPND_PRFOP:
5283 val = parse_pldop (&str);
5284 /* This is an extension to accept a 0..31 immediate. */
5285 if (val == PARSE_FAIL)
5286 po_imm_or_fail (0, 31);
5287 inst.base.operands[i].prfop = aarch64_prfops + val;
5288 break;
5289
5290 default:
5291 as_fatal (_("unhandled operand code %d"), operands[i]);
5292 }
5293
5294 /* If we get here, this operand was successfully parsed. */
5295 inst.base.operands[i].present = 1;
5296 continue;
5297
5298 failure:
5299 /* The parse routine should already have set the error, but in case
5300 not, set a default one here. */
5301 if (! error_p ())
5302 set_default_error ();
5303
5304 if (! backtrack_pos)
5305 goto parse_operands_return;
5306
5307 {
5308 /* We reach here because this operand is marked as optional, and
5309 either no operand was supplied or the operand was supplied but it
5310 was syntactically incorrect. In the latter case we report an
5311 error. In the former case we perform a few more checks before
5312 dropping through to the code to insert the default operand. */
5313
5314 char *tmp = backtrack_pos;
5315 char endchar = END_OF_INSN;
5316
5317 if (i != (aarch64_num_of_operands (opcode) - 1))
5318 endchar = ',';
5319 skip_past_char (&tmp, ',');
5320
5321 if (*tmp != endchar)
5322 /* The user has supplied an operand in the wrong format. */
5323 goto parse_operands_return;
5324
5325 /* Make sure there is not a comma before the optional operand.
5326 For example, the fifth operand of 'sys' is optional:
5327
5328 sys #0,c0,c0,#0, <--- wrong
5329 sys #0,c0,c0,#0 <--- correct. */
5330 if (comma_skipped_p && i && endchar == END_OF_INSN)
5331 {
5332 set_fatal_syntax_error
5333 (_("unexpected comma before the omitted optional operand"));
5334 goto parse_operands_return;
5335 }
5336 }
5337
5338 /* Reaching here means we are dealing with an optional operand that is
5339 omitted from the assembly line. */
5340 gas_assert (optional_operand_p (opcode, i));
5341 info->present = 0;
5342 process_omitted_operand (operands[i], opcode, i, info);
5343
5344 /* Try again, skipping the optional operand at backtrack_pos. */
5345 str = backtrack_pos;
5346 backtrack_pos = 0;
5347
5348 /* Clear any error record after the omitted optional operand has been
5349 successfully handled. */
5350 clear_error ();
5351 }
5352
5353 /* Check if we have parsed all the operands. */
5354 if (*str != '\0' && ! error_p ())
5355 {
5356 /* Set I to the index of the last present operand; this is
5357 for the purpose of diagnostics. */
5358 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5359 ;
5360 set_fatal_syntax_error
5361 (_("unexpected characters following instruction"));
5362 }
5363
5364 parse_operands_return:
5365
5366 if (error_p ())
5367 {
5368 DEBUG_TRACE ("parsing FAIL: %s - %s",
5369 operand_mismatch_kind_names[get_error_kind ()],
5370 get_error_message ());
5371 /* Record the operand error properly; this is useful when there
5372 are multiple instruction templates for a mnemonic name, so that
5373 later on, we can select the error that most closely describes
5374 the problem. */
5375 record_operand_error (opcode, i, get_error_kind (),
5376 get_error_message ());
5377 return FALSE;
5378 }
5379 else
5380 {
5381 DEBUG_TRACE ("parsing SUCCESS");
5382 return TRUE;
5383 }
5384 }
5385
5386 /* Carry out some fix-ups to provide programmer-friendly features while
5387 keeping libopcodes happy, i.e. libopcodes only accepts
5388 the preferred architectural syntax.
5389 Return FALSE if there is any failure; otherwise return TRUE. */
5390
5391 static bfd_boolean
5392 programmer_friendly_fixup (aarch64_instruction *instr)
5393 {
5394 aarch64_inst *base = &instr->base;
5395 const aarch64_opcode *opcode = base->opcode;
5396 enum aarch64_op op = opcode->op;
5397 aarch64_opnd_info *operands = base->operands;
5398
5399 DEBUG_TRACE ("enter");
5400
5401 switch (opcode->iclass)
5402 {
5403 case testbranch:
5404 /* TBNZ Xn|Wn, #uimm6, label
5405 Test and Branch Not Zero: conditionally jumps to label if bit number
5406 uimm6 in register Xn is not zero. The bit number implies the width of
5407 the register, which may be written and should be disassembled as Wn if
5408 uimm is less than 32. */
5409 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5410 {
5411 if (operands[1].imm.value >= 32)
5412 {
5413 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5414 0, 31);
5415 return FALSE;
5416 }
5417 operands[0].qualifier = AARCH64_OPND_QLF_X;
5418 }
5419 break;
5420 case loadlit:
5421 /* LDR Wt, label | =value
5422 As a convenience, assemblers will typically permit the notation
5423 "=value" in conjunction with the pc-relative literal load instructions
5424 to automatically place an immediate value or symbolic address in a
5425 nearby literal pool and generate a hidden label which references it.
5426 ISREG has been set to 0 in the case of =value. */
5427 if (instr->gen_lit_pool
5428 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5429 {
5430 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5431 if (op == OP_LDRSW_LIT)
5432 size = 4;
5433 if (instr->reloc.exp.X_op != O_constant
5434 && instr->reloc.exp.X_op != O_big
5435 && instr->reloc.exp.X_op != O_symbol)
5436 {
5437 record_operand_error (opcode, 1,
5438 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5439 _("constant expression expected"));
5440 return FALSE;
5441 }
5442 if (! add_to_lit_pool (&instr->reloc.exp, size))
5443 {
5444 record_operand_error (opcode, 1,
5445 AARCH64_OPDE_OTHER_ERROR,
5446 _("literal pool insertion failed"));
5447 return FALSE;
5448 }
5449 }
5450 break;
5451 case log_shift:
5452 case bitfield:
5453 /* UXT[BHW] Wd, Wn
5454 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5455 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5456 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5457 A programmer-friendly assembler should accept a destination Xd in
5458 place of Wd, however that is not the preferred form for disassembly.
5459 */
5460 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5461 && operands[1].qualifier == AARCH64_OPND_QLF_W
5462 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5463 operands[0].qualifier = AARCH64_OPND_QLF_W;
5464 break;
5465
5466 case addsub_ext:
5467 {
5468 /* In the 64-bit form, the final register operand is written as Wm
5469 for all but the (possibly omitted) UXTX/LSL and SXTX
5470 operators.
5471 As a programmer-friendly assembler, we accept e.g.
5472 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5473 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5474 int idx = aarch64_operand_index (opcode->operands,
5475 AARCH64_OPND_Rm_EXT);
5476 gas_assert (idx == 1 || idx == 2);
5477 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5478 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5479 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5480 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5481 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5482 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5483 }
5484 break;
5485
5486 default:
5487 break;
5488 }
5489
5490 DEBUG_TRACE ("exit with SUCCESS");
5491 return TRUE;
5492 }
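
/* Two concrete instances of the fix-ups above:
     uxtb x1, w2               is accepted and encoded as  uxtb w1, w2
     adds x0, x1, x2, uxtb #2  is accepted and encoded as  adds x0, x1, w2, uxtb #2  */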
5493
5494 /* A wrapper function to interface with libopcodes on encoding and
5495 record the error message if there is any.
5496
5497 Return TRUE on success; otherwise return FALSE. */
5498
5499 static bfd_boolean
5500 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5501 aarch64_insn *code)
5502 {
5503 aarch64_operand_error error_info;
5504 error_info.kind = AARCH64_OPDE_NIL;
5505 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5506 return TRUE;
5507 else
5508 {
5509 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5510 record_operand_error_info (opcode, &error_info);
5511 return FALSE;
5512 }
5513 }
5514
5515 #ifdef DEBUG_AARCH64
5516 static inline void
5517 dump_opcode_operands (const aarch64_opcode *opcode)
5518 {
5519 int i = 0;
5520 while (opcode->operands[i] != AARCH64_OPND_NIL)
5521 {
5522 aarch64_verbose ("\t\t opnd%d: %s", i,
5523 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5524 ? aarch64_get_operand_name (opcode->operands[i])
5525 : aarch64_get_operand_desc (opcode->operands[i]));
5526 ++i;
5527 }
5528 }
5529 #endif /* DEBUG_AARCH64 */
5530
5531 /* This is the guts of the machine-dependent assembler. STR points to a
5532 machine-dependent instruction. This function is supposed to emit
5533 the frags/bytes it assembles to. */
5534
5535 void
5536 md_assemble (char *str)
5537 {
5538 char *p = str;
5539 templates *template;
5540 aarch64_opcode *opcode;
5541 aarch64_inst *inst_base;
5542 unsigned saved_cond;
5543
5544 /* Align the previous label if needed. */
5545 if (last_label_seen != NULL)
5546 {
5547 symbol_set_frag (last_label_seen, frag_now);
5548 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5549 S_SET_SEGMENT (last_label_seen, now_seg);
5550 }
5551
5552 inst.reloc.type = BFD_RELOC_UNUSED;
5553
5554 DEBUG_TRACE ("\n\n");
5555 DEBUG_TRACE ("==============================");
5556 DEBUG_TRACE ("Enter md_assemble with %s", str);
5557
5558 template = opcode_lookup (&p);
5559 if (!template)
5560 {
5561 /* It wasn't an instruction, but it might be a register alias created
5562 by an "alias .req reg" directive. */
5563 if (!create_register_alias (str, p))
5564 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5565 str);
5566 return;
5567 }
5568
5569 skip_whitespace (p);
5570 if (*p == ',')
5571 {
5572 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5573 get_mnemonic_name (str), str);
5574 return;
5575 }
5576
5577 init_operand_error_report ();
5578
5579 saved_cond = inst.cond;
5580 reset_aarch64_instruction (&inst);
5581 inst.cond = saved_cond;
5582
5583 /* Iterate through all opcode entries with the same mnemonic name. */
5584 do
5585 {
5586 opcode = template->opcode;
5587
5588 DEBUG_TRACE ("opcode %s found", opcode->name);
5589 #ifdef DEBUG_AARCH64
5590 if (debug_dump)
5591 dump_opcode_operands (opcode);
5592 #endif /* DEBUG_AARCH64 */
5593
5594 mapping_state (MAP_INSN);
5595
5596 inst_base = &inst.base;
5597 inst_base->opcode = opcode;
5598
5599 /* Truly conditionally executed instructions, e.g. b.cond. */
5600 if (opcode->flags & F_COND)
5601 {
5602 gas_assert (inst.cond != COND_ALWAYS);
5603 inst_base->cond = get_cond_from_value (inst.cond);
5604 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5605 }
5606 else if (inst.cond != COND_ALWAYS)
5607 {
5608 /* We shouldn't arrive here: the assembly looks like a
5609 conditional instruction but the opcode found is unconditional. */
5610 gas_assert (0);
5611 continue;
5612 }
5613
5614 if (parse_operands (p, opcode)
5615 && programmer_friendly_fixup (&inst)
5616 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5617 {
5618 /* Check that this instruction is supported for this CPU. */
5619 if (!opcode->avariant
5620 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5621 {
5622 as_bad (_("selected processor does not support `%s'"), str);
5623 return;
5624 }
5625
5626 if (inst.reloc.type == BFD_RELOC_UNUSED
5627 || !inst.reloc.need_libopcodes_p)
5628 output_inst (NULL);
5629 else
5630 {
5631 /* If there is relocation generated for the instruction,
5632 store the instruction information for the future fix-up. */
5633 struct aarch64_inst *copy;
5634 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5635 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5636 abort ();
5637 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5638 output_inst (copy);
5639 }
5640 return;
5641 }
5642
5643 template = template->next;
5644 if (template != NULL)
5645 {
5646 reset_aarch64_instruction (&inst);
5647 inst.cond = saved_cond;
5648 }
5649 }
5650 while (template != NULL);
5651
5652 /* Issue the error messages if any. */
5653 output_operand_error_report (str);
5654 }
5655
5656 /* Various frobbings of labels and their addresses. */
5657
5658 void
5659 aarch64_start_line_hook (void)
5660 {
5661 last_label_seen = NULL;
5662 }
5663
5664 void
5665 aarch64_frob_label (symbolS * sym)
5666 {
5667 last_label_seen = sym;
5668
5669 dwarf2_emit_label (sym);
5670 }
5671
5672 int
5673 aarch64_data_in_code (void)
5674 {
5675 if (!strncmp (input_line_pointer + 1, "data:", 5))
5676 {
5677 *input_line_pointer = '/';
5678 input_line_pointer += 5;
5679 *input_line_pointer = 0;
5680 return 1;
5681 }
5682
5683 return 0;
5684 }
5685
5686 char *
5687 aarch64_canonicalize_symbol_name (char *name)
5688 {
5689 int len;
5690
5691 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5692 *(name + len - 5) = 0;
5693
5694 return name;
5695 }
5696 \f
5697 /* Table of all register names defined by default. The user can
5698 define additional names with .req. Note that all register names
5699 should appear in both upper and lowercase variants. Some registers
5700 also have mixed-case names. */
5701
5702 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5703 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5704 #define REGSET31(p,t) \
5705 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5706 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5707 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5708 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5709 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5710 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5711 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5712 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5713 #define REGSET(p,t) \
5714 REGSET31(p,t), REGNUM(p,31,t)
5715
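/* For instance, REGSET31 (x, R_64) expands to the thirty-one entries
   { "x0", 0, REG_TYPE_R_64, TRUE } ... { "x30", 30, REG_TYPE_R_64, TRUE },
   and REGSET (c, CN) additionally defines the entry for "c31".  */
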
5716 /* These go into aarch64_reg_hsh hash-table. */
5717 static const reg_entry reg_names[] = {
5718 /* Integer registers. */
5719 REGSET31 (x, R_64), REGSET31 (X, R_64),
5720 REGSET31 (w, R_32), REGSET31 (W, R_32),
5721
5722 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5723 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5724
5725 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5726 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5727
5728 /* Coprocessor register numbers. */
5729 REGSET (c, CN), REGSET (C, CN),
5730
5731 /* Floating-point single precision registers. */
5732 REGSET (s, FP_S), REGSET (S, FP_S),
5733
5734 /* Floating-point double precision registers. */
5735 REGSET (d, FP_D), REGSET (D, FP_D),
5736
5737 /* Floating-point half precision registers. */
5738 REGSET (h, FP_H), REGSET (H, FP_H),
5739
5740 /* Floating-point byte precision registers. */
5741 REGSET (b, FP_B), REGSET (B, FP_B),
5742
5743 /* Floating-point quad precision registers. */
5744 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5745
5746 /* FP/SIMD registers. */
5747 REGSET (v, VN), REGSET (V, VN),
5748 };
5749
5750 #undef REGDEF
5751 #undef REGNUM
5752 #undef REGSET
5753
5754 #define N 1
5755 #define n 0
5756 #define Z 1
5757 #define z 0
5758 #define C 1
5759 #define c 0
5760 #define V 1
5761 #define v 0
5762 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5763 static const asm_nzcv nzcv_names[] = {
5764 {"nzcv", B (n, z, c, v)},
5765 {"nzcV", B (n, z, c, V)},
5766 {"nzCv", B (n, z, C, v)},
5767 {"nzCV", B (n, z, C, V)},
5768 {"nZcv", B (n, Z, c, v)},
5769 {"nZcV", B (n, Z, c, V)},
5770 {"nZCv", B (n, Z, C, v)},
5771 {"nZCV", B (n, Z, C, V)},
5772 {"Nzcv", B (N, z, c, v)},
5773 {"NzcV", B (N, z, c, V)},
5774 {"NzCv", B (N, z, C, v)},
5775 {"NzCV", B (N, z, C, V)},
5776 {"NZcv", B (N, Z, c, v)},
5777 {"NZcV", B (N, Z, c, V)},
5778 {"NZCv", B (N, Z, C, v)},
5779 {"NZCV", B (N, Z, C, V)}
5780 };
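
/* For example, "NzCv" maps to B (N, z, C, v) = (1 << 3) | (1 << 1) = 0xa,
   i.e. a condition-flag immediate with just the N and C bits set.  */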
5781
5782 #undef N
5783 #undef n
5784 #undef Z
5785 #undef z
5786 #undef C
5787 #undef c
5788 #undef V
5789 #undef v
5790 #undef B
5791 \f
5792 /* MD interface: bits in the object file. */
5793
5794 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5795 for use in the a.out file, and stores them in the array pointed to by buf.
5796 This knows about the endian-ness of the target machine and does
5797 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
5798 2 (short) and 4 (long). Floating numbers are put out as a series of
5799 LITTLENUMS (shorts, here at least). */
5800
5801 void
5802 md_number_to_chars (char *buf, valueT val, int n)
5803 {
5804 if (target_big_endian)
5805 number_to_chars_bigendian (buf, val, n);
5806 else
5807 number_to_chars_littleendian (buf, val, n);
5808 }
5809
5810 /* MD interface: Sections. */
5811
5812 /* Estimate the size of a frag before relaxing. Assume everything fits in
5813 4 bytes. */
5814
5815 int
5816 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5817 {
5818 fragp->fr_var = 4;
5819 return 4;
5820 }
5821
5822 /* Round up a section size to the appropriate boundary. */
5823
5824 valueT
5825 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5826 {
5827 return size;
5828 }
5829
5830 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5831 of an rs_align_code fragment. */
5832
5833 void
5834 aarch64_handle_align (fragS * fragP)
5835 {
5836 /* NOP = d503201f */
5837 /* AArch64 instructions are always little-endian. */
5838 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5839
5840 int bytes, fix, noop_size;
5841 char *p;
5842 const char *noop;
5843
5844 if (fragP->fr_type != rs_align_code)
5845 return;
5846
5847 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5848 p = fragP->fr_literal + fragP->fr_fix;
5849 fix = 0;
5850
5851 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
5852 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
5853
5854 #ifdef OBJ_ELF
5855 gas_assert (fragP->tc_frag_data.recorded);
5856 #endif
5857
5858 noop = aarch64_noop;
5859 noop_size = sizeof (aarch64_noop);
5860 fragP->fr_var = noop_size;
5861
5862 if (bytes & (noop_size - 1))
5863 {
5864 fix = bytes & (noop_size - 1);
5865 #ifdef OBJ_ELF
5866 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
5867 #endif
5868 memset (p, 0, fix);
5869 p += fix;
5870 bytes -= fix;
5871 }
5872
5873 while (bytes >= noop_size)
5874 {
5875 memcpy (p, noop, noop_size);
5876 p += noop_size;
5877 bytes -= noop_size;
5878 fix += noop_size;
5879 }
5880
5881 fragP->fr_fix += fix;
5882 }
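
/* For example, when aligning code to 8 bytes with 6 bytes to fill, the two
   leading bytes are zero-filled (and, under ELF, marked with a data mapping
   symbol) and the remaining four bytes become a single NOP.  */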
5883
5884 /* Called from md_do_align. Used to create an alignment
5885 frag in a code section. */
5886
5887 void
5888 aarch64_frag_align_code (int n, int max)
5889 {
5890 char *p;
5891
5892 /* We assume that there will never be a requirement to support
5893 alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
5894 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
5895 as_fatal (_
5896 ("alignments greater than %d bytes not supported in .text sections"),
5897 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
5898
5899 p = frag_var (rs_align_code,
5900 MAX_MEM_FOR_RS_ALIGN_CODE,
5901 1,
5902 (relax_substateT) max,
5903 (symbolS *) NULL, (offsetT) n, (char *) NULL);
5904 *p = 0;
5905 }
5906
5907 /* Perform target specific initialisation of a frag.
5908 Note - despite the name this initialisation is not done when the frag
5909 is created, but only when its type is assigned. A frag can be created
5910 and used a long time before its type is set, so beware of assuming that
5911 this initialisation is performed first. */
5912
5913 #ifndef OBJ_ELF
5914 void
5915 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
5916 int max_chars ATTRIBUTE_UNUSED)
5917 {
5918 }
5919
5920 #else /* OBJ_ELF is defined. */
5921 void
5922 aarch64_init_frag (fragS * fragP, int max_chars)
5923 {
5924 /* Record a mapping symbol for alignment frags. We will delete this
5925 later if the alignment ends up empty. */
5926 if (!fragP->tc_frag_data.recorded)
5927 {
5928 fragP->tc_frag_data.recorded = 1;
5929 switch (fragP->fr_type)
5930 {
5931 case rs_align:
5932 case rs_align_test:
5933 case rs_fill:
5934 mapping_state_2 (MAP_DATA, max_chars);
5935 break;
5936 case rs_align_code:
5937 mapping_state_2 (MAP_INSN, max_chars);
5938 break;
5939 default:
5940 break;
5941 }
5942 }
5943 }
5944 \f
5945 /* Initialize the DWARF-2 unwind information for this procedure. */
5946
5947 void
5948 tc_aarch64_frame_initial_instructions (void)
5949 {
5950 cfi_add_CFA_def_cfa (REG_SP, 0);
5951 }
5952 #endif /* OBJ_ELF */
5953
5954 /* Convert REGNAME to a DWARF-2 register number. */
5955
5956 int
5957 tc_aarch64_regname_to_dw2regnum (char *regname)
5958 {
5959 const reg_entry *reg = parse_reg (&regname);
5960 if (reg == NULL)
5961 return -1;
5962
5963 switch (reg->type)
5964 {
5965 case REG_TYPE_SP_32:
5966 case REG_TYPE_SP_64:
5967 case REG_TYPE_R_32:
5968 case REG_TYPE_R_64:
5969 case REG_TYPE_FP_B:
5970 case REG_TYPE_FP_H:
5971 case REG_TYPE_FP_S:
5972 case REG_TYPE_FP_D:
5973 case REG_TYPE_FP_Q:
5974 return reg->number;
5975 default:
5976 break;
5977 }
5978 return -1;
5979 }
5980
5981 /* Implement DWARF2_ADDR_SIZE. */
5982
5983 int
5984 aarch64_dwarf2_addr_size (void)
5985 {
5986 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
5987 if (ilp32_p)
5988 return 4;
5989 #endif
5990 return bfd_arch_bits_per_address (stdoutput) / 8;
5991 }
5992
5993 /* MD interface: Symbol and relocation handling. */
5994
5995 /* Return the address within the segment that a PC-relative fixup is
5996 relative to. For AArch64, PC-relative fixups applied to instructions
5997 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
5998
5999 long
6000 md_pcrel_from_section (fixS * fixP, segT seg)
6001 {
6002 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
6003
6004 /* If this is pc-relative and we are going to emit a relocation
6005 then we just want to put out any pipeline compensation that the linker
6006 will need. Otherwise we want to use the calculated base. */
6007 if (fixP->fx_pcrel
6008 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
6009 || aarch64_force_relocation (fixP)))
6010 base = 0;
6011
6012 /* AArch64 should be consistent for all pc-relative relocations. */
6013 return base + AARCH64_PCREL_OFFSET;
6014 }
6015
6016 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
6017 Otherwise we have no need to default values of symbols. */
6018
6019 symbolS *
6020 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
6021 {
6022 #ifdef OBJ_ELF
6023 if (name[0] == '_' && name[1] == 'G'
6024 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
6025 {
6026 if (!GOT_symbol)
6027 {
6028 if (symbol_find (name))
6029 as_bad (_("GOT already in the symbol table"));
6030
6031 GOT_symbol = symbol_new (name, undefined_section,
6032 (valueT) 0, &zero_address_frag);
6033 }
6034
6035 return GOT_symbol;
6036 }
6037 #endif
6038
6039 return 0;
6040 }
6041
6042 /* Return non-zero if the indicated VALUE has overflowed the maximum
6043 range expressible by an unsigned number with the indicated number of
6044 BITS. */
6045
6046 static bfd_boolean
6047 unsigned_overflow (valueT value, unsigned bits)
6048 {
6049 valueT lim;
6050 if (bits >= sizeof (valueT) * 8)
6051 return FALSE;
6052 lim = (valueT) 1 << bits;
6053 return (value >= lim);
6054 }
6055
6056
6057 /* Return non-zero if the indicated VALUE has overflowed the maximum
6058 range expressible by a signed number with the indicated number of
6059 BITS. */
6060
6061 static bfd_boolean
6062 signed_overflow (offsetT value, unsigned bits)
6063 {
6064 offsetT lim;
6065 if (bits >= sizeof (offsetT) * 8)
6066 return FALSE;
6067 lim = (offsetT) 1 << (bits - 1);
6068 return (value < -lim || value >= lim);
6069 }
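/* Worked example: with BITS == 12, unsigned_overflow accepts 0..4095 and
   returns TRUE for 4096, while signed_overflow computes lim == 1 << 11 and
   accepts -2048..2047, returning TRUE for 2048 or -2049.  */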
6070
6071 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6072 unsigned immediate offset load/store instruction, try to encode it as
6073 an unscaled, 9-bit, signed immediate offset load/store instruction.
6074 Return TRUE if it is successful; otherwise return FALSE.
6075
6076 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
6077 in response to the standard LDR/STR mnemonics when the immediate offset is
6078 unambiguous, i.e. when it is negative or unaligned. */
6079
6080 static bfd_boolean
6081 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6082 {
6083 int idx;
6084 enum aarch64_op new_op;
6085 const aarch64_opcode *new_opcode;
6086
6087 gas_assert (instr->opcode->iclass == ldst_pos);
6088
6089 switch (instr->opcode->op)
6090 {
6091 case OP_LDRB_POS:new_op = OP_LDURB; break;
6092 case OP_STRB_POS: new_op = OP_STURB; break;
6093 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6094 case OP_LDRH_POS: new_op = OP_LDURH; break;
6095 case OP_STRH_POS: new_op = OP_STURH; break;
6096 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6097 case OP_LDR_POS: new_op = OP_LDUR; break;
6098 case OP_STR_POS: new_op = OP_STUR; break;
6099 case OP_LDRF_POS: new_op = OP_LDURV; break;
6100 case OP_STRF_POS: new_op = OP_STURV; break;
6101 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6102 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6103 default: new_op = OP_NIL; break;
6104 }
6105
6106 if (new_op == OP_NIL)
6107 return FALSE;
6108
6109 new_opcode = aarch64_get_opcode (new_op);
6110 gas_assert (new_opcode != NULL);
6111
6112 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6113 instr->opcode->op, new_opcode->op);
6114
6115 aarch64_replace_opcode (instr, new_opcode);
6116
6117 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6118 qualifier matching may fail because the out-of-date qualifier will
6119 prevent the operand being updated with a new and correct qualifier. */
6120 idx = aarch64_operand_index (instr->opcode->operands,
6121 AARCH64_OPND_ADDR_SIMM9);
6122 gas_assert (idx == 1);
6123 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6124
6125 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6126
6127 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6128 return FALSE;
6129
6130 return TRUE;
6131 }
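/* Illustrative examples of the conversion described above (hypothetical
   source lines):

     ldr x0, [x1, #-8]   // negative offset
     ldr x0, [x1, #3]    // not a multiple of the 8-byte transfer size

   Neither offset fits the scaled, unsigned LDR encoding, so such accesses
   can only be emitted as LDUR, which takes a 9-bit signed byte offset.  */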
6132
6133 /* Called by fix_insn to fix a MOV immediate alias instruction.
6134
6135 The operand is that of a generic move immediate instruction: an alias
6136 that generates a single MOVZ, MOVN or ORR instruction to load a
6137 32-bit/64-bit immediate value into a general register. An assembler error
6138 shall result if the immediate cannot be created by a single one of these
6139 instructions. If there is a choice, then to ensure reversibility an
6140 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
6141
6142 static void
6143 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6144 {
6145 const aarch64_opcode *opcode;
6146
6147 /* Need to check if the destination is SP/ZR. The check has to be done
6148 before any aarch64_replace_opcode. */
6149 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6150 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6151
6152 instr->operands[1].imm.value = value;
6153 instr->operands[1].skip = 0;
6154
6155 if (try_mov_wide_p)
6156 {
6157 /* Try the MOVZ alias. */
6158 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6159 aarch64_replace_opcode (instr, opcode);
6160 if (aarch64_opcode_encode (instr->opcode, instr,
6161 &instr->value, NULL, NULL))
6162 {
6163 put_aarch64_insn (buf, instr->value);
6164 return;
6165 }
6166 /* Try the MOVN alias. */
6167 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6168 aarch64_replace_opcode (instr, opcode);
6169 if (aarch64_opcode_encode (instr->opcode, instr,
6170 &instr->value, NULL, NULL))
6171 {
6172 put_aarch64_insn (buf, instr->value);
6173 return;
6174 }
6175 }
6176
6177 if (try_mov_bitmask_p)
6178 {
6179 /* Try the ORR alias. */
6180 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6181 aarch64_replace_opcode (instr, opcode);
6182 if (aarch64_opcode_encode (instr->opcode, instr,
6183 &instr->value, NULL, NULL))
6184 {
6185 put_aarch64_insn (buf, instr->value);
6186 return;
6187 }
6188 }
6189
6190 as_bad_where (fixP->fx_file, fixP->fx_line,
6191 _("immediate cannot be moved by a single instruction"));
6192 }
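/* Illustrative examples of the preference order implemented above
   (hypothetical source lines):

     mov x0, #0x10000             // single MOVZ: MOVZ x0, #0x1, LSL #16
     mov x0, #-1                  // single MOVN: MOVN x0, #0x0
     mov x0, #0x5555555555555555  // bitmask immediate: ORR x0, xzr, #imm

   A value such as 0x12345678 fits none of the three single-instruction
   forms, so the error above is reported.  */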
6193
6194 /* An instruction operand that takes an immediate may refer to a symbol
6195 in the assembly, e.g.
6196
6197 mov w0, u32
6198 .set u32, 0x00ffff00
6199
6200 At the time when the assembly instruction is parsed, a referenced symbol,
6201 like 'u32' in the above example, may not have been seen; a fixS is created
6202 in such a case and is handled here after symbols have been resolved.
6203 The instruction is fixed up with VALUE using the information in *FIXP plus
6204 extra information in FLAGS.
6205
6206 This function is called by md_apply_fix to fix up instructions that need
6207 a fix-up of the kind described above, not a linker-time relocation. */
6208
6209 static void
6210 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6211 {
6212 int idx;
6213 uint32_t insn;
6214 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6215 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6216 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6217
6218 if (new_inst)
6219 {
6220 /* Now the instruction is about to be fixed-up, so the operand that
6221 was previously marked as 'ignored' needs to be unmarked in order
6222 to get the encoding done properly. */
6223 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6224 new_inst->operands[idx].skip = 0;
6225 }
6226
6227 gas_assert (opnd != AARCH64_OPND_NIL);
6228
6229 switch (opnd)
6230 {
6231 case AARCH64_OPND_EXCEPTION:
6232 if (unsigned_overflow (value, 16))
6233 as_bad_where (fixP->fx_file, fixP->fx_line,
6234 _("immediate out of range"));
6235 insn = get_aarch64_insn (buf);
6236 insn |= encode_svc_imm (value);
6237 put_aarch64_insn (buf, insn);
6238 break;
6239
6240 case AARCH64_OPND_AIMM:
6241 /* ADD or SUB with immediate.
6242 NOTE this assumes we come here with an add/sub shifted reg encoding
6243 3 322|2222|2 2 2 21111 111111
6244 1 098|7654|3 2 1 09876 543210 98765 43210
6245 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6246 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6247 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6248 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6249 ->
6250 3 322|2222|2 2 221111111111
6251 1 098|7654|3 2 109876543210 98765 43210
6252 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6253 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6254 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6255 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6256 Fields sf Rn Rd are already set. */
6257 insn = get_aarch64_insn (buf);
6258 if (value < 0)
6259 {
6260 /* Add <-> sub. */
6261 insn = reencode_addsub_switch_add_sub (insn);
6262 value = -value;
6263 }
6264
6265 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6266 && unsigned_overflow (value, 12))
6267 {
6268 /* Try to shift the value by 12 to make it fit. */
6269 if (((value >> 12) << 12) == value
6270 && ! unsigned_overflow (value, 12 + 12))
6271 {
6272 value >>= 12;
6273 insn |= encode_addsub_imm_shift_amount (1);
6274 }
6275 }
6276
6277 if (unsigned_overflow (value, 12))
6278 as_bad_where (fixP->fx_file, fixP->fx_line,
6279 _("immediate out of range"));
6280
6281 insn |= encode_addsub_imm (value);
6282
6283 put_aarch64_insn (buf, insn);
6284 break;
6285
6286 case AARCH64_OPND_SIMD_IMM:
6287 case AARCH64_OPND_SIMD_IMM_SFT:
6288 case AARCH64_OPND_LIMM:
6289 /* Bit mask immediate. */
6290 gas_assert (new_inst != NULL);
6291 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6292 new_inst->operands[idx].imm.value = value;
6293 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6294 &new_inst->value, NULL, NULL))
6295 put_aarch64_insn (buf, new_inst->value);
6296 else
6297 as_bad_where (fixP->fx_file, fixP->fx_line,
6298 _("invalid immediate"));
6299 break;
6300
6301 case AARCH64_OPND_HALF:
6302 /* 16-bit unsigned immediate. */
6303 if (unsigned_overflow (value, 16))
6304 as_bad_where (fixP->fx_file, fixP->fx_line,
6305 _("immediate out of range"));
6306 insn = get_aarch64_insn (buf);
6307 insn |= encode_movw_imm (value & 0xffff);
6308 put_aarch64_insn (buf, insn);
6309 break;
6310
6311 case AARCH64_OPND_IMM_MOV:
6312 /* Operand for a generic move immediate instruction, which is
6313 an alias instruction that generates a single MOVZ, MOVN or ORR
6314 instruction to load a 32-bit/64-bit immediate value into a general
6315 register. An assembler error shall result if the immediate cannot be
6316 created by a single one of these instructions. If there is a choice,
6317 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
6318 and MOVZ or MOVN to ORR. */
6319 gas_assert (new_inst != NULL);
6320 fix_mov_imm_insn (fixP, buf, new_inst, value);
6321 break;
6322
6323 case AARCH64_OPND_ADDR_SIMM7:
6324 case AARCH64_OPND_ADDR_SIMM9:
6325 case AARCH64_OPND_ADDR_SIMM9_2:
6326 case AARCH64_OPND_ADDR_UIMM12:
6327 /* Immediate offset in an address. */
6328 insn = get_aarch64_insn (buf);
6329
6330 gas_assert (new_inst != NULL && new_inst->value == insn);
6331 gas_assert (new_inst->opcode->operands[1] == opnd
6332 || new_inst->opcode->operands[2] == opnd);
6333
6334 /* Get the index of the address operand. */
6335 if (new_inst->opcode->operands[1] == opnd)
6336 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6337 idx = 1;
6338 else
6339 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6340 idx = 2;
6341
6342 /* Update the resolved offset value. */
6343 new_inst->operands[idx].addr.offset.imm = value;
6344
6345 /* Encode/fix-up. */
6346 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6347 &new_inst->value, NULL, NULL))
6348 {
6349 put_aarch64_insn (buf, new_inst->value);
6350 break;
6351 }
6352 else if (new_inst->opcode->iclass == ldst_pos
6353 && try_to_encode_as_unscaled_ldst (new_inst))
6354 {
6355 put_aarch64_insn (buf, new_inst->value);
6356 break;
6357 }
6358
6359 as_bad_where (fixP->fx_file, fixP->fx_line,
6360 _("immediate offset out of range"));
6361 break;
6362
6363 default:
6364 gas_assert (0);
6365 as_fatal (_("unhandled operand code %d"), opnd);
6366 }
6367 }
6368
6369 /* Apply a fixup (fixP) to segment data, once it has been determined
6370 by our caller that we have all the info we need to fix it up.
6371
6372 Parameter valP is the pointer to the value of the bits. */
6373
6374 void
6375 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6376 {
6377 offsetT value = *valP;
6378 uint32_t insn;
6379 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6380 int scale;
6381 unsigned flags = fixP->fx_addnumber;
6382
6383 DEBUG_TRACE ("\n\n");
6384 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6385 DEBUG_TRACE ("Enter md_apply_fix");
6386
6387 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6388
6389 /* Note whether this will delete the relocation. */
6390
6391 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6392 fixP->fx_done = 1;
6393
6394 /* Process the relocations. */
6395 switch (fixP->fx_r_type)
6396 {
6397 case BFD_RELOC_NONE:
6398 /* This will need to go in the object file. */
6399 fixP->fx_done = 0;
6400 break;
6401
6402 case BFD_RELOC_8:
6403 case BFD_RELOC_8_PCREL:
6404 if (fixP->fx_done || !seg->use_rela_p)
6405 md_number_to_chars (buf, value, 1);
6406 break;
6407
6408 case BFD_RELOC_16:
6409 case BFD_RELOC_16_PCREL:
6410 if (fixP->fx_done || !seg->use_rela_p)
6411 md_number_to_chars (buf, value, 2);
6412 break;
6413
6414 case BFD_RELOC_32:
6415 case BFD_RELOC_32_PCREL:
6416 if (fixP->fx_done || !seg->use_rela_p)
6417 md_number_to_chars (buf, value, 4);
6418 break;
6419
6420 case BFD_RELOC_64:
6421 case BFD_RELOC_64_PCREL:
6422 if (fixP->fx_done || !seg->use_rela_p)
6423 md_number_to_chars (buf, value, 8);
6424 break;
6425
6426 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6427 /* We claim that these fixups have been processed here, even if
6428 in fact we generate an error because we do not have a reloc
6429 for them, so tc_gen_reloc() will reject them. */
6430 fixP->fx_done = 1;
6431 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6432 {
6433 as_bad_where (fixP->fx_file, fixP->fx_line,
6434 _("undefined symbol %s used as an immediate value"),
6435 S_GET_NAME (fixP->fx_addsy));
6436 goto apply_fix_return;
6437 }
6438 fix_insn (fixP, flags, value);
6439 break;
6440
6441 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6442 if (fixP->fx_done || !seg->use_rela_p)
6443 {
6444 if (value & 3)
6445 as_bad_where (fixP->fx_file, fixP->fx_line,
6446 _("pc-relative load offset not word aligned"));
6447 if (signed_overflow (value, 21))
6448 as_bad_where (fixP->fx_file, fixP->fx_line,
6449 _("pc-relative load offset out of range"));
6450 insn = get_aarch64_insn (buf);
6451 insn |= encode_ld_lit_ofs_19 (value >> 2);
6452 put_aarch64_insn (buf, insn);
6453 }
6454 break;
6455
6456 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6457 if (fixP->fx_done || !seg->use_rela_p)
6458 {
6459 if (signed_overflow (value, 21))
6460 as_bad_where (fixP->fx_file, fixP->fx_line,
6461 _("pc-relative address offset out of range"));
6462 insn = get_aarch64_insn (buf);
6463 insn |= encode_adr_imm (value);
6464 put_aarch64_insn (buf, insn);
6465 }
6466 break;
6467
6468 case BFD_RELOC_AARCH64_BRANCH19:
6469 if (fixP->fx_done || !seg->use_rela_p)
6470 {
6471 if (value & 3)
6472 as_bad_where (fixP->fx_file, fixP->fx_line,
6473 _("conditional branch target not word aligned"));
6474 if (signed_overflow (value, 21))
6475 as_bad_where (fixP->fx_file, fixP->fx_line,
6476 _("conditional branch out of range"));
6477 insn = get_aarch64_insn (buf);
6478 insn |= encode_cond_branch_ofs_19 (value >> 2);
6479 put_aarch64_insn (buf, insn);
6480 }
6481 break;
6482
6483 case BFD_RELOC_AARCH64_TSTBR14:
6484 if (fixP->fx_done || !seg->use_rela_p)
6485 {
6486 if (value & 3)
6487 as_bad_where (fixP->fx_file, fixP->fx_line,
6488 _("conditional branch target not word aligned"));
6489 if (signed_overflow (value, 16))
6490 as_bad_where (fixP->fx_file, fixP->fx_line,
6491 _("conditional branch out of range"));
6492 insn = get_aarch64_insn (buf);
6493 insn |= encode_tst_branch_ofs_14 (value >> 2);
6494 put_aarch64_insn (buf, insn);
6495 }
6496 break;
6497
6498 case BFD_RELOC_AARCH64_JUMP26:
6499 case BFD_RELOC_AARCH64_CALL26:
6500 if (fixP->fx_done || !seg->use_rela_p)
6501 {
6502 if (value & 3)
6503 as_bad_where (fixP->fx_file, fixP->fx_line,
6504 _("branch target not word aligned"));
6505 if (signed_overflow (value, 28))
6506 as_bad_where (fixP->fx_file, fixP->fx_line,
6507 _("branch out of range"));
6508 insn = get_aarch64_insn (buf);
6509 insn |= encode_branch_ofs_26 (value >> 2);
6510 put_aarch64_insn (buf, insn);
6511 }
6512 break;
6513
6514 case BFD_RELOC_AARCH64_MOVW_G0:
6515 case BFD_RELOC_AARCH64_MOVW_G0_S:
6516 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6517 scale = 0;
6518 goto movw_common;
6519 case BFD_RELOC_AARCH64_MOVW_G1:
6520 case BFD_RELOC_AARCH64_MOVW_G1_S:
6521 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6522 scale = 16;
6523 goto movw_common;
6524 case BFD_RELOC_AARCH64_MOVW_G2:
6525 case BFD_RELOC_AARCH64_MOVW_G2_S:
6526 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6527 scale = 32;
6528 goto movw_common;
6529 case BFD_RELOC_AARCH64_MOVW_G3:
6530 scale = 48;
6531 movw_common:
6532 if (fixP->fx_done || !seg->use_rela_p)
6533 {
6534 insn = get_aarch64_insn (buf);
6535
6536 if (!fixP->fx_done)
6537 {
6538 /* REL signed addend must fit in 16 bits */
6539 if (signed_overflow (value, 16))
6540 as_bad_where (fixP->fx_file, fixP->fx_line,
6541 _("offset out of range"));
6542 }
6543 else
6544 {
6545 /* Check for overflow and scale. */
6546 switch (fixP->fx_r_type)
6547 {
6548 case BFD_RELOC_AARCH64_MOVW_G0:
6549 case BFD_RELOC_AARCH64_MOVW_G1:
6550 case BFD_RELOC_AARCH64_MOVW_G2:
6551 case BFD_RELOC_AARCH64_MOVW_G3:
6552 if (unsigned_overflow (value, scale + 16))
6553 as_bad_where (fixP->fx_file, fixP->fx_line,
6554 _("unsigned value out of range"));
6555 break;
6556 case BFD_RELOC_AARCH64_MOVW_G0_S:
6557 case BFD_RELOC_AARCH64_MOVW_G1_S:
6558 case BFD_RELOC_AARCH64_MOVW_G2_S:
6559 /* NOTE: We can only come here with movz or movn. */
6560 if (signed_overflow (value, scale + 16))
6561 as_bad_where (fixP->fx_file, fixP->fx_line,
6562 _("signed value out of range"));
6563 if (value < 0)
6564 {
6565 /* Force use of MOVN. */
6566 value = ~value;
6567 insn = reencode_movzn_to_movn (insn);
6568 }
6569 else
6570 {
6571 /* Force use of MOVZ. */
6572 insn = reencode_movzn_to_movz (insn);
6573 }
6574 break;
6575 default:
6576 /* Unchecked relocations. */
6577 break;
6578 }
6579 value >>= scale;
6580 }
6581
6582 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6583 insn |= encode_movw_imm (value & 0xffff);
6584
6585 put_aarch64_insn (buf, insn);
6586 }
6587 break;
6588
6589 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6590 fixP->fx_r_type = (ilp32_p
6591 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6592 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6593 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6594 /* Should always be exported to object file, see
6595 aarch64_force_relocation(). */
6596 gas_assert (!fixP->fx_done);
6597 gas_assert (seg->use_rela_p);
6598 break;
6599
6600 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6601 fixP->fx_r_type = (ilp32_p
6602 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6603 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6604 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6605 /* Should always be exported to object file, see
6606 aarch64_force_relocation(). */
6607 gas_assert (!fixP->fx_done);
6608 gas_assert (seg->use_rela_p);
6609 break;
6610
6611 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6612 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6613 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6614 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6615 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6616 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6617 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6618 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6619 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6620 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6621 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6622 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6623 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6624 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6625 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6626 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6627 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6628 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6629 /* Should always be exported to object file, see
6630 aarch64_force_relocation(). */
6631 gas_assert (!fixP->fx_done);
6632 gas_assert (seg->use_rela_p);
6633 break;
6634
6635 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6636 /* Should always be exported to object file, see
6637 aarch64_force_relocation(). */
6638 fixP->fx_r_type = (ilp32_p
6639 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6640 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6641 gas_assert (!fixP->fx_done);
6642 gas_assert (seg->use_rela_p);
6643 break;
6644
6645 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6646 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6647 case BFD_RELOC_AARCH64_ADD_LO12:
6648 case BFD_RELOC_AARCH64_LDST8_LO12:
6649 case BFD_RELOC_AARCH64_LDST16_LO12:
6650 case BFD_RELOC_AARCH64_LDST32_LO12:
6651 case BFD_RELOC_AARCH64_LDST64_LO12:
6652 case BFD_RELOC_AARCH64_LDST128_LO12:
6653 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6654 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6655 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6656 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6657 /* Should always be exported to object file, see
6658 aarch64_force_relocation(). */
6659 gas_assert (!fixP->fx_done);
6660 gas_assert (seg->use_rela_p);
6661 break;
6662
6663 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6664 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6665 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6666 break;
6667
6668 case BFD_RELOC_UNUSED:
6669 /* An error will already have been reported. */
6670 break;
6671
6672 default:
6673 as_bad_where (fixP->fx_file, fixP->fx_line,
6674 _("unexpected %s fixup"),
6675 bfd_get_reloc_code_name (fixP->fx_r_type));
6676 break;
6677 }
6678
6679 apply_fix_return:
6680 /* Free the allocated struct aarch64_inst, if any.
6681 N.B. currently only a very limited number of fix-up types actually use
6682 this field, so the impact on performance should be minimal. */
6683 if (fixP->tc_fix_data.inst != NULL)
6684 free (fixP->tc_fix_data.inst);
6685
6686 return;
6687 }
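/* Worked example for the MOVW group handling above: if a fixup for
   movz x0, #:abs_g1:sym (a hypothetical symbol) is resolved locally to
   0x12345678, SCALE is 16, the value is checked to fit in 32 bits,
   shifted right to 0x1234 and inserted as the 16-bit MOVZ immediate.  */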
6688
6689 /* Translate internal representation of relocation info to BFD target
6690 format. */
6691
6692 arelent *
6693 tc_gen_reloc (asection * section, fixS * fixp)
6694 {
6695 arelent *reloc;
6696 bfd_reloc_code_real_type code;
6697
6698 reloc = xmalloc (sizeof (arelent));
6699
6700 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6701 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6702 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6703
6704 if (fixp->fx_pcrel)
6705 {
6706 if (section->use_rela_p)
6707 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6708 else
6709 fixp->fx_offset = reloc->address;
6710 }
6711 reloc->addend = fixp->fx_offset;
6712
6713 code = fixp->fx_r_type;
6714 switch (code)
6715 {
6716 case BFD_RELOC_16:
6717 if (fixp->fx_pcrel)
6718 code = BFD_RELOC_16_PCREL;
6719 break;
6720
6721 case BFD_RELOC_32:
6722 if (fixp->fx_pcrel)
6723 code = BFD_RELOC_32_PCREL;
6724 break;
6725
6726 case BFD_RELOC_64:
6727 if (fixp->fx_pcrel)
6728 code = BFD_RELOC_64_PCREL;
6729 break;
6730
6731 default:
6732 break;
6733 }
6734
6735 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6736 if (reloc->howto == NULL)
6737 {
6738 as_bad_where (fixp->fx_file, fixp->fx_line,
6739 _
6740 ("cannot represent %s relocation in this object file format"),
6741 bfd_get_reloc_code_name (code));
6742 return NULL;
6743 }
6744
6745 return reloc;
6746 }
6747
6748 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6749
6750 void
6751 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6752 {
6753 bfd_reloc_code_real_type type;
6754 int pcrel = 0;
6755
6756 /* Pick a reloc.
6757 FIXME: @@ Should look at CPU word size. */
6758 switch (size)
6759 {
6760 case 1:
6761 type = BFD_RELOC_8;
6762 break;
6763 case 2:
6764 type = BFD_RELOC_16;
6765 break;
6766 case 4:
6767 type = BFD_RELOC_32;
6768 break;
6769 case 8:
6770 type = BFD_RELOC_64;
6771 break;
6772 default:
6773 as_bad (_("cannot do %u-byte relocation"), size);
6774 type = BFD_RELOC_UNUSED;
6775 break;
6776 }
6777
6778 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6779 }
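/* Illustrative example: data directives such as

     .word  some_symbol   // SIZE == 4 -> BFD_RELOC_32
     .xword some_symbol   // SIZE == 8 -> BFD_RELOC_64

   reach this function through TC_CONS_FIX_NEW ('some_symbol' is just a
   placeholder name).  */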
6780
6781 int
6782 aarch64_force_relocation (struct fix *fixp)
6783 {
6784 switch (fixp->fx_r_type)
6785 {
6786 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6787 /* Perform these "immediate" internal relocations
6788 even if the symbol is extern or weak. */
6789 return 0;
6790
6791 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6792 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6793 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6794 /* Pseudo relocs that need to be fixed up according to
6795 ilp32_p. */
6796 return 0;
6797
6798 case BFD_RELOC_AARCH64_ADD_LO12:
6799 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6800 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6801 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6802 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6803 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6804 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6805 case BFD_RELOC_AARCH64_LDST128_LO12:
6806 case BFD_RELOC_AARCH64_LDST16_LO12:
6807 case BFD_RELOC_AARCH64_LDST32_LO12:
6808 case BFD_RELOC_AARCH64_LDST64_LO12:
6809 case BFD_RELOC_AARCH64_LDST8_LO12:
6810 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6811 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6812 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6813 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6814 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6815 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6816 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6817 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6818 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6819 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6820 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6821 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6822 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6823 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6824 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6825 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6826 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6827 /* Always leave these relocations for the linker. */
6828 return 1;
6829
6830 default:
6831 break;
6832 }
6833
6834 return generic_force_reloc (fixp);
6835 }
6836
6837 #ifdef OBJ_ELF
6838
6839 const char *
6840 elf64_aarch64_target_format (void)
6841 {
6842 if (target_big_endian)
6843 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
6844 else
6845 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
6846 }
6847
6848 void
6849 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6850 {
6851 elf_frob_symbol (symp, puntp);
6852 }
6853 #endif
6854
6855 /* MD interface: Finalization. */
6856
6857 /* A good place to do this, although this was probably not intended
6858 for this kind of use. We need to dump the literal pool before
6859 references are made to a null symbol pointer. */
6860
6861 void
6862 aarch64_cleanup (void)
6863 {
6864 literal_pool *pool;
6865
6866 for (pool = list_of_pools; pool; pool = pool->next)
6867 {
6868 /* Put it at the end of the relevant section. */
6869 subseg_set (pool->section, pool->sub_section);
6870 s_ltorg (0);
6871 }
6872 }
6873
6874 #ifdef OBJ_ELF
6875 /* Remove any excess mapping symbols generated for alignment frags in
6876 SEC. We may have created a mapping symbol before a zero byte
6877 alignment; remove it if there's a mapping symbol after the
6878 alignment. */
6879 static void
6880 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
6881 void *dummy ATTRIBUTE_UNUSED)
6882 {
6883 segment_info_type *seginfo = seg_info (sec);
6884 fragS *fragp;
6885
6886 if (seginfo == NULL || seginfo->frchainP == NULL)
6887 return;
6888
6889 for (fragp = seginfo->frchainP->frch_root;
6890 fragp != NULL; fragp = fragp->fr_next)
6891 {
6892 symbolS *sym = fragp->tc_frag_data.last_map;
6893 fragS *next = fragp->fr_next;
6894
6895 /* Variable-sized frags have been converted to fixed size by
6896 this point. But if this was variable-sized to start with,
6897 there will be a fixed-size frag after it. So don't handle
6898 next == NULL. */
6899 if (sym == NULL || next == NULL)
6900 continue;
6901
6902 if (S_GET_VALUE (sym) < next->fr_address)
6903 /* Not at the end of this frag. */
6904 continue;
6905 know (S_GET_VALUE (sym) == next->fr_address);
6906
6907 do
6908 {
6909 if (next->tc_frag_data.first_map != NULL)
6910 {
6911 /* Next frag starts with a mapping symbol. Discard this
6912 one. */
6913 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6914 break;
6915 }
6916
6917 if (next->fr_next == NULL)
6918 {
6919 /* This mapping symbol is at the end of the section. Discard
6920 it. */
6921 know (next->fr_fix == 0 && next->fr_var == 0);
6922 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6923 break;
6924 }
6925
6926 /* As long as we have empty frags without any mapping symbols,
6927 keep looking. */
6928 /* If the next frag is non-empty and does not start with a
6929 mapping symbol, then this mapping symbol is required. */
6930 if (next->fr_address != next->fr_next->fr_address)
6931 break;
6932
6933 next = next->fr_next;
6934 }
6935 while (next != NULL);
6936 }
6937 }
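/* Example of the redundancy removed above: an alignment directive that
   needs no padding can leave a mapping symbol (e.g. a "$d") pointing at
   the very end of its frag; if the following frag begins with its own
   mapping symbol, the earlier one carries no information and is dropped.  */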
6938 #endif
6939
6940 /* Adjust the symbol table. */
6941
6942 void
6943 aarch64_adjust_symtab (void)
6944 {
6945 #ifdef OBJ_ELF
6946 /* Remove any overlapping mapping symbols generated by alignment frags. */
6947 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
6948 /* Now do generic ELF adjustments. */
6949 elf_adjust_symtab ();
6950 #endif
6951 }
6952
6953 static void
6954 checked_hash_insert (struct hash_control *table, const char *key, void *value)
6955 {
6956 const char *hash_err;
6957
6958 hash_err = hash_insert (table, key, value);
6959 if (hash_err)
6960 printf ("Internal Error: Can't hash %s\n", key);
6961 }
6962
6963 static void
6964 fill_instruction_hash_table (void)
6965 {
6966 aarch64_opcode *opcode = aarch64_opcode_table;
6967
6968 while (opcode->name != NULL)
6969 {
6970 templates *templ, *new_templ;
6971 templ = hash_find (aarch64_ops_hsh, opcode->name);
6972
6973 new_templ = (templates *) xmalloc (sizeof (templates));
6974 new_templ->opcode = opcode;
6975 new_templ->next = NULL;
6976
6977 if (!templ)
6978 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
6979 else
6980 {
6981 new_templ->next = templ->next;
6982 templ->next = new_templ;
6983 }
6984 ++opcode;
6985 }
6986 }
6987
6988 static inline void
6989 convert_to_upper (char *dst, const char *src, size_t num)
6990 {
6991 unsigned int i;
6992 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
6993 *dst = TOUPPER (*src);
6994 *dst = '\0';
6995 }
6996
6997 /* Assume STR points to a lower-case string; allocate, convert and return
6998 the corresponding upper-case string. */
6999 static inline const char*
7000 get_upper_str (const char *str)
7001 {
7002 char *ret;
7003 size_t len = strlen (str);
7004 if ((ret = xmalloc (len + 1)) == NULL)
7005 abort ();
7006 convert_to_upper (ret, str, len);
7007 return ret;
7008 }
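/* For example, get_upper_str ("lsl") returns a freshly allocated "LSL";
   md_begin below uses it to enter the upper-case spelling of modifiers,
   condition codes, barrier options and prefetch operations into the same
   hash tables as the lower-case names.  */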
7009
7010 /* MD interface: Initialization. */
7011
7012 void
7013 md_begin (void)
7014 {
7015 unsigned mach;
7016 unsigned int i;
7017
7018 if ((aarch64_ops_hsh = hash_new ()) == NULL
7019 || (aarch64_cond_hsh = hash_new ()) == NULL
7020 || (aarch64_shift_hsh = hash_new ()) == NULL
7021 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
7022 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
7023 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
7024 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
7025 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
7026 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
7027 || (aarch64_reg_hsh = hash_new ()) == NULL
7028 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
7029 || (aarch64_nzcv_hsh = hash_new ()) == NULL
7030 || (aarch64_pldop_hsh = hash_new ()) == NULL)
7031 as_fatal (_("virtual memory exhausted"));
7032
7033 fill_instruction_hash_table ();
7034
7035 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
7036 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
7037 (void *) (aarch64_sys_regs + i));
7038
7039 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
7040 checked_hash_insert (aarch64_pstatefield_hsh,
7041 aarch64_pstatefields[i].name,
7042 (void *) (aarch64_pstatefields + i));
7043
7044 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
7045 checked_hash_insert (aarch64_sys_regs_ic_hsh,
7046 aarch64_sys_regs_ic[i].template,
7047 (void *) (aarch64_sys_regs_ic + i));
7048
7049 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
7050 checked_hash_insert (aarch64_sys_regs_dc_hsh,
7051 aarch64_sys_regs_dc[i].template,
7052 (void *) (aarch64_sys_regs_dc + i));
7053
7054 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
7055 checked_hash_insert (aarch64_sys_regs_at_hsh,
7056 aarch64_sys_regs_at[i].template,
7057 (void *) (aarch64_sys_regs_at + i));
7058
7059 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
7060 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
7061 aarch64_sys_regs_tlbi[i].template,
7062 (void *) (aarch64_sys_regs_tlbi + i));
7063
7064 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7065 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7066 (void *) (reg_names + i));
7067
7068 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7069 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7070 (void *) (nzcv_names + i));
7071
7072 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7073 {
7074 const char *name = aarch64_operand_modifiers[i].name;
7075 checked_hash_insert (aarch64_shift_hsh, name,
7076 (void *) (aarch64_operand_modifiers + i));
7077 /* Also hash the name in the upper case. */
7078 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7079 (void *) (aarch64_operand_modifiers + i));
7080 }
7081
7082 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7083 {
7084 unsigned int j;
7085 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7086 the same condition code. */
7087 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7088 {
7089 const char *name = aarch64_conds[i].names[j];
7090 if (name == NULL)
7091 break;
7092 checked_hash_insert (aarch64_cond_hsh, name,
7093 (void *) (aarch64_conds + i));
7094 /* Also hash the name in the upper case. */
7095 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7096 (void *) (aarch64_conds + i));
7097 }
7098 }
7099
7100 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7101 {
7102 const char *name = aarch64_barrier_options[i].name;
7103 /* Skip xx00 - the unallocated barrier option values. */
7104 if ((i & 0x3) == 0)
7105 continue;
7106 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7107 (void *) (aarch64_barrier_options + i));
7108 /* Also hash the name in the upper case. */
7109 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7110 (void *) (aarch64_barrier_options + i));
7111 }
7112
7113 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7114 {
7115 const char* name = aarch64_prfops[i].name;
7116 /* Skip the unallocated hint encodings. */
7117 if (name == NULL)
7118 continue;
7119 checked_hash_insert (aarch64_pldop_hsh, name,
7120 (void *) (aarch64_prfops + i));
7121 /* Also hash the name in the upper case. */
7122 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7123 (void *) (aarch64_prfops + i));
7124 }
7125
7126 /* Set the cpu variant based on the command-line options. */
7127 if (!mcpu_cpu_opt)
7128 mcpu_cpu_opt = march_cpu_opt;
7129
7130 if (!mcpu_cpu_opt)
7131 mcpu_cpu_opt = &cpu_default;
7132
7133 cpu_variant = *mcpu_cpu_opt;
7134
7135 /* Record the CPU type. */
7136 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7137
7138 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7139 }
7140
7141 /* Command line processing. */
7142
7143 const char *md_shortopts = "m:";
7144
7145 #ifdef AARCH64_BI_ENDIAN
7146 #define OPTION_EB (OPTION_MD_BASE + 0)
7147 #define OPTION_EL (OPTION_MD_BASE + 1)
7148 #else
7149 #if TARGET_BYTES_BIG_ENDIAN
7150 #define OPTION_EB (OPTION_MD_BASE + 0)
7151 #else
7152 #define OPTION_EL (OPTION_MD_BASE + 1)
7153 #endif
7154 #endif
7155
7156 struct option md_longopts[] = {
7157 #ifdef OPTION_EB
7158 {"EB", no_argument, NULL, OPTION_EB},
7159 #endif
7160 #ifdef OPTION_EL
7161 {"EL", no_argument, NULL, OPTION_EL},
7162 #endif
7163 {NULL, no_argument, NULL, 0}
7164 };
7165
7166 size_t md_longopts_size = sizeof (md_longopts);
7167
7168 struct aarch64_option_table
7169 {
7170 char *option; /* Option name to match. */
7171 char *help; /* Help information. */
7172 int *var; /* Variable to change. */
7173 int value; /* What to change it to. */
7174 char *deprecated; /* If non-null, print this message. */
7175 };
7176
7177 static struct aarch64_option_table aarch64_opts[] = {
7178 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7179 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7180 NULL},
7181 #ifdef DEBUG_AARCH64
7182 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7183 #endif /* DEBUG_AARCH64 */
7184 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7185 NULL},
7186 {"mno-verbose-error", N_("do not output verbose error messages"),
7187 &verbose_error_p, 0, NULL},
7188 {NULL, NULL, NULL, 0, NULL}
7189 };
7190
7191 struct aarch64_cpu_option_table
7192 {
7193 char *name;
7194 const aarch64_feature_set value;
7195 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7196 case. */
7197 const char *canonical_name;
7198 };
7199
7200 /* This list should, at a minimum, contain all the cpu names
7201 recognized by GCC. */
7202 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7203 {"all", AARCH64_ANY, NULL},
7204 {"cortex-a53", AARCH64_ARCH_V8, "Cortex-A53"},
7205 {"cortex-a57", AARCH64_ARCH_V8, "Cortex-A57"},
7206 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7207 {"generic", AARCH64_ARCH_V8, NULL},
7208
7209 /* These two are example CPUs supported in GCC; once we have real
7210 CPUs they will be removed. */
7211 {"example-1", AARCH64_ARCH_V8, NULL},
7212 {"example-2", AARCH64_ARCH_V8, NULL},
7213
7214 {NULL, AARCH64_ARCH_NONE, NULL}
7215 };
7216
7217 struct aarch64_arch_option_table
7218 {
7219 char *name;
7220 const aarch64_feature_set value;
7221 };
7222
7223 /* This list should, at a minimum, contain all the architecture names
7224 recognized by GCC. */
7225 static const struct aarch64_arch_option_table aarch64_archs[] = {
7226 {"all", AARCH64_ANY},
7227 {"armv8-a", AARCH64_ARCH_V8},
7228 {NULL, AARCH64_ARCH_NONE}
7229 };
7230
7231 /* ISA extensions. */
7232 struct aarch64_option_cpu_value_table
7233 {
7234 char *name;
7235 const aarch64_feature_set value;
7236 };
7237
7238 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7239 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7240 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7241 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7242 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7243 {NULL, AARCH64_ARCH_NONE}
7244 };
7245
7246 struct aarch64_long_option_table
7247 {
7248 char *option; /* Substring to match. */
7249 char *help; /* Help information. */
7250 int (*func) (char *subopt); /* Function to decode sub-option. */
7251 char *deprecated; /* If non-null, print this message. */
7252 };
7253
7254 static int
7255 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p)
7256 {
7257 /* We insist on extensions being added before being removed. We achieve
7258 this by using the ADDING_VALUE variable to indicate whether we are
7259 adding an extension (1) or removing it (0) and only allowing it to
7260 change in the order -1 -> 1 -> 0. */
7261 int adding_value = -1;
7262 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7263
7264 /* Copy the feature set, so that we can modify it. */
7265 *ext_set = **opt_p;
7266 *opt_p = ext_set;
7267
7268 while (str != NULL && *str != 0)
7269 {
7270 const struct aarch64_option_cpu_value_table *opt;
7271 char *ext;
7272 int optlen;
7273
7274 if (*str != '+')
7275 {
7276 as_bad (_("invalid architectural extension"));
7277 return 0;
7278 }
7279
7280 str++;
7281 ext = strchr (str, '+');
7282
7283 if (ext != NULL)
7284 optlen = ext - str;
7285 else
7286 optlen = strlen (str);
7287
7288 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7289 {
7290 if (adding_value != 0)
7291 adding_value = 0;
7292 optlen -= 2;
7293 str += 2;
7294 }
7295 else if (optlen > 0)
7296 {
7297 if (adding_value == -1)
7298 adding_value = 1;
7299 else if (adding_value != 1)
7300 {
7301 as_bad (_("must specify extensions to add before specifying "
7302 "those to remove"));
7303 return 0;
7304 }
7305 }
7306
7307 if (optlen == 0)
7308 {
7309 as_bad (_("missing architectural extension"));
7310 return 0;
7311 }
7312
7313 gas_assert (adding_value != -1);
7314
7315 for (opt = aarch64_features; opt->name != NULL; opt++)
7316 if (strncmp (opt->name, str, optlen) == 0)
7317 {
7318 /* Add or remove the extension. */
7319 if (adding_value)
7320 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7321 else
7322 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7323 break;
7324 }
7325
7326 if (opt->name == NULL)
7327 {
7328 as_bad (_("unknown architectural extension `%s'"), str);
7329 return 0;
7330 }
7331
7332 str = ext;
7333 }
7334
7335 return 1;
7336 }
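/* Illustrative example of the accepted syntax: -mcpu=cortex-a57+crypto+nofp
   first adds the "crypto" feature and then removes "fp", whereas
   -mcpu=cortex-a57+nofp+crypto is rejected because extensions must be
   added before any are removed.  */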
7337
7338 static int
7339 aarch64_parse_cpu (char *str)
7340 {
7341 const struct aarch64_cpu_option_table *opt;
7342 char *ext = strchr (str, '+');
7343 size_t optlen;
7344
7345 if (ext != NULL)
7346 optlen = ext - str;
7347 else
7348 optlen = strlen (str);
7349
7350 if (optlen == 0)
7351 {
7352 as_bad (_("missing cpu name `%s'"), str);
7353 return 0;
7354 }
7355
7356 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7357 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7358 {
7359 mcpu_cpu_opt = &opt->value;
7360 if (ext != NULL)
7361 return aarch64_parse_features (ext, &mcpu_cpu_opt);
7362
7363 return 1;
7364 }
7365
7366 as_bad (_("unknown cpu `%s'"), str);
7367 return 0;
7368 }
7369
7370 static int
7371 aarch64_parse_arch (char *str)
7372 {
7373 const struct aarch64_arch_option_table *opt;
7374 char *ext = strchr (str, '+');
7375 size_t optlen;
7376
7377 if (ext != NULL)
7378 optlen = ext - str;
7379 else
7380 optlen = strlen (str);
7381
7382 if (optlen == 0)
7383 {
7384 as_bad (_("missing architecture name `%s'"), str);
7385 return 0;
7386 }
7387
7388 for (opt = aarch64_archs; opt->name != NULL; opt++)
7389 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7390 {
7391 march_cpu_opt = &opt->value;
7392 if (ext != NULL)
7393 return aarch64_parse_features (ext, &march_cpu_opt);
7394
7395 return 1;
7396 }
7397
7398 as_bad (_("unknown architecture `%s'\n"), str);
7399 return 0;
7400 }
7401
7402 /* ABIs. */
7403 struct aarch64_option_abi_value_table
7404 {
7405 char *name;
7406 enum aarch64_abi_type value;
7407 };
7408
7409 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7410 {"ilp32", AARCH64_ABI_ILP32},
7411 {"lp64", AARCH64_ABI_LP64},
7412 {NULL, 0}
7413 };
7414
7415 static int
7416 aarch64_parse_abi (char *str)
7417 {
7418 const struct aarch64_option_abi_value_table *opt;
7419 size_t optlen = strlen (str);
7420
7421 if (optlen == 0)
7422 {
7423 as_bad (_("missing abi name `%s'"), str);
7424 return 0;
7425 }
7426
7427 for (opt = aarch64_abis; opt->name != NULL; opt++)
7428 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7429 {
7430 aarch64_abi = opt->value;
7431 return 1;
7432 }
7433
7434 as_bad (_("unknown abi `%s'\n"), str);
7435 return 0;
7436 }
7437
7438 static struct aarch64_long_option_table aarch64_long_opts[] = {
7439 #ifdef OBJ_ELF
7440 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
7441 aarch64_parse_abi, NULL},
7442 #endif /* OBJ_ELF */
7443 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7444 aarch64_parse_cpu, NULL},
7445 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7446 aarch64_parse_arch, NULL},
7447 {NULL, NULL, 0, NULL}
7448 };
7449
7450 int
7451 md_parse_option (int c, char *arg)
7452 {
7453 struct aarch64_option_table *opt;
7454 struct aarch64_long_option_table *lopt;
7455
7456 switch (c)
7457 {
7458 #ifdef OPTION_EB
7459 case OPTION_EB:
7460 target_big_endian = 1;
7461 break;
7462 #endif
7463
7464 #ifdef OPTION_EL
7465 case OPTION_EL:
7466 target_big_endian = 0;
7467 break;
7468 #endif
7469
7470 case 'a':
7471 /* Listing option. Just ignore these, we don't support additional
7472 ones. */
7473 return 0;
7474
7475 default:
7476 for (opt = aarch64_opts; opt->option != NULL; opt++)
7477 {
7478 if (c == opt->option[0]
7479 && ((arg == NULL && opt->option[1] == 0)
7480 || streq (arg, opt->option + 1)))
7481 {
7482 /* If the option is deprecated, tell the user. */
7483 if (opt->deprecated != NULL)
7484 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7485 arg ? arg : "", _(opt->deprecated));
7486
7487 if (opt->var != NULL)
7488 *opt->var = opt->value;
7489
7490 return 1;
7491 }
7492 }
7493
7494 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7495 {
7496 /* These options are expected to have an argument. */
7497 if (c == lopt->option[0]
7498 && arg != NULL
7499 && strncmp (arg, lopt->option + 1,
7500 strlen (lopt->option + 1)) == 0)
7501 {
7502 /* If the option is deprecated, tell the user. */
7503 if (lopt->deprecated != NULL)
7504 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7505 _(lopt->deprecated));
7506
7507 /* Call the sub-option parser. */
7508 return lopt->func (arg + strlen (lopt->option) - 1);
7509 }
7510 }
7511
7512 return 0;
7513 }
7514
7515 return 1;
7516 }
7517
7518 void
7519 md_show_usage (FILE * fp)
7520 {
7521 struct aarch64_option_table *opt;
7522 struct aarch64_long_option_table *lopt;
7523
7524 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7525
7526 for (opt = aarch64_opts; opt->option != NULL; opt++)
7527 if (opt->help != NULL)
7528 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7529
7530 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7531 if (lopt->help != NULL)
7532 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7533
7534 #ifdef OPTION_EB
7535 fprintf (fp, _("\
7536 -EB assemble code for a big-endian cpu\n"));
7537 #endif
7538
7539 #ifdef OPTION_EL
7540 fprintf (fp, _("\
7541 -EL assemble code for a little-endian cpu\n"));
7542 #endif
7543 }
7544
7545 /* Parse a .cpu directive. */
7546
7547 static void
7548 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7549 {
7550 const struct aarch64_cpu_option_table *opt;
7551 char saved_char;
7552 char *name;
7553 char *ext;
7554 size_t optlen;
7555
7556 name = input_line_pointer;
7557 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7558 input_line_pointer++;
7559 saved_char = *input_line_pointer;
7560 *input_line_pointer = 0;
7561
7562 ext = strchr (name, '+');
7563
7564 if (ext != NULL)
7565 optlen = ext - name;
7566 else
7567 optlen = strlen (name);
7568
7569 /* Skip the first "all" entry. */
7570 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7571 if (strlen (opt->name) == optlen
7572 && strncmp (name, opt->name, optlen) == 0)
7573 {
7574 mcpu_cpu_opt = &opt->value;
7575 if (ext != NULL)
7576 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7577 return;
7578
7579 cpu_variant = *mcpu_cpu_opt;
7580
7581 *input_line_pointer = saved_char;
7582 demand_empty_rest_of_line ();
7583 return;
7584 }
7585 as_bad (_("unknown cpu `%s'"), name);
7586 *input_line_pointer = saved_char;
7587 ignore_rest_of_line ();
7588 }
7589
7590
7591 /* Parse a .arch directive. */
7592
7593 static void
7594 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7595 {
7596 const struct aarch64_arch_option_table *opt;
7597 char saved_char;
7598 char *name;
7599 char *ext;
7600 size_t optlen;
7601
7602 name = input_line_pointer;
7603 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7604 input_line_pointer++;
7605 saved_char = *input_line_pointer;
7606 *input_line_pointer = 0;
7607
7608 ext = strchr (name, '+');
7609
7610 if (ext != NULL)
7611 optlen = ext - name;
7612 else
7613 optlen = strlen (name);
7614
7615 /* Skip the first "all" entry. */
7616 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7617 if (strlen (opt->name) == optlen
7618 && strncmp (name, opt->name, optlen) == 0)
7619 {
7620 mcpu_cpu_opt = &opt->value;
7621 if (ext != NULL)
7622 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7623 return;
7624
7625 cpu_variant = *mcpu_cpu_opt;
7626
7627 *input_line_pointer = saved_char;
7628 demand_empty_rest_of_line ();
7629 return;
7630 }
7631
7632 as_bad (_("unknown architecture `%s'\n"), name);
7633 *input_line_pointer = saved_char;
7634 ignore_rest_of_line ();
7635 }
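/* Illustrative directive usage (hypothetical source lines):

     .cpu  cortex-a57+crypto
     .arch armv8-a+crc

   Both directives accept the same +extension syntax as -mcpu/-march and
   update cpu_variant for the code that follows.  */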
7636
7637 /* Copy symbol information. */
7638
7639 void
7640 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7641 {
7642 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7643 }