1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
11 This file is part of GAS, the GNU Assembler.
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
32 #include "safe-ctype.h"
34 /* Need TARGET_CPU. */
41 #include "opcode/arm.h"
45 #include "dwarf2dbg.h"
46 #include "dw2gencfi.h"
49 /* XXX Set this to 1 after the next binutils release. */
50 #define WARN_DEPRECATED 0
53 /* Must be at least the size of the largest unwind opcode (currently two). */
54 #define ARM_OPCODE_CHUNK_SIZE 8
56 /* This structure holds the unwinding state. */
61 symbolS
* table_entry
;
62 symbolS
* personality_routine
;
63 int personality_index
;
64 /* The segment containing the function. */
67 /* Opcodes generated from this function. */
68 unsigned char * opcodes
;
71 /* The number of bytes pushed to the stack. */
73 /* We don't add stack adjustment opcodes immediately so that we can merge
74 multiple adjustments. We can also omit the final adjustment
75 when using a frame pointer. */
76 offsetT pending_offset
;
77 /* These two fields are set by both unwind_movsp and unwind_setfp. They
78 hold the reg+offset to use when restoring sp from a frame pointer. */
81 /* Nonzero if an unwind_setfp directive has been seen. */
83 /* Nonzero if the last opcode restores sp from fp_reg. */
84 unsigned sp_restored
:1;
87 /* Bit N indicates that an R_ARM_NONE relocation has been output for
88 __aeabi_unwind_cpp_prN already if set. This enables dependencies to be
89 emitted only once per section, to save unnecessary bloat. */
90 static unsigned int marked_pr_dependency
= 0;
101 /* Types of processor to assemble for. */
103 #if defined __XSCALE__
104 #define CPU_DEFAULT ARM_ARCH_XSCALE
106 #if defined __thumb__
107 #define CPU_DEFAULT ARM_ARCH_V5T
114 # define FPU_DEFAULT FPU_ARCH_FPA
115 # elif defined (TE_NetBSD)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
119 /* Legacy a.out format. */
120 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
122 # elif defined (TE_VXWORKS)
123 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
125 /* For backwards compatibility, default to FPA. */
126 # define FPU_DEFAULT FPU_ARCH_FPA
128 #endif /* ifndef FPU_DEFAULT */
130 #define streq(a, b) (strcmp (a, b) == 0)
132 static arm_feature_set cpu_variant
;
133 static arm_feature_set arm_arch_used
;
134 static arm_feature_set thumb_arch_used
;
136 /* Flags stored in private area of BFD structure. */
137 static int uses_apcs_26
= FALSE
;
138 static int atpcs
= FALSE
;
139 static int support_interwork
= FALSE
;
140 static int uses_apcs_float
= FALSE
;
141 static int pic_code
= FALSE
;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set
*legacy_cpu
= NULL
;
147 static const arm_feature_set
*legacy_fpu
= NULL
;
149 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
150 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
151 static const arm_feature_set
*march_cpu_opt
= NULL
;
152 static const arm_feature_set
*march_fpu_opt
= NULL
;
153 static const arm_feature_set
*mfpu_opt
= NULL
;
155 /* Constants for known architecture features. */
156 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
157 static const arm_feature_set fpu_arch_vfp_v1
= FPU_ARCH_VFP_V1
;
158 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
159 static const arm_feature_set fpu_arch_vfp_v3
= FPU_ARCH_VFP_V3
;
160 static const arm_feature_set fpu_arch_neon_v1
= FPU_ARCH_NEON_V1
;
161 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
162 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
163 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
164 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
167 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
170 static const arm_feature_set arm_ext_v1
= ARM_FEATURE (ARM_EXT_V1
, 0);
171 static const arm_feature_set arm_ext_v2
= ARM_FEATURE (ARM_EXT_V1
, 0);
172 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE (ARM_EXT_V2S
, 0);
173 static const arm_feature_set arm_ext_v3
= ARM_FEATURE (ARM_EXT_V3
, 0);
174 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE (ARM_EXT_V3M
, 0);
175 static const arm_feature_set arm_ext_v4
= ARM_FEATURE (ARM_EXT_V4
, 0);
176 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE (ARM_EXT_V4T
, 0);
177 static const arm_feature_set arm_ext_v5
= ARM_FEATURE (ARM_EXT_V5
, 0);
178 static const arm_feature_set arm_ext_v4t_5
=
179 ARM_FEATURE (ARM_EXT_V4T
| ARM_EXT_V5
, 0);
180 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE (ARM_EXT_V5T
, 0);
181 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE (ARM_EXT_V5E
, 0);
182 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE (ARM_EXT_V5ExP
, 0);
183 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE (ARM_EXT_V5J
, 0);
184 static const arm_feature_set arm_ext_v6
= ARM_FEATURE (ARM_EXT_V6
, 0);
185 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE (ARM_EXT_V6K
, 0);
186 static const arm_feature_set arm_ext_v6z
= ARM_FEATURE (ARM_EXT_V6Z
, 0);
187 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE (ARM_EXT_V6T2
, 0);
188 static const arm_feature_set arm_ext_v6_notm
= ARM_FEATURE (ARM_EXT_V6_NOTM
, 0);
189 static const arm_feature_set arm_ext_div
= ARM_FEATURE (ARM_EXT_DIV
, 0);
190 static const arm_feature_set arm_ext_v7
= ARM_FEATURE (ARM_EXT_V7
, 0);
191 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE (ARM_EXT_V7A
, 0);
192 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE (ARM_EXT_V7R
, 0);
193 static const arm_feature_set arm_ext_v7m
= ARM_FEATURE (ARM_EXT_V7M
, 0);
195 static const arm_feature_set arm_arch_any
= ARM_ANY
;
196 static const arm_feature_set arm_arch_full
= ARM_FEATURE (-1, -1);
197 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
198 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
200 static const arm_feature_set arm_cext_iwmmxt
=
201 ARM_FEATURE (0, ARM_CEXT_IWMMXT
);
202 static const arm_feature_set arm_cext_xscale
=
203 ARM_FEATURE (0, ARM_CEXT_XSCALE
);
204 static const arm_feature_set arm_cext_maverick
=
205 ARM_FEATURE (0, ARM_CEXT_MAVERICK
);
206 static const arm_feature_set fpu_fpa_ext_v1
= ARM_FEATURE (0, FPU_FPA_EXT_V1
);
207 static const arm_feature_set fpu_fpa_ext_v2
= ARM_FEATURE (0, FPU_FPA_EXT_V2
);
208 static const arm_feature_set fpu_vfp_ext_v1xd
=
209 ARM_FEATURE (0, FPU_VFP_EXT_V1xD
);
210 static const arm_feature_set fpu_vfp_ext_v1
= ARM_FEATURE (0, FPU_VFP_EXT_V1
);
211 static const arm_feature_set fpu_vfp_ext_v2
= ARM_FEATURE (0, FPU_VFP_EXT_V2
);
212 static const arm_feature_set fpu_vfp_ext_v3
= ARM_FEATURE (0, FPU_VFP_EXT_V3
);
213 static const arm_feature_set fpu_neon_ext_v1
= ARM_FEATURE (0, FPU_NEON_EXT_V1
);
214 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
215 ARM_FEATURE (0, FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
217 static int mfloat_abi_opt
= -1;
218 /* Record user cpu selection for object attributes. */
219 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
220 /* Must be long enough to hold any of the names in arm_cpus. */
221 static char selected_cpu_name
[16];
224 static int meabi_flags
= EABI_DEFAULT
;
226 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
231 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
232 symbolS
* GOT_symbol
;
235 /* 0: assemble for ARM,
236 1: assemble for Thumb,
237 2: assemble for Thumb even though target CPU does not support thumb
239 static int thumb_mode
= 0;
241 /* If unified_syntax is true, we are processing the new unified
242 ARM/Thumb syntax. Important differences from the old ARM mode:
244 - Immediate operands do not require a # prefix.
245 - Conditional affixes always appear at the end of the
246 instruction. (For backward compatibility, those instructions
247 that formerly had them in the middle, continue to accept them
249 - The IT instruction may appear, and if it does is validated
250 against subsequent conditional affixes. It does not generate
253 Important differences from the old Thumb mode:
255 - Immediate operands do not require a # prefix.
256 - Most of the V6T2 instructions are only available in unified mode.
257 - The .N and .W suffixes are recognized and honored (it is an error
258 if they cannot be honored).
259 - All instructions set the flags if and only if they have an 's' affix.
260 - Conditional affixes may be used. They are validated against
261 preceding IT instructions. Unlike ARM mode, you cannot use a
262 conditional affix except in the scope of an IT instruction. */
264 static bfd_boolean unified_syntax
= FALSE
;
279 enum neon_el_type type
;
283 #define NEON_MAX_TYPE_ELS 4
287 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
294 unsigned long instruction
;
298 struct neon_type vectype
;
299 /* Set to the opcode if the instruction needs relaxation.
300 Zero if the instruction is not relaxed. */
304 bfd_reloc_code_real_type type
;
313 struct neon_type_el vectype
;
314 unsigned present
: 1; /* Operand present. */
315 unsigned isreg
: 1; /* Operand was a register. */
316 unsigned immisreg
: 1; /* .imm field is a second register. */
317 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
318 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
319 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
320 instructions. This allows us to disambiguate ARM <-> vector insns. */
321 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
322 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
323 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
324 unsigned writeback
: 1; /* Operand has trailing ! */
325 unsigned preind
: 1; /* Preindexed address. */
326 unsigned postind
: 1; /* Postindexed address. */
327 unsigned negative
: 1; /* Index register was negated. */
328 unsigned shifted
: 1; /* Shift applied to operation. */
329 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
333 static struct arm_it inst
;
335 #define NUM_FLOAT_VALS 8
/* Floating point constants that can be encoded directly in FPA
   instructions: NUM_FLOAT_VALS (8) value strings followed by a
   terminating null sentinel.  */
const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};
342 /* Number of littlenums required to hold an extended precision number. */
343 #define MAX_LITTLENUMS 6
345 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
355 #define CP_T_X 0x00008000
356 #define CP_T_Y 0x00400000
358 #define CONDS_BIT 0x00100000
359 #define LOAD_BIT 0x00100000
361 #define DOUBLE_LOAD_FLAG 0x00000001
365 const char * template;
369 #define COND_ALWAYS 0xE
373 const char *template;
377 struct asm_barrier_opt
379 const char *template;
383 /* The bit that distinguishes CPSR and SPSR. */
384 #define SPSR_BIT (1 << 22)
386 /* The individual PSR flag bits. */
387 #define PSR_c (1 << 16)
388 #define PSR_x (1 << 17)
389 #define PSR_s (1 << 18)
390 #define PSR_f (1 << 19)
395 bfd_reloc_code_real_type reloc
;
400 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
401 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
406 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
409 /* Bits for DEFINED field in neon_typed_alias. */
410 #define NTA_HASTYPE 1
411 #define NTA_HASINDEX 2
413 struct neon_typed_alias
415 unsigned char defined
;
417 struct neon_type_el eltype
;
420 /* ARM register categories. This includes coprocessor numbers and various
421 architecture extensions' registers. */
445 /* Structure for a hash table entry for a register.
446 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
447 information which states whether a vector type or index is specified (for a
448 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
452 unsigned char number
;
454 unsigned char builtin
;
455 struct neon_typed_alias
*neon
;
458 /* Diagnostics used when we don't get a register of the expected type. */
459 const char *const reg_expected_msgs
[] =
461 N_("ARM register expected"),
462 N_("bad or missing co-processor number"),
463 N_("co-processor register expected"),
464 N_("FPA register expected"),
465 N_("VFP single precision register expected"),
466 N_("VFP/Neon double precision register expected"),
467 N_("Neon quad precision register expected"),
468 N_("Neon double or quad precision register expected"),
469 N_("VFP system register expected"),
470 N_("Maverick MVF register expected"),
471 N_("Maverick MVD register expected"),
472 N_("Maverick MVFX register expected"),
473 N_("Maverick MVDX register expected"),
474 N_("Maverick MVAX register expected"),
475 N_("Maverick DSPSC register expected"),
476 N_("iWMMXt data register expected"),
477 N_("iWMMXt control register expected"),
478 N_("iWMMXt scalar register expected"),
479 N_("XScale accumulator register expected"),
482 /* Some well known registers that we refer to directly elsewhere. */
487 /* ARM instructions take 4bytes in the object file, Thumb instructions
493 /* Basic string to match. */
494 const char *template;
496 /* Parameters to instruction. */
497 unsigned char operands
[8];
499 /* Conditional tag - see opcode_lookup. */
500 unsigned int tag
: 4;
502 /* Basic instruction code. */
503 unsigned int avalue
: 28;
505 /* Thumb-format instruction code. */
508 /* Which architecture variant provides this instruction. */
509 const arm_feature_set
*avariant
;
510 const arm_feature_set
*tvariant
;
512 /* Function to call to encode instruction in ARM format. */
513 void (* aencode
) (void);
515 /* Function to call to encode instruction in Thumb format. */
516 void (* tencode
) (void);
519 /* Defines for various bits that we will want to toggle. */
520 #define INST_IMMEDIATE 0x02000000
521 #define OFFSET_REG 0x02000000
522 #define HWOFFSET_IMM 0x00400000
523 #define SHIFT_BY_REG 0x00000010
524 #define PRE_INDEX 0x01000000
525 #define INDEX_UP 0x00800000
526 #define WRITE_BACK 0x00200000
527 #define LDM_TYPE_2_OR_3 0x00400000
529 #define LITERAL_MASK 0xf000f000
530 #define OPCODE_MASK 0xfe1fffff
531 #define V4_STR_BIT 0x00000020
533 #define DATA_OP_SHIFT 21
535 #define T2_OPCODE_MASK 0xfe1fffff
536 #define T2_DATA_OP_SHIFT 21
538 /* Codes to distinguish the arithmetic instructions. */
549 #define OPCODE_CMP 10
550 #define OPCODE_CMN 11
551 #define OPCODE_ORR 12
552 #define OPCODE_MOV 13
553 #define OPCODE_BIC 14
554 #define OPCODE_MVN 15
556 #define T2_OPCODE_AND 0
557 #define T2_OPCODE_BIC 1
558 #define T2_OPCODE_ORR 2
559 #define T2_OPCODE_ORN 3
560 #define T2_OPCODE_EOR 4
561 #define T2_OPCODE_ADD 8
562 #define T2_OPCODE_ADC 10
563 #define T2_OPCODE_SBC 11
564 #define T2_OPCODE_SUB 13
565 #define T2_OPCODE_RSB 14
567 #define T_OPCODE_MUL 0x4340
568 #define T_OPCODE_TST 0x4200
569 #define T_OPCODE_CMN 0x42c0
570 #define T_OPCODE_NEG 0x4240
571 #define T_OPCODE_MVN 0x43c0
573 #define T_OPCODE_ADD_R3 0x1800
574 #define T_OPCODE_SUB_R3 0x1a00
575 #define T_OPCODE_ADD_HI 0x4400
576 #define T_OPCODE_ADD_ST 0xb000
577 #define T_OPCODE_SUB_ST 0xb080
578 #define T_OPCODE_ADD_SP 0xa800
579 #define T_OPCODE_ADD_PC 0xa000
580 #define T_OPCODE_ADD_I8 0x3000
581 #define T_OPCODE_SUB_I8 0x3800
582 #define T_OPCODE_ADD_I3 0x1c00
583 #define T_OPCODE_SUB_I3 0x1e00
585 #define T_OPCODE_ASR_R 0x4100
586 #define T_OPCODE_LSL_R 0x4080
587 #define T_OPCODE_LSR_R 0x40c0
588 #define T_OPCODE_ROR_R 0x41c0
589 #define T_OPCODE_ASR_I 0x1000
590 #define T_OPCODE_LSL_I 0x0000
591 #define T_OPCODE_LSR_I 0x0800
593 #define T_OPCODE_MOV_I8 0x2000
594 #define T_OPCODE_CMP_I8 0x2800
595 #define T_OPCODE_CMP_LR 0x4280
596 #define T_OPCODE_MOV_HR 0x4600
597 #define T_OPCODE_CMP_HR 0x4500
599 #define T_OPCODE_LDR_PC 0x4800
600 #define T_OPCODE_LDR_SP 0x9800
601 #define T_OPCODE_STR_SP 0x9000
602 #define T_OPCODE_LDR_IW 0x6800
603 #define T_OPCODE_STR_IW 0x6000
604 #define T_OPCODE_LDR_IH 0x8800
605 #define T_OPCODE_STR_IH 0x8000
606 #define T_OPCODE_LDR_IB 0x7800
607 #define T_OPCODE_STR_IB 0x7000
608 #define T_OPCODE_LDR_RW 0x5800
609 #define T_OPCODE_STR_RW 0x5000
610 #define T_OPCODE_LDR_RH 0x5a00
611 #define T_OPCODE_STR_RH 0x5200
612 #define T_OPCODE_LDR_RB 0x5c00
613 #define T_OPCODE_STR_RB 0x5400
615 #define T_OPCODE_PUSH 0xb400
616 #define T_OPCODE_POP 0xbc00
618 #define T_OPCODE_BRANCH 0xe000
620 #define THUMB_SIZE 2 /* Size of thumb instruction. */
621 #define THUMB_PP_PC_LR 0x0100
622 #define THUMB_LOAD_BIT 0x0800
623 #define THUMB2_LOAD_BIT 0x00100000
/* Common operand-diagnostic strings.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* Fix: drop the stray trailing semicolon.  With it, `inst.error =
   BAD_ADDR_MODE;' expanded to a double statement, and the macro could not
   be used inside an expression (e.g. a conditional) at all.  */
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
635 static struct hash_control
*arm_ops_hsh
;
636 static struct hash_control
*arm_cond_hsh
;
637 static struct hash_control
*arm_shift_hsh
;
638 static struct hash_control
*arm_psr_hsh
;
639 static struct hash_control
*arm_v7m_psr_hsh
;
640 static struct hash_control
*arm_reg_hsh
;
641 static struct hash_control
*arm_reloc_hsh
;
642 static struct hash_control
*arm_barrier_opt_hsh
;
644 /* Stuff needed to resolve the label ambiguity
654 symbolS
* last_label_seen
;
655 static int label_is_thumb_function_name
= FALSE
;
657 /* Literal pool structure. Held on a per-section
658 and per-sub-section basis. */
660 #define MAX_LITERAL_POOL_SIZE 1024
661 typedef struct literal_pool
663 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
664 unsigned int next_free_entry
;
669 struct literal_pool
* next
;
672 /* Pointer to a linked list of literal pools. */
673 literal_pool
* list_of_pools
= NULL
;
675 /* State variables for IT block handling. */
676 static bfd_boolean current_it_mask
= 0;
677 static int current_cc
;
682 /* This array holds the chars that always start a comment. If the
683 pre-processor is disabled, these aren't very useful. */
684 const char comment_chars
[] = "@";
686 /* This array holds the chars that only start a comment at the beginning of
687 a line. If the line seems to have the form '# 123 filename'
688 .line and .file directives will appear in the pre-processed output. */
689 /* Note that input_file.c hand checks for '#' at the beginning of the
690 first line of the input file. This is because the compiler outputs
691 #NO_APP at the beginning of its output. */
692 /* Also note that comments like this one will always work. */
693 const char line_comment_chars
[] = "#";
695 const char line_separator_chars
[] = ";";
697 /* Chars that can be used to separate mant
698 from exp in floating point numbers. */
699 const char EXP_CHARS
[] = "eE";
701 /* Chars that mean this number is a floating point constant. */
705 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
707 /* Prefix characters that indicate the start of an immediate
709 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
711 /* Separator character handling. */
713 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
716 skip_past_char (char ** str
, char c
)
726 #define skip_past_comma(str) skip_past_char (str, ',')
728 /* Arithmetic expressions (possibly involving symbols). */
730 /* Return TRUE if anything in the expression is a bignum. */
733 walk_no_bignums (symbolS
* sp
)
735 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
738 if (symbol_get_value_expression (sp
)->X_add_symbol
)
740 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
741 || (symbol_get_value_expression (sp
)->X_op_symbol
742 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
748 static int in_my_get_expression
= 0;
750 /* Third argument to my_get_expression. */
751 #define GE_NO_PREFIX 0
752 #define GE_IMM_PREFIX 1
753 #define GE_OPT_PREFIX 2
754 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
755 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
756 #define GE_OPT_PREFIX_BIG 3
759 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
764 /* In unified syntax, all prefixes are optional. */
766 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
771 case GE_NO_PREFIX
: break;
773 if (!is_immediate_prefix (**str
))
775 inst
.error
= _("immediate expression requires a # prefix");
781 case GE_OPT_PREFIX_BIG
:
782 if (is_immediate_prefix (**str
))
788 memset (ep
, 0, sizeof (expressionS
));
790 save_in
= input_line_pointer
;
791 input_line_pointer
= *str
;
792 in_my_get_expression
= 1;
793 seg
= expression (ep
);
794 in_my_get_expression
= 0;
796 if (ep
->X_op
== O_illegal
)
798 /* We found a bad expression in md_operand(). */
799 *str
= input_line_pointer
;
800 input_line_pointer
= save_in
;
801 if (inst
.error
== NULL
)
802 inst
.error
= _("bad expression");
807 if (seg
!= absolute_section
808 && seg
!= text_section
809 && seg
!= data_section
810 && seg
!= bss_section
811 && seg
!= undefined_section
)
813 inst
.error
= _("bad segment");
814 *str
= input_line_pointer
;
815 input_line_pointer
= save_in
;
820 /* Get rid of any bignums now, so that we don't generate an error for which
821 we can't establish a line number later on. Big numbers are never valid
822 in instructions, which is where this routine is always called. */
823 if (prefix_mode
!= GE_OPT_PREFIX_BIG
824 && (ep
->X_op
== O_big
826 && (walk_no_bignums (ep
->X_add_symbol
)
828 && walk_no_bignums (ep
->X_op_symbol
))))))
830 inst
.error
= _("invalid constant");
831 *str
= input_line_pointer
;
832 input_line_pointer
= save_in
;
836 *str
= input_line_pointer
;
837 input_line_pointer
= save_in
;
841 /* Turn a string in input_line_pointer into a floating point constant
842 of type TYPE, and store the appropriate bytes in *LITP. The number
843 of LITTLENUMS emitted is stored in *SIZEP. An error message is
844 returned, or NULL on OK.
846 Note that fp constants aren't represent in the normal way on the ARM.
847 In big endian mode, things are as expected. However, in little endian
848 mode fp constants are big-endian word-wise, and little-endian byte-wise
849 within the words. For example, (double) 1.1 in big endian mode is
850 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
851 the byte sequence 99 99 f1 3f 9a 99 99 99.
853 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
856 md_atof (int type
, char * litP
, int * sizeP
)
859 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
891 return _("bad call to MD_ATOF()");
894 t
= atof_ieee (input_line_pointer
, type
, words
);
896 input_line_pointer
= t
;
899 if (target_big_endian
)
901 for (i
= 0; i
< prec
; i
++)
903 md_number_to_chars (litP
, (valueT
) words
[i
], 2);
909 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
910 for (i
= prec
- 1; i
>= 0; i
--)
912 md_number_to_chars (litP
, (valueT
) words
[i
], 2);
916 /* For a 4 byte float the order of elements in `words' is 1 0.
917 For an 8 byte float the order is 1 0 3 2. */
918 for (i
= 0; i
< prec
; i
+= 2)
920 md_number_to_chars (litP
, (valueT
) words
[i
+ 1], 2);
921 md_number_to_chars (litP
+ 2, (valueT
) words
[i
], 2);
929 /* We handle all bad expressions here, so that we can report the faulty
930 instruction in the error message. */
932 md_operand (expressionS
* expr
)
934 if (in_my_get_expression
)
935 expr
->X_op
= O_illegal
;
938 /* Immediate values. */
940 /* Generic immediate-value read function for use in directives.
941 Accepts anything that 'expression' can fold to a constant.
942 *val receives the number. */
945 immediate_for_directive (int *val
)
948 exp
.X_op
= O_illegal
;
950 if (is_immediate_prefix (*input_line_pointer
))
952 input_line_pointer
++;
956 if (exp
.X_op
!= O_constant
)
958 as_bad (_("expected #constant"));
959 ignore_rest_of_line ();
962 *val
= exp
.X_add_number
;
967 /* Register parsing. */
969 /* Generic register parser. CCP points to what should be the
970 beginning of a register name. If it is indeed a valid register
971 name, advance CCP over it and return the reg_entry structure;
972 otherwise return NULL. Does not issue diagnostics. */
974 static struct reg_entry
*
975 arm_reg_parse_multi (char **ccp
)
979 struct reg_entry
*reg
;
981 #ifdef REGISTER_PREFIX
982 if (*start
!= REGISTER_PREFIX
)
986 #ifdef OPTIONAL_REGISTER_PREFIX
987 if (*start
== OPTIONAL_REGISTER_PREFIX
)
992 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
997 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
999 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1009 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1010 enum arm_reg_type type
)
1012 /* Alternative syntaxes are accepted for a few register classes. */
1019 /* Generic coprocessor register names are allowed for these. */
1020 if (reg
&& reg
->type
== REG_TYPE_CN
)
1025 /* For backward compatibility, a bare number is valid here. */
1027 unsigned long processor
= strtoul (start
, ccp
, 10);
1028 if (*ccp
!= start
&& processor
<= 15)
1032 case REG_TYPE_MMXWC
:
1033 /* WC includes WCG. ??? I'm not sure this is true for all
1034 instructions that take WC registers. */
1035 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1046 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1047 return value is the register number or FAIL. */
1050 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1053 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1056 /* Do not allow a scalar (reg+index) to parse as a register. */
1057 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1060 if (reg
&& reg
->type
== type
)
1063 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1070 /* Parse a Neon type specifier. *STR should point at the leading '.'
1071 character. Does no verification at this stage that the type fits the opcode
1078 Can all be legally parsed by this function.
1080 Fills in neon_type struct pointer with parsed information, and updates STR
1081 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1082 type, FAIL if not. */
1085 parse_neon_type (struct neon_type
*type
, char **str
)
1092 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1094 enum neon_el_type thistype
= NT_untyped
;
1095 unsigned thissize
= -1u;
1102 /* Just a size without an explicit type. */
1106 switch (TOLOWER (*ptr
))
1108 case 'i': thistype
= NT_integer
; break;
1109 case 'f': thistype
= NT_float
; break;
1110 case 'p': thistype
= NT_poly
; break;
1111 case 's': thistype
= NT_signed
; break;
1112 case 'u': thistype
= NT_unsigned
; break;
1114 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1120 /* .f is an abbreviation for .f32. */
1121 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1126 thissize
= strtoul (ptr
, &ptr
, 10);
1128 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1131 as_bad (_("bad size %d in type specifier"), thissize
);
1138 type
->el
[type
->elems
].type
= thistype
;
1139 type
->el
[type
->elems
].size
= thissize
;
1144 /* Empty/missing type is not a successful parse. */
1145 if (type
->elems
== 0)
1153 /* Errors may be set multiple times during parsing or bit encoding
1154 (particularly in the Neon bits), but usually the earliest error which is set
1155 will be the most meaningful. Avoid overwriting it with later (cascading)
1156 errors by calling this function. */
1159 first_error (const char *err
)
1165 /* Parse a single type, e.g. ".s32", leading period included. */
1167 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1170 struct neon_type optype
;
1174 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1176 if (optype
.elems
== 1)
1177 *vectype
= optype
.el
[0];
1180 first_error (_("only one type should be specified for operand"));
1186 first_error (_("vector type expected"));
1198 /* Special meanings for indices (which have a range of 0-7), which will fit into
1201 #define NEON_ALL_LANES 15
1202 #define NEON_INTERLEAVE_LANES 14
1204 /* Parse either a register or a scalar, with an optional type. Return the
1205 register number, and optionally fill in the actual type of the register
1206 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1207 type/index information in *TYPEINFO. */
1210 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1211 enum arm_reg_type
*rtype
,
1212 struct neon_typed_alias
*typeinfo
)
1215 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1216 struct neon_typed_alias atype
;
1217 struct neon_type_el parsetype
;
1221 atype
.eltype
.type
= NT_invtype
;
1222 atype
.eltype
.size
= -1;
1224 /* Try alternate syntax for some types of register. Note these are mutually
1225 exclusive with the Neon syntax extensions. */
1228 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1236 /* Undo polymorphism for Neon D and Q registers. */
1237 if (type
== REG_TYPE_NDQ
1238 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1241 if (type
!= reg
->type
)
1247 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1249 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1251 first_error (_("can't redefine type for operand"));
1254 atype
.defined
|= NTA_HASTYPE
;
1255 atype
.eltype
= parsetype
;
1258 if (skip_past_char (&str
, '[') == SUCCESS
)
1260 if (type
!= REG_TYPE_VFD
)
1262 first_error (_("only D registers may be indexed"));
1266 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1268 first_error (_("can't change index for operand"));
1272 atype
.defined
|= NTA_HASINDEX
;
1274 if (skip_past_char (&str
, ']') == SUCCESS
)
1275 atype
.index
= NEON_ALL_LANES
;
1280 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1282 if (exp
.X_op
!= O_constant
)
1284 first_error (_("constant expression required"));
1288 if (skip_past_char (&str
, ']') == FAIL
)
1291 atype
.index
= exp
.X_add_number
;
1306 /* Like arm_reg_parse, but allow allow the following extra features:
1307 - If RTYPE is non-zero, return the (possibly restricted) type of the
1308 register (e.g. Neon double or quad reg when either has been requested).
1309 - If this is a Neon vector type with additional type information, fill
1310 in the struct pointed to by VECTYPE (if non-NULL).
1311 This function will fault on encountering a scalar.
1315 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1316 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1318 struct neon_typed_alias atype
;
1320 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1325 /* Do not allow a scalar (reg+index) to parse as a register. */
1326 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1328 first_error (_("register operand expected, but got scalar"));
1333 *vectype
= atype
.eltype
;
1340 #define NEON_SCALAR_REG(X) ((X) >> 4)
1341 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1343 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1344 have enough information to be able to do a good job bounds-checking. So, we
1345 just do easy checks here, and do further checks later. */
1348 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1352 struct neon_typed_alias atype
;
1354 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1356 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1359 if (atype
.index
== NEON_ALL_LANES
)
1361 first_error (_("scalar must have an index"));
1364 else if (atype
.index
>= 64 / elsize
)
1366 first_error (_("scalar index out of range"));
1371 *type
= atype
.eltype
;
1375 return reg
* 16 + atype
.index
;
1378 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1380 parse_reg_list (char ** strp
)
1382 char * str
= * strp
;
1386 /* We come back here if we get ranges concatenated by '+' or '|'. */
1401 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1403 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1413 first_error (_("bad range in register list"));
1417 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1419 if (range
& (1 << i
))
1421 (_("Warning: duplicated register (r%d) in register list"),
1429 if (range
& (1 << reg
))
1430 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1432 else if (reg
<= cur_reg
)
1433 as_tsktsk (_("Warning: register range not in ascending order"));
1438 while (skip_past_comma (&str
) != FAIL
1439 || (in_range
= 1, *str
++ == '-'));
1444 first_error (_("missing `}'"));
1452 if (my_get_expression (&expr
, &str
, GE_NO_PREFIX
))
1455 if (expr
.X_op
== O_constant
)
1457 if (expr
.X_add_number
1458 != (expr
.X_add_number
& 0x0000ffff))
1460 inst
.error
= _("invalid register mask");
1464 if ((range
& expr
.X_add_number
) != 0)
1466 int regno
= range
& expr
.X_add_number
;
1469 regno
= (1 << regno
) - 1;
1471 (_("Warning: duplicated register (r%d) in register list"),
1475 range
|= expr
.X_add_number
;
1479 if (inst
.reloc
.type
!= 0)
1481 inst
.error
= _("expression too complex");
1485 memcpy (&inst
.reloc
.exp
, &expr
, sizeof (expressionS
));
1486 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1487 inst
.reloc
.pc_rel
= 0;
1491 if (*str
== '|' || *str
== '+')
1497 while (another_range
);
1503 /* Types of registers in a list. */
1512 /* Parse a VFP register list. If the string is invalid return FAIL.
1513 Otherwise return the number of registers, and set PBASE to the first
1514 register. Parses registers of type ETYPE.
1515 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1516 - Q registers can be used to specify pairs of D registers
1517 - { } can be omitted from around a singleton register list
1518 FIXME: This is not implemented, as it would require backtracking in
1521 This could be done (the meaning isn't really ambiguous), but doesn't
1522 fit in well with the current parsing framework.
1523 - 32 D registers may be used (also true for VFPv3).
1524 FIXME: Types are ignored in these register lists, which is probably a
1528 parse_vfp_reg_list (char **str
, unsigned int *pbase
, enum reg_list_els etype
)
1532 enum arm_reg_type regtype
= 0;
1536 unsigned long mask
= 0;
1541 inst
.error
= _("expecting {");
1550 regtype
= REG_TYPE_VFS
;
1555 regtype
= REG_TYPE_VFD
;
1556 /* VFPv3 allows 32 D registers. */
1557 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
1561 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1564 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1571 case REGLIST_NEON_D
:
1572 regtype
= REG_TYPE_NDQ
;
1577 base_reg
= max_regs
;
1581 int setmask
= 1, addregs
= 1;
1583 new_base
= arm_typed_reg_parse (str
, regtype
, ®type
, NULL
);
1585 if (new_base
== FAIL
)
1587 first_error (_(reg_expected_msgs
[regtype
]));
1591 /* Note: a value of 2 * n is returned for the register Q<n>. */
1592 if (regtype
== REG_TYPE_NQ
)
1598 if (new_base
< base_reg
)
1599 base_reg
= new_base
;
1601 if (mask
& (setmask
<< new_base
))
1603 first_error (_("invalid register list"));
1607 if ((mask
>> new_base
) != 0 && ! warned
)
1609 as_tsktsk (_("register list not in ascending order"));
1613 mask
|= setmask
<< new_base
;
1616 if (**str
== '-') /* We have the start of a range expression */
1622 if ((high_range
= arm_typed_reg_parse (str
, regtype
, NULL
, NULL
))
1625 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1629 if (regtype
== REG_TYPE_NQ
)
1630 high_range
= high_range
+ 1;
1632 if (high_range
<= new_base
)
1634 inst
.error
= _("register range not in ascending order");
1638 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1640 if (mask
& (setmask
<< new_base
))
1642 inst
.error
= _("invalid register list");
1646 mask
|= setmask
<< new_base
;
1651 while (skip_past_comma (str
) != FAIL
);
1655 /* Sanity check -- should have raised a parse error above. */
1656 if (count
== 0 || count
> max_regs
)
1661 /* Final test -- the registers must be consecutive. */
1663 for (i
= 0; i
< count
; i
++)
1665 if ((mask
& (1u << i
)) == 0)
1667 inst
.error
= _("non-contiguous register range");
1675 /* True if two alias types are the same. */
1678 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1686 if (a
->defined
!= b
->defined
)
1689 if ((a
->defined
& NTA_HASTYPE
) != 0
1690 && (a
->eltype
.type
!= b
->eltype
.type
1691 || a
->eltype
.size
!= b
->eltype
.size
))
1694 if ((a
->defined
& NTA_HASINDEX
) != 0
1695 && (a
->index
!= b
->index
))
1701 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1702 The base register is put in *PBASE.
1703 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1705 The register stride (minus one) is put in bit 4 of the return value.
1706 Bits [6:5] encode the list length (minus one).
1707 The type of the list elements is put in *ELTYPE, if non-NULL. */
1709 #define NEON_LANE(X) ((X) & 0xf)
1710 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1711 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1714 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
1715 struct neon_type_el
*eltype
)
1722 int leading_brace
= 0;
1723 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
1725 const char *const incr_error
= "register stride must be 1 or 2";
1726 const char *const type_error
= "mismatched element/structure types in list";
1727 struct neon_typed_alias firsttype
;
1729 if (skip_past_char (&ptr
, '{') == SUCCESS
)
1734 struct neon_typed_alias atype
;
1735 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
1739 first_error (_(reg_expected_msgs
[rtype
]));
1746 if (rtype
== REG_TYPE_NQ
)
1753 else if (reg_incr
== -1)
1755 reg_incr
= getreg
- base_reg
;
1756 if (reg_incr
< 1 || reg_incr
> 2)
1758 first_error (_(incr_error
));
1762 else if (getreg
!= base_reg
+ reg_incr
* count
)
1764 first_error (_(incr_error
));
1768 if (!neon_alias_types_same (&atype
, &firsttype
))
1770 first_error (_(type_error
));
1774 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1778 struct neon_typed_alias htype
;
1779 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
1781 lane
= NEON_INTERLEAVE_LANES
;
1782 else if (lane
!= NEON_INTERLEAVE_LANES
)
1784 first_error (_(type_error
));
1789 else if (reg_incr
!= 1)
1791 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1795 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
1798 first_error (_(reg_expected_msgs
[rtype
]));
1801 if (!neon_alias_types_same (&htype
, &firsttype
))
1803 first_error (_(type_error
));
1806 count
+= hireg
+ dregs
- getreg
;
1810 /* If we're using Q registers, we can't use [] or [n] syntax. */
1811 if (rtype
== REG_TYPE_NQ
)
1817 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1821 else if (lane
!= atype
.index
)
1823 first_error (_(type_error
));
1827 else if (lane
== -1)
1828 lane
= NEON_INTERLEAVE_LANES
;
1829 else if (lane
!= NEON_INTERLEAVE_LANES
)
1831 first_error (_(type_error
));
1836 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
1838 /* No lane set by [x]. We must be interleaving structures. */
1840 lane
= NEON_INTERLEAVE_LANES
;
1843 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
1844 || (count
> 1 && reg_incr
== -1))
1846 first_error (_("error parsing element/structure list"));
1850 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
1852 first_error (_("expected }"));
1860 *eltype
= firsttype
.eltype
;
1865 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
1868 /* Parse an explicit relocation suffix on an expression. This is
1869 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1870 arm_reloc_hsh contains no entries, so this function can only
1871 succeed if there is no () after the word. Returns -1 on error,
1872 BFD_RELOC_UNUSED if there wasn't any suffix. */
1874 parse_reloc (char **str
)
1876 struct reloc_entry
*r
;
1880 return BFD_RELOC_UNUSED
;
1885 while (*q
&& *q
!= ')' && *q
!= ',')
1890 if ((r
= hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
1897 /* Directives: register aliases. */
1899 static struct reg_entry
*
1900 insert_reg_alias (char *str
, int number
, int type
)
1902 struct reg_entry
*new;
1905 if ((new = hash_find (arm_reg_hsh
, str
)) != 0)
1908 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
1910 /* Only warn about a redefinition if it's not defined as the
1912 else if (new->number
!= number
|| new->type
!= type
)
1913 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
1918 name
= xstrdup (str
);
1919 new = xmalloc (sizeof (struct reg_entry
));
1922 new->number
= number
;
1924 new->builtin
= FALSE
;
1927 if (hash_insert (arm_reg_hsh
, name
, (PTR
) new))
1934 insert_neon_reg_alias (char *str
, int number
, int type
,
1935 struct neon_typed_alias
*atype
)
1937 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
1941 first_error (_("attempt to redefine typed alias"));
1947 reg
->neon
= xmalloc (sizeof (struct neon_typed_alias
));
1948 *reg
->neon
= *atype
;
1952 /* Look for the .req directive. This is of the form:
1954 new_register_name .req existing_register_name
1956 If we find one, or if it looks sufficiently like one that we want to
1957 handle any error here, return non-zero. Otherwise return zero. */
1960 create_register_alias (char * newname
, char *p
)
1962 struct reg_entry
*old
;
1963 char *oldname
, *nbuf
;
1966 /* The input scrubber ensures that whitespace after the mnemonic is
1967 collapsed to single spaces. */
1969 if (strncmp (oldname
, " .req ", 6) != 0)
1973 if (*oldname
== '\0')
1976 old
= hash_find (arm_reg_hsh
, oldname
);
1979 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
1983 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1984 the desired alias name, and p points to its end. If not, then
1985 the desired alias name is in the global original_case_string. */
1986 #ifdef TC_CASE_SENSITIVE
1989 newname
= original_case_string
;
1990 nlen
= strlen (newname
);
1993 nbuf
= alloca (nlen
+ 1);
1994 memcpy (nbuf
, newname
, nlen
);
1997 /* Create aliases under the new name as stated; an all-lowercase
1998 version of the new name; and an all-uppercase version of the new
2000 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2002 for (p
= nbuf
; *p
; p
++)
2005 if (strncmp (nbuf
, newname
, nlen
))
2006 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2008 for (p
= nbuf
; *p
; p
++)
2011 if (strncmp (nbuf
, newname
, nlen
))
2012 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2017 /* Create a Neon typed/indexed register alias using directives, e.g.:
2022 These typed registers can be used instead of the types specified after the
2023 Neon mnemonic, so long as all operands given have types. Types can also be
2024 specified directly, e.g.:
2025 vadd d0.s32, d1.s32, d2.s32
2029 create_neon_reg_alias (char *newname
, char *p
)
2031 enum arm_reg_type basetype
;
2032 struct reg_entry
*basereg
;
2033 struct reg_entry mybasereg
;
2034 struct neon_type ntype
;
2035 struct neon_typed_alias typeinfo
;
2036 char *namebuf
, *nameend
;
2039 typeinfo
.defined
= 0;
2040 typeinfo
.eltype
.type
= NT_invtype
;
2041 typeinfo
.eltype
.size
= -1;
2042 typeinfo
.index
= -1;
2046 if (strncmp (p
, " .dn ", 5) == 0)
2047 basetype
= REG_TYPE_VFD
;
2048 else if (strncmp (p
, " .qn ", 5) == 0)
2049 basetype
= REG_TYPE_NQ
;
2058 basereg
= arm_reg_parse_multi (&p
);
2060 if (basereg
&& basereg
->type
!= basetype
)
2062 as_bad (_("bad type for register"));
2066 if (basereg
== NULL
)
2069 /* Try parsing as an integer. */
2070 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2071 if (exp
.X_op
!= O_constant
)
2073 as_bad (_("expression must be constant"));
2076 basereg
= &mybasereg
;
2077 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2083 typeinfo
= *basereg
->neon
;
2085 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2087 /* We got a type. */
2088 if (typeinfo
.defined
& NTA_HASTYPE
)
2090 as_bad (_("can't redefine the type of a register alias"));
2094 typeinfo
.defined
|= NTA_HASTYPE
;
2095 if (ntype
.elems
!= 1)
2097 as_bad (_("you must specify a single type only"));
2100 typeinfo
.eltype
= ntype
.el
[0];
2103 if (skip_past_char (&p
, '[') == SUCCESS
)
2106 /* We got a scalar index. */
2108 if (typeinfo
.defined
& NTA_HASINDEX
)
2110 as_bad (_("can't redefine the index of a scalar alias"));
2114 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2116 if (exp
.X_op
!= O_constant
)
2118 as_bad (_("scalar index must be constant"));
2122 typeinfo
.defined
|= NTA_HASINDEX
;
2123 typeinfo
.index
= exp
.X_add_number
;
2125 if (skip_past_char (&p
, ']') == FAIL
)
2127 as_bad (_("expecting ]"));
2132 namelen
= nameend
- newname
;
2133 namebuf
= alloca (namelen
+ 1);
2134 strncpy (namebuf
, newname
, namelen
);
2135 namebuf
[namelen
] = '\0';
2137 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2138 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2140 /* Insert name in all uppercase. */
2141 for (p
= namebuf
; *p
; p
++)
2144 if (strncmp (namebuf
, newname
, namelen
))
2145 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2146 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2148 /* Insert name in all lowercase. */
2149 for (p
= namebuf
; *p
; p
++)
2152 if (strncmp (namebuf
, newname
, namelen
))
2153 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2154 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2159 /* Should never be called, as .req goes between the alias and the
2160 register name, not at the beginning of the line. */
2162 s_req (int a ATTRIBUTE_UNUSED
)
2164 as_bad (_("invalid syntax for .req directive"));
2168 s_dn (int a ATTRIBUTE_UNUSED
)
2170 as_bad (_("invalid syntax for .dn directive"));
2174 s_qn (int a ATTRIBUTE_UNUSED
)
2176 as_bad (_("invalid syntax for .qn directive"));
2179 /* The .unreq directive deletes an alias which was previously defined
2180 by .req. For example:
2186 s_unreq (int a ATTRIBUTE_UNUSED
)
2191 name
= input_line_pointer
;
2193 while (*input_line_pointer
!= 0
2194 && *input_line_pointer
!= ' '
2195 && *input_line_pointer
!= '\n')
2196 ++input_line_pointer
;
2198 saved_char
= *input_line_pointer
;
2199 *input_line_pointer
= 0;
2202 as_bad (_("invalid syntax for .unreq directive"));
2205 struct reg_entry
*reg
= hash_find (arm_reg_hsh
, name
);
2208 as_bad (_("unknown register alias '%s'"), name
);
2209 else if (reg
->builtin
)
2210 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2214 hash_delete (arm_reg_hsh
, name
);
2215 free ((char *) reg
->name
);
2222 *input_line_pointer
= saved_char
;
2223 demand_empty_rest_of_line ();
2226 /* Directives: Instruction set selection. */
2229 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2230 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2231 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2232 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2234 static enum mstate mapstate
= MAP_UNDEFINED
;
2237 mapping_state (enum mstate state
)
2240 const char * symname
;
2243 if (mapstate
== state
)
2244 /* The mapping symbol has already been emitted.
2245 There is nothing else to do. */
2254 type
= BSF_NO_FLAGS
;
2258 type
= BSF_NO_FLAGS
;
2262 type
= BSF_NO_FLAGS
;
2270 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2272 symbolP
= symbol_new (symname
, now_seg
, (valueT
) frag_now_fix (), frag_now
);
2273 symbol_table_insert (symbolP
);
2274 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2279 THUMB_SET_FUNC (symbolP
, 0);
2280 ARM_SET_THUMB (symbolP
, 0);
2281 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2285 THUMB_SET_FUNC (symbolP
, 1);
2286 ARM_SET_THUMB (symbolP
, 1);
2287 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2296 #define mapping_state(x) /* nothing */
2299 /* Find the real, Thumb encoded start of a Thumb function. */
2302 find_real_start (symbolS
* symbolP
)
2305 const char * name
= S_GET_NAME (symbolP
);
2306 symbolS
* new_target
;
2308 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2309 #define STUB_NAME ".real_start_of"
2314 /* The compiler may generate BL instructions to local labels because
2315 it needs to perform a branch to a far away location. These labels
2316 do not have a corresponding ".real_start_of" label. We check
2317 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2318 the ".real_start_of" convention for nonlocal branches. */
2319 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2322 real_start
= ACONCAT ((STUB_NAME
, name
, NULL
));
2323 new_target
= symbol_find (real_start
);
2325 if (new_target
== NULL
)
2327 as_warn ("Failed to find real start of function: %s\n", name
);
2328 new_target
= symbolP
;
2335 opcode_select (int width
)
2342 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2343 as_bad (_("selected processor does not support THUMB opcodes"));
2346 /* No need to force the alignment, since we will have been
2347 coming from ARM mode, which is word-aligned. */
2348 record_alignment (now_seg
, 1);
2350 mapping_state (MAP_THUMB
);
2356 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2357 as_bad (_("selected processor does not support ARM opcodes"));
2362 frag_align (2, 0, 0);
2364 record_alignment (now_seg
, 1);
2366 mapping_state (MAP_ARM
);
2370 as_bad (_("invalid instruction size selected (%d)"), width
);
2375 s_arm (int ignore ATTRIBUTE_UNUSED
)
2378 demand_empty_rest_of_line ();
2382 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2385 demand_empty_rest_of_line ();
2389 s_code (int unused ATTRIBUTE_UNUSED
)
2393 temp
= get_absolute_expression ();
2398 opcode_select (temp
);
2402 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2407 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2409 /* If we are not already in thumb mode go into it, EVEN if
2410 the target processor does not support thumb instructions.
2411 This is used by gcc/config/arm/lib1funcs.asm for example
2412 to compile interworking support functions even if the
2413 target processor should not support interworking. */
2417 record_alignment (now_seg
, 1);
2420 demand_empty_rest_of_line ();
2424 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2428 /* The following label is the name/address of the start of a Thumb function.
2429 We need to know this for the interworking support. */
2430 label_is_thumb_function_name
= TRUE
;
2433 /* Perform a .set directive, but also mark the alias as
2434 being a thumb function. */
2437 s_thumb_set (int equiv
)
2439 /* XXX the following is a duplicate of the code for s_set() in read.c
2440 We cannot just call that code as we need to get at the symbol that
2447 /* Especial apologies for the random logic:
2448 This just grew, and could be parsed much more simply!
2450 name
= input_line_pointer
;
2451 delim
= get_symbol_end ();
2452 end_name
= input_line_pointer
;
2455 if (*input_line_pointer
!= ',')
2458 as_bad (_("expected comma after name \"%s\""), name
);
2460 ignore_rest_of_line ();
2464 input_line_pointer
++;
2467 if (name
[0] == '.' && name
[1] == '\0')
2469 /* XXX - this should not happen to .thumb_set. */
2473 if ((symbolP
= symbol_find (name
)) == NULL
2474 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2477 /* When doing symbol listings, play games with dummy fragments living
2478 outside the normal fragment chain to record the file and line info
2480 if (listing
& LISTING_SYMBOLS
)
2482 extern struct list_info_struct
* listing_tail
;
2483 fragS
* dummy_frag
= xmalloc (sizeof (fragS
));
2485 memset (dummy_frag
, 0, sizeof (fragS
));
2486 dummy_frag
->fr_type
= rs_fill
;
2487 dummy_frag
->line
= listing_tail
;
2488 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2489 dummy_frag
->fr_symbol
= symbolP
;
2493 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2496 /* "set" symbols are local unless otherwise specified. */
2497 SF_SET_LOCAL (symbolP
);
2498 #endif /* OBJ_COFF */
2499 } /* Make a new symbol. */
2501 symbol_table_insert (symbolP
);
2506 && S_IS_DEFINED (symbolP
)
2507 && S_GET_SEGMENT (symbolP
) != reg_section
)
2508 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2510 pseudo_set (symbolP
);
2512 demand_empty_rest_of_line ();
2514 /* XXX Now we come to the Thumb specific bit of code. */
2516 THUMB_SET_FUNC (symbolP
, 1);
2517 ARM_SET_THUMB (symbolP
, 1);
2518 #if defined OBJ_ELF || defined OBJ_COFF
2519 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2523 /* Directives: Mode selection. */
2525 /* .syntax [unified|divided] - choose the new unified syntax
2526 (same for Arm and Thumb encoding, modulo slight differences in what
2527 can be represented) or the old divergent syntax for each mode. */
2529 s_syntax (int unused ATTRIBUTE_UNUSED
)
2533 name
= input_line_pointer
;
2534 delim
= get_symbol_end ();
2536 if (!strcasecmp (name
, "unified"))
2537 unified_syntax
= TRUE
;
2538 else if (!strcasecmp (name
, "divided"))
2539 unified_syntax
= FALSE
;
2542 as_bad (_("unrecognized syntax mode \"%s\""), name
);
2545 *input_line_pointer
= delim
;
2546 demand_empty_rest_of_line ();
2549 /* Directives: sectioning and alignment. */
2551 /* Same as s_align_ptwo but align 0 => align 2. */
2554 s_align (int unused ATTRIBUTE_UNUSED
)
2558 long max_alignment
= 15;
2560 temp
= get_absolute_expression ();
2561 if (temp
> max_alignment
)
2562 as_bad (_("alignment too large: %d assumed"), temp
= max_alignment
);
2565 as_bad (_("alignment negative. 0 assumed."));
2569 if (*input_line_pointer
== ',')
2571 input_line_pointer
++;
2572 temp_fill
= get_absolute_expression ();
2580 /* Only make a frag if we HAVE to. */
2581 if (temp
&& !need_pass_2
)
2582 frag_align (temp
, (int) temp_fill
, 0);
2583 demand_empty_rest_of_line ();
2585 record_alignment (now_seg
, temp
);
2589 s_bss (int ignore ATTRIBUTE_UNUSED
)
2591 /* We don't support putting frags in the BSS segment, we fake it by
2592 marking in_bss, then looking at s_skip for clues. */
2593 subseg_set (bss_section
, 0);
2594 demand_empty_rest_of_line ();
2595 mapping_state (MAP_DATA
);
2599 s_even (int ignore ATTRIBUTE_UNUSED
)
2601 /* Never make frag if expect extra pass. */
2603 frag_align (1, 0, 0);
2605 record_alignment (now_seg
, 1);
2607 demand_empty_rest_of_line ();
2610 /* Directives: Literal pools. */
2612 static literal_pool
*
2613 find_literal_pool (void)
2615 literal_pool
* pool
;
2617 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
2619 if (pool
->section
== now_seg
2620 && pool
->sub_section
== now_subseg
)
2627 static literal_pool
*
2628 find_or_make_literal_pool (void)
2630 /* Next literal pool ID number. */
2631 static unsigned int latest_pool_num
= 1;
2632 literal_pool
* pool
;
2634 pool
= find_literal_pool ();
2638 /* Create a new pool. */
2639 pool
= xmalloc (sizeof (* pool
));
2643 pool
->next_free_entry
= 0;
2644 pool
->section
= now_seg
;
2645 pool
->sub_section
= now_subseg
;
2646 pool
->next
= list_of_pools
;
2647 pool
->symbol
= NULL
;
2649 /* Add it to the list. */
2650 list_of_pools
= pool
;
2653 /* New pools, and emptied pools, will have a NULL symbol. */
2654 if (pool
->symbol
== NULL
)
2656 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
2657 (valueT
) 0, &zero_address_frag
);
2658 pool
->id
= latest_pool_num
++;
2665 /* Add the literal in the global 'inst'
2666 structure to the relevent literal pool. */
2669 add_to_lit_pool (void)
2671 literal_pool
* pool
;
2674 pool
= find_or_make_literal_pool ();
2676 /* Check if this literal value is already in the pool. */
2677 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
2679 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
2680 && (inst
.reloc
.exp
.X_op
== O_constant
)
2681 && (pool
->literals
[entry
].X_add_number
2682 == inst
.reloc
.exp
.X_add_number
)
2683 && (pool
->literals
[entry
].X_unsigned
2684 == inst
.reloc
.exp
.X_unsigned
))
2687 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
2688 && (inst
.reloc
.exp
.X_op
== O_symbol
)
2689 && (pool
->literals
[entry
].X_add_number
2690 == inst
.reloc
.exp
.X_add_number
)
2691 && (pool
->literals
[entry
].X_add_symbol
2692 == inst
.reloc
.exp
.X_add_symbol
)
2693 && (pool
->literals
[entry
].X_op_symbol
2694 == inst
.reloc
.exp
.X_op_symbol
))
2698 /* Do we need to create a new entry? */
2699 if (entry
== pool
->next_free_entry
)
2701 if (entry
>= MAX_LITERAL_POOL_SIZE
)
2703 inst
.error
= _("literal pool overflow");
2707 pool
->literals
[entry
] = inst
.reloc
.exp
;
2708 pool
->next_free_entry
+= 1;
2711 inst
.reloc
.exp
.X_op
= O_symbol
;
2712 inst
.reloc
.exp
.X_add_number
= ((int) entry
) * 4;
2713 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
2718 /* Can't use symbol_new here, so have to create a symbol and then at
2719 a later date assign it a value. Thats what these functions do. */
2722 symbol_locate (symbolS
* symbolP
,
2723 const char * name
, /* It is copied, the caller can modify. */
2724 segT segment
, /* Segment identifier (SEG_<something>). */
2725 valueT valu
, /* Symbol value. */
2726 fragS
* frag
) /* Associated fragment. */
2728 unsigned int name_length
;
2729 char * preserved_copy_of_name
;
2731 name_length
= strlen (name
) + 1; /* +1 for \0. */
2732 obstack_grow (¬es
, name
, name_length
);
2733 preserved_copy_of_name
= obstack_finish (¬es
);
2735 #ifdef tc_canonicalize_symbol_name
2736 preserved_copy_of_name
=
2737 tc_canonicalize_symbol_name (preserved_copy_of_name
);
2740 S_SET_NAME (symbolP
, preserved_copy_of_name
);
2742 S_SET_SEGMENT (symbolP
, segment
);
2743 S_SET_VALUE (symbolP
, valu
);
2744 symbol_clear_list_pointers (symbolP
);
2746 symbol_set_frag (symbolP
, frag
);
2748 /* Link to end of symbol chain. */
2750 extern int symbol_table_frozen
;
2752 if (symbol_table_frozen
)
2756 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
2758 obj_symbol_new_hook (symbolP
);
2760 #ifdef tc_symbol_new_hook
2761 tc_symbol_new_hook (symbolP
);
2765 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
2766 #endif /* DEBUG_SYMS */
2771 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
2774 literal_pool
* pool
;
2777 pool
= find_literal_pool ();
2779 || pool
->symbol
== NULL
2780 || pool
->next_free_entry
== 0)
2783 mapping_state (MAP_DATA
);
2785 /* Align pool as you have word accesses.
2786 Only make a frag if we have to. */
2788 frag_align (2, 0, 0);
2790 record_alignment (now_seg
, 2);
2792 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
2794 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
2795 (valueT
) frag_now_fix (), frag_now
);
2796 symbol_table_insert (pool
->symbol
);
2798 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
2800 #if defined OBJ_COFF || defined OBJ_ELF
2801 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
2804 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
2805 /* First output the expression in the instruction to the pool. */
2806 emit_expr (&(pool
->literals
[entry
]), 4); /* .word */
2808 /* Mark the pool as empty. */
2809 pool
->next_free_entry
= 0;
2810 pool
->symbol
= NULL
;
2814 /* Forward declarations for functions below, in the MD interface
2816 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
2817 static valueT
create_unwind_entry (int);
2818 static void start_unwind_section (const segT
, int);
2819 static void add_unwind_opcode (valueT
, int);
2820 static void flush_pending_unwind (void);
2822 /* Directives: Data. */
2825 s_arm_elf_cons (int nbytes
)
2829 #ifdef md_flush_pending_output
2830 md_flush_pending_output ();
2833 if (is_it_end_of_statement ())
2835 demand_empty_rest_of_line ();
2839 #ifdef md_cons_align
2840 md_cons_align (nbytes
);
2843 mapping_state (MAP_DATA
);
2847 char *base
= input_line_pointer
;
2851 if (exp
.X_op
!= O_symbol
)
2852 emit_expr (&exp
, (unsigned int) nbytes
);
2855 char *before_reloc
= input_line_pointer
;
2856 reloc
= parse_reloc (&input_line_pointer
);
2859 as_bad (_("unrecognized relocation suffix"));
2860 ignore_rest_of_line ();
2863 else if (reloc
== BFD_RELOC_UNUSED
)
2864 emit_expr (&exp
, (unsigned int) nbytes
);
2867 reloc_howto_type
*howto
= bfd_reloc_type_lookup (stdoutput
, reloc
);
2868 int size
= bfd_get_reloc_size (howto
);
2870 if (reloc
== BFD_RELOC_ARM_PLT32
)
2872 as_bad (_("(plt) is only valid on branch targets"));
2873 reloc
= BFD_RELOC_UNUSED
;
2878 as_bad (_("%s relocations do not fit in %d bytes"),
2879 howto
->name
, nbytes
);
2882 /* We've parsed an expression stopping at O_symbol.
2883 But there may be more expression left now that we
2884 have parsed the relocation marker. Parse it again.
2885 XXX Surely there is a cleaner way to do this. */
2886 char *p
= input_line_pointer
;
2888 char *save_buf
= alloca (input_line_pointer
- base
);
2889 memcpy (save_buf
, base
, input_line_pointer
- base
);
2890 memmove (base
+ (input_line_pointer
- before_reloc
),
2891 base
, before_reloc
- base
);
2893 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
2895 memcpy (base
, save_buf
, p
- base
);
2897 offset
= nbytes
- size
;
2898 p
= frag_more ((int) nbytes
);
2899 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
2900 size
, &exp
, 0, reloc
);
2905 while (*input_line_pointer
++ == ',');
2907 /* Put terminator back into stream. */
2908 input_line_pointer
--;
2909 demand_empty_rest_of_line ();
2913 /* Parse a .rel31 directive. */
2916 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
2923 if (*input_line_pointer
== '1')
2924 highbit
= 0x80000000;
2925 else if (*input_line_pointer
!= '0')
2926 as_bad (_("expected 0 or 1"));
2928 input_line_pointer
++;
2929 if (*input_line_pointer
!= ',')
2930 as_bad (_("missing comma"));
2931 input_line_pointer
++;
2933 #ifdef md_flush_pending_output
2934 md_flush_pending_output ();
2937 #ifdef md_cons_align
2941 mapping_state (MAP_DATA
);
2946 md_number_to_chars (p
, highbit
, 4);
2947 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
2948 BFD_RELOC_ARM_PREL31
);
2950 demand_empty_rest_of_line ();
2953 /* Directives: AEABI stack-unwind tables. */
2955 /* Parse an unwind_fnstart directive. Simply records the current location. */
2958 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
2960 demand_empty_rest_of_line ();
2961 /* Mark the start of the function. */
2962 unwind
.proc_start
= expr_build_dot ();
2964 /* Reset the rest of the unwind info. */
2965 unwind
.opcode_count
= 0;
2966 unwind
.table_entry
= NULL
;
2967 unwind
.personality_routine
= NULL
;
2968 unwind
.personality_index
= -1;
2969 unwind
.frame_size
= 0;
2970 unwind
.fp_offset
= 0;
2973 unwind
.sp_restored
= 0;
2977 /* Parse a handlerdata directive. Creates the exception handling table entry
2978 for the function. */
2981 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
2983 demand_empty_rest_of_line ();
2984 if (unwind
.table_entry
)
2985 as_bad (_("dupicate .handlerdata directive"));
2987 create_unwind_entry (1);
2990 /* Parse an unwind_fnend directive. Generates the index table entry. */
2993 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
2999 demand_empty_rest_of_line ();
3001 /* Add eh table entry. */
3002 if (unwind
.table_entry
== NULL
)
3003 val
= create_unwind_entry (0);
3007 /* Add index table entry. This is two words. */
3008 start_unwind_section (unwind
.saved_seg
, 1);
3009 frag_align (2, 0, 0);
3010 record_alignment (now_seg
, 2);
3012 ptr
= frag_more (8);
3013 where
= frag_now_fix () - 8;
3015 /* Self relative offset of the function start. */
3016 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3017 BFD_RELOC_ARM_PREL31
);
3019 /* Indicate dependency on EHABI-defined personality routines to the
3020 linker, if it hasn't been done already. */
3021 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3022 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3024 static const char *const name
[] = {
3025 "__aeabi_unwind_cpp_pr0",
3026 "__aeabi_unwind_cpp_pr1",
3027 "__aeabi_unwind_cpp_pr2"
3029 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3030 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3031 marked_pr_dependency
|= 1 << unwind
.personality_index
;
3032 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3033 = marked_pr_dependency
;
3037 /* Inline exception table entry. */
3038 md_number_to_chars (ptr
+ 4, val
, 4);
3040 /* Self relative offset of the table entry. */
3041 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3042 BFD_RELOC_ARM_PREL31
);
3044 /* Restore the original section. */
3045 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3049 /* Parse an unwind_cantunwind directive. */
3052 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3054 demand_empty_rest_of_line ();
3055 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3056 as_bad (_("personality routine specified for cantunwind frame"));
3058 unwind
.personality_index
= -2;
3062 /* Parse a personalityindex directive. */
3065 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3069 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3070 as_bad (_("duplicate .personalityindex directive"));
3074 if (exp
.X_op
!= O_constant
3075 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3077 as_bad (_("bad personality routine number"));
3078 ignore_rest_of_line ();
3082 unwind
.personality_index
= exp
.X_add_number
;
3084 demand_empty_rest_of_line ();
3088 /* Parse a personality directive. */
3091 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3095 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3096 as_bad (_("duplicate .personality directive"));
3098 name
= input_line_pointer
;
3099 c
= get_symbol_end ();
3100 p
= input_line_pointer
;
3101 unwind
.personality_routine
= symbol_find_or_make (name
);
3103 demand_empty_rest_of_line ();
3107 /* Parse a directive saving core registers. */
3110 s_arm_unwind_save_core (void)
3116 range
= parse_reg_list (&input_line_pointer
);
3119 as_bad (_("expected register list"));
3120 ignore_rest_of_line ();
3124 demand_empty_rest_of_line ();
3126 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3127 into .unwind_save {..., sp...}. We aren't bothered about the value of
3128 ip because it is clobbered by calls. */
3129 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3130 && (range
& 0x3000) == 0x1000)
3132 unwind
.opcode_count
--;
3133 unwind
.sp_restored
= 0;
3134 range
= (range
| 0x2000) & ~0x1000;
3135 unwind
.pending_offset
= 0;
3141 /* See if we can use the short opcodes. These pop a block of up to 8
3142 registers starting with r4, plus maybe r14. */
3143 for (n
= 0; n
< 8; n
++)
3145 /* Break at the first non-saved register. */
3146 if ((range
& (1 << (n
+ 4))) == 0)
3149 /* See if there are any other bits set. */
3150 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3152 /* Use the long form. */
3153 op
= 0x8000 | ((range
>> 4) & 0xfff);
3154 add_unwind_opcode (op
, 2);
3158 /* Use the short form. */
3160 op
= 0xa8; /* Pop r14. */
3162 op
= 0xa0; /* Do not pop r14. */
3164 add_unwind_opcode (op
, 1);
3171 op
= 0xb100 | (range
& 0xf);
3172 add_unwind_opcode (op
, 2);
3175 /* Record the number of bytes pushed. */
3176 for (n
= 0; n
< 16; n
++)
3178 if (range
& (1 << n
))
3179 unwind
.frame_size
+= 4;
3184 /* Parse a directive saving FPA registers. */
3187 s_arm_unwind_save_fpa (int reg
)
3193 /* Get Number of registers to transfer. */
3194 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3197 exp
.X_op
= O_illegal
;
3199 if (exp
.X_op
!= O_constant
)
3201 as_bad (_("expected , <constant>"));
3202 ignore_rest_of_line ();
3206 num_regs
= exp
.X_add_number
;
3208 if (num_regs
< 1 || num_regs
> 4)
3210 as_bad (_("number of registers must be in the range [1:4]"));
3211 ignore_rest_of_line ();
3215 demand_empty_rest_of_line ();
3220 op
= 0xb4 | (num_regs
- 1);
3221 add_unwind_opcode (op
, 1);
3226 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
3227 add_unwind_opcode (op
, 2);
3229 unwind
.frame_size
+= num_regs
* 12;
3233 /* Parse a directive saving VFP registers. */
3236 s_arm_unwind_save_vfp (void)
3242 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
3245 as_bad (_("expected register list"));
3246 ignore_rest_of_line ();
3250 demand_empty_rest_of_line ();
3255 op
= 0xb8 | (count
- 1);
3256 add_unwind_opcode (op
, 1);
3261 op
= 0xb300 | (reg
<< 4) | (count
- 1);
3262 add_unwind_opcode (op
, 2);
3264 unwind
.frame_size
+= count
* 8 + 4;
3268 /* Parse a directive saving iWMMXt data registers. */
3271 s_arm_unwind_save_mmxwr (void)
3279 if (*input_line_pointer
== '{')
3280 input_line_pointer
++;
3284 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3288 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3293 as_tsktsk (_("register list not in ascending order"));
3296 if (*input_line_pointer
== '-')
3298 input_line_pointer
++;
3299 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3302 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3305 else if (reg
>= hi_reg
)
3307 as_bad (_("bad register range"));
3310 for (; reg
< hi_reg
; reg
++)
3314 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3316 if (*input_line_pointer
== '}')
3317 input_line_pointer
++;
3319 demand_empty_rest_of_line ();
3321 /* Generate any deferred opcodes because we're going to be looking at
3323 flush_pending_unwind ();
3325 for (i
= 0; i
< 16; i
++)
3327 if (mask
& (1 << i
))
3328 unwind
.frame_size
+= 8;
3331 /* Attempt to combine with a previous opcode. We do this because gcc
3332 likes to output separate unwind directives for a single block of
3334 if (unwind
.opcode_count
> 0)
3336 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
3337 if ((i
& 0xf8) == 0xc0)
3340 /* Only merge if the blocks are contiguous. */
3343 if ((mask
& 0xfe00) == (1 << 9))
3345 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
3346 unwind
.opcode_count
--;
3349 else if (i
== 6 && unwind
.opcode_count
>= 2)
3351 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
3355 op
= 0xffff << (reg
- 1);
3357 || ((mask
& op
) == (1u << (reg
- 1))))
3359 op
= (1 << (reg
+ i
+ 1)) - 1;
3360 op
&= ~((1 << reg
) - 1);
3362 unwind
.opcode_count
-= 2;
3369 /* We want to generate opcodes in the order the registers have been
3370 saved, ie. descending order. */
3371 for (reg
= 15; reg
>= -1; reg
--)
3373 /* Save registers in blocks. */
3375 || !(mask
& (1 << reg
)))
3377 /* We found an unsaved reg. Generate opcodes to save the
3378 preceeding block. */
3384 op
= 0xc0 | (hi_reg
- 10);
3385 add_unwind_opcode (op
, 1);
3390 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
3391 add_unwind_opcode (op
, 2);
3400 ignore_rest_of_line ();
3404 s_arm_unwind_save_mmxwcg (void)
3411 if (*input_line_pointer
== '{')
3412 input_line_pointer
++;
3416 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3420 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3426 as_tsktsk (_("register list not in ascending order"));
3429 if (*input_line_pointer
== '-')
3431 input_line_pointer
++;
3432 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3435 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3438 else if (reg
>= hi_reg
)
3440 as_bad (_("bad register range"));
3443 for (; reg
< hi_reg
; reg
++)
3447 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3449 if (*input_line_pointer
== '}')
3450 input_line_pointer
++;
3452 demand_empty_rest_of_line ();
3454 /* Generate any deferred opcodes because we're going to be looking at
3456 flush_pending_unwind ();
3458 for (reg
= 0; reg
< 16; reg
++)
3460 if (mask
& (1 << reg
))
3461 unwind
.frame_size
+= 4;
3464 add_unwind_opcode (op
, 2);
3467 ignore_rest_of_line ();
3471 /* Parse an unwind_save directive. */
3474 s_arm_unwind_save (int ignored ATTRIBUTE_UNUSED
)
3477 struct reg_entry
*reg
;
3478 bfd_boolean had_brace
= FALSE
;
3480 /* Figure out what sort of save we have. */
3481 peek
= input_line_pointer
;
3489 reg
= arm_reg_parse_multi (&peek
);
3493 as_bad (_("register expected"));
3494 ignore_rest_of_line ();
3503 as_bad (_("FPA .unwind_save does not take a register list"));
3504 ignore_rest_of_line ();
3507 s_arm_unwind_save_fpa (reg
->number
);
3510 case REG_TYPE_RN
: s_arm_unwind_save_core (); return;
3511 case REG_TYPE_VFD
: s_arm_unwind_save_vfp (); return;
3512 case REG_TYPE_MMXWR
: s_arm_unwind_save_mmxwr (); return;
3513 case REG_TYPE_MMXWCG
: s_arm_unwind_save_mmxwcg (); return;
3516 as_bad (_(".unwind_save does not support this kind of register"));
3517 ignore_rest_of_line ();
3522 /* Parse an unwind_movsp directive. */
3525 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
3530 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3533 as_bad (_(reg_expected_msgs
[REG_TYPE_RN
]));
3534 ignore_rest_of_line ();
3537 demand_empty_rest_of_line ();
3539 if (reg
== REG_SP
|| reg
== REG_PC
)
3541 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3545 if (unwind
.fp_reg
!= REG_SP
)
3546 as_bad (_("unexpected .unwind_movsp directive"));
3548 /* Generate opcode to restore the value. */
3550 add_unwind_opcode (op
, 1);
3552 /* Record the information for later. */
3553 unwind
.fp_reg
= reg
;
3554 unwind
.fp_offset
= unwind
.frame_size
;
3555 unwind
.sp_restored
= 1;
3558 /* Parse an unwind_pad directive. */
3561 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
3565 if (immediate_for_directive (&offset
) == FAIL
)
3570 as_bad (_("stack increment must be multiple of 4"));
3571 ignore_rest_of_line ();
3575 /* Don't generate any opcodes, just record the details for later. */
3576 unwind
.frame_size
+= offset
;
3577 unwind
.pending_offset
+= offset
;
3579 demand_empty_rest_of_line ();
3582 /* Parse an unwind_setfp directive. */
3585 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
3591 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3592 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3595 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3597 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
3599 as_bad (_("expected <reg>, <reg>"));
3600 ignore_rest_of_line ();
3604 /* Optional constant. */
3605 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3607 if (immediate_for_directive (&offset
) == FAIL
)
3613 demand_empty_rest_of_line ();
3615 if (sp_reg
!= 13 && sp_reg
!= unwind
.fp_reg
)
3617 as_bad (_("register must be either sp or set by a previous"
3618 "unwind_movsp directive"));
3622 /* Don't generate any opcodes, just record the information for later. */
3623 unwind
.fp_reg
= fp_reg
;
3626 unwind
.fp_offset
= unwind
.frame_size
- offset
;
3628 unwind
.fp_offset
-= offset
;
3631 /* Parse an unwind_raw directive. */
3634 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
3637 /* This is an arbitrary limit. */
3638 unsigned char op
[16];
3642 if (exp
.X_op
== O_constant
3643 && skip_past_comma (&input_line_pointer
) != FAIL
)
3645 unwind
.frame_size
+= exp
.X_add_number
;
3649 exp
.X_op
= O_illegal
;
3651 if (exp
.X_op
!= O_constant
)
3653 as_bad (_("expected <offset>, <opcode>"));
3654 ignore_rest_of_line ();
3660 /* Parse the opcode. */
3665 as_bad (_("unwind opcode too long"));
3666 ignore_rest_of_line ();
3668 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
3670 as_bad (_("invalid unwind opcode"));
3671 ignore_rest_of_line ();
3674 op
[count
++] = exp
.X_add_number
;
3676 /* Parse the next byte. */
3677 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3683 /* Add the opcode bytes in reverse order. */
3685 add_unwind_opcode (op
[count
], 1);
3687 demand_empty_rest_of_line ();
3691 /* Parse a .eabi_attribute directive. */
3694 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
3697 bfd_boolean is_string
;
3704 if (exp
.X_op
!= O_constant
)
3707 tag
= exp
.X_add_number
;
3708 if (tag
== 4 || tag
== 5 || tag
== 32 || (tag
> 32 && (tag
& 1) != 0))
3713 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3715 if (tag
== 32 || !is_string
)
3718 if (exp
.X_op
!= O_constant
)
3720 as_bad (_("expected numeric constant"));
3721 ignore_rest_of_line ();
3724 i
= exp
.X_add_number
;
3726 if (tag
== Tag_compatibility
3727 && skip_past_comma (&input_line_pointer
) == FAIL
)
3729 as_bad (_("expected comma"));
3730 ignore_rest_of_line ();
3735 skip_whitespace(input_line_pointer
);
3736 if (*input_line_pointer
!= '"')
3738 input_line_pointer
++;
3739 s
= input_line_pointer
;
3740 while (*input_line_pointer
&& *input_line_pointer
!= '"')
3741 input_line_pointer
++;
3742 if (*input_line_pointer
!= '"')
3744 saved_char
= *input_line_pointer
;
3745 *input_line_pointer
= 0;
3753 if (tag
== Tag_compatibility
)
3754 elf32_arm_add_eabi_attr_compat (stdoutput
, i
, s
);
3756 elf32_arm_add_eabi_attr_string (stdoutput
, tag
, s
);
3758 elf32_arm_add_eabi_attr_int (stdoutput
, tag
, i
);
3762 *input_line_pointer
= saved_char
;
3763 input_line_pointer
++;
3765 demand_empty_rest_of_line ();
3768 as_bad (_("bad string constant"));
3769 ignore_rest_of_line ();
3772 as_bad (_("expected <tag> , <value>"));
3773 ignore_rest_of_line ();
3775 #endif /* OBJ_ELF */
3777 static void s_arm_arch (int);
3778 static void s_arm_cpu (int);
3779 static void s_arm_fpu (int);
3781 /* This table describes all the machine specific pseudo-ops the assembler
3782 has to support. The fields are:
3783 pseudo-op name without dot
3784 function to call to execute this pseudo-op
3785 Integer arg to pass to the function. */
3787 const pseudo_typeS md_pseudo_table
[] =
3789 /* Never called because '.req' does not start a line. */
3790 { "req", s_req
, 0 },
3791 /* Following two are likewise never called. */
3794 { "unreq", s_unreq
, 0 },
3795 { "bss", s_bss
, 0 },
3796 { "align", s_align
, 0 },
3797 { "arm", s_arm
, 0 },
3798 { "thumb", s_thumb
, 0 },
3799 { "code", s_code
, 0 },
3800 { "force_thumb", s_force_thumb
, 0 },
3801 { "thumb_func", s_thumb_func
, 0 },
3802 { "thumb_set", s_thumb_set
, 0 },
3803 { "even", s_even
, 0 },
3804 { "ltorg", s_ltorg
, 0 },
3805 { "pool", s_ltorg
, 0 },
3806 { "syntax", s_syntax
, 0 },
3807 { "cpu", s_arm_cpu
, 0 },
3808 { "arch", s_arm_arch
, 0 },
3809 { "fpu", s_arm_fpu
, 0 },
3811 { "word", s_arm_elf_cons
, 4 },
3812 { "long", s_arm_elf_cons
, 4 },
3813 { "rel31", s_arm_rel31
, 0 },
3814 { "fnstart", s_arm_unwind_fnstart
, 0 },
3815 { "fnend", s_arm_unwind_fnend
, 0 },
3816 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
3817 { "personality", s_arm_unwind_personality
, 0 },
3818 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
3819 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
3820 { "save", s_arm_unwind_save
, 0 },
3821 { "movsp", s_arm_unwind_movsp
, 0 },
3822 { "pad", s_arm_unwind_pad
, 0 },
3823 { "setfp", s_arm_unwind_setfp
, 0 },
3824 { "unwind_raw", s_arm_unwind_raw
, 0 },
3825 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
3829 { "extend", float_cons
, 'x' },
3830 { "ldouble", float_cons
, 'x' },
3831 { "packed", float_cons
, 'p' },
3835 /* Parser functions used exclusively in instruction operands. */
3837 /* Generic immediate-value read function for use in insn parsing.
3838 STR points to the beginning of the immediate (the leading #);
3839 VAL receives the value; if the value is outside [MIN, MAX]
3840 issue an error. PREFIX_OPT is true if the immediate prefix is
3844 parse_immediate (char **str
, int *val
, int min
, int max
,
3845 bfd_boolean prefix_opt
)
3848 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
3849 if (exp
.X_op
!= O_constant
)
3851 inst
.error
= _("constant expression required");
3855 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
3857 inst
.error
= _("immediate value out of range");
3861 *val
= exp
.X_add_number
;
3865 /* Less-generic immediate-value read function with the possibility of loading a
3866 big (64-bit) immediate, as required by Neon VMOV and VMVN immediate
3867 instructions. Puts the result directly in inst.operands[i]. */
3870 parse_big_immediate (char **str
, int i
)
3875 my_get_expression (&exp
, &ptr
, GE_OPT_PREFIX_BIG
);
3877 if (exp
.X_op
== O_constant
)
3878 inst
.operands
[i
].imm
= exp
.X_add_number
;
3879 else if (exp
.X_op
== O_big
3880 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
> 32
3881 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
<= 64)
3883 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
3884 /* Bignums have their least significant bits in
3885 generic_bignum[0]. Make sure we put 32 bits in imm and
3886 32 bits in reg, in a (hopefully) portable way. */
3887 assert (parts
!= 0);
3888 inst
.operands
[i
].imm
= 0;
3889 for (j
= 0; j
< parts
; j
++, idx
++)
3890 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
3891 << (LITTLENUM_NUMBER_OF_BITS
* j
);
3892 inst
.operands
[i
].reg
= 0;
3893 for (j
= 0; j
< parts
; j
++, idx
++)
3894 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
3895 << (LITTLENUM_NUMBER_OF_BITS
* j
);
3896 inst
.operands
[i
].regisimm
= 1;
3906 /* Returns the pseudo-register number of an FPA immediate constant,
3907 or FAIL if there isn't a valid constant here. */
3910 parse_fpa_immediate (char ** str
)
3912 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
3918 /* First try and match exact strings, this is to guarantee
3919 that some formats will work even for cross assembly. */
3921 for (i
= 0; fp_const
[i
]; i
++)
3923 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
3927 *str
+= strlen (fp_const
[i
]);
3928 if (is_end_of_line
[(unsigned char) **str
])
3934 /* Just because we didn't get a match doesn't mean that the constant
3935 isn't valid, just that it is in a format that we don't
3936 automatically recognize. Try parsing it with the standard
3937 expression routines. */
3939 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
3941 /* Look for a raw floating point number. */
3942 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
3943 && is_end_of_line
[(unsigned char) *save_in
])
3945 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
3947 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
3949 if (words
[j
] != fp_values
[i
][j
])
3953 if (j
== MAX_LITTLENUMS
)
3961 /* Try and parse a more complex expression, this will probably fail
3962 unless the code uses a floating point prefix (eg "0f"). */
3963 save_in
= input_line_pointer
;
3964 input_line_pointer
= *str
;
3965 if (expression (&exp
) == absolute_section
3966 && exp
.X_op
== O_big
3967 && exp
.X_add_number
< 0)
3969 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
3971 if (gen_to_words (words
, 5, (long) 15) == 0)
3973 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
3975 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
3977 if (words
[j
] != fp_values
[i
][j
])
3981 if (j
== MAX_LITTLENUMS
)
3983 *str
= input_line_pointer
;
3984 input_line_pointer
= save_in
;
3991 *str
= input_line_pointer
;
3992 input_line_pointer
= save_in
;
3993 inst
.error
= _("invalid FPA immediate expression");
3997 /* Returns 1 if a number has "quarter-precision" float format
3998 0baBbbbbbc defgh000 00000000 00000000. */
4001 is_quarter_float (unsigned imm
)
4003 int bs
= (imm
& 0x20000000) ? 0x3e000000 : 0x40000000;
4004 return (imm
& 0x7ffff) == 0 && ((imm
& 0x7e000000) ^ bs
) == 0;
4007 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4008 0baBbbbbbc defgh000 00000000 00000000.
4009 The minus-zero case needs special handling, since it can't be encoded in the
4010 "quarter-precision" float format, but can nonetheless be loaded as an integer
4014 parse_qfloat_immediate (char **ccp
, int *immed
)
4017 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4019 skip_past_char (&str
, '#');
4021 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
4023 unsigned fpword
= 0;
4026 /* Our FP word must be 32 bits (single-precision FP). */
4027 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
4029 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
4033 if (is_quarter_float (fpword
) || fpword
== 0x80000000)
4046 /* Shift operands. */
4049 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
4052 struct asm_shift_name
4055 enum shift_kind kind
;
4058 /* Third argument to parse_shift. */
4059 enum parse_shift_mode
4061 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
4062 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
4063 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
4064 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
4065 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
4068 /* Parse a <shift> specifier on an ARM data processing instruction.
4069 This has three forms:
4071 (LSL|LSR|ASL|ASR|ROR) Rs
4072 (LSL|LSR|ASL|ASR|ROR) #imm
4075 Note that ASL is assimilated to LSL in the instruction encoding, and
4076 RRX to ROR #0 (which cannot be written as such). */
4079 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
4081 const struct asm_shift_name
*shift_name
;
4082 enum shift_kind shift
;
4087 for (p
= *str
; ISALPHA (*p
); p
++)
4092 inst
.error
= _("shift expression expected");
4096 shift_name
= hash_find_n (arm_shift_hsh
, *str
, p
- *str
);
4098 if (shift_name
== NULL
)
4100 inst
.error
= _("shift expression expected");
4104 shift
= shift_name
->kind
;
4108 case NO_SHIFT_RESTRICT
:
4109 case SHIFT_IMMEDIATE
: break;
4111 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
4112 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
4114 inst
.error
= _("'LSL' or 'ASR' required");
4119 case SHIFT_LSL_IMMEDIATE
:
4120 if (shift
!= SHIFT_LSL
)
4122 inst
.error
= _("'LSL' required");
4127 case SHIFT_ASR_IMMEDIATE
:
4128 if (shift
!= SHIFT_ASR
)
4130 inst
.error
= _("'ASR' required");
4138 if (shift
!= SHIFT_RRX
)
4140 /* Whitespace can appear here if the next thing is a bare digit. */
4141 skip_whitespace (p
);
4143 if (mode
== NO_SHIFT_RESTRICT
4144 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4146 inst
.operands
[i
].imm
= reg
;
4147 inst
.operands
[i
].immisreg
= 1;
4149 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4152 inst
.operands
[i
].shift_kind
= shift
;
4153 inst
.operands
[i
].shifted
= 1;
4158 /* Parse a <shifter_operand> for an ARM data processing instruction:
4161 #<immediate>, <rotate>
4165 where <shift> is defined by parse_shift above, and <rotate> is a
4166 multiple of 2 between 0 and 30. Validation of immediate operands
4167 is deferred to md_apply_fix. */
4170 parse_shifter_operand (char **str
, int i
)
4175 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
4177 inst
.operands
[i
].reg
= value
;
4178 inst
.operands
[i
].isreg
= 1;
4180 /* parse_shift will override this if appropriate */
4181 inst
.reloc
.exp
.X_op
= O_constant
;
4182 inst
.reloc
.exp
.X_add_number
= 0;
4184 if (skip_past_comma (str
) == FAIL
)
4187 /* Shift operation on register. */
4188 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
4191 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
4194 if (skip_past_comma (str
) == SUCCESS
)
4196 /* #x, y -- ie explicit rotation by Y. */
4197 if (my_get_expression (&expr
, str
, GE_NO_PREFIX
))
4200 if (expr
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
4202 inst
.error
= _("constant expression expected");
4206 value
= expr
.X_add_number
;
4207 if (value
< 0 || value
> 30 || value
% 2 != 0)
4209 inst
.error
= _("invalid rotation");
4212 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
4214 inst
.error
= _("invalid constant");
4218 /* Convert to decoded value. md_apply_fix will put it back. */
4219 inst
.reloc
.exp
.X_add_number
4220 = (((inst
.reloc
.exp
.X_add_number
<< (32 - value
))
4221 | (inst
.reloc
.exp
.X_add_number
>> value
)) & 0xffffffff);
4224 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
4225 inst
.reloc
.pc_rel
= 0;
4229 /* Parse all forms of an ARM address expression. Information is written
4230 to inst.operands[i] and/or inst.reloc.
4232 Preindexed addressing (.preind=1):
4234 [Rn, #offset] .reg=Rn .reloc.exp=offset
4235 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4236 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4237 .shift_kind=shift .reloc.exp=shift_imm
4239 These three may have a trailing ! which causes .writeback to be set also.
4241 Postindexed addressing (.postind=1, .writeback=1):
4243 [Rn], #offset .reg=Rn .reloc.exp=offset
4244 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4245 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4246 .shift_kind=shift .reloc.exp=shift_imm
4248 Unindexed addressing (.preind=0, .postind=0):
4250 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4254 [Rn]{!} shorthand for [Rn,#0]{!}
4255 =immediate .isreg=0 .reloc.exp=immediate
4256 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4258 It is the caller's responsibility to check for addressing modes not
4259 supported by the instruction, and to set inst.reloc.type. */
4262 parse_address (char **str
, int i
)
4267 if (skip_past_char (&p
, '[') == FAIL
)
4269 if (skip_past_char (&p
, '=') == FAIL
)
4271 /* bare address - translate to PC-relative offset */
4272 inst
.reloc
.pc_rel
= 1;
4273 inst
.operands
[i
].reg
= REG_PC
;
4274 inst
.operands
[i
].isreg
= 1;
4275 inst
.operands
[i
].preind
= 1;
4277 /* else a load-constant pseudo op, no special treatment needed here */
4279 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4286 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4288 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4291 inst
.operands
[i
].reg
= reg
;
4292 inst
.operands
[i
].isreg
= 1;
4294 if (skip_past_comma (&p
) == SUCCESS
)
4296 inst
.operands
[i
].preind
= 1;
4299 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4301 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4303 inst
.operands
[i
].imm
= reg
;
4304 inst
.operands
[i
].immisreg
= 1;
4306 if (skip_past_comma (&p
) == SUCCESS
)
4307 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4310 else if (skip_past_char (&p
, ':') == SUCCESS
)
4312 /* FIXME: '@' should be used here, but it's filtered out by generic
4313 code before we get to see it here. This may be subject to
4316 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
4317 if (exp
.X_op
!= O_constant
)
4319 inst
.error
= _("alignment must be constant");
4322 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
4323 inst
.operands
[i
].immisalign
= 1;
4324 /* Alignments are not pre-indexes. */
4325 inst
.operands
[i
].preind
= 0;
4329 if (inst
.operands
[i
].negative
)
4331 inst
.operands
[i
].negative
= 0;
4334 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4339 if (skip_past_char (&p
, ']') == FAIL
)
4341 inst
.error
= _("']' expected");
4345 if (skip_past_char (&p
, '!') == SUCCESS
)
4346 inst
.operands
[i
].writeback
= 1;
4348 else if (skip_past_comma (&p
) == SUCCESS
)
4350 if (skip_past_char (&p
, '{') == SUCCESS
)
4352 /* [Rn], {expr} - unindexed, with option */
4353 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
4354 0, 255, TRUE
) == FAIL
)
4357 if (skip_past_char (&p
, '}') == FAIL
)
4359 inst
.error
= _("'}' expected at end of 'option' field");
4362 if (inst
.operands
[i
].preind
)
4364 inst
.error
= _("cannot combine index with option");
4372 inst
.operands
[i
].postind
= 1;
4373 inst
.operands
[i
].writeback
= 1;
4375 if (inst
.operands
[i
].preind
)
4377 inst
.error
= _("cannot combine pre- and post-indexing");
4382 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4384 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4386 /* We might be using the immediate for alignment already. If we
4387 are, OR the register number into the low-order bits. */
4388 if (inst
.operands
[i
].immisalign
)
4389 inst
.operands
[i
].imm
|= reg
;
4391 inst
.operands
[i
].imm
= reg
;
4392 inst
.operands
[i
].immisreg
= 1;
4394 if (skip_past_comma (&p
) == SUCCESS
)
4395 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4400 if (inst
.operands
[i
].negative
)
4402 inst
.operands
[i
].negative
= 0;
4405 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4411 /* If at this point neither .preind nor .postind is set, we have a
4412 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4413 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
4415 inst
.operands
[i
].preind
= 1;
4416 inst
.reloc
.exp
.X_op
= O_constant
;
4417 inst
.reloc
.exp
.X_add_number
= 0;
4423 /* Miscellaneous. */
4425 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4426 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4428 parse_psr (char **str
)
4431 unsigned long psr_field
;
4432 const struct asm_psr
*psr
;
4435 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4436 feature for ease of use and backwards compatibility. */
4438 if (strncasecmp (p
, "SPSR", 4) == 0)
4439 psr_field
= SPSR_BIT
;
4440 else if (strncasecmp (p
, "CPSR", 4) == 0)
4447 while (ISALNUM (*p
) || *p
== '_');
4449 psr
= hash_find_n (arm_v7m_psr_hsh
, start
, p
- start
);
4460 /* A suffix follows. */
4466 while (ISALNUM (*p
) || *p
== '_');
4468 psr
= hash_find_n (arm_psr_hsh
, start
, p
- start
);
4472 psr_field
|= psr
->field
;
4477 goto error
; /* Garbage after "[CS]PSR". */
4479 psr_field
|= (PSR_c
| PSR_f
);
4485 inst
.error
= _("flag for {c}psr instruction expected");
4489 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4490 value suitable for splatting into the AIF field of the instruction. */
4493 parse_cps_flags (char **str
)
4502 case '\0': case ',':
4505 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
4506 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
4507 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
4510 inst
.error
= _("unrecognized CPS flag");
4515 if (saw_a_flag
== 0)
4517 inst
.error
= _("missing CPS flags");
4525 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4526 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4529 parse_endian_specifier (char **str
)
4534 if (strncasecmp (s
, "BE", 2))
4536 else if (strncasecmp (s
, "LE", 2))
4540 inst
.error
= _("valid endian specifiers are be or le");
4544 if (ISALNUM (s
[2]) || s
[2] == '_')
4546 inst
.error
= _("valid endian specifiers are be or le");
4551 return little_endian
;
4554 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
4555 value suitable for poking into the rotate field of an sxt or sxta
4556 instruction, or FAIL on error. */
4559 parse_ror (char **str
)
4564 if (strncasecmp (s
, "ROR", 3) == 0)
4568 inst
.error
= _("missing rotation field after comma");
4572 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
4577 case 0: *str
= s
; return 0x0;
4578 case 8: *str
= s
; return 0x1;
4579 case 16: *str
= s
; return 0x2;
4580 case 24: *str
= s
; return 0x3;
4583 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
4588 /* Parse a conditional code (from conds[] below). The value returned is in the
4589 range 0 .. 14, or FAIL. */
4591 parse_cond (char **str
)
4594 const struct asm_cond
*c
;
4597 while (ISALPHA (*q
))
4600 c
= hash_find_n (arm_cond_hsh
, p
, q
- p
);
4603 inst
.error
= _("condition required");
4611 /* Parse an option for a barrier instruction. Returns the encoding for the
4614 parse_barrier (char **str
)
4617 const struct asm_barrier_opt
*o
;
4620 while (ISALPHA (*q
))
4623 o
= hash_find_n (arm_barrier_opt_hsh
, p
, q
- p
);
4631 /* Parse the operands of a table branch instruction. Similar to a memory
4634 parse_tb (char **str
)
4639 if (skip_past_char (&p
, '[') == FAIL
)
4641 inst
.error
= _("'[' expected");
4645 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4647 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4650 inst
.operands
[0].reg
= reg
;
4652 if (skip_past_comma (&p
) == FAIL
)
4654 inst
.error
= _("',' expected");
4658 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4660 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4663 inst
.operands
[0].imm
= reg
;
4665 if (skip_past_comma (&p
) == SUCCESS
)
4667 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
4669 if (inst
.reloc
.exp
.X_add_number
!= 1)
4671 inst
.error
= _("invalid shift");
4674 inst
.operands
[0].shifted
= 1;
4677 if (skip_past_char (&p
, ']') == FAIL
)
4679 inst
.error
= _("']' expected");
4686 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
4687 information on the types the operands can take and how they are encoded.
4688 Note particularly the abuse of ".regisimm" to signify a Neon register.
4689 Up to three operands may be read; this function handles setting the
4690 ".present" field for each operand itself.
4691 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
4692 else returns FAIL. */
4695 parse_neon_mov (char **str
, int *which_operand
)
4697 int i
= *which_operand
, val
;
4698 enum arm_reg_type rtype
;
4700 struct neon_type_el optype
;
4702 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
4704 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
4705 inst
.operands
[i
].reg
= val
;
4706 inst
.operands
[i
].isscalar
= 1;
4707 inst
.operands
[i
].vectype
= optype
;
4708 inst
.operands
[i
++].present
= 1;
4710 if (skip_past_comma (&ptr
) == FAIL
)
4713 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
4716 inst
.operands
[i
].reg
= val
;
4717 inst
.operands
[i
].isreg
= 1;
4718 inst
.operands
[i
].present
= 1;
4720 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NDQ
, &rtype
, &optype
))
4723 /* Cases 0, 1, 2, 3, 5 (D only). */
4724 if (skip_past_comma (&ptr
) == FAIL
)
4727 inst
.operands
[i
].reg
= val
;
4728 inst
.operands
[i
].isreg
= 1;
4729 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
4730 inst
.operands
[i
].vectype
= optype
;
4731 inst
.operands
[i
++].present
= 1;
4733 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
4735 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>. */
4736 inst
.operands
[i
-1].regisimm
= 1;
4737 inst
.operands
[i
].reg
= val
;
4738 inst
.operands
[i
].isreg
= 1;
4739 inst
.operands
[i
++].present
= 1;
4741 if (rtype
== REG_TYPE_NQ
)
4743 first_error (_("can't use Neon quad register here"));
4746 if (skip_past_comma (&ptr
) == FAIL
)
4748 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
4750 inst
.operands
[i
].reg
= val
;
4751 inst
.operands
[i
].isreg
= 1;
4752 inst
.operands
[i
].present
= 1;
4754 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
4756 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
4757 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm> */
4758 if (!thumb_mode
&& (inst
.instruction
& 0xf0000000) != 0xe0000000)
4761 else if (parse_big_immediate (&ptr
, i
) == SUCCESS
)
4763 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
4764 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
4765 if (!thumb_mode
&& (inst
.instruction
& 0xf0000000) != 0xe0000000)
4768 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NDQ
, &rtype
, &optype
))
4771 /* Case 0: VMOV<c><q> <Qd>, <Qm>
4772 Case 1: VMOV<c><q> <Dd>, <Dm> */
4773 if (!thumb_mode
&& (inst
.instruction
& 0xf0000000) != 0xe0000000)
4776 inst
.operands
[i
].reg
= val
;
4777 inst
.operands
[i
].isreg
= 1;
4778 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
4779 inst
.operands
[i
].vectype
= optype
;
4780 inst
.operands
[i
].present
= 1;
4784 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
4788 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
4791 inst
.operands
[i
].reg
= val
;
4792 inst
.operands
[i
].isreg
= 1;
4793 inst
.operands
[i
++].present
= 1;
4795 if (skip_past_comma (&ptr
) == FAIL
)
4798 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
4800 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
4801 inst
.operands
[i
].reg
= val
;
4802 inst
.operands
[i
].isscalar
= 1;
4803 inst
.operands
[i
].present
= 1;
4804 inst
.operands
[i
].vectype
= optype
;
4806 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
4808 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
4809 inst
.operands
[i
].reg
= val
;
4810 inst
.operands
[i
].isreg
= 1;
4811 inst
.operands
[i
++].present
= 1;
4813 if (skip_past_comma (&ptr
) == FAIL
)
4816 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFD
, NULL
, &optype
))
4819 first_error (_(reg_expected_msgs
[REG_TYPE_VFD
]));
4823 inst
.operands
[i
].reg
= val
;
4824 inst
.operands
[i
].isreg
= 1;
4825 inst
.operands
[i
].regisimm
= 1;
4826 inst
.operands
[i
].vectype
= optype
;
4827 inst
.operands
[i
].present
= 1;
4832 first_error (_("parse error"));
4836 /* Successfully parsed the operands. Update args. */
4842 first_error (_("expected comma"));
4846 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
4850 first_error (_("instruction cannot be conditionalized"));
4854 /* Matcher codes for parse_operands. */
4855 enum operand_parse_code
4857 OP_stop
, /* end of line */
4859 OP_RR
, /* ARM register */
4860 OP_RRnpc
, /* ARM register, not r15 */
4861 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
4862 OP_RRw
, /* ARM register, not r15, optional trailing ! */
4863 OP_RCP
, /* Coprocessor number */
4864 OP_RCN
, /* Coprocessor register */
4865 OP_RF
, /* FPA register */
4866 OP_RVS
, /* VFP single precision register */
4867 OP_RVD
, /* VFP double precision register (0..15) */
4868 OP_RND
, /* Neon double precision register (0..31) */
4869 OP_RNQ
, /* Neon quad precision register */
4870 OP_RNDQ
, /* Neon double or quad precision register */
4871 OP_RNSC
, /* Neon scalar D[X] */
4872 OP_RVC
, /* VFP control register */
4873 OP_RMF
, /* Maverick F register */
4874 OP_RMD
, /* Maverick D register */
4875 OP_RMFX
, /* Maverick FX register */
4876 OP_RMDX
, /* Maverick DX register */
4877 OP_RMAX
, /* Maverick AX register */
4878 OP_RMDS
, /* Maverick DSPSC register */
4879 OP_RIWR
, /* iWMMXt wR register */
4880 OP_RIWC
, /* iWMMXt wC register */
4881 OP_RIWG
, /* iWMMXt wCG register */
4882 OP_RXA
, /* XScale accumulator register */
4884 OP_REGLST
, /* ARM register list */
4885 OP_VRSLST
, /* VFP single-precision register list */
4886 OP_VRDLST
, /* VFP double-precision register list */
4887 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
4888 OP_NSTRLST
, /* Neon element/structure list */
4890 OP_NILO
, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
4891 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
4892 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
4893 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
4894 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
4895 OP_VMOV
, /* Neon VMOV operands. */
4896 OP_RNDQ_IMVNb
,/* Neon D or Q reg, or immediate good for VMVN. */
4897 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
4899 OP_I0
, /* immediate zero */
4900 OP_I7
, /* immediate value 0 .. 7 */
4901 OP_I15
, /* 0 .. 15 */
4902 OP_I16
, /* 1 .. 16 */
4903 OP_I16z
, /* 0 .. 16 */
4904 OP_I31
, /* 0 .. 31 */
4905 OP_I31w
, /* 0 .. 31, optional trailing ! */
4906 OP_I32
, /* 1 .. 32 */
4907 OP_I32z
, /* 0 .. 32 */
4908 OP_I63
, /* 0 .. 63 */
4909 OP_I63s
, /* -64 .. 63 */
4910 OP_I64
, /* 1 .. 64 */
4911 OP_I64z
, /* 0 .. 64 */
4912 OP_I255
, /* 0 .. 255 */
4913 OP_Iffff
, /* 0 .. 65535 */
4915 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
4916 OP_I7b
, /* 0 .. 7 */
4917 OP_I15b
, /* 0 .. 15 */
4918 OP_I31b
, /* 0 .. 31 */
4920 OP_SH
, /* shifter operand */
4921 OP_ADDR
, /* Memory address expression (any mode) */
4922 OP_EXP
, /* arbitrary expression */
4923 OP_EXPi
, /* same, with optional immediate prefix */
4924 OP_EXPr
, /* same, with optional relocation suffix */
4926 OP_CPSF
, /* CPS flags */
4927 OP_ENDI
, /* Endianness specifier */
4928 OP_PSR
, /* CPSR/SPSR mask for msr */
4929 OP_COND
, /* conditional code */
4930 OP_TB
, /* Table branch. */
4932 OP_RRnpc_I0
, /* ARM register or literal 0 */
4933 OP_RR_EXr
, /* ARM register or expression with opt. reloc suff. */
4934 OP_RR_EXi
, /* ARM register or expression with imm prefix */
4935 OP_RF_IF
, /* FPA register or immediate */
4936 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
4938 /* Optional operands. */
4939 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
4940 OP_oI31b
, /* 0 .. 31 */
4941 OP_oI32b
, /* 1 .. 32 */
4942 OP_oIffffb
, /* 0 .. 65535 */
4943 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
4945 OP_oRR
, /* ARM register */
4946 OP_oRRnpc
, /* ARM register, not the PC */
4947 OP_oRND
, /* Optional Neon double precision register */
4948 OP_oRNQ
, /* Optional Neon quad precision register */
4949 OP_oRNDQ
, /* Optional Neon double or quad precision register */
4950 OP_oSHll
, /* LSL immediate */
4951 OP_oSHar
, /* ASR immediate */
4952 OP_oSHllar
, /* LSL or ASR immediate */
4953 OP_oROR
, /* ROR 0/8/16/24 */
4954 OP_oBARRIER
, /* Option argument for a barrier instruction. */
4956 OP_FIRST_OPTIONAL
= OP_oI7b
4959 /* Generic instruction operand parser. This does no encoding and no
4960 semantic validation; it merely squirrels values away in the inst
4961 structure. Returns SUCCESS or FAIL depending on whether the
4962 specified grammar matched. */
4964 parse_operands (char *str
, const unsigned char *pattern
)
4966 unsigned const char *upat
= pattern
;
4967 char *backtrack_pos
= 0;
4968 const char *backtrack_error
= 0;
4969 int i
, val
, backtrack_index
= 0;
4970 enum arm_reg_type rtype
;
4972 #define po_char_or_fail(chr) do { \
4973 if (skip_past_char (&str, chr) == FAIL) \
4977 #define po_reg_or_fail(regtype) do { \
4978 val = arm_typed_reg_parse (&str, regtype, &rtype, \
4979 &inst.operands[i].vectype); \
4982 first_error (_(reg_expected_msgs[regtype])); \
4985 inst.operands[i].reg = val; \
4986 inst.operands[i].isreg = 1; \
4987 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
4990 #define po_reg_or_goto(regtype, label) do { \
4991 val = arm_typed_reg_parse (&str, regtype, &rtype, \
4992 &inst.operands[i].vectype); \
4996 inst.operands[i].reg = val; \
4997 inst.operands[i].isreg = 1; \
4998 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5001 #define po_imm_or_fail(min, max, popt) do { \
5002 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5004 inst.operands[i].imm = val; \
5007 #define po_scalar_or_goto(elsz, label) do { \
5008 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5011 inst.operands[i].reg = val; \
5012 inst.operands[i].isscalar = 1; \
5015 #define po_misc_or_fail(expr) do { \
5020 skip_whitespace (str
);
5022 for (i
= 0; upat
[i
] != OP_stop
; i
++)
5024 if (upat
[i
] >= OP_FIRST_OPTIONAL
)
5026 /* Remember where we are in case we need to backtrack. */
5027 assert (!backtrack_pos
);
5028 backtrack_pos
= str
;
5029 backtrack_error
= inst
.error
;
5030 backtrack_index
= i
;
5034 po_char_or_fail (',');
5042 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
5043 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
5044 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
5045 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
5046 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
5047 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
5049 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
5050 case OP_RVC
: po_reg_or_fail (REG_TYPE_VFC
); break;
5051 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
5052 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
5053 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
5054 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
5055 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
5056 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
5057 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
5058 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
5059 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
5060 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
5062 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
5064 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
5066 /* Neon scalar. Using an element size of 8 means that some invalid
5067 scalars are accepted here, so deal with those in later code. */
5068 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
5070 /* WARNING: We can expand to two operands here. This has the potential
5071 to totally confuse the backtracking mechanism! It will be OK at
5072 least as long as we don't try to use optional args as well,
5076 po_reg_or_goto (REG_TYPE_NDQ
, try_imm
);
5078 skip_past_comma (&str
);
5079 po_reg_or_goto (REG_TYPE_NDQ
, one_reg_only
);
5082 /* Optional register operand was omitted. Unfortunately, it's in
5083 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5084 here (this is a bit grotty). */
5085 inst
.operands
[i
] = inst
.operands
[i
-1];
5086 inst
.operands
[i
-1].present
= 0;
5089 /* Immediate gets verified properly later, so accept any now. */
5090 po_imm_or_fail (INT_MIN
, INT_MAX
, TRUE
);
5096 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
5099 po_imm_or_fail (0, 0, TRUE
);
5105 po_scalar_or_goto (8, try_rr
);
5108 po_reg_or_fail (REG_TYPE_RN
);
5114 po_scalar_or_goto (8, try_ndq
);
5117 po_reg_or_fail (REG_TYPE_NDQ
);
5123 po_scalar_or_goto (8, try_vfd
);
5126 po_reg_or_fail (REG_TYPE_VFD
);
5131 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5132 not careful then bad things might happen. */
5133 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
5138 po_reg_or_goto (REG_TYPE_NDQ
, try_mvnimm
);
5141 /* There's a possibility of getting a 64-bit immediate here, so
5142 we need special handling. */
5143 if (parse_big_immediate (&str
, i
) == FAIL
)
5145 inst
.error
= _("immediate value is out of range");
5153 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
5156 po_imm_or_fail (0, 63, TRUE
);
5161 po_char_or_fail ('[');
5162 po_reg_or_fail (REG_TYPE_RN
);
5163 po_char_or_fail (']');
5167 po_reg_or_fail (REG_TYPE_RN
);
5168 if (skip_past_char (&str
, '!') == SUCCESS
)
5169 inst
.operands
[i
].writeback
= 1;
5173 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
5174 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
5175 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
5176 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
5177 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
5178 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
5179 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
5180 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
5181 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
5182 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
5183 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
5184 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
5185 case OP_Iffff
: po_imm_or_fail ( 0, 0xffff, FALSE
); break;
5187 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
5189 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
5190 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
5192 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
5193 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
5194 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
5196 /* Immediate variants */
5198 po_char_or_fail ('{');
5199 po_imm_or_fail (0, 255, TRUE
);
5200 po_char_or_fail ('}');
5204 /* The expression parser chokes on a trailing !, so we have
5205 to find it first and zap it. */
5208 while (*s
&& *s
!= ',')
5213 inst
.operands
[i
].writeback
= 1;
5215 po_imm_or_fail (0, 31, TRUE
);
5223 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5228 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5233 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5235 if (inst
.reloc
.exp
.X_op
== O_symbol
)
5237 val
= parse_reloc (&str
);
5240 inst
.error
= _("unrecognized relocation suffix");
5243 else if (val
!= BFD_RELOC_UNUSED
)
5245 inst
.operands
[i
].imm
= val
;
5246 inst
.operands
[i
].hasreloc
= 1;
5251 /* Register or expression */
5252 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
5253 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
5255 /* Register or immediate */
5256 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
5257 I0
: po_imm_or_fail (0, 0, FALSE
); break;
5259 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
5261 if (!is_immediate_prefix (*str
))
5264 val
= parse_fpa_immediate (&str
);
5267 /* FPA immediates are encoded as registers 8-15.
5268 parse_fpa_immediate has already applied the offset. */
5269 inst
.operands
[i
].reg
= val
;
5270 inst
.operands
[i
].isreg
= 1;
5273 /* Two kinds of register */
5276 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
5277 if (rege
->type
!= REG_TYPE_MMXWR
5278 && rege
->type
!= REG_TYPE_MMXWC
5279 && rege
->type
!= REG_TYPE_MMXWCG
)
5281 inst
.error
= _("iWMMXt data or control register expected");
5284 inst
.operands
[i
].reg
= rege
->number
;
5285 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
5290 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
5291 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
5292 case OP_oROR
: val
= parse_ror (&str
); break;
5293 case OP_PSR
: val
= parse_psr (&str
); break;
5294 case OP_COND
: val
= parse_cond (&str
); break;
5295 case OP_oBARRIER
:val
= parse_barrier (&str
); break;
5298 po_misc_or_fail (parse_tb (&str
));
5301 /* Register lists */
5303 val
= parse_reg_list (&str
);
5306 inst
.operands
[1].writeback
= 1;
5312 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
5316 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
5320 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
5325 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
5326 &inst
.operands
[i
].vectype
);
5329 /* Addressing modes */
5331 po_misc_or_fail (parse_address (&str
, i
));
5335 po_misc_or_fail (parse_shifter_operand (&str
, i
));
5339 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
5343 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
5347 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
5351 as_fatal ("unhandled operand code %d", upat
[i
]);
5354 /* Various value-based sanity checks and shared operations. We
5355 do not signal immediate failures for the register constraints;
5356 this allows a syntax error to take precedence. */
5364 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
5365 inst
.error
= BAD_PC
;
5381 inst
.operands
[i
].imm
= val
;
5388 /* If we get here, this operand was successfully parsed. */
5389 inst
.operands
[i
].present
= 1;
5393 inst
.error
= BAD_ARGS
;
5398 /* The parse routine should already have set inst.error, but set a
5399 defaut here just in case. */
5401 inst
.error
= _("syntax error");
5405 /* Do not backtrack over a trailing optional argument that
5406 absorbed some text. We will only fail again, with the
5407 'garbage following instruction' error message, which is
5408 probably less helpful than the current one. */
5409 if (backtrack_index
== i
&& backtrack_pos
!= str
5410 && upat
[i
+1] == OP_stop
)
5413 inst
.error
= _("syntax error");
5417 /* Try again, skipping the optional argument at backtrack_pos. */
5418 str
= backtrack_pos
;
5419 inst
.error
= backtrack_error
;
5420 inst
.operands
[backtrack_index
].present
= 0;
5421 i
= backtrack_index
;
5425 /* Check that we have parsed all the arguments. */
5426 if (*str
!= '\0' && !inst
.error
)
5427 inst
.error
= _("garbage following instruction");
5429 return inst
.error
? FAIL
: SUCCESS
;
5432 #undef po_char_or_fail
5433 #undef po_reg_or_fail
5434 #undef po_reg_or_goto
5435 #undef po_imm_or_fail
5436 #undef po_scalar_or_fail
5438 /* Shorthand macro for instruction encoding functions issuing errors. */
5439 #define constraint(expr, err) do { \
5447 /* Functions for operand encoding. ARM, then Thumb. */
5449 #define rotate_left(v, n) (v << n | v >> (32 - n))
5451 /* If VAL can be encoded in the immediate field of an ARM instruction,
5452 return the encoded form. Otherwise, return FAIL. */
5455 encode_arm_immediate (unsigned int val
)
5459 for (i
= 0; i
< 32; i
+= 2)
5460 if ((a
= rotate_left (val
, i
)) <= 0xff)
5461 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
5466 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
5467 return the encoded form. Otherwise, return FAIL. */
5469 encode_thumb32_immediate (unsigned int val
)
5476 for (i
= 1; i
<= 24; i
++)
5479 if ((val
& ~(0xff << i
)) == 0)
5480 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
5484 if (val
== ((a
<< 16) | a
))
5486 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
5490 if (val
== ((a
<< 16) | a
))
5491 return 0x200 | (a
>> 8);
5495 /* Encode a VFP SP or DP register number into inst.instruction. */
5498 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
5500 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
5503 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
5506 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
5509 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
5514 first_error (_("D register out of range for selected VFP version"));
5522 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
5526 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
5530 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
5534 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
5538 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
5542 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
5550 /* Encode a <shift> in an ARM-format instruction. The immediate,
5551 if any, is handled by md_apply_fix. */
5553 encode_arm_shift (int i
)
5555 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
5556 inst
.instruction
|= SHIFT_ROR
<< 5;
5559 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
5560 if (inst
.operands
[i
].immisreg
)
5562 inst
.instruction
|= SHIFT_BY_REG
;
5563 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
5566 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
5571 encode_arm_shifter_operand (int i
)
5573 if (inst
.operands
[i
].isreg
)
5575 inst
.instruction
|= inst
.operands
[i
].reg
;
5576 encode_arm_shift (i
);
5579 inst
.instruction
|= INST_IMMEDIATE
;
5582 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
5584 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
5586 assert (inst
.operands
[i
].isreg
);
5587 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
5589 if (inst
.operands
[i
].preind
)
5593 inst
.error
= _("instruction does not accept preindexed addressing");
5596 inst
.instruction
|= PRE_INDEX
;
5597 if (inst
.operands
[i
].writeback
)
5598 inst
.instruction
|= WRITE_BACK
;
5601 else if (inst
.operands
[i
].postind
)
5603 assert (inst
.operands
[i
].writeback
);
5605 inst
.instruction
|= WRITE_BACK
;
5607 else /* unindexed - only for coprocessor */
5609 inst
.error
= _("instruction does not accept unindexed addressing");
5613 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
5614 && (((inst
.instruction
& 0x000f0000) >> 16)
5615 == ((inst
.instruction
& 0x0000f000) >> 12)))
5616 as_warn ((inst
.instruction
& LOAD_BIT
)
5617 ? _("destination register same as write-back base")
5618 : _("source register same as write-back base"));
5621 /* inst.operands[i] was set up by parse_address. Encode it into an
5622 ARM-format mode 2 load or store instruction. If is_t is true,
5623 reject forms that cannot be used with a T instruction (i.e. not
5626 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
5628 encode_arm_addr_mode_common (i
, is_t
);
5630 if (inst
.operands
[i
].immisreg
)
5632 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
5633 inst
.instruction
|= inst
.operands
[i
].imm
;
5634 if (!inst
.operands
[i
].negative
)
5635 inst
.instruction
|= INDEX_UP
;
5636 if (inst
.operands
[i
].shifted
)
5638 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
5639 inst
.instruction
|= SHIFT_ROR
<< 5;
5642 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
5643 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
5647 else /* immediate offset in inst.reloc */
5649 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5650 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
5654 /* inst.operands[i] was set up by parse_address. Encode it into an
5655 ARM-format mode 3 load or store instruction. Reject forms that
5656 cannot be used with such instructions. If is_t is true, reject
5657 forms that cannot be used with a T instruction (i.e. not
5660 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
5662 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
5664 inst
.error
= _("instruction does not accept scaled register index");
5668 encode_arm_addr_mode_common (i
, is_t
);
5670 if (inst
.operands
[i
].immisreg
)
5672 inst
.instruction
|= inst
.operands
[i
].imm
;
5673 if (!inst
.operands
[i
].negative
)
5674 inst
.instruction
|= INDEX_UP
;
5676 else /* immediate offset in inst.reloc */
5678 inst
.instruction
|= HWOFFSET_IMM
;
5679 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5680 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
5684 /* inst.operands[i] was set up by parse_address. Encode it into an
5685 ARM-format instruction. Reject all forms which cannot be encoded
5686 into a coprocessor load/store instruction. If wb_ok is false,
5687 reject use of writeback; if unind_ok is false, reject use of
5688 unindexed addressing. If reloc_override is not 0, use it instead
5689 of BFD_ARM_CP_OFF_IMM. */
5692 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
5694 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
5696 assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
5698 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
5700 assert (!inst
.operands
[i
].writeback
);
5703 inst
.error
= _("instruction does not support unindexed addressing");
5706 inst
.instruction
|= inst
.operands
[i
].imm
;
5707 inst
.instruction
|= INDEX_UP
;
5711 if (inst
.operands
[i
].preind
)
5712 inst
.instruction
|= PRE_INDEX
;
5714 if (inst
.operands
[i
].writeback
)
5716 if (inst
.operands
[i
].reg
== REG_PC
)
5718 inst
.error
= _("pc may not be used with write-back");
5723 inst
.error
= _("instruction does not support writeback");
5726 inst
.instruction
|= WRITE_BACK
;
5730 inst
.reloc
.type
= reloc_override
;
5731 else if (thumb_mode
)
5732 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
5734 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
5738 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
5739 Determine whether it can be performed with a move instruction; if
5740 it can, convert inst.instruction to that move instruction and
5741 return 1; if it can't, convert inst.instruction to a literal-pool
5742 load and return 0. If this is not a valid thing to do in the
5743 current context, set inst.error and return 1.
5745 inst.operands[i] describes the destination register. */
5748 move_or_literal_pool (int i
, bfd_boolean thumb_p
, bfd_boolean mode_3
)
5753 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
5757 if ((inst
.instruction
& tbit
) == 0)
5759 inst
.error
= _("invalid pseudo operation");
5762 if (inst
.reloc
.exp
.X_op
!= O_constant
&& inst
.reloc
.exp
.X_op
!= O_symbol
)
5764 inst
.error
= _("constant expression expected");
5767 if (inst
.reloc
.exp
.X_op
== O_constant
)
5771 if (!unified_syntax
&& (inst
.reloc
.exp
.X_add_number
& ~0xFF) == 0)
5773 /* This can be done with a mov(1) instruction. */
5774 inst
.instruction
= T_OPCODE_MOV_I8
| (inst
.operands
[i
].reg
<< 8);
5775 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
;
5781 int value
= encode_arm_immediate (inst
.reloc
.exp
.X_add_number
);
5784 /* This can be done with a mov instruction. */
5785 inst
.instruction
&= LITERAL_MASK
;
5786 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
5787 inst
.instruction
|= value
& 0xfff;
5791 value
= encode_arm_immediate (~inst
.reloc
.exp
.X_add_number
);
5794 /* This can be done with a mvn instruction. */
5795 inst
.instruction
&= LITERAL_MASK
;
5796 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
5797 inst
.instruction
|= value
& 0xfff;
5803 if (add_to_lit_pool () == FAIL
)
5805 inst
.error
= _("literal pool insertion failed");
5808 inst
.operands
[1].reg
= REG_PC
;
5809 inst
.operands
[1].isreg
= 1;
5810 inst
.operands
[1].preind
= 1;
5811 inst
.reloc
.pc_rel
= 1;
5812 inst
.reloc
.type
= (thumb_p
5813 ? BFD_RELOC_ARM_THUMB_OFFSET
5815 ? BFD_RELOC_ARM_HWLITERAL
5816 : BFD_RELOC_ARM_LITERAL
));
5820 /* Functions for instruction encoding, sorted by subarchitecture.
5821 First some generics; their names are taken from the conventional
5822 bit positions for register arguments in ARM format instructions. */
5832 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5838 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5839 inst
.instruction
|= inst
.operands
[1].reg
;
5845 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5846 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
5852 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
5853 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
5859 unsigned Rn
= inst
.operands
[2].reg
;
5860 /* Enforce restrictions on SWP instruction. */
5861 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
5862 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
5863 _("Rn must not overlap other operands"));
5864 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5865 inst
.instruction
|= inst
.operands
[1].reg
;
5866 inst
.instruction
|= Rn
<< 16;
5872 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5873 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
5874 inst
.instruction
|= inst
.operands
[2].reg
;
5880 inst
.instruction
|= inst
.operands
[0].reg
;
5881 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
5882 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
5888 inst
.instruction
|= inst
.operands
[0].imm
;
5894 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5895 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
5898 /* ARM instructions, in alphabetical order by function name (except
5899 that wrapper functions appear immediately after the function they
5902 /* This is a pseudo-op of the form "adr rd, label" to be converted
5903 into a relative address of the form "add rd, pc, #label-.-8". */
5908 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
5910 /* Frag hacking will turn this into a sub instruction if the offset turns
5911 out to be negative. */
5912 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
5913 inst
.reloc
.pc_rel
= 1;
5914 inst
.reloc
.exp
.X_add_number
-= 8;
5917 /* This is a pseudo-op of the form "adrl rd, label" to be converted
5918 into a relative address of the form:
5919 add rd, pc, #low(label-.-8)"
5920 add rd, rd, #high(label-.-8)" */
5925 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
5927 /* Frag hacking will turn this into a sub instruction if the offset turns
5928 out to be negative. */
5929 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
5930 inst
.reloc
.pc_rel
= 1;
5931 inst
.size
= INSN_SIZE
* 2;
5932 inst
.reloc
.exp
.X_add_number
-= 8;
5938 if (!inst
.operands
[1].present
)
5939 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
5940 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5941 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
5942 encode_arm_shifter_operand (2);
5948 if (inst
.operands
[0].present
)
5950 constraint ((inst
.instruction
& 0xf0) != 0x40
5951 && inst
.operands
[0].imm
!= 0xf,
5952 "bad barrier type");
5953 inst
.instruction
|= inst
.operands
[0].imm
;
5956 inst
.instruction
|= 0xf;
5962 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
5963 constraint (msb
> 32, _("bit-field extends past end of register"));
5964 /* The instruction encoding stores the LSB and MSB,
5965 not the LSB and width. */
5966 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5967 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
5968 inst
.instruction
|= (msb
- 1) << 16;
5976 /* #0 in second position is alternative syntax for bfc, which is
5977 the same instruction but with REG_PC in the Rm field. */
5978 if (!inst
.operands
[1].isreg
)
5979 inst
.operands
[1].reg
= REG_PC
;
5981 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
5982 constraint (msb
> 32, _("bit-field extends past end of register"));
5983 /* The instruction encoding stores the LSB and MSB,
5984 not the LSB and width. */
5985 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5986 inst
.instruction
|= inst
.operands
[1].reg
;
5987 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
5988 inst
.instruction
|= (msb
- 1) << 16;
5994 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
5995 _("bit-field extends past end of register"));
5996 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
5997 inst
.instruction
|= inst
.operands
[1].reg
;
5998 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
5999 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
6002 /* ARM V5 breakpoint instruction (argument parse)
6003 BKPT <16 bit unsigned immediate>
6004 Instruction is not conditional.
6005 The bit pattern given in insns[] has the COND_ALWAYS condition,
6006 and it is an error if the caller tried to override that. */
6011 /* Top 12 of 16 bits to bits 19:8. */
6012 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
6014 /* Bottom 4 of 16 bits to bits 3:0. */
6015 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
6019 encode_branch (int default_reloc
)
6021 if (inst
.operands
[0].hasreloc
)
6023 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
,
6024 _("the only suffix valid here is '(plt)'"));
6025 inst
.reloc
.type
= BFD_RELOC_ARM_PLT32
;
6029 inst
.reloc
.type
= default_reloc
;
6031 inst
.reloc
.pc_rel
= 1;
6038 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6039 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
6042 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
6049 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6051 if (inst
.cond
== COND_ALWAYS
)
6052 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
6054 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
6058 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
6061 /* ARM V5 branch-link-exchange instruction (argument parse)
6062 BLX <target_addr> ie BLX(1)
6063 BLX{<condition>} <Rm> ie BLX(2)
6064 Unfortunately, there are two different opcodes for this mnemonic.
6065 So, the insns[].value is not used, and the code here zaps values
6066 into inst.instruction.
6067 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6072 if (inst
.operands
[0].isreg
)
6074 /* Arg is a register; the opcode provided by insns[] is correct.
6075 It is not illegal to do "blx pc", just useless. */
6076 if (inst
.operands
[0].reg
== REG_PC
)
6077 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6079 inst
.instruction
|= inst
.operands
[0].reg
;
6083 /* Arg is an address; this instruction cannot be executed
6084 conditionally, and the opcode must be adjusted. */
6085 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
6086 inst
.instruction
= 0xfa000000;
6088 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6089 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
6092 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
6099 if (inst
.operands
[0].reg
== REG_PC
)
6100 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6102 inst
.instruction
|= inst
.operands
[0].reg
;
6106 /* ARM v5TEJ. Jump to Jazelle code. */
6111 if (inst
.operands
[0].reg
== REG_PC
)
6112 as_tsktsk (_("use of r15 in bxj is not really useful"));
6114 inst
.instruction
|= inst
.operands
[0].reg
;
6117 /* Co-processor data operation:
6118 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6119 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6123 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6124 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
6125 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6126 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6127 inst
.instruction
|= inst
.operands
[4].reg
;
6128 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
6134 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6135 encode_arm_shifter_operand (1);
6138 /* Transfer between coprocessor and ARM registers.
6139 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6144 No special properties. */
6149 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6150 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
6151 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6152 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6153 inst
.instruction
|= inst
.operands
[4].reg
;
6154 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
6157 /* Transfer between coprocessor register and pair of ARM registers.
6158 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6163 Two XScale instructions are special cases of these:
6165 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6166 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6168 Result unpredicatable if Rd or Rn is R15. */
6173 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6174 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
6175 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6176 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6177 inst
.instruction
|= inst
.operands
[4].reg
;
6183 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
6184 inst
.instruction
|= inst
.operands
[1].imm
;
6190 inst
.instruction
|= inst
.operands
[0].imm
;
6196 /* There is no IT instruction in ARM mode. We
6197 process it but do not generate code for it. */
6204 int base_reg
= inst
.operands
[0].reg
;
6205 int range
= inst
.operands
[1].imm
;
6207 inst
.instruction
|= base_reg
<< 16;
6208 inst
.instruction
|= range
;
6210 if (inst
.operands
[1].writeback
)
6211 inst
.instruction
|= LDM_TYPE_2_OR_3
;
6213 if (inst
.operands
[0].writeback
)
6215 inst
.instruction
|= WRITE_BACK
;
6216 /* Check for unpredictable uses of writeback. */
6217 if (inst
.instruction
& LOAD_BIT
)
6219 /* Not allowed in LDM type 2. */
6220 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
6221 && ((range
& (1 << REG_PC
)) == 0))
6222 as_warn (_("writeback of base register is UNPREDICTABLE"));
6223 /* Only allowed if base reg not in list for other types. */
6224 else if (range
& (1 << base_reg
))
6225 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6229 /* Not allowed for type 2. */
6230 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
6231 as_warn (_("writeback of base register is UNPREDICTABLE"));
6232 /* Only allowed if base reg not in list, or first in list. */
6233 else if ((range
& (1 << base_reg
))
6234 && (range
& ((1 << base_reg
) - 1)))
6235 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6240 /* ARMv5TE load-consecutive (argument parse)
6249 constraint (inst
.operands
[0].reg
% 2 != 0,
6250 _("first destination register must be even"));
6251 constraint (inst
.operands
[1].present
6252 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
6253 _("can only load two consecutive registers"));
6254 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
6255 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
6257 if (!inst
.operands
[1].present
)
6258 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
6260 if (inst
.instruction
& LOAD_BIT
)
6262 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6263 register and the first register written; we have to diagnose
6264 overlap between the base and the second register written here. */
6266 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
6267 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
6268 as_warn (_("base register written back, and overlaps "
6269 "second destination register"));
6271 /* For an index-register load, the index register must not overlap the
6272 destination (even if not write-back). */
6273 else if (inst
.operands
[2].immisreg
6274 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
6275 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
6276 as_warn (_("index register overlaps destination register"));
6279 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6280 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
6286 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
6287 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
6288 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
6289 || inst
.operands
[1].negative
6290 /* This can arise if the programmer has written
6292 or if they have mistakenly used a register name as the last
6295 It is very difficult to distinguish between these two cases
6296 because "rX" might actually be a label. ie the register
6297 name has been occluded by a symbol of the same name. So we
6298 just generate a general 'bad addressing mode' type error
6299 message and leave it up to the programmer to discover the
6300 true cause and fix their mistake. */
6301 || (inst
.operands
[1].reg
== REG_PC
),
6304 constraint (inst
.reloc
.exp
.X_op
!= O_constant
6305 || inst
.reloc
.exp
.X_add_number
!= 0,
6306 _("offset must be zero in ARM encoding"));
6308 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6309 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6310 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
6316 constraint (inst
.operands
[0].reg
% 2 != 0,
6317 _("even register required"));
6318 constraint (inst
.operands
[1].present
6319 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
6320 _("can only load two consecutive registers"));
6321 /* If op 1 were present and equal to PC, this function wouldn't
6322 have been called in the first place. */
6323 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
6325 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6326 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6332 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6333 if (!inst
.operands
[1].isreg
)
6334 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/FALSE
))
6336 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
6342 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6344 if (inst
.operands
[1].preind
)
6346 constraint (inst
.reloc
.exp
.X_op
!= O_constant
||
6347 inst
.reloc
.exp
.X_add_number
!= 0,
6348 _("this instruction requires a post-indexed address"));
6350 inst
.operands
[1].preind
= 0;
6351 inst
.operands
[1].postind
= 1;
6352 inst
.operands
[1].writeback
= 1;
6354 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6355 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
6358 /* Halfword and signed-byte load/store operations. */
6363 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6364 if (!inst
.operands
[1].isreg
)
6365 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/TRUE
))
6367 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
6373 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6375 if (inst
.operands
[1].preind
)
6377 constraint (inst
.reloc
.exp
.X_op
!= O_constant
||
6378 inst
.reloc
.exp
.X_add_number
!= 0,
6379 _("this instruction requires a post-indexed address"));
6381 inst
.operands
[1].preind
= 0;
6382 inst
.operands
[1].postind
= 1;
6383 inst
.operands
[1].writeback
= 1;
6385 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6386 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
6389 /* Co-processor register load/store.
6390 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
6394 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6395 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6396 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
6402 /* This restriction does not apply to mls (nor to mla in v6, but
6403 that's hard to detect at present). */
6404 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
6405 && !(inst
.instruction
& 0x00400000))
6406 as_tsktsk (_("rd and rm should be different in mla"));
6408 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6409 inst
.instruction
|= inst
.operands
[1].reg
;
6410 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6411 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
6418 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6419 encode_arm_shifter_operand (1);
6422 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
6426 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6427 /* The value is in two pieces: 0:11, 16:19. */
6428 inst
.instruction
|= (inst
.operands
[1].imm
& 0x00000fff);
6429 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0000f000) << 4;
6435 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
6436 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
6438 _("'CPSR' or 'SPSR' expected"));
6439 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6440 inst
.instruction
|= (inst
.operands
[1].imm
& SPSR_BIT
);
6443 /* Two possible forms:
6444 "{C|S}PSR_<field>, Rm",
6445 "{C|S}PSR_f, #expression". */
6450 inst
.instruction
|= inst
.operands
[0].imm
;
6451 if (inst
.operands
[1].isreg
)
6452 inst
.instruction
|= inst
.operands
[1].reg
;
6455 inst
.instruction
|= INST_IMMEDIATE
;
6456 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
6457 inst
.reloc
.pc_rel
= 0;
6464 if (!inst
.operands
[2].present
)
6465 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
6466 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6467 inst
.instruction
|= inst
.operands
[1].reg
;
6468 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6470 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
6471 as_tsktsk (_("rd and rm should be different in mul"));
6474 /* Long Multiply Parser
6475 UMULL RdLo, RdHi, Rm, Rs
6476 SMULL RdLo, RdHi, Rm, Rs
6477 UMLAL RdLo, RdHi, Rm, Rs
6478 SMLAL RdLo, RdHi, Rm, Rs. */
6483 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6484 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6485 inst
.instruction
|= inst
.operands
[2].reg
;
6486 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
6488 /* rdhi, rdlo and rm must all be different. */
6489 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
6490 || inst
.operands
[0].reg
== inst
.operands
[2].reg
6491 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
6492 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
6498 if (inst
.operands
[0].present
)
6500 /* Architectural NOP hints are CPSR sets with no bits selected. */
6501 inst
.instruction
&= 0xf0000000;
6502 inst
.instruction
|= 0x0320f000 + inst
.operands
[0].imm
;
6506 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
6507 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
6508 Condition defaults to COND_ALWAYS.
6509 Error if Rd, Rn or Rm are R15. */
6514 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6515 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6516 inst
.instruction
|= inst
.operands
[2].reg
;
6517 if (inst
.operands
[3].present
)
6518 encode_arm_shift (3);
6521 /* ARM V6 PKHTB (Argument Parse). */
6526 if (!inst
.operands
[3].present
)
6528 /* If the shift specifier is omitted, turn the instruction
6529 into pkhbt rd, rm, rn. */
6530 inst
.instruction
&= 0xfff00010;
6531 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6532 inst
.instruction
|= inst
.operands
[1].reg
;
6533 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6537 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6538 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6539 inst
.instruction
|= inst
.operands
[2].reg
;
6540 encode_arm_shift (3);
6544 /* ARMv5TE: Preload-Cache
6548 Syntactically, like LDR with B=1, W=0, L=1. */
6553 constraint (!inst
.operands
[0].isreg
,
6554 _("'[' expected after PLD mnemonic"));
6555 constraint (inst
.operands
[0].postind
,
6556 _("post-indexed expression used in preload instruction"));
6557 constraint (inst
.operands
[0].writeback
,
6558 _("writeback used in preload instruction"));
6559 constraint (!inst
.operands
[0].preind
,
6560 _("unindexed addressing used in preload instruction"));
6561 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
6564 /* ARMv7: PLI <addr_mode> */
6568 constraint (!inst
.operands
[0].isreg
,
6569 _("'[' expected after PLI mnemonic"));
6570 constraint (inst
.operands
[0].postind
,
6571 _("post-indexed expression used in preload instruction"));
6572 constraint (inst
.operands
[0].writeback
,
6573 _("writeback used in preload instruction"));
6574 constraint (!inst
.operands
[0].preind
,
6575 _("unindexed addressing used in preload instruction"));
6576 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
6577 inst
.instruction
&= ~PRE_INDEX
;
6583 inst
.operands
[1] = inst
.operands
[0];
6584 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
6585 inst
.operands
[0].isreg
= 1;
6586 inst
.operands
[0].writeback
= 1;
6587 inst
.operands
[0].reg
= REG_SP
;
6591 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
6592 word at the specified address and the following word
6594 Unconditionally executed.
6595 Error if Rn is R15. */
6600 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6601 if (inst
.operands
[0].writeback
)
6602 inst
.instruction
|= WRITE_BACK
;
6605 /* ARM V6 ssat (argument parse). */
6610 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6611 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
6612 inst
.instruction
|= inst
.operands
[2].reg
;
6614 if (inst
.operands
[3].present
)
6615 encode_arm_shift (3);
6618 /* ARM V6 usat (argument parse). */
6623 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6624 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
6625 inst
.instruction
|= inst
.operands
[2].reg
;
6627 if (inst
.operands
[3].present
)
6628 encode_arm_shift (3);
6631 /* ARM V6 ssat16 (argument parse). */
6636 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6637 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
6638 inst
.instruction
|= inst
.operands
[2].reg
;
6644 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6645 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
6646 inst
.instruction
|= inst
.operands
[2].reg
;
6649 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
6650 preserving the other bits.
6652 setend <endian_specifier>, where <endian_specifier> is either
6658 if (inst
.operands
[0].imm
)
6659 inst
.instruction
|= 0x200;
6665 unsigned int Rm
= (inst
.operands
[1].present
6666 ? inst
.operands
[1].reg
6667 : inst
.operands
[0].reg
);
6669 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6670 inst
.instruction
|= Rm
;
6671 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
6673 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6674 inst
.instruction
|= SHIFT_BY_REG
;
6677 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
6683 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
6684 inst
.reloc
.pc_rel
= 0;
6690 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
6691 inst
.reloc
.pc_rel
= 0;
6694 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
6695 SMLAxy{cond} Rd,Rm,Rs,Rn
6696 SMLAWy{cond} Rd,Rm,Rs,Rn
6697 Error if any register is R15. */
6702 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6703 inst
.instruction
|= inst
.operands
[1].reg
;
6704 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6705 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
6708 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
6709 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
6710 Error if any register is R15.
6711 Warning if Rdlo == Rdhi. */
6716 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6717 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6718 inst
.instruction
|= inst
.operands
[2].reg
;
6719 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
6721 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
6722 as_tsktsk (_("rdhi and rdlo must be different"));
6725 /* ARM V5E (El Segundo) signed-multiply (argument parse)
6726 SMULxy{cond} Rd,Rm,Rs
6727 Error if any register is R15. */
6732 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6733 inst
.instruction
|= inst
.operands
[1].reg
;
6734 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
6737 /* ARM V6 srs (argument parse). */
6742 inst
.instruction
|= inst
.operands
[0].imm
;
6743 if (inst
.operands
[0].writeback
)
6744 inst
.instruction
|= WRITE_BACK
;
6747 /* ARM V6 strex (argument parse). */
6752 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
6753 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
6754 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
6755 || inst
.operands
[2].negative
6756 /* See comment in do_ldrex(). */
6757 || (inst
.operands
[2].reg
== REG_PC
),
6760 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
6761 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
6763 constraint (inst
.reloc
.exp
.X_op
!= O_constant
6764 || inst
.reloc
.exp
.X_add_number
!= 0,
6765 _("offset must be zero in ARM encoding"));
6767 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6768 inst
.instruction
|= inst
.operands
[1].reg
;
6769 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6770 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
6776 constraint (inst
.operands
[1].reg
% 2 != 0,
6777 _("even register required"));
6778 constraint (inst
.operands
[2].present
6779 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
6780 _("can only store two consecutive registers"));
6781 /* If op 2 were present and equal to PC, this function wouldn't
6782 have been called in the first place. */
6783 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
6785 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
6786 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
6787 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
6790 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6791 inst
.instruction
|= inst
.operands
[1].reg
;
6792 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6795 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
6796 extends it to 32-bits, and adds the result to a value in another
6797 register. You can specify a rotation by 0, 8, 16, or 24 bits
6798 before extracting the 16-bit value.
6799 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
6800 Condition defaults to COND_ALWAYS.
6801 Error if any register uses R15. */
6806 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6807 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6808 inst
.instruction
|= inst
.operands
[2].reg
;
6809 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
6814 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
6815 Condition defaults to COND_ALWAYS.
6816 Error if any register uses R15. */
6821 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6822 inst
.instruction
|= inst
.operands
[1].reg
;
6823 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
6826 /* VFP instructions. In a logical order: SP variant first, monad
6827 before dyad, arithmetic then move then load/store. */
6830 do_vfp_sp_monadic (void)
6832 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6833 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
6837 do_vfp_sp_dyadic (void)
6839 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6840 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
6841 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
6845 do_vfp_sp_compare_z (void)
6847 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6851 do_vfp_dp_sp_cvt (void)
6853 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6854 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
6858 do_vfp_sp_dp_cvt (void)
6860 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6861 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
6865 do_vfp_reg_from_sp (void)
6867 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6868 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
6872 do_vfp_reg2_from_sp2 (void)
6874 constraint (inst
.operands
[2].imm
!= 2,
6875 _("only two consecutive VFP SP registers allowed here"));
6876 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6877 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6878 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
6882 do_vfp_sp_from_reg (void)
6884 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
6885 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6889 do_vfp_sp2_from_reg2 (void)
6891 constraint (inst
.operands
[0].imm
!= 2,
6892 _("only two consecutive VFP SP registers allowed here"));
6893 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
6894 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6895 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6899 do_vfp_sp_ldst (void)
6901 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
6902 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
6906 do_vfp_dp_ldst (void)
6908 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6909 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
6914 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
6916 if (inst
.operands
[0].writeback
)
6917 inst
.instruction
|= WRITE_BACK
;
6919 constraint (ldstm_type
!= VFP_LDSTMIA
,
6920 _("this addressing mode requires base-register writeback"));
6921 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6922 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
6923 inst
.instruction
|= inst
.operands
[1].imm
;
6927 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
6931 if (inst
.operands
[0].writeback
)
6932 inst
.instruction
|= WRITE_BACK
;
6934 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
6935 _("this addressing mode requires base-register writeback"));
6937 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6938 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
6940 count
= inst
.operands
[1].imm
<< 1;
6941 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
6944 inst
.instruction
|= count
;
6948 do_vfp_sp_ldstmia (void)
6950 vfp_sp_ldstm (VFP_LDSTMIA
);
6954 do_vfp_sp_ldstmdb (void)
6956 vfp_sp_ldstm (VFP_LDSTMDB
);
6960 do_vfp_dp_ldstmia (void)
6962 vfp_dp_ldstm (VFP_LDSTMIA
);
6966 do_vfp_dp_ldstmdb (void)
6968 vfp_dp_ldstm (VFP_LDSTMDB
);
6972 do_vfp_xp_ldstmia (void)
6974 vfp_dp_ldstm (VFP_LDSTMIAX
);
6978 do_vfp_xp_ldstmdb (void)
6980 vfp_dp_ldstm (VFP_LDSTMDBX
);
6984 do_vfp_dp_rd_rm (void)
6986 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
6987 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
6991 do_vfp_dp_rn_rd (void)
6993 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
6994 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
6998 do_vfp_dp_rd_rn (void)
7000 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7001 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
7005 do_vfp_dp_rd_rn_rm (void)
7007 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7008 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
7009 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
7015 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7019 do_vfp_dp_rm_rd_rn (void)
7021 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
7022 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
7023 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
7026 /* VFPv3 instructions. */
7028 do_vfp_sp_const (void)
7030 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7031 inst
.instruction
|= (inst
.operands
[1].imm
& 15) << 16;
7032 inst
.instruction
|= (inst
.operands
[1].imm
>> 4);
7036 do_vfp_dp_const (void)
7038 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7039 inst
.instruction
|= (inst
.operands
[1].imm
& 15) << 16;
7040 inst
.instruction
|= (inst
.operands
[1].imm
>> 4);
7044 vfp_conv (int srcsize
)
7046 unsigned immbits
= srcsize
- inst
.operands
[1].imm
;
7047 inst
.instruction
|= (immbits
& 1) << 5;
7048 inst
.instruction
|= (immbits
>> 1);
7052 do_vfp_sp_conv_16 (void)
7054 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7059 do_vfp_dp_conv_16 (void)
7061 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7066 do_vfp_sp_conv_32 (void)
7068 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
7073 do_vfp_dp_conv_32 (void)
7075 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
7080 /* FPA instructions. Also in a logical order. */
7085 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7086 inst
.instruction
|= inst
.operands
[1].reg
;
7090 do_fpa_ldmstm (void)
7092 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7093 switch (inst
.operands
[1].imm
)
7095 case 1: inst
.instruction
|= CP_T_X
; break;
7096 case 2: inst
.instruction
|= CP_T_Y
; break;
7097 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
7102 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
7104 /* The instruction specified "ea" or "fd", so we can only accept
7105 [Rn]{!}. The instruction does not really support stacking or
7106 unstacking, so we have to emulate these by setting appropriate
7107 bits and offsets. */
7108 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7109 || inst
.reloc
.exp
.X_add_number
!= 0,
7110 _("this instruction does not support indexing"));
7112 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
7113 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
7115 if (!(inst
.instruction
& INDEX_UP
))
7116 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
7118 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
7120 inst
.operands
[2].preind
= 0;
7121 inst
.operands
[2].postind
= 1;
7125 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
7128 /* iWMMXt instructions: strictly in alphabetical order. */
7131 do_iwmmxt_tandorc (void)
7133 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
7137 do_iwmmxt_textrc (void)
7139 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7140 inst
.instruction
|= inst
.operands
[1].imm
;
7144 do_iwmmxt_textrm (void)
7146 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7147 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7148 inst
.instruction
|= inst
.operands
[2].imm
;
7152 do_iwmmxt_tinsr (void)
7154 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7155 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7156 inst
.instruction
|= inst
.operands
[2].imm
;
7160 do_iwmmxt_tmia (void)
7162 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
7163 inst
.instruction
|= inst
.operands
[1].reg
;
7164 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7168 do_iwmmxt_waligni (void)
7170 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7171 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7172 inst
.instruction
|= inst
.operands
[2].reg
;
7173 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
7177 do_iwmmxt_wmov (void)
7179 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7180 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7181 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7182 inst
.instruction
|= inst
.operands
[1].reg
;
7186 do_iwmmxt_wldstbh (void)
7189 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7191 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
7193 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
7194 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
7198 do_iwmmxt_wldstw (void)
7200 /* RIWR_RIWC clears .isreg for a control register. */
7201 if (!inst
.operands
[0].isreg
)
7203 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
7204 inst
.instruction
|= 0xf0000000;
7207 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7208 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
7212 do_iwmmxt_wldstd (void)
7214 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7215 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
/* Encode WSHUFH: wRd at bit 12, wRn at bit 16.  The 8-bit shuffle
   immediate is split: its high nibble lands at bit 20, its low nibble
   in the low bits of the instruction.  */
static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}
/* Encode the WZERO pseudo-instruction.  */
static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg: the single operand
     fills all three register fields (bits 0, 12 and 16).  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
7236 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
7237 operations first, then control, shift, and load/store. */
7239 /* Insns like "foo X,Y,Z". */
/* Encode a Cirrus Maverick three-register instruction
   ("foo X,Y,Z"): first operand at bit 16, second in the low bits,
   third at bit 12.  */
static void
do_mav_triple (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
7249 /* Insns like "foo W,X,Y,Z".
7250 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
7255 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
7256 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7257 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7258 inst
.instruction
|= inst
.operands
[3].reg
;
7261 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
7265 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7268 /* Maverick shift immediate instructions.
7269 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
7270 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
7275 int imm
= inst
.operands
[2].imm
;
7277 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7278 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7280 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
7281 Bits 5-7 of the insn should have bits 4-6 of the immediate.
7282 Bit 4 should be 0. */
7283 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
7285 inst
.instruction
|= imm
;
7288 /* XScale instructions. Also sorted arithmetic before move. */
7290 /* Xscale multiply-accumulate (argument parse)
7293 MIAxycc acc0,Rm,Rs. */
7298 inst
.instruction
|= inst
.operands
[1].reg
;
7299 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
7302 /* Xscale move-accumulator-register (argument parse)
7304 MARcc acc0,RdLo,RdHi. */
7309 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7310 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7313 /* Xscale move-register-accumulator (argument parse)
7315 MRAcc RdLo,RdHi,acc0. */
7320 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
7321 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7322 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7325 /* Encoding functions relevant only to Thumb. */
7327 /* inst.operands[i] is a shifted-register operand; encode
7328 it into inst.instruction in the format used by Thumb32. */
static void
encode_thumb32_shifted_operand (int i)
{
  /* Shift amount from the parsed expression; shift type from the
     operand itself.  */
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  /* Thumb32 data-processing only takes an immediate shift count.  */
  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      /* LSL/ROR by 32 cannot be represented; ASR/LSR by 32 are
	 encoded as a count of 0 below.  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* Normalize to the encoded form: a zero count means LSL #0,
	 while ASR/LSR #32 are encoded with a count of 0.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* Shift type at bits 4-5; count split across imm3 (bits 12-14)
	 and imm2 (bits 6-7).  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
7363 /* inst.operands[i] was set up by parse_address. Encode it into a
7364 Thumb32 format load or store instruction. Reject forms that cannot
7365 be used with such instructions. If is_t is true, reject forms that
7366 cannot be used with a T instruction; if is_d is true, reject forms
7367 that cannot be used with a D instruction. */
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register always goes in Rn (bit 16).  */
  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #n}].  */
      constraint (is_pc, _("cannot use register index with PC-relative addressing"));
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  /* LSL amount (0-3) goes in bits 4-5.  */
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      /* No offset relocation is needed for the register form.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate pre-indexed form: [Rn, #off]{!}.  */
      constraint (is_pc && inst.operands[i].writeback,
		  _("cannot use writeback with PC-relative addressing"));
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));

      if (is_d)
	{
	  /* D (doubleword) instructions use the P/W bits of the
	     coprocessor-style layout.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  /* Single load/store: P=1 U-undetermined, W in bit 8.  */
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed addressing always writes back the base.  */
      assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(adc,   4140, eb400000),			\
  X(adcs,  4140, eb500000),			\
  X(add,   1c00, eb000000),			\
  X(adds,  1c00, eb100000),			\
  X(addi,  0000, f1000000),			\
  X(addis, 0000, f1100000),			\
  X(add_pc,000f, f20f0000),			\
  X(add_sp,000d, f10d0000),			\
  X(adr,   000f, f20f0000),			\
  X(and,   4000, ea000000),			\
  X(ands,  4000, ea100000),			\
  X(asr,   1000, fa40f000),			\
  X(asrs,  1000, fa50f000),			\
  X(b,     e000, f000b000),			\
  X(bcond, d000, f0008000),			\
  X(bic,   4380, ea200000),			\
  X(bics,  4380, ea300000),			\
  X(cmn,   42c0, eb100f00),			\
  X(cmp,   2800, ebb00f00),			\
  X(cpsie, b660, f3af8400),			\
  X(cpsid, b670, f3af8600),			\
  X(cpy,   4600, ea4f0000),			\
  X(dec_sp,80dd, f1bd0d00),			\
  X(eor,   4040, ea800000),			\
  X(eors,  4040, ea900000),			\
  X(inc_sp,00dd, f10d0d00),			\
  X(ldmia, c800, e8900000),			\
  X(ldr,   6800, f8500000),			\
  X(ldrb,  7800, f8100000),			\
  X(ldrh,  8800, f8300000),			\
  X(ldrsb, 5600, f9100000),			\
  X(ldrsh, 5e00, f9300000),			\
  X(ldr_pc,4800, f85f0000),			\
  X(ldr_pc2,4800, f85f0000),			\
  X(ldr_sp,9800, f85d0000),			\
  X(lsl,   0000, fa00f000),			\
  X(lsls,  0000, fa10f000),			\
  X(lsr,   0800, fa20f000),			\
  X(lsrs,  0800, fa30f000),			\
  X(mov,   2000, ea4f0000),			\
  X(movs,  2000, ea5f0000),			\
  X(mul,   4340, fb00f000),			\
  X(muls,  4340, ffffffff), /* no 32b muls */	\
  X(mvn,   43c0, ea6f0000),			\
  X(mvns,  43c0, ea7f0000),			\
  X(neg,   4240, f1c00000), /* rsb #0 */	\
  X(negs,  4240, f1d00000), /* rsbs #0 */	\
  X(orr,   4300, ea400000),			\
  X(orrs,  4300, ea500000),			\
  X(pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(rev,   ba00, fa90f080),			\
  X(rev16, ba40, fa90f090),			\
  X(revsh, bac0, fa90f0b0),			\
  X(ror,   41c0, fa60f000),			\
  X(rors,  41c0, fa70f000),			\
  X(sbc,   4180, eb600000),			\
  X(sbcs,  4180, eb700000),			\
  X(stmia, c000, e8800000),			\
  X(str,   6000, f8400000),			\
  X(strb,  7000, f8000000),			\
  X(strh,  8000, f8200000),			\
  X(str_sp,9000, f84d0000),			\
  X(sub,   1e00, eba00000),			\
  X(subs,  1e00, ebb00000),			\
  X(subi,  8000, f1a00000),			\
  X(subis, 8000, f1b00000),			\
  X(sxtb,  b240, fa4ff080),			\
  X(sxth,  b200, fa0ff080),			\
  X(tst,   4200, ea100f00),			\
  X(uxtb,  b2c0, fa5ff080),			\
  X(uxth,  b280, fa1ff080),			\
  X(nop,   bf00, f3af8000),			\
  X(yield, bf10, f3af8001),			\
  X(wfe,   bf20, f3af8002),			\
  X(wfi,   bf30, f3af8003),			\
  X(sev,   bf40, f3af8004), /* was f3af9004 - SEV.W is F3AF 8004 */

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM_##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
7544 /* Thumb instruction encoders, in alphabetical order. */
/* Encode a Thumb-32 ADDW/SUBW (plain 12-bit immediate) instruction:
   Rn at bit 16, Rd at bit 8, immediate filled in by relocation.  */
static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  constraint (Rd == 15, _("PC not allowed as destination"));
  inst.instruction |= (Rn << 16) | (Rd << 8);
  /* The 12-bit immediate is resolved later via this relocation.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
}
7560 /* Parse an add or subtract instruction. We get here with inst.instruction
7561 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
7568 Rd
= inst
.operands
[0].reg
;
7569 Rs
= (inst
.operands
[1].present
7570 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
7571 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
7579 flags
= (inst
.instruction
== T_MNEM_adds
7580 || inst
.instruction
== T_MNEM_subs
);
7582 narrow
= (current_it_mask
== 0);
7584 narrow
= (current_it_mask
!= 0);
7585 if (!inst
.operands
[2].isreg
)
7588 if (inst
.size_req
!= 4)
7592 add
= (inst
.instruction
== T_MNEM_add
7593 || inst
.instruction
== T_MNEM_adds
);
7594 /* Attempt to use a narrow opcode, with relaxation if
7596 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
7597 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
7598 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
7599 opcode
= T_MNEM_add_sp
;
7600 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
7601 opcode
= T_MNEM_add_pc
;
7602 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
7605 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
7607 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
7611 inst
.instruction
= THUMB_OP16(opcode
);
7612 inst
.instruction
|= (Rd
<< 4) | Rs
;
7613 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
7614 if (inst
.size_req
!= 2)
7615 inst
.relax
= opcode
;
7618 constraint (inst
.size_req
== 2, BAD_HIREG
);
7620 if (inst
.size_req
== 4
7621 || (inst
.size_req
!= 2 && !opcode
))
7623 /* ??? Convert large immediates to addw/subw. */
7624 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7625 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
7626 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7627 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7628 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
7633 Rn
= inst
.operands
[2].reg
;
7634 /* See if we can do this with a 16-bit instruction. */
7635 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
7637 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
7642 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
7643 || inst
.instruction
== T_MNEM_add
)
7646 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
7650 if (inst
.instruction
== T_MNEM_add
)
7654 inst
.instruction
= T_OPCODE_ADD_HI
;
7655 inst
.instruction
|= (Rd
& 8) << 4;
7656 inst
.instruction
|= (Rd
& 7);
7657 inst
.instruction
|= Rn
<< 3;
7660 /* ... because addition is commutative! */
7663 inst
.instruction
= T_OPCODE_ADD_HI
;
7664 inst
.instruction
|= (Rd
& 8) << 4;
7665 inst
.instruction
|= (Rd
& 7);
7666 inst
.instruction
|= Rs
<< 3;
7671 /* If we get here, it can't be done in 16 bits. */
7672 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
7673 _("shift must be constant"));
7674 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7675 inst
.instruction
|= Rd
<< 8;
7676 inst
.instruction
|= Rs
<< 16;
7677 encode_thumb32_shifted_operand (2);
7682 constraint (inst
.instruction
== T_MNEM_adds
7683 || inst
.instruction
== T_MNEM_subs
,
7686 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
7688 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
7689 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
7692 inst
.instruction
= (inst
.instruction
== T_MNEM_add
7694 inst
.instruction
|= (Rd
<< 4) | Rs
;
7695 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
7699 Rn
= inst
.operands
[2].reg
;
7700 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
7702 /* We now have Rd, Rs, and Rn set to registers. */
7703 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
7705 /* Can't do this for SUB. */
7706 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
7707 inst
.instruction
= T_OPCODE_ADD_HI
;
7708 inst
.instruction
|= (Rd
& 8) << 4;
7709 inst
.instruction
|= (Rd
& 7);
7711 inst
.instruction
|= Rn
<< 3;
7713 inst
.instruction
|= Rs
<< 3;
7715 constraint (1, _("dest must overlap one source register"));
7719 inst
.instruction
= (inst
.instruction
== T_MNEM_add
7720 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
7721 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
7729 if (unified_syntax
&& inst
.size_req
== 0 && inst
.operands
[0].reg
<= 7)
7731 /* Defer to section relaxation. */
7732 inst
.relax
= inst
.instruction
;
7733 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7734 inst
.instruction
|= inst
.operands
[0].reg
<< 4;
7736 else if (unified_syntax
&& inst
.size_req
!= 2)
7738 /* Generate a 32-bit opcode. */
7739 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7740 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7741 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
7742 inst
.reloc
.pc_rel
= 1;
7746 /* Generate a 16-bit opcode. */
7747 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7748 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
7749 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
7750 inst
.reloc
.pc_rel
= 1;
7752 inst
.instruction
|= inst
.operands
[0].reg
<< 4;
7756 /* Arithmetic instructions for which there is just one 16-bit
7757 instruction encoding, and it allows only two low registers.
7758 For maximal compatibility with ARM syntax, we allow three register
7759 operands even when Thumb-32 instructions are not available, as long
7760 as the first two are identical. For instance, both "sbc r0,r1" and
7761 "sbc r0,r0,r1" are allowed. */
7767 Rd
= inst
.operands
[0].reg
;
7768 Rs
= (inst
.operands
[1].present
7769 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
7770 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
7771 Rn
= inst
.operands
[2].reg
;
7775 if (!inst
.operands
[2].isreg
)
7777 /* For an immediate, we always generate a 32-bit opcode;
7778 section relaxation will shrink it later if possible. */
7779 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7780 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
7781 inst
.instruction
|= Rd
<< 8;
7782 inst
.instruction
|= Rs
<< 16;
7783 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
7789 /* See if we can do this with a 16-bit instruction. */
7790 if (THUMB_SETS_FLAGS (inst
.instruction
))
7791 narrow
= current_it_mask
== 0;
7793 narrow
= current_it_mask
!= 0;
7795 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
7797 if (inst
.operands
[2].shifted
)
7799 if (inst
.size_req
== 4)
7805 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7806 inst
.instruction
|= Rd
;
7807 inst
.instruction
|= Rn
<< 3;
7811 /* If we get here, it can't be done in 16 bits. */
7812 constraint (inst
.operands
[2].shifted
7813 && inst
.operands
[2].immisreg
,
7814 _("shift must be constant"));
7815 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7816 inst
.instruction
|= Rd
<< 8;
7817 inst
.instruction
|= Rs
<< 16;
7818 encode_thumb32_shifted_operand (2);
7823 /* On its face this is a lie - the instruction does set the
7824 flags. However, the only supported mnemonic in this mode
7826 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
7828 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
7829 _("unshifted register required"));
7830 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
7831 constraint (Rd
!= Rs
,
7832 _("dest and source1 must be the same register"));
7834 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7835 inst
.instruction
|= Rd
;
7836 inst
.instruction
|= Rn
<< 3;
7840 /* Similarly, but for instructions where the arithmetic operation is
7841 commutative, so we can allow either of them to be different from
7842 the destination operand in a 16-bit instruction. For instance, all
7843 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
7850 Rd
= inst
.operands
[0].reg
;
7851 Rs
= (inst
.operands
[1].present
7852 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
7853 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
7854 Rn
= inst
.operands
[2].reg
;
7858 if (!inst
.operands
[2].isreg
)
7860 /* For an immediate, we always generate a 32-bit opcode;
7861 section relaxation will shrink it later if possible. */
7862 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7863 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
7864 inst
.instruction
|= Rd
<< 8;
7865 inst
.instruction
|= Rs
<< 16;
7866 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
7872 /* See if we can do this with a 16-bit instruction. */
7873 if (THUMB_SETS_FLAGS (inst
.instruction
))
7874 narrow
= current_it_mask
== 0;
7876 narrow
= current_it_mask
!= 0;
7878 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
7880 if (inst
.operands
[2].shifted
)
7882 if (inst
.size_req
== 4)
7889 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7890 inst
.instruction
|= Rd
;
7891 inst
.instruction
|= Rn
<< 3;
7896 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7897 inst
.instruction
|= Rd
;
7898 inst
.instruction
|= Rs
<< 3;
7903 /* If we get here, it can't be done in 16 bits. */
7904 constraint (inst
.operands
[2].shifted
7905 && inst
.operands
[2].immisreg
,
7906 _("shift must be constant"));
7907 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
7908 inst
.instruction
|= Rd
<< 8;
7909 inst
.instruction
|= Rs
<< 16;
7910 encode_thumb32_shifted_operand (2);
7915 /* On its face this is a lie - the instruction does set the
7916 flags. However, the only supported mnemonic in this mode
7918 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
7920 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
7921 _("unshifted register required"));
7922 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
7924 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
7925 inst
.instruction
|= Rd
;
7928 inst
.instruction
|= Rn
<< 3;
7930 inst
.instruction
|= Rs
<< 3;
7932 constraint (1, _("dest must overlap one source register"));
7939 if (inst
.operands
[0].present
)
7941 constraint ((inst
.instruction
& 0xf0) != 0x40
7942 && inst
.operands
[0].imm
!= 0xf,
7943 "bad barrier type");
7944 inst
.instruction
|= inst
.operands
[0].imm
;
7947 inst
.instruction
|= 0xf;
7953 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
7954 constraint (msb
> 32, _("bit-field extends past end of register"));
7955 /* The instruction encoding stores the LSB and MSB,
7956 not the LSB and width. */
7957 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7958 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
7959 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
7960 inst
.instruction
|= msb
- 1;
7968 /* #0 in second position is alternative syntax for bfc, which is
7969 the same instruction but with REG_PC in the Rm field. */
7970 if (!inst
.operands
[1].isreg
)
7971 inst
.operands
[1].reg
= REG_PC
;
7973 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
7974 constraint (msb
> 32, _("bit-field extends past end of register"));
7975 /* The instruction encoding stores the LSB and MSB,
7976 not the LSB and width. */
7977 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7978 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7979 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
7980 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
7981 inst
.instruction
|= msb
- 1;
7987 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
7988 _("bit-field extends past end of register"));
7989 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7990 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7991 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
7992 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
7993 inst
.instruction
|= inst
.operands
[3].imm
- 1;
7996 /* ARM V5 Thumb BLX (argument parse)
7997 BLX <target_addr> which is BLX(1)
7998 BLX <Rm> which is BLX(2)
7999 Unfortunately, there are two different opcodes for this mnemonic.
8000 So, the insns[].value is not used, and the code here zaps values
8001 into inst.instruction.
8003 ??? How to take advantage of the additional two bits of displacement
8004 available in Thumb32 mode? Need new relocation? */
8009 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8010 if (inst
.operands
[0].isreg
)
8011 /* We have a register, so this is BLX(2). */
8012 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
8015 /* No register. This must be BLX(1). */
8016 inst
.instruction
= 0xf000e800;
8018 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8019 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
8022 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BLX
;
8023 inst
.reloc
.pc_rel
= 1;
8033 if (current_it_mask
)
8035 /* Conditional branches inside IT blocks are encoded as unconditional
8038 /* A branch must be the last instruction in an IT block. */
8039 constraint (current_it_mask
!= 0x10, BAD_BRANCH
);
8044 if (cond
!= COND_ALWAYS
)
8045 opcode
= T_MNEM_bcond
;
8047 opcode
= inst
.instruction
;
8049 if (unified_syntax
&& inst
.size_req
== 4)
8051 inst
.instruction
= THUMB_OP32(opcode
);
8052 if (cond
== COND_ALWAYS
)
8053 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
8056 assert (cond
!= 0xF);
8057 inst
.instruction
|= cond
<< 22;
8058 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
8063 inst
.instruction
= THUMB_OP16(opcode
);
8064 if (cond
== COND_ALWAYS
)
8065 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
8068 inst
.instruction
|= cond
<< 8;
8069 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
8071 /* Allow section relaxation. */
8072 if (unified_syntax
&& inst
.size_req
!= 2)
8073 inst
.relax
= opcode
;
8076 inst
.reloc
.pc_rel
= 1;
8082 constraint (inst
.cond
!= COND_ALWAYS
,
8083 _("instruction is always unconditional"));
8084 if (inst
.operands
[0].present
)
8086 constraint (inst
.operands
[0].imm
> 255,
8087 _("immediate value out of range"));
8088 inst
.instruction
|= inst
.operands
[0].imm
;
/* Encode a Thumb BL/BLX-style 23-bit branch (argument parse).  */
static void
do_t_branch23 (void)
{
  /* A branch must be the last instruction in an IT block.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.reloc.type   = BFD_RELOC_THUMB_PCREL_BRANCH23;
  inst.reloc.pc_rel = 1;

  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	 inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
}
8114 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8115 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
8116 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8117 should cause the alignment to be checked once it is known. This is
8118 because BX PC only works if the instruction is word aligned. */
8124 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
8125 if (inst
.operands
[0].reg
== REG_PC
)
8126 as_tsktsk (_("use of r15 in bxj is not really useful"));
8128 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8134 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8135 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8136 inst
.instruction
|= inst
.operands
[1].reg
;
8142 constraint (current_it_mask
, BAD_NOT_IT
);
8143 inst
.instruction
|= inst
.operands
[0].imm
;
8149 constraint (current_it_mask
, BAD_NOT_IT
);
8151 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
8152 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
8154 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
8155 inst
.instruction
= 0xf3af8000;
8156 inst
.instruction
|= imod
<< 9;
8157 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
8158 if (inst
.operands
[1].present
)
8159 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
8163 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
8164 && (inst
.operands
[0].imm
& 4),
8165 _("selected processor does not support 'A' form "
8166 "of this instruction"));
8167 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
8168 _("Thumb does not support the 2-argument "
8169 "form of this instruction"));
8170 inst
.instruction
|= inst
.operands
[0].imm
;
8174 /* THUMB CPY instruction (argument parse). */
8179 if (inst
.size_req
== 4)
8181 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
8182 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8183 inst
.instruction
|= inst
.operands
[1].reg
;
8187 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
8188 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
8189 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8196 constraint (current_it_mask
, BAD_NOT_IT
);
8197 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
8198 inst
.instruction
|= inst
.operands
[0].reg
;
8199 inst
.reloc
.pc_rel
= 1;
8200 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
8206 inst
.instruction
|= inst
.operands
[0].imm
;
8212 if (!inst
.operands
[1].present
)
8213 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8214 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8215 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8216 inst
.instruction
|= inst
.operands
[2].reg
;
8222 if (unified_syntax
&& inst
.size_req
== 4)
8223 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8225 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8231 unsigned int cond
= inst
.operands
[0].imm
;
8233 constraint (current_it_mask
, BAD_NOT_IT
);
8234 current_it_mask
= (inst
.instruction
& 0xf) | 0x10;
8237 /* If the condition is a negative condition, invert the mask. */
8238 if ((cond
& 0x1) == 0x0)
8240 unsigned int mask
= inst
.instruction
& 0x000f;
8242 if ((mask
& 0x7) == 0)
8243 /* no conversion needed */;
8244 else if ((mask
& 0x3) == 0)
8246 else if ((mask
& 0x1) == 0)
8251 inst
.instruction
&= 0xfff0;
8252 inst
.instruction
|= mask
;
8255 inst
.instruction
|= cond
<< 4;
8261 /* This really doesn't seem worth it. */
8262 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
8263 _("expression too complex"));
8264 constraint (inst
.operands
[1].writeback
,
8265 _("Thumb load/store multiple does not support {reglist}^"));
8269 /* See if we can use a 16-bit instruction. */
8270 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
8271 && inst
.size_req
!= 4
8272 && inst
.operands
[0].reg
<= 7
8273 && !(inst
.operands
[1].imm
& ~0xff)
8274 && (inst
.instruction
== T_MNEM_stmia
8275 ? inst
.operands
[0].writeback
8276 : (inst
.operands
[0].writeback
8277 == !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))))
8279 if (inst
.instruction
== T_MNEM_stmia
8280 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
8281 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
8282 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8283 inst
.operands
[0].reg
);
8285 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8286 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8287 inst
.instruction
|= inst
.operands
[1].imm
;
8291 if (inst
.operands
[1].imm
& (1 << 13))
8292 as_warn (_("SP should not be in register list"));
8293 if (inst
.instruction
== T_MNEM_stmia
)
8295 if (inst
.operands
[1].imm
& (1 << 15))
8296 as_warn (_("PC should not be in register list"));
8297 if (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
8298 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8299 inst
.operands
[0].reg
);
8303 if (inst
.operands
[1].imm
& (1 << 14)
8304 && inst
.operands
[1].imm
& (1 << 15))
8305 as_warn (_("LR and PC should not both be in register list"));
8306 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
8307 && inst
.operands
[0].writeback
)
8308 as_warn (_("base register should not be in register list "
8309 "when written back"));
8311 if (inst
.instruction
< 0xffff)
8312 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8313 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8314 inst
.instruction
|= inst
.operands
[1].imm
;
8315 if (inst
.operands
[0].writeback
)
8316 inst
.instruction
|= WRITE_BACK
;
8321 constraint (inst
.operands
[0].reg
> 7
8322 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
8323 if (inst
.instruction
== T_MNEM_stmia
)
8325 if (!inst
.operands
[0].writeback
)
8326 as_warn (_("this instruction will write back the base register"));
8327 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
8328 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
8329 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8330 inst
.operands
[0].reg
);
8334 if (!inst
.operands
[0].writeback
8335 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
8336 as_warn (_("this instruction will write back the base register"));
8337 else if (inst
.operands
[0].writeback
8338 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
8339 as_warn (_("this instruction will not write back the base register"));
8342 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8343 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8344 inst
.instruction
|= inst
.operands
[1].imm
;
8351 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
8352 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
8353 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
8354 || inst
.operands
[1].negative
,
8357 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8358 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8359 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
8365 if (!inst
.operands
[1].present
)
8367 constraint (inst
.operands
[0].reg
== REG_LR
,
8368 _("r14 not allowed as first register "
8369 "when second register is omitted"));
8370 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8372 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
8375 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8376 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
8377 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8383 unsigned long opcode
;
8386 opcode
= inst
.instruction
;
8389 if (!inst
.operands
[1].isreg
)
8391 if (opcode
<= 0xffff)
8392 inst
.instruction
= THUMB_OP32 (opcode
);
8393 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
8396 if (inst
.operands
[1].isreg
8397 && !inst
.operands
[1].writeback
8398 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
8399 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
8401 && inst
.size_req
!= 4)
8403 /* Insn may have a 16-bit form. */
8404 Rn
= inst
.operands
[1].reg
;
8405 if (inst
.operands
[1].immisreg
)
8407 inst
.instruction
= THUMB_OP16 (opcode
);
8409 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
8412 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
8413 && opcode
!= T_MNEM_ldrsb
)
8414 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
8415 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
8422 if (inst
.reloc
.pc_rel
)
8423 opcode
= T_MNEM_ldr_pc2
;
8425 opcode
= T_MNEM_ldr_pc
;
8429 if (opcode
== T_MNEM_ldr
)
8430 opcode
= T_MNEM_ldr_sp
;
8432 opcode
= T_MNEM_str_sp
;
8434 inst
.instruction
= inst
.operands
[0].reg
<< 8;
8438 inst
.instruction
= inst
.operands
[0].reg
;
8439 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8441 inst
.instruction
|= THUMB_OP16 (opcode
);
8442 if (inst
.size_req
== 2)
8443 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
8445 inst
.relax
= opcode
;
8449 /* Definitely a 32-bit variant. */
8450 inst
.instruction
= THUMB_OP32 (opcode
);
8451 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8452 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
8456 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
8458 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
8460 /* Only [Rn,Rm] is acceptable. */
8461 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
8462 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
8463 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
8464 || inst
.operands
[1].negative
,
8465 _("Thumb does not support this addressing mode"));
8466 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8470 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8471 if (!inst
.operands
[1].isreg
)
8472 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
8475 constraint (!inst
.operands
[1].preind
8476 || inst
.operands
[1].shifted
8477 || inst
.operands
[1].writeback
,
8478 _("Thumb does not support this addressing mode"));
8479 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
8481 constraint (inst
.instruction
& 0x0600,
8482 _("byte or halfword not valid for base register"));
8483 constraint (inst
.operands
[1].reg
== REG_PC
8484 && !(inst
.instruction
& THUMB_LOAD_BIT
),
8485 _("r15 based store not allowed"));
8486 constraint (inst
.operands
[1].immisreg
,
8487 _("invalid base register for register offset"));
8489 if (inst
.operands
[1].reg
== REG_PC
)
8490 inst
.instruction
= T_OPCODE_LDR_PC
;
8491 else if (inst
.instruction
& THUMB_LOAD_BIT
)
8492 inst
.instruction
= T_OPCODE_LDR_SP
;
8494 inst
.instruction
= T_OPCODE_STR_SP
;
8496 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8497 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
8501 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
8502 if (!inst
.operands
[1].immisreg
)
8504 /* Immediate offset. */
8505 inst
.instruction
|= inst
.operands
[0].reg
;
8506 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8507 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
8511 /* Register offset. */
8512 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
8513 constraint (inst
.operands
[1].negative
,
8514 _("Thumb does not support this addressing mode"));
8517 switch (inst
.instruction
)
8519 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
8520 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
8521 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
8522 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
8523 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
8524 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
8525 case 0x5600 /* ldrsb */:
8526 case 0x5e00 /* ldrsh */: break;
8530 inst
.instruction
|= inst
.operands
[0].reg
;
8531 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8532 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
8538 if (!inst
.operands
[1].present
)
8540 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8541 constraint (inst
.operands
[0].reg
== REG_LR
,
8542 _("r14 not allowed here"));
8544 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8545 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
8546 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
8553 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8554 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
8560 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8561 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8562 inst
.instruction
|= inst
.operands
[2].reg
;
8563 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
8569 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8570 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
8571 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8572 inst
.instruction
|= inst
.operands
[3].reg
;
8580 int r0off
= (inst
.instruction
== T_MNEM_mov
8581 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
8582 unsigned long opcode
;
8584 bfd_boolean low_regs
;
8586 low_regs
= (inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7);
8587 opcode
= inst
.instruction
;
8588 if (current_it_mask
)
8589 narrow
= opcode
!= T_MNEM_movs
;
8591 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
8592 if (inst
.size_req
== 4
8593 || inst
.operands
[1].shifted
)
8596 if (!inst
.operands
[1].isreg
)
8598 /* Immediate operand. */
8599 if (current_it_mask
== 0 && opcode
== T_MNEM_mov
)
8601 if (low_regs
&& narrow
)
8603 inst
.instruction
= THUMB_OP16 (opcode
);
8604 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8605 if (inst
.size_req
== 2)
8606 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
8608 inst
.relax
= opcode
;
8612 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8613 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
8614 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
8615 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8620 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8621 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
8622 encode_thumb32_shifted_operand (1);
8625 switch (inst
.instruction
)
8628 inst
.instruction
= T_OPCODE_MOV_HR
;
8629 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
8630 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
8631 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8635 /* We know we have low registers at this point.
8636 Generate ADD Rd, Rs, #0. */
8637 inst
.instruction
= T_OPCODE_ADD_I3
;
8638 inst
.instruction
|= inst
.operands
[0].reg
;
8639 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8645 inst
.instruction
= T_OPCODE_CMP_LR
;
8646 inst
.instruction
|= inst
.operands
[0].reg
;
8647 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8651 inst
.instruction
= T_OPCODE_CMP_HR
;
8652 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
8653 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
8654 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8661 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8662 if (inst
.operands
[1].isreg
)
8664 if (inst
.operands
[0].reg
< 8 && inst
.operands
[1].reg
< 8)
8666 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
8667 since a MOV instruction produces unpredictable results. */
8668 if (inst
.instruction
== T_OPCODE_MOV_I8
)
8669 inst
.instruction
= T_OPCODE_ADD_I3
;
8671 inst
.instruction
= T_OPCODE_CMP_LR
;
8673 inst
.instruction
|= inst
.operands
[0].reg
;
8674 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8678 if (inst
.instruction
== T_OPCODE_MOV_I8
)
8679 inst
.instruction
= T_OPCODE_MOV_HR
;
8681 inst
.instruction
= T_OPCODE_CMP_HR
;
8687 constraint (inst
.operands
[0].reg
> 7,
8688 _("only lo regs allowed with immediate"));
8689 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8690 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
8697 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8698 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf000) << 4;
8699 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0800) << 15;
8700 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0700) << 4;
8701 inst
.instruction
|= (inst
.operands
[1].imm
& 0x00ff);
8709 int r0off
= (inst
.instruction
== T_MNEM_mvn
8710 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
8713 if (inst
.size_req
== 4
8714 || inst
.instruction
> 0xffff
8715 || inst
.operands
[1].shifted
8716 || inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
8718 else if (inst
.instruction
== T_MNEM_cmn
)
8720 else if (THUMB_SETS_FLAGS (inst
.instruction
))
8721 narrow
= (current_it_mask
== 0);
8723 narrow
= (current_it_mask
!= 0);
8725 if (!inst
.operands
[1].isreg
)
8727 /* For an immediate, we always generate a 32-bit opcode;
8728 section relaxation will shrink it later if possible. */
8729 if (inst
.instruction
< 0xffff)
8730 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8731 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
8732 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
8733 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8737 /* See if we can do this with a 16-bit instruction. */
8740 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8741 inst
.instruction
|= inst
.operands
[0].reg
;
8742 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8746 constraint (inst
.operands
[1].shifted
8747 && inst
.operands
[1].immisreg
,
8748 _("shift must be constant"));
8749 if (inst
.instruction
< 0xffff)
8750 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8751 inst
.instruction
|= inst
.operands
[0].reg
<< r0off
;
8752 encode_thumb32_shifted_operand (1);
8758 constraint (inst
.instruction
> 0xffff
8759 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
8760 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
8761 _("unshifted register required"));
8762 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
8765 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8766 inst
.instruction
|= inst
.operands
[0].reg
;
8767 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8775 flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
8778 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7m
),
8779 _("selected processor does not support "
8780 "requested special purpose register"));
8784 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
8785 _("selected processor does not support "
8786 "requested special purpose register %x"));
8787 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8788 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
8789 _("'CPSR' or 'SPSR' expected"));
8792 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8793 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
8794 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
8802 constraint (!inst
.operands
[1].isreg
,
8803 _("Thumb encoding does not support an immediate here"));
8804 flags
= inst
.operands
[0].imm
;
8807 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
8808 _("selected processor does not support "
8809 "requested special purpose register"));
8813 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7m
),
8814 _("selected processor does not support "
8815 "requested special purpose register"));
8818 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
8819 inst
.instruction
|= (flags
& ~SPSR_BIT
) >> 8;
8820 inst
.instruction
|= (flags
& 0xff);
8821 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8827 if (!inst
.operands
[2].present
)
8828 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
8830 /* There is no 32-bit MULS and no 16-bit MUL. */
8831 if (unified_syntax
&& inst
.instruction
== T_MNEM_mul
)
8833 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8834 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8835 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8836 inst
.instruction
|= inst
.operands
[2].reg
<< 0;
8840 constraint (!unified_syntax
8841 && inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
8842 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
8845 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8846 inst
.instruction
|= inst
.operands
[0].reg
;
8848 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
8849 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
8850 else if (inst
.operands
[0].reg
== inst
.operands
[2].reg
)
8851 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8853 constraint (1, _("dest must overlap one source register"));
8860 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8861 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
8862 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8863 inst
.instruction
|= inst
.operands
[3].reg
;
8865 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
8866 as_tsktsk (_("rdhi and rdlo must be different"));
8874 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
8876 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8877 inst
.instruction
|= inst
.operands
[0].imm
;
8881 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8882 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
8887 constraint (inst
.operands
[0].present
,
8888 _("Thumb does not support NOP with hints"));
8889 inst
.instruction
= 0x46c0;
8900 if (THUMB_SETS_FLAGS (inst
.instruction
))
8901 narrow
= (current_it_mask
== 0);
8903 narrow
= (current_it_mask
!= 0);
8904 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
8906 if (inst
.size_req
== 4)
8911 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8912 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8913 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8917 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8918 inst
.instruction
|= inst
.operands
[0].reg
;
8919 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8924 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
8926 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
8928 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8929 inst
.instruction
|= inst
.operands
[0].reg
;
8930 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
8937 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8938 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8939 inst
.instruction
|= inst
.operands
[2].reg
;
8940 if (inst
.operands
[3].present
)
8942 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
8943 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
8944 _("expression too complex"));
8945 inst
.instruction
|= (val
& 0x1c) << 10;
8946 inst
.instruction
|= (val
& 0x03) << 6;
8953 if (!inst
.operands
[3].present
)
8954 inst
.instruction
&= ~0x00000020;
8961 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
8965 do_t_push_pop (void)
8969 constraint (inst
.operands
[0].writeback
,
8970 _("push/pop do not support {reglist}^"));
8971 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
8972 _("expression too complex"));
8974 mask
= inst
.operands
[0].imm
;
8975 if ((mask
& ~0xff) == 0)
8976 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8977 else if ((inst
.instruction
== T_MNEM_push
8978 && (mask
& ~0xff) == 1 << REG_LR
)
8979 || (inst
.instruction
== T_MNEM_pop
8980 && (mask
& ~0xff) == 1 << REG_PC
))
8982 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
8983 inst
.instruction
|= THUMB_PP_PC_LR
;
8986 else if (unified_syntax
)
8988 if (mask
& (1 << 13))
8989 inst
.error
= _("SP not allowed in register list");
8990 if (inst
.instruction
== T_MNEM_push
)
8992 if (mask
& (1 << 15))
8993 inst
.error
= _("PC not allowed in register list");
8997 if (mask
& (1 << 14)
8998 && mask
& (1 << 15))
8999 inst
.error
= _("LR and PC should not both be in register list");
9001 if ((mask
& (mask
- 1)) == 0)
9003 /* Single register push/pop implemented as str/ldr. */
9004 if (inst
.instruction
== T_MNEM_push
)
9005 inst
.instruction
= 0xf84d0d04; /* str reg, [sp, #-4]! */
9007 inst
.instruction
= 0xf85d0b04; /* ldr reg, [sp], #4 */
9008 mask
= ffs(mask
) - 1;
9012 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9016 inst
.error
= _("invalid register list to push/pop instruction");
9020 inst
.instruction
|= mask
;
9026 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9027 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9033 if (inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7
9034 && inst
.size_req
!= 4)
9036 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9037 inst
.instruction
|= inst
.operands
[0].reg
;
9038 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9040 else if (unified_syntax
)
9042 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9043 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9044 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9045 inst
.instruction
|= inst
.operands
[1].reg
;
9048 inst
.error
= BAD_HIREG
;
9056 Rd
= inst
.operands
[0].reg
;
9057 Rs
= (inst
.operands
[1].present
9058 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9059 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9061 inst
.instruction
|= Rd
<< 8;
9062 inst
.instruction
|= Rs
<< 16;
9063 if (!inst
.operands
[2].isreg
)
9065 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9066 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9069 encode_thumb32_shifted_operand (2);
9075 constraint (current_it_mask
, BAD_NOT_IT
);
9076 if (inst
.operands
[0].imm
)
9077 inst
.instruction
|= 0x8;
9083 if (!inst
.operands
[1].present
)
9084 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
9091 switch (inst
.instruction
)
9094 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
9096 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
9098 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
9100 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
9104 if (THUMB_SETS_FLAGS (inst
.instruction
))
9105 narrow
= (current_it_mask
== 0);
9107 narrow
= (current_it_mask
!= 0);
9108 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
9110 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
9112 if (inst
.operands
[2].isreg
9113 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
9114 || inst
.operands
[2].reg
> 7))
9116 if (inst
.size_req
== 4)
9121 if (inst
.operands
[2].isreg
)
9123 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9124 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9125 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9126 inst
.instruction
|= inst
.operands
[2].reg
;
9130 inst
.operands
[1].shifted
= 1;
9131 inst
.operands
[1].shift_kind
= shift_kind
;
9132 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
9133 ? T_MNEM_movs
: T_MNEM_mov
);
9134 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9135 encode_thumb32_shifted_operand (1);
9136 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
9137 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9142 if (inst
.operands
[2].isreg
)
9146 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
9147 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
9148 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
9149 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
9153 inst
.instruction
|= inst
.operands
[0].reg
;
9154 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
9160 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
9161 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
9162 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
9165 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
9166 inst
.instruction
|= inst
.operands
[0].reg
;
9167 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9173 constraint (inst
.operands
[0].reg
> 7
9174 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
9175 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9177 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
9179 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
9180 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
9181 _("source1 and dest must be same register"));
9183 switch (inst
.instruction
)
9185 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
9186 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
9187 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
9188 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
9192 inst
.instruction
|= inst
.operands
[0].reg
;
9193 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
9197 switch (inst
.instruction
)
9199 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
9200 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
9201 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
9202 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
9205 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
9206 inst
.instruction
|= inst
.operands
[0].reg
;
9207 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9215 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9216 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9217 inst
.instruction
|= inst
.operands
[2].reg
;
9223 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
9224 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9225 _("expression too complex"));
9226 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9227 inst
.instruction
|= (value
& 0xf000) >> 12;
9228 inst
.instruction
|= (value
& 0x0ff0);
9229 inst
.instruction
|= (value
& 0x000f) << 16;
9235 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9236 inst
.instruction
|= inst
.operands
[1].imm
- 1;
9237 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9239 if (inst
.operands
[3].present
)
9241 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9242 _("expression too complex"));
9244 if (inst
.reloc
.exp
.X_add_number
!= 0)
9246 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
9247 inst
.instruction
|= 0x00200000; /* sh bit */
9248 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x1c) << 10;
9249 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x03) << 6;
9251 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9258 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9259 inst
.instruction
|= inst
.operands
[1].imm
- 1;
9260 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9266 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9267 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9268 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9269 || inst
.operands
[2].negative
,
9272 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9273 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9274 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9275 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
9281 if (!inst
.operands
[2].present
)
9282 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
9284 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9285 || inst
.operands
[0].reg
== inst
.operands
[2].reg
9286 || inst
.operands
[0].reg
== inst
.operands
[3].reg
9287 || inst
.operands
[1].reg
== inst
.operands
[2].reg
,
9290 inst
.instruction
|= inst
.operands
[0].reg
;
9291 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9292 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9293 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9299 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9300 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9301 inst
.instruction
|= inst
.operands
[2].reg
;
9302 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
9308 if (inst
.instruction
<= 0xffff && inst
.size_req
!= 4
9309 && inst
.operands
[0].reg
<= 7 && inst
.operands
[1].reg
<= 7
9310 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
9312 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9313 inst
.instruction
|= inst
.operands
[0].reg
;
9314 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9316 else if (unified_syntax
)
9318 if (inst
.instruction
<= 0xffff)
9319 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9320 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9321 inst
.instruction
|= inst
.operands
[1].reg
;
9322 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
9326 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
9327 _("Thumb encoding does not support rotation"));
9328 constraint (1, BAD_HIREG
);
9335 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
9343 half
= (inst
.instruction
& 0x10) != 0;
9344 constraint (current_it_mask
&& current_it_mask
!= 0x10, BAD_BRANCH
);
9345 constraint (inst
.operands
[0].immisreg
,
9346 _("instruction requires register index"));
9347 constraint (inst
.operands
[0].imm
== 15,
9348 _("PC is not a valid index register"));
9349 constraint (!half
&& inst
.operands
[0].shifted
,
9350 _("instruction does not allow shifted index"));
9351 inst
.instruction
|= (inst
.operands
[0].reg
<< 16) | inst
.operands
[0].imm
;
9357 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9358 inst
.instruction
|= inst
.operands
[1].imm
;
9359 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9361 if (inst
.operands
[3].present
)
9363 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9364 _("expression too complex"));
9365 if (inst
.reloc
.exp
.X_add_number
!= 0)
9367 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
9368 inst
.instruction
|= 0x00200000; /* sh bit */
9370 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x1c) << 10;
9371 inst
.instruction
|= (inst
.reloc
.exp
.X_add_number
& 0x03) << 6;
9373 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9380 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9381 inst
.instruction
|= inst
.operands
[1].imm
;
9382 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9385 /* Neon instruction encoder helpers. */
9387 /* Encodings for the different types for various Neon opcodes. */
9389 /* An "invalid" code for the following tables. */
9392 struct neon_tab_entry
9395 unsigned float_or_poly
;
9396 unsigned scalar_or_imm
;
9399 /* Map overloaded Neon opcodes to their respective encodings. */
9400 #define NEON_ENC_TAB \
9401 X(vabd, 0x0000700, 0x1200d00, N_INV), \
9402 X(vmax, 0x0000600, 0x0000f00, N_INV), \
9403 X(vmin, 0x0000610, 0x0200f00, N_INV), \
9404 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
9405 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
9406 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
9407 X(vadd, 0x0000800, 0x0000d00, N_INV), \
9408 X(vsub, 0x1000800, 0x0200d00, N_INV), \
9409 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
9410 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
9411 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
9412 /* Register variants of the following two instructions are encoded as
9413 vcge / vcgt with the operands reversed. */ \
9414 X(vclt, 0x0000310, 0x1000e00, 0x1b10200), \
9415 X(vcle, 0x0000300, 0x1200e00, 0x1b10180), \
9416 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
9417 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
9418 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
9419 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
9420 X(vmlal, 0x0800800, N_INV, 0x0800240), \
9421 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
9422 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
9423 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
9424 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
9425 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
9426 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
9427 X(vshl, 0x0000400, N_INV, 0x0800510), \
9428 X(vqshl, 0x0000410, N_INV, 0x0800710), \
9429 X(vand, 0x0000110, N_INV, 0x0800030), \
9430 X(vbic, 0x0100110, N_INV, 0x0800030), \
9431 X(veor, 0x1000110, N_INV, N_INV), \
9432 X(vorn, 0x0300110, N_INV, 0x0800010), \
9433 X(vorr, 0x0200110, N_INV, 0x0800010), \
9434 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
9435 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
9436 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
9437 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
9438 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
9439 X(vst1, 0x0000000, 0x0800000, N_INV), \
9440 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
9441 X(vst2, 0x0000100, 0x0800100, N_INV), \
9442 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
9443 X(vst3, 0x0000200, 0x0800200, N_INV), \
9444 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
9445 X(vst4, 0x0000300, 0x0800300, N_INV), \
9446 X(vmovn, 0x1b20200, N_INV, N_INV), \
9447 X(vtrn, 0x1b20080, N_INV, N_INV), \
9448 X(vqmovn, 0x1b20200, N_INV, N_INV), \
9449 X(vqmovun, 0x1b20240, N_INV, N_INV)
9453 #define X(OPC,I,F,S) N_MNEM_##OPC
9458 static const struct neon_tab_entry neon_enc_tab
[] =
9460 #define X(OPC,I,F,S) { (I), (F), (S) }
9465 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9466 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9467 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9468 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9469 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9470 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9471 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9472 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9473 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9475 /* Shapes for instruction operands. Some (e.g. NS_DDD_QQQ) represent multiple
9476 shapes which an instruction can accept. The following mnemonic characters
9477 are used in the tag names for this enumeration:
9479 D - Neon D<n> register
9480 Q - Neon Q<n> register
9484 L - D<n> register list
9525 /* Bit masks used in type checking given instructions.
9526 'N_EQK' means the type must be the same as (or based on in some way) the key
9527 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
9528 set, various other bits can be set as well in order to modify the meaning of
9529 the type constraint. */
9552 N_KEY
= 0x080000, /* key element (main type specifier). */
9553 N_EQK
= 0x100000, /* given operand has the same type & size as the key. */
9554 N_DBL
= 0x000001, /* if N_EQK, this operand is twice the size. */
9555 N_HLF
= 0x000002, /* if N_EQK, this operand is half the size. */
9556 N_SGN
= 0x000004, /* if N_EQK, this operand is forced to be signed. */
9557 N_UNS
= 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
9558 N_INT
= 0x000010, /* if N_EQK, this operand is forced to be integer. */
9559 N_FLT
= 0x000020, /* if N_EQK, this operand is forced to be float. */
9560 N_SIZ
= 0x000040, /* if N_EQK, this operand is forced to be size-only. */
9562 N_MAX_NONSPECIAL
= N_F32
9565 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
9567 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
9568 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
9569 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
9570 #define N_SUF_32 (N_SU_32 | N_F32)
9571 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
9572 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
9574 /* Pass this as the first type argument to neon_check_type to ignore types
9576 #define N_IGNORE_TYPE (N_KEY | N_EQK)
9578 /* Check the shape of a Neon instruction (sizes of registers). Returns the more
9579 specific shape when there are two alternatives. For non-polymorphic shapes,
9580 checking is done during operand parsing, so is not implemented here. */
9582 static enum neon_shape
9583 neon_check_shape (enum neon_shape req
)
9585 #define RR(X) (inst.operands[(X)].isreg)
9586 #define RD(X) (inst.operands[(X)].isreg && !inst.operands[(X)].isquad)
9587 #define RQ(X) (inst.operands[(X)].isreg && inst.operands[(X)].isquad)
9588 #define IM(X) (!inst.operands[(X)].isreg && !inst.operands[(X)].isscalar)
9589 #define SC(X) (!inst.operands[(X)].isreg && inst.operands[(X)].isscalar)
9591 /* Fix missing optional operands. FIXME: we don't know at this point how
9592 many arguments we should have, so this makes the assumption that we have
9593 > 1. This is true of all current Neon opcodes, I think, but may not be
9594 true in the future. */
9595 if (!inst
.operands
[1].present
)
9596 inst
.operands
[1] = inst
.operands
[0];
9602 if (RD(0) && RD(1) && RD(2))
9604 else if (RQ(0) && RQ(1) && RQ(2))
9607 first_error (_("expected <Qd>, <Qn>, <Qm> or <Dd>, <Dn>, <Dm> "
9614 if (RD(0) && RD(1) && IM(2))
9616 else if (RQ(0) && RQ(1) && IM(2))
9619 first_error (_("expected <Qd>, <Qn>, #<imm> or <Dd>, <Dn>, #<imm> "
9626 if (RD(0) && RD(1) && RD(2) && IM(3))
9628 if (RQ(0) && RQ(1) && RQ(2) && IM(3))
9631 first_error (_("expected <Qd>, <Qn>, <Qm>, #<imm> or "
9632 "<Dd>, <Dn>, <Dm>, #<imm> operands"));
9638 if (RD(0) && RD(1) && SC(2))
9640 else if (RQ(0) && RQ(1) && SC(2))
9643 first_error (_("expected <Qd>, <Qn>, <Dm[x]> or <Dd>, <Dn>, <Dm[x]> "
9652 else if (RQ(0) && RQ(1))
9655 first_error (_("expected <Qd>, <Qm> or <Dd>, <Dm> operands"));
9663 else if (RQ(0) && SC(1))
9666 first_error (_("expected <Qd>, <Dm[x]> or <Dd>, <Dm[x]> operands"));
9674 else if (RQ(0) && RR(1))
9677 first_error (_("expected <Qd>, <Rm> or <Dd>, <Rm> operands"));
9685 else if (RQ(0) && IM(1))
9688 first_error (_("expected <Qd>, #<imm> or <Dd>, #<imm> operands"));
9705 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
9708 /* Allow modification to be made to types which are constrained to be
9709 based on the key element, based on bits set alongside N_EQK. */
9710 if ((typebits
& N_EQK
) != 0)
9712 if ((typebits
& N_HLF
) != 0)
9714 else if ((typebits
& N_DBL
) != 0)
9716 if ((typebits
& N_SGN
) != 0)
9717 *g_type
= NT_signed
;
9718 else if ((typebits
& N_UNS
) != 0)
9719 *g_type
= NT_unsigned
;
9720 else if ((typebits
& N_INT
) != 0)
9721 *g_type
= NT_integer
;
9722 else if ((typebits
& N_FLT
) != 0)
9724 else if ((typebits
& N_SIZ
) != 0)
9725 *g_type
= NT_untyped
;
9729 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
9730 operand type, i.e. the single type specified in a Neon instruction when it
9731 is the only one given. */
9733 static struct neon_type_el
9734 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
9736 struct neon_type_el dest
= *key
;
9738 assert ((thisarg
& N_EQK
) != 0);
9740 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
9745 /* Convert Neon type and size into compact bitmask representation. */
9747 static enum neon_type_mask
9748 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
9756 case 16: return N_16
;
9757 case 32: return N_32
;
9758 case 64: return N_64
;
9766 case 8: return N_I8
;
9767 case 16: return N_I16
;
9768 case 32: return N_I32
;
9769 case 64: return N_I64
;
9782 case 8: return N_P8
;
9783 case 16: return N_P16
;
9791 case 8: return N_S8
;
9792 case 16: return N_S16
;
9793 case 32: return N_S32
;
9794 case 64: return N_S64
;
9802 case 8: return N_U8
;
9803 case 16: return N_U16
;
9804 case 32: return N_U32
;
9805 case 64: return N_U64
;
9816 /* Convert compact Neon bitmask type representation to a type and size. Only
9817 handles the case where a single bit is set in the mask. */
9820 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
9821 enum neon_type_mask mask
)
9823 if ((mask
& N_EQK
) != 0)
9826 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
9828 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_P16
)) != 0)
9830 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
9832 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
)) != 0)
9837 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
9839 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
9840 *type
= NT_unsigned
;
9841 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
9843 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
9845 else if ((mask
& (N_P8
| N_P16
)) != 0)
9847 else if ((mask
& N_F32
) != 0)
9855 /* Modify a bitmask of allowed types. This is only needed for type
9859 modify_types_allowed (unsigned allowed
, unsigned mods
)
9862 enum neon_el_type type
;
9868 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
9870 if (el_type_of_type_chk (&type
, &size
, allowed
& i
) == SUCCESS
)
9872 neon_modify_type_size (mods
, &type
, &size
);
9873 destmask
|= type_chk_of_el_type (type
, size
);
9880 /* Check type and return type classification.
9881 The manual states (paraphrase): If one datatype is given, it indicates the
9883 - the second operand, if there is one
9884 - the operand, if there is no second operand
9885 - the result, if there are no operands.
9886 This isn't quite good enough though, so we use a concept of a "key" datatype
9887 which is set on a per-instruction basis, which is the one which matters when
9888 only one data type is written.
9889 Note: this function has side-effects (e.g. filling in missing operands). All
9890 Neon instructions should call it before performing bit encoding.
9893 static struct neon_type_el
9894 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
9897 unsigned i
, pass
, key_el
= 0;
9898 unsigned types
[NEON_MAX_TYPE_ELS
];
9899 enum neon_el_type k_type
= NT_invtype
;
9900 unsigned k_size
= -1u;
9901 struct neon_type_el badtype
= {NT_invtype
, -1};
9902 unsigned key_allowed
= 0;
9904 /* Optional registers in Neon instructions are always (not) in operand 1.
9905 Fill in the missing operand here, if it was omitted. */
9906 if (els
> 1 && !inst
.operands
[1].present
)
9907 inst
.operands
[1] = inst
.operands
[0];
9909 /* Suck up all the varargs. */
9911 for (i
= 0; i
< els
; i
++)
9913 unsigned thisarg
= va_arg (ap
, unsigned);
9914 if (thisarg
== N_IGNORE_TYPE
)
9920 if ((thisarg
& N_KEY
) != 0)
9925 if (inst
.vectype
.elems
> 0)
9926 for (i
= 0; i
< els
; i
++)
9927 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
9929 first_error (_("types specified in both the mnemonic and operands"));
9933 /* Duplicate inst.vectype elements here as necessary.
9934 FIXME: No idea if this is exactly the same as the ARM assembler,
9935 particularly when an insn takes one register and one non-register
9937 if (inst
.vectype
.elems
== 1 && els
> 1)
9940 inst
.vectype
.elems
= els
;
9941 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
9942 for (j
= 0; j
< els
; j
++)
9944 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
9947 else if (inst
.vectype
.elems
== 0 && els
> 0)
9950 /* No types were given after the mnemonic, so look for types specified
9951 after each operand. We allow some flexibility here; as long as the
9952 "key" operand has a type, we can infer the others. */
9953 for (j
= 0; j
< els
; j
++)
9954 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
9955 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
9957 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
9959 for (j
= 0; j
< els
; j
++)
9960 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
9961 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
9966 first_error (_("operand types can't be inferred"));
9970 else if (inst
.vectype
.elems
!= els
)
9972 first_error (_("type specifier has the wrong number of parts"));
9976 for (pass
= 0; pass
< 2; pass
++)
9978 for (i
= 0; i
< els
; i
++)
9980 unsigned thisarg
= types
[i
];
9981 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
9982 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
9983 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
9984 unsigned g_size
= inst
.vectype
.el
[i
].size
;
9986 /* Decay more-specific signed & unsigned types to sign-insensitive
9987 integer types if sign-specific variants are unavailable. */
9988 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
9989 && (types_allowed
& N_SU_ALL
) == 0)
9990 g_type
= NT_integer
;
9992 /* If only untyped args are allowed, decay any more specific types to
9993 them. Some instructions only care about signs for some element
9994 sizes, so handle that properly. */
9995 if ((g_size
== 8 && (types_allowed
& N_8
) != 0)
9996 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
9997 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
9998 || (g_size
== 64 && (types_allowed
& N_64
) != 0))
9999 g_type
= NT_untyped
;
10003 if ((thisarg
& N_KEY
) != 0)
10007 key_allowed
= thisarg
& ~N_KEY
;
10012 if ((thisarg
& N_EQK
) == 0)
10014 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
10016 if ((given_type
& types_allowed
) == 0)
10018 first_error (_("bad type in Neon instruction"));
10024 enum neon_el_type mod_k_type
= k_type
;
10025 unsigned mod_k_size
= k_size
;
10026 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
10027 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
10029 first_error (_("inconsistent types in Neon instruction"));
10037 return inst
.vectype
.el
[key_el
];
10040 /* Fix up Neon data-processing instructions, ORing in the correct bits for
10041 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
10044 neon_dp_fixup (unsigned i
)
10048 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3).  */

static int
neon_logbits (unsigned x)
{
  /* ffs returns the 1-based index of the lowest set bit, so a power of
     two 1 << n yields n + 1; subtracting 4 maps 8..64 onto 0..3.  */
  int lowest_set = ffs (x);

  return lowest_set - 4;
}
10071 #define LOW4(R) ((R) & 0xf)
10072 #define HI1(R) (((R) >> 4) & 1)
10074 /* Encode insns with bit pattern:
10076 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
10077 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
10079 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
10080 different meaning for some instruction. */
10083 neon_three_same (int isquad
, int ubit
, int size
)
10085 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10086 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10087 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
10088 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
10089 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
10090 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
10091 inst
.instruction
|= (isquad
!= 0) << 6;
10092 inst
.instruction
|= (ubit
!= 0) << 24;
10094 inst
.instruction
|= neon_logbits (size
) << 20;
10096 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10099 /* Encode instructions of the form:
10101 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
10102 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
10104 Don't write size if SIZE == -1. */
10107 neon_two_same (int qbit
, int ubit
, int size
)
10109 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10110 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10111 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10112 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10113 inst
.instruction
|= (qbit
!= 0) << 6;
10114 inst
.instruction
|= (ubit
!= 0) << 24;
10117 inst
.instruction
|= neon_logbits (size
) << 18;
10119 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10122 /* Neon instruction encoders, in approximate order of appearance. */
10125 do_neon_dyadic_i_su (void)
10127 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10128 struct neon_type_el et
= neon_check_type (3, rs
,
10129 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
10130 neon_three_same (rs
== NS_QQQ
, et
.type
== NT_unsigned
, et
.size
);
10134 do_neon_dyadic_i64_su (void)
10136 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10137 struct neon_type_el et
= neon_check_type (3, rs
,
10138 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
10139 neon_three_same (rs
== NS_QQQ
, et
.type
== NT_unsigned
, et
.size
);
10143 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
10146 unsigned size
= et
.size
>> 3;
10147 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10148 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10149 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10150 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10151 inst
.instruction
|= (isquad
!= 0) << 6;
10152 inst
.instruction
|= immbits
<< 16;
10153 inst
.instruction
|= (size
>> 3) << 7;
10154 inst
.instruction
|= (size
& 0x7) << 19;
10156 inst
.instruction
|= (uval
!= 0) << 24;
10158 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10162 do_neon_shl_imm (void)
10164 if (!inst
.operands
[2].isreg
)
10166 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10167 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
10168 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10169 neon_imm_shift (FALSE
, 0, rs
== NS_QQI
, et
, inst
.operands
[2].imm
);
10173 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10174 struct neon_type_el et
= neon_check_type (3, rs
,
10175 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
10176 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10177 neon_three_same (rs
== NS_QQQ
, et
.type
== NT_unsigned
, et
.size
);
10182 do_neon_qshl_imm (void)
10184 if (!inst
.operands
[2].isreg
)
10186 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10187 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
10188 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10189 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, rs
== NS_QQI
, et
,
10190 inst
.operands
[2].imm
);
10194 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10195 struct neon_type_el et
= neon_check_type (3, rs
,
10196 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
10197 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10198 neon_three_same (rs
== NS_QQQ
, et
.type
== NT_unsigned
, et
.size
);
10203 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
10205 /* Handle .I8 and .I64 as pseudo-instructions. */
10209 /* Unfortunately, this will make everything apart from zero out-of-range.
10210 FIXME is this the intended semantics? There doesn't seem much point in
10211 accepting .I8 if so. */
10212 immediate
|= immediate
<< 8;
10216 /* Similarly, anything other than zero will be replicated in bits [63:32],
10217 which probably isn't want we want if we specified .I64. */
10218 if (immediate
!= 0)
10219 goto bad_immediate
;
10225 if (immediate
== (immediate
& 0x000000ff))
10227 *immbits
= immediate
;
10228 return (size
== 16) ? 0x9 : 0x1;
10230 else if (immediate
== (immediate
& 0x0000ff00))
10232 *immbits
= immediate
>> 8;
10233 return (size
== 16) ? 0xb : 0x3;
10235 else if (immediate
== (immediate
& 0x00ff0000))
10237 *immbits
= immediate
>> 16;
10240 else if (immediate
== (immediate
& 0xff000000))
10242 *immbits
= immediate
>> 24;
10247 first_error (_("immediate value out of range"));
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D, i.e. each byte of IMM is either all-zeros or all-ones.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int i;

  for (i = 0; i < 4; i++)
    {
      unsigned byte = (imm >> (i * 8)) & 0xff;

      if (byte != 0 && byte != 0xff)
        return 0;
    }

  return 1;
}
/* For immediate of above form, return 0bABCD: collapse bit 0 of each byte
   into one result bit per byte.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1) << byte;

  return result;
}
/* Compress quarter-float representation to 0b...000 abcdefgh: the sign bit
   and the top 7 mantissa/exponent bits of a 32-bit float image.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;  /* bits 19-25 -> result bits 0-6 */
  unsigned sign = (imm >> 24) & 0x80;  /* bit 31 -> result bit 7 */

  return low7 | sign;
}
10280 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
10281 the instruction. *OP is passed as the initial value of the op field, and
10282 may be set to a different value depending on the constant (i.e.
10283 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
10287 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, unsigned *immbits
,
10288 int *op
, int size
, enum neon_el_type type
)
10290 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
10292 if (size
!= 32 || *op
== 1)
10294 *immbits
= neon_qfloat_bits (immlo
);
10297 else if (size
== 64 && neon_bits_same_in_bytes (immhi
)
10298 && neon_bits_same_in_bytes (immlo
))
10300 /* Check this one first so we don't have to bother with immhi in later
10304 *immbits
= (neon_squash_bits (immhi
) << 4) | neon_squash_bits (immlo
);
10308 else if (immhi
!= 0)
10310 else if (immlo
== (immlo
& 0x000000ff))
10312 /* 64-bit case was already handled. Don't allow MVN with 8-bit
10314 if ((size
!= 8 && size
!= 16 && size
!= 32)
10315 || (size
== 8 && *op
== 1))
10318 return (size
== 8) ? 0xe : (size
== 16) ? 0x8 : 0x0;
10320 else if (immlo
== (immlo
& 0x0000ff00))
10322 if (size
!= 16 && size
!= 32)
10324 *immbits
= immlo
>> 8;
10325 return (size
== 16) ? 0xa : 0x2;
10327 else if (immlo
== (immlo
& 0x00ff0000))
10331 *immbits
= immlo
>> 16;
10334 else if (immlo
== (immlo
& 0xff000000))
10338 *immbits
= immlo
>> 24;
10341 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
10345 *immbits
= (immlo
>> 8) & 0xff;
10348 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
10352 *immbits
= (immlo
>> 16) & 0xff;
10359 /* Write immediate bits [7:0] to the following locations:
10361 |28/24|23 19|18 16|15 4|3 0|
10362 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
10364 This function is used by VMOV/VMVN/VORR/VBIC. */
10367 neon_write_immbits (unsigned immbits
)
10369 inst
.instruction
|= immbits
& 0xf;
10370 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
10371 inst
.instruction
|= ((immbits
>> 7) & 0x1) << 24;
10374 /* Invert low-order SIZE bits of XHI:XLO. */
10377 neon_invert_size (unsigned *xlo
, unsigned *xhi
, int size
)
10379 unsigned immlo
= xlo
? *xlo
: 0;
10380 unsigned immhi
= xhi
? *xhi
: 0;
10385 immlo
= (~immlo
) & 0xff;
10389 immlo
= (~immlo
) & 0xffff;
10393 immhi
= (~immhi
) & 0xffffffff;
10394 /* fall through. */
10397 immlo
= (~immlo
) & 0xffffffff;
10412 do_neon_logic (void)
10414 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
10416 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10417 neon_check_type (3, rs
, N_IGNORE_TYPE
);
10418 /* U bit and size field were set as part of the bitmask. */
10419 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10420 neon_three_same (rs
== NS_QQQ
, 0, -1);
10424 enum neon_shape rs
= neon_check_shape (NS_DI_QI
);
10425 struct neon_type_el et
= neon_check_type (1, rs
, N_I8
| N_I16
| N_I32
10427 enum neon_opc opcode
= inst
.instruction
& 0x0fffffff;
10431 if (et
.type
== NT_invtype
)
10434 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10439 cmode
= neon_cmode_for_logic_imm (inst
.operands
[1].imm
, &immbits
,
10444 cmode
= neon_cmode_for_logic_imm (inst
.operands
[1].imm
, &immbits
,
10449 /* Pseudo-instruction for VBIC. */
10450 immbits
= inst
.operands
[1].imm
;
10451 neon_invert_size (&immbits
, 0, et
.size
);
10452 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
10456 /* Pseudo-instruction for VORR. */
10457 immbits
= inst
.operands
[1].imm
;
10458 neon_invert_size (&immbits
, 0, et
.size
);
10459 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
10469 inst
.instruction
|= (rs
== NS_QI
) << 6;
10470 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10471 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10472 inst
.instruction
|= cmode
<< 8;
10473 neon_write_immbits (immbits
);
10475 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10480 do_neon_bitfield (void)
10482 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10483 neon_check_type (3, rs
, N_IGNORE_TYPE
);
10484 neon_three_same (rs
== NS_QQQ
, 0, -1);
10488 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
10491 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10492 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
10494 if (et
.type
== NT_float
)
10496 inst
.instruction
= NEON_ENC_FLOAT (inst
.instruction
);
10497 neon_three_same (rs
== NS_QQQ
, 0, -1);
10501 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10502 neon_three_same (rs
== NS_QQQ
, et
.type
== ubit_meaning
, et
.size
);
10507 do_neon_dyadic_if_su (void)
10509 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
10513 do_neon_dyadic_if_su_d (void)
10515 /* This version only allow D registers, but that constraint is enforced during
10516 operand parsing so we don't need to do anything extra here. */
10517 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
10521 do_neon_dyadic_if_i (void)
10523 neon_dyadic_misc (NT_unsigned
, N_IF_32
, 0);
10527 do_neon_dyadic_if_i_d (void)
10529 neon_dyadic_misc (NT_unsigned
, N_IF_32
, 0);
10533 do_neon_addsub_if_i (void)
10535 /* The "untyped" case can't happen. Do this to stop the "U" bit being
10536 affected if we specify unsigned args. */
10537 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
10540 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
10542 V<op> A,B (A is operand 0, B is operand 2)
10547 so handle that case specially. */
10550 neon_exchange_operands (void)
10552 void *scratch
= alloca (sizeof (inst
.operands
[0]));
10553 if (inst
.operands
[1].present
)
10555 /* Swap operands[1] and operands[2]. */
10556 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
10557 inst
.operands
[1] = inst
.operands
[2];
10558 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
10562 inst
.operands
[1] = inst
.operands
[2];
10563 inst
.operands
[2] = inst
.operands
[0];
10568 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
10570 if (inst
.operands
[2].isreg
)
10573 neon_exchange_operands ();
10574 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
10578 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10579 struct neon_type_el et
= neon_check_type (2, rs
,
10580 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
10582 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10583 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10584 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10585 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10586 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10587 inst
.instruction
|= (rs
== NS_QQI
) << 6;
10588 inst
.instruction
|= (et
.type
== NT_float
) << 10;
10589 inst
.instruction
|= neon_logbits (et
.size
) << 18;
10591 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10598 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
10602 do_neon_cmp_inv (void)
10604 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
10610 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
10613 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
10614 scalars, which are encoded in 5 bits, M : Rm.
10615 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
10616 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
10620 neon_scalar_for_mul (unsigned scalar
, unsigned elsize
)
10622 unsigned regno
= NEON_SCALAR_REG (scalar
);
10623 unsigned elno
= NEON_SCALAR_INDEX (scalar
);
10628 if (regno
> 7 || elno
> 3)
10630 return regno
| (elno
<< 3);
10633 if (regno
> 15 || elno
> 1)
10635 return regno
| (elno
<< 4);
10639 first_error (_("scalar out of range for multiply instruction"));
10645 /* Encode multiply / multiply-accumulate scalar instructions. */
10648 neon_mul_mac (struct neon_type_el et
, int ubit
)
10652 /* Give a more helpful error message if we have an invalid type. */
10653 if (et
.type
== NT_invtype
)
10656 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
10657 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10658 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10659 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
10660 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
10661 inst
.instruction
|= LOW4 (scalar
);
10662 inst
.instruction
|= HI1 (scalar
) << 5;
10663 inst
.instruction
|= (et
.type
== NT_float
) << 8;
10664 inst
.instruction
|= neon_logbits (et
.size
) << 20;
10665 inst
.instruction
|= (ubit
!= 0) << 24;
10667 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10671 do_neon_mac_maybe_scalar (void)
10673 if (inst
.operands
[2].isscalar
)
10675 enum neon_shape rs
= neon_check_shape (NS_DDS_QQS
);
10676 struct neon_type_el et
= neon_check_type (3, rs
,
10677 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
10678 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
10679 neon_mul_mac (et
, rs
== NS_QQS
);
10682 do_neon_dyadic_if_i ();
10688 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10689 struct neon_type_el et
= neon_check_type (3, rs
,
10690 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
10691 neon_three_same (rs
== NS_QQQ
, 0, et
.size
);
10694 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
10695 same types as the MAC equivalents. The polynomial type for this instruction
10696 is encoded the same as the integer type. */
10701 if (inst
.operands
[2].isscalar
)
10702 do_neon_mac_maybe_scalar ();
10704 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
10708 do_neon_qdmulh (void)
10710 if (inst
.operands
[2].isscalar
)
10712 enum neon_shape rs
= neon_check_shape (NS_DDS_QQS
);
10713 struct neon_type_el et
= neon_check_type (3, rs
,
10714 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
10715 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
10716 neon_mul_mac (et
, rs
== NS_QQS
);
10720 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10721 struct neon_type_el et
= neon_check_type (3, rs
,
10722 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
10723 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10724 /* The U bit (rounding) comes from bit mask. */
10725 neon_three_same (rs
== NS_QQQ
, 0, et
.size
);
10730 do_neon_fcmp_absolute (void)
10732 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10733 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
10734 /* Size field comes from bit mask. */
10735 neon_three_same (rs
== NS_QQQ
, 1, -1);
/* Inverted absolute float compare: swap the operands, then encode as the
   non-inverted form.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
10746 do_neon_step (void)
10748 enum neon_shape rs
= neon_check_shape (NS_DDD_QQQ
);
10749 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
10750 neon_three_same (rs
== NS_QQQ
, 0, -1);
10754 do_neon_abs_neg (void)
10756 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
10757 struct neon_type_el et
= neon_check_type (3, rs
,
10758 N_EQK
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
10759 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10760 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10761 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10762 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10763 inst
.instruction
|= (rs
== NS_QQ
) << 6;
10764 inst
.instruction
|= (et
.type
== NT_float
) << 10;
10765 inst
.instruction
|= neon_logbits (et
.size
) << 18;
10767 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10773 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10774 struct neon_type_el et
= neon_check_type (2, rs
,
10775 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
10776 int imm
= inst
.operands
[2].imm
;
10777 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
10778 _("immediate out of range for insert"));
10779 neon_imm_shift (FALSE
, 0, rs
== NS_QQI
, et
, imm
);
10785 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10786 struct neon_type_el et
= neon_check_type (2, rs
,
10787 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
10788 int imm
= inst
.operands
[2].imm
;
10789 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
10790 _("immediate out of range for insert"));
10791 neon_imm_shift (FALSE
, 0, rs
== NS_QQI
, et
, et
.size
- imm
);
10795 do_neon_qshlu_imm (void)
10797 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10798 struct neon_type_el et
= neon_check_type (2, rs
,
10799 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
10800 int imm
= inst
.operands
[2].imm
;
10801 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
10802 _("immediate out of range for shift"));
10803 /* Only encodes the 'U present' variant of the instruction.
10804 In this case, signed types have OP (bit 8) set to 0.
10805 Unsigned types have OP set to 1. */
10806 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
10807 /* The rest of the bits are the same as other immediate shifts. */
10808 neon_imm_shift (FALSE
, 0, rs
== NS_QQI
, et
, imm
);
10812 do_neon_qmovn (void)
10814 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
10815 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
10816 /* Saturating move where operands can be signed or unsigned, and the
10817 destination has the same signedness. */
10818 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10819 if (et
.type
== NT_unsigned
)
10820 inst
.instruction
|= 0xc0;
10822 inst
.instruction
|= 0x80;
10823 neon_two_same (0, 1, et
.size
/ 2);
10827 do_neon_qmovun (void)
10829 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
10830 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
10831 /* Saturating move with unsigned results. Operands must be signed. */
10832 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10833 neon_two_same (0, 1, et
.size
/ 2);
10837 do_neon_rshift_sat_narrow (void)
10839 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10840 or unsigned. If operands are unsigned, results must also be unsigned. */
10841 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
10842 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
10843 int imm
= inst
.operands
[2].imm
;
10844 /* This gets the bounds check, size encoding and immediate bits calculation
10848 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
10849 VQMOVN.I<size> <Dd>, <Qm>. */
10852 inst
.operands
[2].present
= 0;
10853 inst
.instruction
= N_MNEM_vqmovn
;
10858 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
10859 _("immediate out of range"));
10860 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
10864 do_neon_rshift_sat_narrow_u (void)
10866 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10867 or unsigned. If operands are unsigned, results must also be unsigned. */
10868 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
10869 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
10870 int imm
= inst
.operands
[2].imm
;
10871 /* This gets the bounds check, size encoding and immediate bits calculation
10875 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
10876 VQMOVUN.I<size> <Dd>, <Qm>. */
10879 inst
.operands
[2].present
= 0;
10880 inst
.instruction
= N_MNEM_vqmovun
;
10885 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
10886 _("immediate out of range"));
10887 /* FIXME: The manual is kind of unclear about what value U should have in
10888 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
10890 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
10894 do_neon_movn (void)
10896 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
10897 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
10898 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10899 neon_two_same (0, 1, et
.size
/ 2);
10903 do_neon_rshift_narrow (void)
10905 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
10906 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
10907 int imm
= inst
.operands
[2].imm
;
10908 /* This gets the bounds check, size encoding and immediate bits calculation
10912 /* If immediate is zero then we are a pseudo-instruction for
10913 VMOVN.I<size> <Dd>, <Qm> */
10916 inst
.operands
[2].present
= 0;
10917 inst
.instruction
= N_MNEM_vmovn
;
10922 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
10923 _("immediate out of range for narrowing operation"));
10924 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
10928 do_neon_shll (void)
10930 /* FIXME: Type checking when lengthening. */
10931 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
10932 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
10933 unsigned imm
= inst
.operands
[2].imm
;
10935 if (imm
== et
.size
)
10937 /* Maximum shift variant. */
10938 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
10939 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10940 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10941 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10942 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10943 inst
.instruction
|= neon_logbits (et
.size
) << 18;
10945 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
10949 /* A more-specific type check for non-max versions. */
10950 et
= neon_check_type (2, NS_QDI
,
10951 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
10952 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10953 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
10957 /* Check the various types for the VCVT instruction, and return the one that
10958 the current instruction is. */
10961 neon_cvt_flavour (enum neon_shape rs
)
10963 #define CVT_VAR(C,X,Y) \
10964 et = neon_check_type (2, rs, (X), (Y)); \
10965 if (et.type != NT_invtype) \
10967 inst.error = NULL; \
10970 struct neon_type_el et
;
10972 CVT_VAR (0, N_S32
, N_F32
);
10973 CVT_VAR (1, N_U32
, N_F32
);
10974 CVT_VAR (2, N_F32
, N_S32
);
10975 CVT_VAR (3, N_F32
, N_U32
);
10984 /* Fixed-point conversion with #0 immediate is encoded as an integer
10986 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0)
10988 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
10989 int flavour
= neon_cvt_flavour (rs
);
10990 unsigned immbits
= 32 - inst
.operands
[2].imm
;
10991 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
10992 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
10994 inst
.instruction
|= enctab
[flavour
];
10995 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
10996 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
10997 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
10998 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
10999 inst
.instruction
|= (rs
== NS_QQI
) << 6;
11000 inst
.instruction
|= 1 << 21;
11001 inst
.instruction
|= immbits
<< 16;
11005 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11006 int flavour
= neon_cvt_flavour (rs
);
11007 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
11008 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11010 inst
.instruction
|= enctab
[flavour
];
11011 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11012 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11013 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11014 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11015 inst
.instruction
|= (rs
== NS_QQ
) << 6;
11016 inst
.instruction
|= 2 << 18;
11018 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11022 neon_move_immediate (void)
11024 enum neon_shape rs
= neon_check_shape (NS_DI_QI
);
11025 struct neon_type_el et
= neon_check_type (1, rs
,
11026 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
);
11027 unsigned immlo
, immhi
= 0, immbits
;
11030 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
11031 op
= (inst
.instruction
& (1 << 5)) != 0;
11033 immlo
= inst
.operands
[1].imm
;
11034 if (inst
.operands
[1].regisimm
)
11035 immhi
= inst
.operands
[1].reg
;
11037 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
11038 _("immediate has bits set outside the operand size"));
11040 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, &immbits
, &op
,
11041 et
.size
, et
.type
)) == FAIL
)
11043 /* Invert relevant bits only. */
11044 neon_invert_size (&immlo
, &immhi
, et
.size
);
11045 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
11046 with one or the other; those cases are caught by
11047 neon_cmode_for_move_imm. */
11049 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, &immbits
, &op
,
11050 et
.size
, et
.type
)) == FAIL
)
11052 first_error (_("immediate out of range"));
11057 inst
.instruction
&= ~(1 << 5);
11058 inst
.instruction
|= op
<< 5;
11060 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11061 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11062 inst
.instruction
|= (rs
== NS_QI
) << 6;
11063 inst
.instruction
|= cmode
<< 8;
11065 neon_write_immbits (immbits
);
11071 if (inst
.operands
[1].isreg
)
11073 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11075 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11076 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11077 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11078 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11079 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11080 inst
.instruction
|= (rs
== NS_QQ
) << 6;
11084 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11085 neon_move_immediate ();
11088 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11091 /* Encode instructions of form:
11093 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11094 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
11099 neon_mixed_length (struct neon_type_el et
, unsigned size
)
11101 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11102 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11103 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
11104 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
11105 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
11106 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
11107 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
11108 inst
.instruction
|= neon_logbits (size
) << 20;
11110 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11114 do_neon_dyadic_long (void)
11116 /* FIXME: Type checking for lengthening op. */
11117 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
11118 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
11119 neon_mixed_length (et
, et
.size
);
11123 do_neon_abal (void)
11125 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
11126 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
11127 neon_mixed_length (et
, et
.size
);
11131 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
11133 if (inst
.operands
[2].isscalar
)
11135 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
11136 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
11137 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
11138 neon_mul_mac (et
, et
.type
== NT_unsigned
);
11142 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
11143 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
11144 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11145 neon_mixed_length (et
, et
.size
);
11150 do_neon_mac_maybe_scalar_long (void)
11152 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
11156 do_neon_dyadic_wide (void)
11158 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
11159 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
11160 neon_mixed_length (et
, et
.size
);
11164 do_neon_dyadic_narrow (void)
11166 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
11167 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
11168 neon_mixed_length (et
, et
.size
/ 2);
11172 do_neon_mul_sat_scalar_long (void)
11174 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
11178 do_neon_vmull (void)
11180 if (inst
.operands
[2].isscalar
)
11181 do_neon_mac_maybe_scalar_long ();
11184 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
11185 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_KEY
);
11186 if (et
.type
== NT_poly
)
11187 inst
.instruction
= NEON_ENC_POLY (inst
.instruction
);
11189 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11190 /* For polynomial encoding, size field must be 0b00 and the U bit must be
11191 zero. Should be OK as-is. */
11192 neon_mixed_length (et
, et
.size
);
11199 enum neon_shape rs
= neon_check_shape (NS_DDDI_QQQI
);
11200 struct neon_type_el et
= neon_check_type (3, rs
,
11201 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
11202 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
11203 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11204 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11205 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
11206 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
11207 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
11208 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
11209 inst
.instruction
|= (rs
== NS_QQQI
) << 6;
11210 inst
.instruction
|= imm
<< 8;
11212 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11218 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11219 struct neon_type_el et
= neon_check_type (2, rs
,
11220 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
11221 unsigned op
= (inst
.instruction
>> 7) & 3;
11222 /* N (width of reversed regions) is encoded as part of the bitmask. We
11223 extract it here to check the elements to be reversed are smaller.
11224 Otherwise we'd get a reserved instruction. */
11225 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
11226 assert (elsize
!= 0);
11227 constraint (et
.size
>= elsize
,
11228 _("elements must be smaller than reversal region"));
11229 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11235 if (inst
.operands
[1].isscalar
)
11237 enum neon_shape rs
= neon_check_shape (NS_DS_QS
);
11238 struct neon_type_el et
= neon_check_type (2, rs
,
11239 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
11240 unsigned sizebits
= et
.size
>> 3;
11241 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
11242 int logsize
= neon_logbits (et
.size
);
11243 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
11244 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
11245 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11246 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11247 inst
.instruction
|= LOW4 (dm
);
11248 inst
.instruction
|= HI1 (dm
) << 5;
11249 inst
.instruction
|= (rs
== NS_QS
) << 6;
11250 inst
.instruction
|= x
<< 17;
11251 inst
.instruction
|= sizebits
<< 16;
11253 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11257 enum neon_shape rs
= neon_check_shape (NS_DR_QR
);
11258 struct neon_type_el et
= neon_check_type (1, rs
,
11259 N_8
| N_16
| N_32
| N_KEY
);
11260 unsigned save_cond
= inst
.instruction
& 0xf0000000;
11261 /* Duplicate ARM register to lanes of vector. */
11262 inst
.instruction
= NEON_ENC_ARMREG (inst
.instruction
);
11265 case 8: inst
.instruction
|= 0x400000; break;
11266 case 16: inst
.instruction
|= 0x000020; break;
11267 case 32: inst
.instruction
|= 0x000000; break;
11270 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
11271 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
11272 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
11273 inst
.instruction
|= (rs
== NS_QR
) << 21;
11274 /* The encoding for this instruction is identical for the ARM and Thumb
11275 variants, except for the condition field. */
11277 inst
.instruction
|= 0xe0000000;
11279 inst
.instruction
|= save_cond
;
11283 /* VMOV has particularly many variations. It can be one of:
11284 0. VMOV<c><q> <Qd>, <Qm>
11285 1. VMOV<c><q> <Dd>, <Dm>
11286 (Register operations, which are VORR with Rm = Rn.)
11287 2. VMOV<c><q>.<dt> <Qd>, #<imm>
11288 3. VMOV<c><q>.<dt> <Dd>, #<imm>
11290 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
11291 (ARM register to scalar.)
11292 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
11293 (Two ARM registers to vector.)
11294 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
11295 (Scalar to ARM register.)
11296 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
11297 (Vector to two ARM registers.)
11299 We should have just enough information to be able to disambiguate most of
11300 these, apart from "Two ARM registers to vector" and "Vector to two ARM
11301 registers" cases. For these, abuse the .regisimm operand field to signify a
11304 All the encoded bits are hardcoded by this function.
11306 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
11307 can specify a type where it doesn't make sense to, and is ignored).
11313 int nargs
= inst
.operands
[0].present
+ inst
.operands
[1].present
11314 + inst
.operands
[2].present
;
11315 unsigned save_cond
= thumb_mode
? 0xe0000000 : inst
.instruction
& 0xf0000000;
11320 /* Cases 0, 1, 2, 3, 4, 6. */
11321 if (inst
.operands
[1].isscalar
)
11324 struct neon_type_el et
= neon_check_type (2, NS_IGNORE
,
11325 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
11326 unsigned logsize
= neon_logbits (et
.size
);
11327 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
11328 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
11329 unsigned abcdebits
= 0;
11331 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
11332 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
11336 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
11337 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
11338 case 32: abcdebits
= 0x00; break;
11342 abcdebits
|= x
<< logsize
;
11343 inst
.instruction
= save_cond
;
11344 inst
.instruction
|= 0xe100b10;
11345 inst
.instruction
|= LOW4 (dn
) << 16;
11346 inst
.instruction
|= HI1 (dn
) << 7;
11347 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11348 inst
.instruction
|= (abcdebits
& 3) << 5;
11349 inst
.instruction
|= (abcdebits
>> 2) << 21;
11351 else if (inst
.operands
[1].isreg
)
11353 /* Cases 0, 1, 4. */
11354 if (inst
.operands
[0].isscalar
)
11357 unsigned bcdebits
= 0;
11358 struct neon_type_el et
= neon_check_type (2, NS_IGNORE
,
11359 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
11360 int logsize
= neon_logbits (et
.size
);
11361 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
11362 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
11364 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
11365 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
11369 case 8: bcdebits
= 0x8; break;
11370 case 16: bcdebits
= 0x1; break;
11371 case 32: bcdebits
= 0x0; break;
11375 bcdebits
|= x
<< logsize
;
11376 inst
.instruction
= save_cond
;
11377 inst
.instruction
|= 0xe000b10;
11378 inst
.instruction
|= LOW4 (dn
) << 16;
11379 inst
.instruction
|= HI1 (dn
) << 7;
11380 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
11381 inst
.instruction
|= (bcdebits
& 3) << 5;
11382 inst
.instruction
|= (bcdebits
>> 2) << 21;
11387 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11388 /* The architecture manual I have doesn't explicitly state which
11389 value the U bit should have for register->register moves, but
11390 the equivalent VORR instruction has U = 0, so do that. */
11391 inst
.instruction
= 0x0200110;
11392 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11393 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11394 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11395 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11396 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
11397 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
11398 inst
.instruction
|= (rs
== NS_QQ
) << 6;
11400 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11406 inst
.instruction
= 0x0800010;
11407 neon_move_immediate ();
11408 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11414 if (inst
.operands
[0].regisimm
)
11417 inst
.instruction
= save_cond
;
11418 inst
.instruction
|= 0xc400b10;
11419 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
11420 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
11421 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
11422 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11427 inst
.instruction
= save_cond
;
11428 inst
.instruction
|= 0xc500b10;
11429 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11430 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11431 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
11432 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
11442 do_neon_rshift_round_imm (void)
11444 enum neon_shape rs
= neon_check_shape (NS_DDI_QQI
);
11445 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
11446 int imm
= inst
.operands
[2].imm
;
11448 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
11451 inst
.operands
[2].present
= 0;
11456 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
11457 _("immediate out of range for shift"));
11458 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, rs
== NS_QQI
, et
,
11463 do_neon_movl (void)
11465 struct neon_type_el et
= neon_check_type (2, NS_QD
,
11466 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
11467 unsigned sizebits
= et
.size
>> 3;
11468 inst
.instruction
|= sizebits
<< 19;
11469 neon_two_same (0, et
.type
== NT_unsigned
, -1);
11475 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11476 struct neon_type_el et
= neon_check_type (2, rs
,
11477 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
11478 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11479 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11483 do_neon_zip_uzp (void)
11485 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11486 struct neon_type_el et
= neon_check_type (2, rs
,
11487 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
11488 if (rs
== NS_DD
&& et
.size
== 32)
11490 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
11491 inst
.instruction
= N_MNEM_vtrn
;
11495 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11499 do_neon_sat_abs_neg (void)
11501 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11502 struct neon_type_el et
= neon_check_type (2, rs
,
11503 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
11504 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11508 do_neon_pair_long (void)
11510 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11511 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
11512 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
11513 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
11514 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11518 do_neon_recip_est (void)
11520 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11521 struct neon_type_el et
= neon_check_type (2, rs
,
11522 N_EQK
| N_FLT
, N_F32
| N_U32
| N_KEY
);
11523 inst
.instruction
|= (et
.type
== NT_float
) << 8;
11524 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11530 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11531 struct neon_type_el et
= neon_check_type (2, rs
,
11532 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
11533 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11539 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11540 struct neon_type_el et
= neon_check_type (2, rs
,
11541 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
11542 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11548 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11549 struct neon_type_el et
= neon_check_type (2, rs
,
11550 N_EQK
| N_INT
, N_8
| N_KEY
);
11551 neon_two_same (rs
== NS_QQ
, 1, et
.size
);
11557 enum neon_shape rs
= neon_check_shape (NS_DD_QQ
);
11558 neon_two_same (rs
== NS_QQ
, 1, -1);
11562 do_neon_tbl_tbx (void)
11564 unsigned listlenbits
;
11565 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
11567 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
11569 first_error (_("bad list length for table lookup"));
11573 listlenbits
= inst
.operands
[1].imm
- 1;
11574 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11575 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11576 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
11577 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
11578 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
11579 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
11580 inst
.instruction
|= listlenbits
<< 8;
11582 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11586 do_neon_ldm_stm (void)
11588 /* P, U and L bits are part of bitmask. */
11589 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
11590 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
11592 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
11593 _("writeback (!) must be used for VLDMDB and VSTMDB"));
11595 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
11596 _("register list must contain at least 1 and at most 16 "
11599 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
11600 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
11601 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
11602 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
11604 inst
.instruction
|= offsetbits
;
11607 inst
.instruction
|= 0xe0000000;
11611 do_neon_ldr_str (void)
11613 unsigned offsetbits
;
11615 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
11617 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11618 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11620 constraint (inst
.reloc
.pc_rel
&& !is_ldr
,
11621 _("PC-relative addressing unavailable with VSTR"));
11623 constraint (!inst
.reloc
.pc_rel
&& inst
.reloc
.exp
.X_op
!= O_constant
,
11624 _("Immediate value must be a constant"));
11626 if (inst
.reloc
.exp
.X_add_number
< 0)
11629 offsetbits
= -inst
.reloc
.exp
.X_add_number
/ 4;
11632 offsetbits
= inst
.reloc
.exp
.X_add_number
/ 4;
11634 /* FIXME: Does this catch everything? */
11635 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11636 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11637 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
,
11639 constraint ((inst
.operands
[1].imm
& 3) != 0,
11640 _("Offset must be a multiple of 4"));
11641 constraint (offsetbits
!= (offsetbits
& 0xff),
11642 _("Immediate offset out of range"));
11644 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11645 inst
.instruction
|= offsetbits
& 0xff;
11646 inst
.instruction
|= offset_up
<< 23;
11649 inst
.instruction
|= 0xe0000000;
11651 if (inst
.reloc
.pc_rel
)
11654 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
11656 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
11659 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
11662 /* "interleave" version also handles non-interleaving register VLD1/VST1
11666 do_neon_ld_st_interleave (void)
11668 struct neon_type_el et
= neon_check_type (1, NS_IGNORE
,
11669 N_8
| N_16
| N_32
| N_64
);
11670 unsigned alignbits
= 0;
11672 /* The bits in this table go:
11673 0: register stride of one (0) or two (1)
11674 1,2: register list length, minus one (1, 2, 3, 4).
11675 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
11676 We use -1 for invalid entries. */
11677 const int typetable
[] =
11679 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
11680 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
11681 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
11682 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
11686 if (et
.type
== NT_invtype
)
11689 if (inst
.operands
[1].immisalign
)
11690 switch (inst
.operands
[1].imm
>> 8)
11692 case 64: alignbits
= 1; break;
11694 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
11695 goto bad_alignment
;
11699 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
11700 goto bad_alignment
;
11705 first_error (_("bad alignment"));
11709 inst
.instruction
|= alignbits
<< 4;
11710 inst
.instruction
|= neon_logbits (et
.size
) << 6;
11712 /* Bits [4:6] of the immediate in a list specifier encode register stride
11713 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
11714 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
11715 up the right value for "type" in a table based on this value and the given
11716 list style, then stick it back. */
11717 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
11718 | (((inst
.instruction
>> 8) & 3) << 3);
11720 typebits
= typetable
[idx
];
11722 constraint (typebits
== -1, _("bad list type for instruction"));
11724 inst
.instruction
&= ~0xf00;
11725 inst
.instruction
|= typebits
<< 8;
11728 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
11729 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
11730 otherwise. The variable arguments are a list of pairs of legal (size, align)
11731 values, terminated with -1. */
11734 neon_alignment_bit (int size
, int align
, int *do_align
, ...)
11737 int result
= FAIL
, thissize
, thisalign
;
11739 if (!inst
.operands
[1].immisalign
)
11745 va_start (ap
, do_align
);
11749 thissize
= va_arg (ap
, int);
11750 if (thissize
== -1)
11752 thisalign
= va_arg (ap
, int);
11754 if (size
== thissize
&& align
== thisalign
)
11757 while (result
!= SUCCESS
);
11761 if (result
== SUCCESS
)
11764 first_error (_("unsupported alignment for instruction"));
11770 do_neon_ld_st_lane (void)
11772 struct neon_type_el et
= neon_check_type (1, NS_IGNORE
, N_8
| N_16
| N_32
);
11773 int align_good
, do_align
= 0;
11774 int logsize
= neon_logbits (et
.size
);
11775 int align
= inst
.operands
[1].imm
>> 8;
11776 int n
= (inst
.instruction
>> 8) & 3;
11777 int max_el
= 64 / et
.size
;
11779 if (et
.type
== NT_invtype
)
11782 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
11783 _("bad list length"));
11784 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
11785 _("scalar index out of range"));
11786 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
11788 _("stride of 2 unavailable when element size is 8"));
11792 case 0: /* VLD1 / VST1. */
11793 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 16, 16,
11795 if (align_good
== FAIL
)
11799 unsigned alignbits
= 0;
11802 case 16: alignbits
= 0x1; break;
11803 case 32: alignbits
= 0x3; break;
11806 inst
.instruction
|= alignbits
<< 4;
11810 case 1: /* VLD2 / VST2. */
11811 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 16, 16, 32,
11813 if (align_good
== FAIL
)
11816 inst
.instruction
|= 1 << 4;
11819 case 2: /* VLD3 / VST3. */
11820 constraint (inst
.operands
[1].immisalign
,
11821 _("can't use alignment with this instruction"));
11824 case 3: /* VLD4 / VST4. */
11825 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
11826 16, 64, 32, 64, 32, 128, -1);
11827 if (align_good
== FAIL
)
11831 unsigned alignbits
= 0;
11834 case 8: alignbits
= 0x1; break;
11835 case 16: alignbits
= 0x1; break;
11836 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
11839 inst
.instruction
|= alignbits
<< 4;
11846 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
11847 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
11848 inst
.instruction
|= 1 << (4 + logsize
);
11850 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
11851 inst
.instruction
|= logsize
<< 10;
11854 /* Encode single n-element structure to all lanes VLD<n> instructions. */
11857 do_neon_ld_dup (void)
11859 struct neon_type_el et
= neon_check_type (1, NS_IGNORE
, N_8
| N_16
| N_32
);
11860 int align_good
, do_align
= 0;
11862 if (et
.type
== NT_invtype
)
11865 switch ((inst
.instruction
>> 8) & 3)
11867 case 0: /* VLD1. */
11868 assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
11869 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
11870 &do_align
, 16, 16, 32, 32, -1);
11871 if (align_good
== FAIL
)
11873 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
11876 case 2: inst
.instruction
|= 1 << 5; break;
11877 default: first_error (_("bad list length")); return;
11879 inst
.instruction
|= neon_logbits (et
.size
) << 6;
11882 case 1: /* VLD2. */
11883 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
11884 &do_align
, 8, 16, 16, 32, 32, 64, -1);
11885 if (align_good
== FAIL
)
11887 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
11888 _("bad list length"));
11889 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
11890 inst
.instruction
|= 1 << 5;
11891 inst
.instruction
|= neon_logbits (et
.size
) << 6;
11894 case 2: /* VLD3. */
11895 constraint (inst
.operands
[1].immisalign
,
11896 _("can't use alignment with this instruction"));
11897 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
11898 _("bad list length"));
11899 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
11900 inst
.instruction
|= 1 << 5;
11901 inst
.instruction
|= neon_logbits (et
.size
) << 6;
11904 case 3: /* VLD4. */
11906 int align
= inst
.operands
[1].imm
>> 8;
11907 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
11908 16, 64, 32, 64, 32, 128, -1);
11909 if (align_good
== FAIL
)
11911 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
11912 _("bad list length"));
11913 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
11914 inst
.instruction
|= 1 << 5;
11915 if (et
.size
== 32 && align
== 128)
11916 inst
.instruction
|= 0x3 << 6;
11918 inst
.instruction
|= neon_logbits (et
.size
) << 6;
11925 inst
.instruction
|= do_align
<< 4;
11928 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
11929 apart from bits [11:4]. */
11932 do_neon_ldx_stx (void)
11934 switch (NEON_LANE (inst
.operands
[0].imm
))
11936 case NEON_INTERLEAVE_LANES
:
11937 inst
.instruction
= NEON_ENC_INTERLV (inst
.instruction
);
11938 do_neon_ld_st_interleave ();
11941 case NEON_ALL_LANES
:
11942 inst
.instruction
= NEON_ENC_DUP (inst
.instruction
);
11947 inst
.instruction
= NEON_ENC_LANE (inst
.instruction
);
11948 do_neon_ld_st_lane ();
11951 /* L bit comes from bit mask. */
11952 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11953 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11954 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11956 if (inst
.operands
[1].postind
)
11958 int postreg
= inst
.operands
[1].imm
& 0xf;
11959 constraint (!inst
.operands
[1].immisreg
,
11960 _("post-index must be a register"));
11961 constraint (postreg
== 0xd || postreg
== 0xf,
11962 _("bad register for post-index"));
11963 inst
.instruction
|= postreg
;
11965 else if (inst
.operands
[1].writeback
)
11967 inst
.instruction
|= 0xd;
11970 inst
.instruction
|= 0xf;
11973 inst
.instruction
|= 0xf9000000;
11975 inst
.instruction
|= 0xf4000000;
11979 /* Overall per-instruction processing. */
11981 /* We need to be able to fix up arbitrary expressions in some statements.
11982 This is so that we can handle symbols that are an arbitrary distance from
11983 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
11984 which returns part of an address in a form which will be valid for
11985 a data instruction. We do this by pushing the expression into a symbol
11986 in the expr_section, and creating a fix for that. */
11989 fix_new_arm (fragS
* frag
,
12004 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
, reloc
);
12008 new_fix
= fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
12013 /* Mark whether the fix is to a THUMB instruction, or an ARM
12015 new_fix
->tc_fix_data
= thumb_mode
;
12018 /* Create a frg for an instruction requiring relaxation. */
12020 output_relax_insn (void)
12027 /* The size of the instruction is unknown, so tie the debug info to the
12028 start of the instruction. */
12029 dwarf2_emit_insn (0);
12032 switch (inst
.reloc
.exp
.X_op
)
12035 sym
= inst
.reloc
.exp
.X_add_symbol
;
12036 offset
= inst
.reloc
.exp
.X_add_number
;
12040 offset
= inst
.reloc
.exp
.X_add_number
;
12043 sym
= make_expr_symbol (&inst
.reloc
.exp
);
12047 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
12048 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
12049 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
12052 /* Write a 32-bit thumb instruction to buf. */
12054 put_thumb32_insn (char * buf
, unsigned long insn
)
12056 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
12057 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
12061 output_inst (const char * str
)
12067 as_bad ("%s -- `%s'", inst
.error
, str
);
12071 output_relax_insn();
12074 if (inst
.size
== 0)
12077 to
= frag_more (inst
.size
);
12079 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
12081 assert (inst
.size
== (2 * THUMB_SIZE
));
12082 put_thumb32_insn (to
, inst
.instruction
);
12084 else if (inst
.size
> INSN_SIZE
)
12086 assert (inst
.size
== (2 * INSN_SIZE
));
12087 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
12088 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
12091 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
12093 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
12094 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
12095 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
12099 dwarf2_emit_insn (inst
.size
);
12103 /* Tag values used in struct asm_opcode's tag field. */
12106 OT_unconditional
, /* Instruction cannot be conditionalized.
12107 The ARM condition field is still 0xE. */
12108 OT_unconditionalF
, /* Instruction cannot be conditionalized
12109 and carries 0xF in its ARM condition field. */
12110 OT_csuffix
, /* Instruction takes a conditional suffix. */
12111 OT_cinfix3
, /* Instruction takes a conditional infix,
12112 beginning at character index 3. (In
12113 unified mode, it becomes a suffix.) */
12114 OT_cinfix3_deprecated
, /* The same as OT_cinfix3. This is used for
12115 tsts, cmps, cmns, and teqs. */
12116 OT_cinfix3_legacy
, /* Legacy instruction takes a conditional infix at
12117 character index 3, even in unified mode. Used for
12118 legacy instructions where suffix and infix forms
12119 may be ambiguous. */
12120 OT_csuf_or_in3
, /* Instruction takes either a conditional
12121 suffix or an infix at character index 3. */
12122 OT_odd_infix_unc
, /* This is the unconditional variant of an
12123 instruction that takes a conditional infix
12124 at an unusual position. In unified mode,
12125 this variant will accept a suffix. */
12126 OT_odd_infix_0
/* Values greater than or equal to OT_odd_infix_0
12127 are the conditional variants of instructions that
12128 take conditional infixes in unusual positions.
12129 The infix appears at character index
12130 (tag - OT_odd_infix_0). These are not accepted
12131 in unified mode. */
12134 /* Subroutine of md_assemble, responsible for looking up the primary
12135 opcode from the mnemonic the user wrote. STR points to the
12136 beginning of the mnemonic.
12138 This is not simply a hash table lookup, because of conditional
12139 variants. Most instructions have conditional variants, which are
12140 expressed with a _conditional affix_ to the mnemonic. If we were
12141 to encode each conditional variant as a literal string in the opcode
12142 table, it would have approximately 20,000 entries.
12144 Most mnemonics take this affix as a suffix, and in unified syntax,
12145 'most' is upgraded to 'all'. However, in the divided syntax, some
12146 instructions take the affix as an infix, notably the s-variants of
12147 the arithmetic instructions. Of those instructions, all but six
12148 have the infix appear after the third character of the mnemonic.
12150 Accordingly, the algorithm for looking up primary opcodes given
12153 1. Look up the identifier in the opcode table.
12154 If we find a match, go to step U.
12156 2. Look up the last two characters of the identifier in the
12157 conditions table. If we find a match, look up the first N-2
12158 characters of the identifier in the opcode table. If we
12159 find a match, go to step CE.
12161 3. Look up the fourth and fifth characters of the identifier in
12162 the conditions table. If we find a match, extract those
12163 characters from the identifier, and look up the remaining
12164 characters in the opcode table. If we find a match, go
12169 U. Examine the tag field of the opcode structure, in case this is
12170 one of the six instructions with its conditional infix in an
12171 unusual place. If it is, the tag tells us where to find the
12172 infix; look it up in the conditions table and set inst.cond
12173 accordingly. Otherwise, this is an unconditional instruction.
12174 Again set inst.cond accordingly. Return the opcode structure.
12176 CE. Examine the tag field to make sure this is an instruction that
12177 should receive a conditional suffix. If it is not, fail.
12178 Otherwise, set inst.cond from the suffix we already looked up,
12179 and return the opcode structure.
12181 CM. Examine the tag field to make sure this is an instruction that
12182 should receive a conditional infix after the third character.
12183 If it is not, fail. Otherwise, undo the edits to the current
12184 line of input and proceed as for case CE. */
12186 static const struct asm_opcode
*
12187 opcode_lookup (char **str
)
12191 const struct asm_opcode
*opcode
;
12192 const struct asm_cond
*cond
;
12195 /* Scan up to the end of the mnemonic, which must end in white space,
12196 '.' (in unified mode only), or end of string. */
12197 for (base
= end
= *str
; *end
!= '\0'; end
++)
12198 if (*end
== ' ' || (unified_syntax
&& *end
== '.'))
12204 /* Handle a possible width suffix and/or Neon type suffix. */
12211 else if (end
[1] == 'n')
12216 inst
.vectype
.elems
= 0;
12218 *str
= end
+ offset
;
12220 if (end
[offset
] == '.')
12222 /* See if we have a Neon type suffix. */
12223 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
12226 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
12232 /* Look for unaffixed or special-case affixed mnemonic. */
12233 opcode
= hash_find_n (arm_ops_hsh
, base
, end
- base
);
12237 if (opcode
->tag
< OT_odd_infix_0
)
12239 inst
.cond
= COND_ALWAYS
;
12243 if (unified_syntax
)
12244 as_warn (_("conditional infixes are deprecated in unified syntax"));
12245 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
12246 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
12249 inst
.cond
= cond
->value
;
12253 /* Cannot have a conditional suffix on a mnemonic of less than two
12255 if (end
- base
< 3)
12258 /* Look for suffixed mnemonic. */
12260 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
12261 opcode
= hash_find_n (arm_ops_hsh
, base
, affix
- base
);
12262 if (opcode
&& cond
)
12265 switch (opcode
->tag
)
12267 case OT_cinfix3_legacy
:
12268 /* Ignore conditional suffixes matched on infix only mnemonics. */
12272 case OT_cinfix3_deprecated
:
12273 case OT_odd_infix_unc
:
12274 if (!unified_syntax
)
12276 /* else fall through */
12279 case OT_csuf_or_in3
:
12280 inst
.cond
= cond
->value
;
12283 case OT_unconditional
:
12284 case OT_unconditionalF
:
12287 inst
.cond
= cond
->value
;
12291 /* delayed diagnostic */
12292 inst
.error
= BAD_COND
;
12293 inst
.cond
= COND_ALWAYS
;
12302 /* Cannot have a usual-position infix on a mnemonic of less than
12303 six characters (five would be a suffix). */
12304 if (end
- base
< 6)
12307 /* Look for infixed mnemonic in the usual position. */
12309 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
12313 memcpy (save
, affix
, 2);
12314 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
12315 opcode
= hash_find_n (arm_ops_hsh
, base
, (end
- base
) - 2);
12316 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
12317 memcpy (affix
, save
, 2);
12320 && (opcode
->tag
== OT_cinfix3
12321 || opcode
->tag
== OT_cinfix3_deprecated
12322 || opcode
->tag
== OT_csuf_or_in3
12323 || opcode
->tag
== OT_cinfix3_legacy
))
12327 && (opcode
->tag
== OT_cinfix3
12328 || opcode
->tag
== OT_cinfix3_deprecated
))
12329 as_warn (_("conditional infixes are deprecated in unified syntax"));
12331 inst
.cond
= cond
->value
;
/* Assemble one source line STR: look up the mnemonic, verify CPU/arch
   support, parse operands and emit the encoded instruction (Thumb or ARM
   path depending on thumb_mode).
   NOTE(review): extraction residue -- tokens are split across lines and
   interior source lines are dropped; all visible bytes kept verbatim except
   one fix: the diagnostic "instrunction" is corrected to "instruction".  */
12339 md_assemble (char *str
)
12342 const struct asm_opcode
* opcode
;
12344 /* Align the previous label if needed. */
12345 if (last_label_seen
!= NULL
)
12347 symbol_set_frag (last_label_seen
, frag_now
);
12348 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
12349 S_SET_SEGMENT (last_label_seen
, now_seg
);
/* Reset per-instruction state before parsing.  */
12352 memset (&inst
, '\0', sizeof (inst
));
12353 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12355 opcode
= opcode_lookup (&p
);
12358 /* It wasn't an instruction, but it might be a register alias of
12359 the form alias .req reg, or a Neon .dn/.qn directive. */
12360 if (!create_register_alias (str
, p
)
12361 && !create_neon_reg_alias (str
, p
))
12362 as_bad (_("bad instruction `%s'"), str
);
12367 if (opcode
->tag
== OT_cinfix3_deprecated
)
12368 as_warn (_("s suffix on comparison instruction is deprecated"));
/* --- Thumb path (presumably guarded by thumb_mode; the guard line was
   dropped by the extraction). --- */
12372 arm_feature_set variant
;
12374 variant
= cpu_variant
;
12375 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
12376 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
12377 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
12378 /* Check that this instruction is supported for this CPU. */
12379 if (!opcode
->tvariant
12380 || (thumb_mode
== 1
12381 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
12383 as_bad (_("selected processor does not support `%s'"), str
);
/* Pre-Thumb-2 (divided syntax) Thumb only allows conditions on branches.  */
12386 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
12387 && opcode
->tencode
!= do_t_branch
)
12389 as_bad (_("Thumb does not support conditional execution"));
12393 /* Check conditional suffixes. */
12394 if (current_it_mask
)
/* Derive the expected condition from the IT state and advance the mask.  */
12397 cond
= current_cc
^ ((current_it_mask
>> 4) & 1) ^ 1;
12398 current_it_mask
<<= 1;
12399 current_it_mask
&= 0x1f;
12400 /* The BKPT instruction is unconditional even in an IT block. */
12402 && cond
!= inst
.cond
&& opcode
->tencode
!= do_t_bkpt
)
12404 as_bad (_("incorrect condition in IT block"));
12408 else if (inst
.cond
!= COND_ALWAYS
&& opcode
->tencode
!= do_t_branch
)
/* FIX(review): diagnostic previously misspelled "instrunction".  */
12410 as_bad (_("thumb conditional instruction not in IT block"));
12414 mapping_state (MAP_THUMB
);
12415 inst
.instruction
= opcode
->tvalue
;
12417 if (!parse_operands (p
, opcode
->operands
))
12418 opcode
->tencode ();
12420 /* Clear current_it_mask at the end of an IT block. */
12421 if (current_it_mask
== 0x10)
12422 current_it_mask
= 0;
12424 if (!(inst
.error
|| inst
.relax
))
/* Opcodes 0xe800-0xffff are the first halfword of a 32-bit encoding.  */
12426 assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
12427 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
12428 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
12430 as_bad (_("cannot honor width suffix -- `%s'"), str
);
/* Record which architecture features this line actually used.  */
12434 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12435 *opcode
->tvariant
);
12436 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
12437 set those bits when Thumb-2 32-bit instructions are seen. ie.
12438 anything other than bl/blx.
12439 This is overly pessimistic for relaxable instructions. */
12440 if ((inst
.size
== 4 && (inst
.instruction
& 0xf800e800) != 0xf000e800)
12442 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
/* --- ARM path. --- */
12447 /* Check that this instruction is supported for this CPU. */
12448 if (!opcode
->avariant
||
12449 !ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
))
12451 as_bad (_("selected processor does not support `%s'"), str
);
12456 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
12460 mapping_state (MAP_ARM
);
12461 inst
.instruction
= opcode
->avalue
;
12462 if (opcode
->tag
== OT_unconditionalF
)
12463 inst
.instruction
|= 0xF << 28;
12465 inst
.instruction
|= inst
.cond
<< 28;
12466 inst
.size
= INSN_SIZE
;
12467 if (!parse_operands (p
, opcode
->operands
))
12468 opcode
->aencode ();
12469 /* Arm mode bx is marked as both v4T and v5 because it's still required
12470 on a hypothetical non-thumb v5 core. */
12471 if (ARM_CPU_HAS_FEATURE (*opcode
->avariant
, arm_ext_v4t
)
12472 || ARM_CPU_HAS_FEATURE (*opcode
->avariant
, arm_ext_v5
))
12473 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
12475 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
12476 *opcode
->avariant
);
12481 /* Various frobbings of labels and their addresses. */
/* Called at the start of each input line; forget the previously-seen label
   so md_assemble only re-anchors labels defined on the current line.  */
12484 arm_start_line_hook (void)
12486 last_label_seen
= NULL
;
12490 arm_frob_label (symbolS
* sym
)
12492 last_label_seen
= sym
;
12494 ARM_SET_THUMB (sym
, thumb_mode
);
12496 #if defined OBJ_COFF || defined OBJ_ELF
12497 ARM_SET_INTERWORK (sym
, support_interwork
);
12500 /* Note - do not allow local symbols (.Lxxx) to be labeled
12501 as Thumb functions. This is because these labels, whilst
12502 they exist inside Thumb code, are not the entry points for
12503 possible ARM->Thumb calls. Also, these labels can be used
12504 as part of a computed goto or switch statement. eg gcc
12505 can generate code that looks like this:
12507 ldr r2, [pc, .Laaa]
12517 The first instruction loads the address of the jump table.
12518 The second instruction converts a table index into a byte offset.
12519 The third instruction gets the jump address out of the table.
12520 The fourth instruction performs the jump.
12522 If the address stored at .Laaa is that of a symbol which has the
12523 Thumb_Func bit set, then the linker will arrange for this address
12524 to have the bottom bit set, which in turn would mean that the
12525 address computation performed by the third instruction would end
12526 up with the bottom bit set. Since the ARM is capable of unaligned
12527 word loads, the instruction would then load the incorrect address
12528 out of the jump table, and chaos would ensue. */
12529 if (label_is_thumb_function_name
12530 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
12531 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
12533 /* When the address of a Thumb function is taken the bottom
12534 bit of that address should be set. This will allow
12535 interworking between Arm and Thumb functions to work
12538 THUMB_SET_FUNC (sym
, 1);
12540 label_is_thumb_function_name
= FALSE
;
12544 dwarf2_emit_label (sym
);
/* Recognize a "$data:"-style marker in Thumb code (symbol names get a
   "/data" suffix appended elsewhere; see arm_canonicalize_symbol_name).
   Rewrites the marker in place and advances input_line_pointer past it.
   NOTE(review): extraction residue -- the return statements/braces were
   dropped; presumably returns nonzero when the marker matched.  */
12549 arm_data_in_code (void)
12551 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
12553 *input_line_pointer
= '/';
12554 input_line_pointer
+= 5;
12555 *input_line_pointer
= 0;
/* Strip the "/data" suffix (added by the data-in-code machinery above) from
   NAME when assembling Thumb, by truncating the string in place.  Only
   names longer than 5 characters can carry the suffix.  */
12563 arm_canonicalize_symbol_name (char * name
)
12567 if (thumb_mode
&& (len
= strlen (name
)) > 5
12568 && streq (name
+ len
- 5, "/data"))
12569 *(name
+ len
- 5) = 0;
12574 /* Table of all register names defined by default. The user can
12575 define additional names with .req. Note that all register names
12576 should appear in both upper and lowercase variants. Some registers
12577 also have mixed-case names. */
12579 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
12580 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
12581 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
12582 #define REGSET(p,t) \
12583 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
12584 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
12585 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
12586 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
12587 #define REGSETH(p,t) \
12588 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
12589 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
12590 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
12591 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
12592 #define REGSET2(p,t) \
12593 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
12594 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
12595 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
12596 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
12598 static const struct reg_entry reg_names
[] =
12600 /* ARM integer registers. */
12601 REGSET(r
, RN
), REGSET(R
, RN
),
12603 /* ATPCS synonyms. */
12604 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
12605 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
12606 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
12608 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
12609 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
12610 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
12612 /* Well-known aliases. */
12613 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
12614 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
12616 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
12617 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
12619 /* Coprocessor numbers. */
12620 REGSET(p
, CP
), REGSET(P
, CP
),
12622 /* Coprocessor register numbers. The "cr" variants are for backward
12624 REGSET(c
, CN
), REGSET(C
, CN
),
12625 REGSET(cr
, CN
), REGSET(CR
, CN
),
12627 /* FPA registers. */
12628 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
12629 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
12631 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
12632 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
12634 /* VFP SP registers. */
12635 REGSET(s
,VFS
), REGSET(S
,VFS
),
12636 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
12638 /* VFP DP Registers. */
12639 REGSET(d
,VFD
), REGSET(D
,VFD
),
12640 /* Extra Neon DP registers. */
12641 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
12643 /* Neon QP registers. */
12644 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
12646 /* VFP control registers. */
12647 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
12648 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
12650 /* Maverick DSP coprocessor registers. */
12651 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
12652 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
12654 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
12655 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
12656 REGDEF(dspsc
,0,DSPSC
),
12658 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
12659 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
12660 REGDEF(DSPSC
,0,DSPSC
),
12662 /* iWMMXt data registers - p0, c0-15. */
12663 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
12665 /* iWMMXt control registers - p1, c0-3. */
12666 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
12667 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
12668 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
12669 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
12671 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
12672 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
12673 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
12674 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
12675 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
12677 /* XScale accumulator registers. */
12678 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
12684 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
12685 within psr_required_here. */
12686 static const struct asm_psr psrs
[] =
12688 /* Backward compatibility notation. Note that "all" is no longer
12689 truly all possible PSR bits. */
12690 {"all", PSR_c
| PSR_f
},
12694 /* Individual flags. */
12699 /* Combinations of flags. */
12700 {"fs", PSR_f
| PSR_s
},
12701 {"fx", PSR_f
| PSR_x
},
12702 {"fc", PSR_f
| PSR_c
},
12703 {"sf", PSR_s
| PSR_f
},
12704 {"sx", PSR_s
| PSR_x
},
12705 {"sc", PSR_s
| PSR_c
},
12706 {"xf", PSR_x
| PSR_f
},
12707 {"xs", PSR_x
| PSR_s
},
12708 {"xc", PSR_x
| PSR_c
},
12709 {"cf", PSR_c
| PSR_f
},
12710 {"cs", PSR_c
| PSR_s
},
12711 {"cx", PSR_c
| PSR_x
},
12712 {"fsx", PSR_f
| PSR_s
| PSR_x
},
12713 {"fsc", PSR_f
| PSR_s
| PSR_c
},
12714 {"fxs", PSR_f
| PSR_x
| PSR_s
},
12715 {"fxc", PSR_f
| PSR_x
| PSR_c
},
12716 {"fcs", PSR_f
| PSR_c
| PSR_s
},
12717 {"fcx", PSR_f
| PSR_c
| PSR_x
},
12718 {"sfx", PSR_s
| PSR_f
| PSR_x
},
12719 {"sfc", PSR_s
| PSR_f
| PSR_c
},
12720 {"sxf", PSR_s
| PSR_x
| PSR_f
},
12721 {"sxc", PSR_s
| PSR_x
| PSR_c
},
12722 {"scf", PSR_s
| PSR_c
| PSR_f
},
12723 {"scx", PSR_s
| PSR_c
| PSR_x
},
12724 {"xfs", PSR_x
| PSR_f
| PSR_s
},
12725 {"xfc", PSR_x
| PSR_f
| PSR_c
},
12726 {"xsf", PSR_x
| PSR_s
| PSR_f
},
12727 {"xsc", PSR_x
| PSR_s
| PSR_c
},
12728 {"xcf", PSR_x
| PSR_c
| PSR_f
},
12729 {"xcs", PSR_x
| PSR_c
| PSR_s
},
12730 {"cfs", PSR_c
| PSR_f
| PSR_s
},
12731 {"cfx", PSR_c
| PSR_f
| PSR_x
},
12732 {"csf", PSR_c
| PSR_s
| PSR_f
},
12733 {"csx", PSR_c
| PSR_s
| PSR_x
},
12734 {"cxf", PSR_c
| PSR_x
| PSR_f
},
12735 {"cxs", PSR_c
| PSR_x
| PSR_s
},
12736 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
12737 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
12738 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
12739 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
12740 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
12741 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
12742 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
12743 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
12744 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
12745 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
12746 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
12747 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
12748 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
12749 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
12750 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
12751 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
12752 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
12753 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
12754 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
12755 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
12756 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
12757 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
12758 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
12759 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
12762 /* Table of V7M psr names. */
12763 static const struct asm_psr v7m_psrs
[] =
12776 {"basepri_max", 18},
12781 /* Table of all shift-in-operand names. */
12782 static const struct asm_shift_name shift_names
[] =
12784 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
12785 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
12786 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
12787 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
12788 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
12789 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
12792 /* Table of all explicit relocation names. */
12794 static struct reloc_entry reloc_names
[] =
12796 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
12797 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
12798 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
12799 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
12800 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
12801 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
12802 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
12803 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
12804 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
12805 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
12806 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
}
12810 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
12811 static const struct asm_cond conds
[] =
12815 {"cs", 0x2}, {"hs", 0x2},
12816 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
12830 static struct asm_barrier_opt barrier_opt_names
[] =
12838 /* Table of ARM-format instructions. */
12840 /* Macros for gluing together operand strings. N.B. In all cases
12841 other than OPS0, the trailing OP_stop comes from default
12842 zero-initialization of the unspecified elements of the array. */
12843 #define OPS0() { OP_stop, }
12844 #define OPS1(a) { OP_##a, }
12845 #define OPS2(a,b) { OP_##a,OP_##b, }
12846 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
12847 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
12848 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
12849 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
12851 /* These macros abstract out the exact format of the mnemonic table and
12852 save some repeated characters. */
12854 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
12855 #define TxCE(mnem, op, top, nops, ops, ae, te) \
12856 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
12857 THUMB_VARIANT, do_##ae, do_##te }
12859 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
12860 a T_MNEM_xyz enumerator. */
12861 #define TCE(mnem, aop, top, nops, ops, ae, te) \
12862 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
12863 #define tCE(mnem, aop, top, nops, ops, ae, te) \
12864 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12866 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
12867 infix after the third character. */
12868 #define TxC3(mnem, op, top, nops, ops, ae, te) \
12869 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
12870 THUMB_VARIANT, do_##ae, do_##te }
12871 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
12872 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
12873 THUMB_VARIANT, do_##ae, do_##te }
12874 #define TC3(mnem, aop, top, nops, ops, ae, te) \
12875 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
12876 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
12877 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
12878 #define tC3(mnem, aop, top, nops, ops, ae, te) \
12879 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12880 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
12881 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12883 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
12884 appear in the condition table. */
12885 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
12886 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
12887 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
12889 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
12890 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
12891 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
12892 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
12893 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
12894 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
12895 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
12896 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
12897 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
12898 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
12899 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
12900 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
12901 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
12902 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
12903 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
12904 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
12905 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
12906 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
12907 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
12908 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
12910 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
12911 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
12912 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
12913 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
12915 /* Mnemonic that cannot be conditionalized. The ARM condition-code
12916 field is still 0xE. Many of the Thumb variants can be executed
12917 conditionally, so this is checked separately. */
12918 #define TUE(mnem, op, top, nops, ops, ae, te) \
12919 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
12920 THUMB_VARIANT, do_##ae, do_##te }
12922 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
12923 condition code field. */
12924 #define TUF(mnem, op, top, nops, ops, ae, te) \
12925 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
12926 THUMB_VARIANT, do_##ae, do_##te }
12928 /* ARM-only variants of all the above. */
12929 #define CE(mnem, op, nops, ops, ae) \
12930 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12932 #define C3(mnem, op, nops, ops, ae) \
12933 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12935 /* Legacy mnemonics that always have conditional infix after the third
12937 #define CL(mnem, op, nops, ops, ae) \
12938 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
12939 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12941 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
12942 #define cCE(mnem, op, nops, ops, ae) \
12943 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
12945 /* Legacy coprocessor instructions where conditional infix and conditional
12946 suffix are ambiguous. For consistency this includes all FPA instructions,
12947 not just the potentially ambiguous ones. */
12948 #define cCL(mnem, op, nops, ops, ae) \
12949 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
12950 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
12952 /* Coprocessor, takes either a suffix or a position-3 infix
12953 (for an FPA corner case). */
12954 #define C3E(mnem, op, nops, ops, ae) \
12955 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
12956 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
12958 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
12959 { #m1 #m2 #m3, OPS##nops ops, \
12960 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
12961 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12963 #define CM(m1, m2, op, nops, ops, ae) \
12964 xCM_(m1, , m2, op, nops, ops, ae), \
12965 xCM_(m1, eq, m2, op, nops, ops, ae), \
12966 xCM_(m1, ne, m2, op, nops, ops, ae), \
12967 xCM_(m1, cs, m2, op, nops, ops, ae), \
12968 xCM_(m1, hs, m2, op, nops, ops, ae), \
12969 xCM_(m1, cc, m2, op, nops, ops, ae), \
12970 xCM_(m1, ul, m2, op, nops, ops, ae), \
12971 xCM_(m1, lo, m2, op, nops, ops, ae), \
12972 xCM_(m1, mi, m2, op, nops, ops, ae), \
12973 xCM_(m1, pl, m2, op, nops, ops, ae), \
12974 xCM_(m1, vs, m2, op, nops, ops, ae), \
12975 xCM_(m1, vc, m2, op, nops, ops, ae), \
12976 xCM_(m1, hi, m2, op, nops, ops, ae), \
12977 xCM_(m1, ls, m2, op, nops, ops, ae), \
12978 xCM_(m1, ge, m2, op, nops, ops, ae), \
12979 xCM_(m1, lt, m2, op, nops, ops, ae), \
12980 xCM_(m1, gt, m2, op, nops, ops, ae), \
12981 xCM_(m1, le, m2, op, nops, ops, ae), \
12982 xCM_(m1, al, m2, op, nops, ops, ae)
12984 #define UE(mnem, op, nops, ops, ae) \
12985 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
12987 #define UF(mnem, op, nops, ops, ae) \
12988 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
12990 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
12991 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
12992 use the same encoding function for each. */
12993 #define NUF(mnem, op, nops, ops, enc) \
12994 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
12995 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
12997 /* Neon data processing, version which indirects through neon_enc_tab for
12998 the various overloaded versions of opcodes. */
12999 #define nUF(mnem, op, nops, ops, enc) \
13000 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
13001 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
13003 /* Neon insn with conditional suffix for the ARM version, non-overloaded
13005 #define NCE(mnem, op, nops, ops, enc) \
13006 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x##op, ARM_VARIANT, \
13007 THUMB_VARIANT, do_##enc, do_##enc }
13009 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
13010 #define nCE(mnem, op, nops, ops, enc) \
13011 { #mnem, OPS##nops ops, OT_csuffix, N_MNEM_##op, N_MNEM_##op, \
13012 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
13016 /* Thumb-only, unconditional. */
13017 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
13019 static const struct asm_opcode insns
[] =
13021 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
13022 #define THUMB_VARIANT &arm_ext_v4t
13023 tCE(and, 0000000, and, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
13024 tC3(ands
, 0100000, ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
13025 tCE(eor
, 0200000, eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
13026 tC3(eors
, 0300000, eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
13027 tCE(sub
, 0400000, sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
13028 tC3(subs
, 0500000, subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
13029 tCE(add
, 0800000, add
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
13030 tC3(adds
, 0900000, adds
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
13031 tCE(adc
, 0a00000
, adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
13032 tC3(adcs
, 0b00000, adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
13033 tCE(sbc
, 0c00000
, sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
13034 tC3(sbcs
, 0d00000
, sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
13035 tCE(orr
, 1800000, orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
13036 tC3(orrs
, 1900000, orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
13037 tCE(bic
, 1c00000
, bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
13038 tC3(bics
, 1d00000
, bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
13040 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
13041 for setting PSR flag bits. They are obsolete in V6 and do not
13042 have Thumb equivalents. */
13043 tCE(tst
, 1100000, tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
13044 tC3w(tsts
, 1100000, tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
13045 CL(tstp
, 110f000
, 2, (RR
, SH
), cmp
),
13046 tCE(cmp
, 1500000, cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
13047 tC3w(cmps
, 1500000, cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
13048 CL(cmpp
, 150f000
, 2, (RR
, SH
), cmp
),
13049 tCE(cmn
, 1700000, cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
13050 tC3w(cmns
, 1700000, cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
13051 CL(cmnp
, 170f000
, 2, (RR
, SH
), cmp
),
13053 tCE(mov
, 1a00000
, mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
13054 tC3(movs
, 1b00000
, movs
, 2, (RR
, SH
), mov
, t_mov_cmp
),
13055 tCE(mvn
, 1e00000
, mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
13056 tC3(mvns
, 1f00000
, mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
13058 tCE(ldr
, 4100000, ldr
, 2, (RR
, ADDR
), ldst
, t_ldst
),
13059 tC3(ldrb
, 4500000, ldrb
, 2, (RR
, ADDR
), ldst
, t_ldst
),
13060 tCE(str
, 4000000, str
, 2, (RR
, ADDR
), ldst
, t_ldst
),
13061 tC3(strb
, 4400000, strb
, 2, (RR
, ADDR
), ldst
, t_ldst
),
13063 tCE(stm
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13064 tC3(stmia
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13065 tC3(stmea
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13066 tCE(ldm
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13067 tC3(ldmia
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13068 tC3(ldmfd
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13070 TCE(swi
, f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
13071 TCE(svc
, f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
13072 tCE(b
, a000000
, b
, 1, (EXPr
), branch
, t_branch
),
13073 TCE(bl
, b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
13076 tCE(adr
, 28f0000
, adr
, 2, (RR
, EXP
), adr
, t_adr
),
13077 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
13078 tCE(nop
, 1a00000
, nop
, 1, (oI255c
), nop
, t_nop
),
13080 /* Thumb-compatibility pseudo ops. */
13081 tCE(lsl
, 1a00000
, lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
13082 tC3(lsls
, 1b00000
, lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
13083 tCE(lsr
, 1a00020
, lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
13084 tC3(lsrs
, 1b00020
, lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
13085 tCE(asr
, 1a00040
, asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
13086 tC3(asrs
, 1b00040
, asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
13087 tCE(ror
, 1a00060
, ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
13088 tC3(rors
, 1b00060
, rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
13089 tCE(neg
, 2600000, neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
13090 tC3(negs
, 2700000, negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
13091 tCE(push
, 92d0000
, push
, 1, (REGLST
), push_pop
, t_push_pop
),
13092 tCE(pop
, 8bd0000
, pop
, 1, (REGLST
), push_pop
, t_push_pop
),
13094 #undef THUMB_VARIANT
13095 #define THUMB_VARIANT &arm_ext_v6
13096 TCE(cpy
, 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
13098 /* V1 instructions with no Thumb analogue prior to V6T2. */
13099 #undef THUMB_VARIANT
13100 #define THUMB_VARIANT &arm_ext_v6t2
13101 TCE(rsb
, 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
13102 TC3(rsbs
, 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
13103 TCE(teq
, 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
13104 TC3w(teqs
, 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
13105 CL(teqp
, 130f000
, 2, (RR
, SH
), cmp
),
13107 TC3(ldrt
, 4300000, f8500e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
13108 TC3(ldrbt
, 4700000, f8100e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
13109 TC3(strt
, 4200000, f8400e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
13110 TC3(strbt
, 4600000, f8000e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
13112 TC3(stmdb
, 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13113 TC3(stmfd
, 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13115 TC3(ldmdb
, 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13116 TC3(ldmea
, 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
13118 /* V1 instructions with no Thumb analogue at all. */
13119 CE(rsc
, 0e00000
, 3, (RR
, oRR
, SH
), arit
),
13120 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
13122 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
13123 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
13124 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
13125 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
13126 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
13127 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
13128 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
13129 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
13132 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
13133 #undef THUMB_VARIANT
13134 #define THUMB_VARIANT &arm_ext_v4t
13135 tCE(mul
, 0000090, mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
13136 tC3(muls
, 0100090, muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
13138 #undef THUMB_VARIANT
13139 #define THUMB_VARIANT &arm_ext_v6t2
13140 TCE(mla
, 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
13141 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
13143 /* Generic coprocessor instructions. */
13144 TCE(cdp
, e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
13145 TCE(ldc
, c100000
, ec100000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13146 TC3(ldcl
, c500000
, ec500000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13147 TCE(stc
, c000000
, ec000000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13148 TC3(stcl
, c400000
, ec400000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13149 TCE(mcr
, e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
13150 TCE(mrc
, e100010
, ee100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
13153 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
13154 CE(swp
, 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
13155 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
13158 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
13159 TCE(mrs
, 10f0000
, f3ef8000
, 2, (RR
, PSR
), mrs
, t_mrs
),
13160 TCE(msr
, 120f000
, f3808000
, 2, (PSR
, RR_EXi
), msr
, t_msr
),
13163 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
13164 TCE(smull
, 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
13165 CM(smull
,s
, 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
13166 TCE(umull
, 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
13167 CM(umull
,s
, 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
13168 TCE(smlal
, 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
13169 CM(smlal
,s
, 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
13170 TCE(umlal
, 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
13171 CM(umlal
,s
, 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
13174 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
13175 #undef THUMB_VARIANT
13176 #define THUMB_VARIANT &arm_ext_v4t
13177 tC3(ldrh
, 01000b0
, ldrh
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13178 tC3(strh
, 00000b0
, strh
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13179 tC3(ldrsh
, 01000f0
, ldrsh
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13180 tC3(ldrsb
, 01000d0
, ldrsb
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13181 tCM(ld
,sh
, 01000f0
, ldrsh
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13182 tCM(ld
,sb
, 01000d0
, ldrsb
, 2, (RR
, ADDR
), ldstv4
, t_ldst
),
13185 #define ARM_VARIANT &arm_ext_v4t_5
13186 /* ARM Architecture 4T. */
13187 /* Note: bx (and blx) are required on V5, even if the processor does
13188 not support Thumb. */
13189 TCE(bx
, 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
13192 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
13193 #undef THUMB_VARIANT
13194 #define THUMB_VARIANT &arm_ext_v5t
13195 /* Note: blx has 2 variants; the .value coded here is for
13196 BLX(2). Only this variant has conditional execution. */
13197 TCE(blx
, 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
13198 TUE(bkpt
, 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
13200 #undef THUMB_VARIANT
13201 #define THUMB_VARIANT &arm_ext_v6t2
13202 TCE(clz
, 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
13203 TUF(ldc2
, c100000
, fc100000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13204 TUF(ldc2l
, c500000
, fc500000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13205 TUF(stc2
, c000000
, fc000000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13206 TUF(stc2l
, c400000
, fc400000
, 3, (RCP
, RCN
, ADDR
), lstc
, lstc
),
13207 TUF(cdp2
, e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
13208 TUF(mcr2
, e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
13209 TUF(mrc2
, e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
13212 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
13213 TCE(smlabb
, 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13214 TCE(smlatb
, 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13215 TCE(smlabt
, 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13216 TCE(smlatt
, 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13218 TCE(smlawb
, 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13219 TCE(smlawt
, 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
13221 TCE(smlalbb
, 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
13222 TCE(smlaltb
, 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
13223 TCE(smlalbt
, 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
13224 TCE(smlaltt
, 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
13226 TCE(smulbb
, 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13227 TCE(smultb
, 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13228 TCE(smulbt
, 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13229 TCE(smultt
, 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13231 TCE(smulwb
, 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13232 TCE(smulwt
, 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13234 TCE(qadd
, 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
13235 TCE(qdadd
, 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
13236 TCE(qsub
, 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
13237 TCE(qdsub
, 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, rd_rm_rn
),
13240 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
13241 TUF(pld
, 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
13242 TC3(ldrd
, 00000d0
, e9500000
, 3, (RRnpc
, oRRnpc
, ADDR
), ldrd
, t_ldstd
),
13243 TC3(strd
, 00000f0
, e9400000
, 3, (RRnpc
, oRRnpc
, ADDR
), ldrd
, t_ldstd
),
13245 TCE(mcrr
, c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
13246 TCE(mrrc
, c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
13249 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
13250 TCE(bxj
, 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
13253 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
13254 #undef THUMB_VARIANT
13255 #define THUMB_VARIANT &arm_ext_v6
13256 TUF(cpsie
, 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
13257 TUF(cpsid
, 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
13258 tCE(rev
, 6bf0f30
, rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
13259 tCE(rev16
, 6bf0fb0
, rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
13260 tCE(revsh
, 6ff0fb0
, revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
13261 tCE(sxth
, 6bf0070
, sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
13262 tCE(uxth
, 6ff0070
, uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
13263 tCE(sxtb
, 6af0070
, sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
13264 tCE(uxtb
, 6ef0070
, uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
13265 TUF(setend
, 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
13267 #undef THUMB_VARIANT
13268 #define THUMB_VARIANT &arm_ext_v6t2
13269 TCE(ldrex
, 1900f9f
, e8500f00
, 2, (RRnpc
, ADDR
), ldrex
, t_ldrex
),
13270 TUF(mcrr2
, c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
13271 TUF(mrrc2
, c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
13273 TCE(ssat
, 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
13274 TCE(usat
, 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
13276 /* ARM V6 not included in V7M (eg. integer SIMD). */
13277 #undef THUMB_VARIANT
13278 #define THUMB_VARIANT &arm_ext_v6_notm
13279 TUF(cps
, 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
13280 TCE(pkhbt
, 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
13281 TCE(pkhtb
, 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
13282 TCE(qadd16
, 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13283 TCE(qadd8
, 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13284 TCE(qaddsubx
, 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13285 TCE(qsub16
, 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13286 TCE(qsub8
, 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13287 TCE(qsubaddx
, 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13288 TCE(sadd16
, 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13289 TCE(sadd8
, 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13290 TCE(saddsubx
, 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13291 TCE(shadd16
, 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13292 TCE(shadd8
, 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13293 TCE(shaddsubx
, 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13294 TCE(shsub16
, 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13295 TCE(shsub8
, 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13296 TCE(shsubaddx
, 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13297 TCE(ssub16
, 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13298 TCE(ssub8
, 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13299 TCE(ssubaddx
, 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13300 TCE(uadd16
, 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13301 TCE(uadd8
, 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13302 TCE(uaddsubx
, 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13303 TCE(uhadd16
, 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13304 TCE(uhadd8
, 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13305 TCE(uhaddsubx
, 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13306 TCE(uhsub16
, 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13307 TCE(uhsub8
, 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13308 TCE(uhsubaddx
, 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13309 TCE(uqadd16
, 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13310 TCE(uqadd8
, 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13311 TCE(uqaddsubx
, 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13312 TCE(uqsub16
, 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13313 TCE(uqsub8
, 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13314 TCE(uqsubaddx
, 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13315 TCE(usub16
, 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13316 TCE(usub8
, 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13317 TCE(usubaddx
, 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13318 TUF(rfeia
, 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
13319 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
13320 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
13321 TUF(rfedb
, 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
13322 TUF(rfefd
, 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
13323 UF(rfefa
, 9900a00
, 1, (RRw
), rfe
),
13324 UF(rfeea
, 8100a00
, 1, (RRw
), rfe
),
13325 TUF(rfeed
, 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
13326 TCE(sxtah
, 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
13327 TCE(sxtab16
, 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
13328 TCE(sxtab
, 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
13329 TCE(sxtb16
, 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
13330 TCE(uxtah
, 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
13331 TCE(uxtab16
, 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
13332 TCE(uxtab
, 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
13333 TCE(uxtb16
, 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
13334 TCE(sel
, 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
13335 TCE(smlad
, 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13336 TCE(smladx
, 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13337 TCE(smlald
, 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
13338 TCE(smlaldx
, 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
13339 TCE(smlsd
, 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13340 TCE(smlsdx
, 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13341 TCE(smlsld
, 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
13342 TCE(smlsldx
, 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
13343 TCE(smmla
, 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13344 TCE(smmlar
, 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13345 TCE(smmls
, 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13346 TCE(smmlsr
, 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13347 TCE(smmul
, 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13348 TCE(smmulr
, 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13349 TCE(smuad
, 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13350 TCE(smuadx
, 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13351 TCE(smusd
, 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13352 TCE(smusdx
, 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13353 TUF(srsia
, 8cd0500
, e980c000
, 1, (I31w
), srs
, srs
),
13354 UF(srsib
, 9cd0500
, 1, (I31w
), srs
),
13355 UF(srsda
, 84d0500
, 1, (I31w
), srs
),
13356 TUF(srsdb
, 94d0500
, e800c000
, 1, (I31w
), srs
, srs
),
13357 TCE(ssat16
, 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
13358 TCE(strex
, 1800f90
, e8400000
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, t_strex
),
13359 TCE(umaal
, 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
13360 TCE(usad8
, 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
13361 TCE(usada8
, 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
13362 TCE(usat16
, 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
13365 #define ARM_VARIANT &arm_ext_v6k
13366 #undef THUMB_VARIANT
13367 #define THUMB_VARIANT &arm_ext_v6k
13368 tCE(yield
, 320f001
, yield
, 0, (), noargs
, t_hint
),
13369 tCE(wfe
, 320f002
, wfe
, 0, (), noargs
, t_hint
),
13370 tCE(wfi
, 320f003
, wfi
, 0, (), noargs
, t_hint
),
13371 tCE(sev
, 320f004
, sev
, 0, (), noargs
, t_hint
),
13373 #undef THUMB_VARIANT
13374 #define THUMB_VARIANT &arm_ext_v6_notm
13375 TCE(ldrexd
, 1b00f9f
, e8d0007f
, 3, (RRnpc
, oRRnpc
, RRnpcb
), ldrexd
, t_ldrexd
),
13376 TCE(strexd
, 1a00f90
, e8c00070
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
), strexd
, t_strexd
),
13378 #undef THUMB_VARIANT
13379 #define THUMB_VARIANT &arm_ext_v6t2
13380 TCE(ldrexb
, 1d00f9f
, e8d00f4f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
13381 TCE(ldrexh
, 1f00f9f
, e8d00f5f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
13382 TCE(strexb
, 1c00f90
, e8c00f40
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, rm_rd_rn
),
13383 TCE(strexh
, 1e00f90
, e8c00f50
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, rm_rd_rn
),
13384 TUF(clrex
, 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
13387 #define ARM_VARIANT &arm_ext_v6z
13388 TCE(smc
, 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
13391 #define ARM_VARIANT &arm_ext_v6t2
13392 TCE(bfc
, 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
13393 TCE(bfi
, 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
13394 TCE(sbfx
, 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
13395 TCE(ubfx
, 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
13397 TCE(mls
, 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
13398 TCE(movw
, 3000000, f2400000
, 2, (RRnpc
, Iffff
), mov16
, t_mov16
),
13399 TCE(movt
, 3400000, f2c00000
, 2, (RRnpc
, Iffff
), mov16
, t_mov16
),
13400 TCE(rbit
, 3ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
13402 TC3(ldrht
, 03000b0
, f8300e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
13403 TC3(ldrsht
, 03000f0
, f9300e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
13404 TC3(ldrsbt
, 03000d0
, f9100e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
13405 TC3(strht
, 02000b0
, f8200e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
13407 UT(cbnz
, b900
, 2, (RR
, EXP
), t_czb
),
13408 UT(cbz
, b100
, 2, (RR
, EXP
), t_czb
),
13409 /* ARM does not really have an IT instruction. */
13410 TUE(it
, 0, bf08
, 1, (COND
), it
, t_it
),
13411 TUE(itt
, 0, bf0c
, 1, (COND
), it
, t_it
),
13412 TUE(ite
, 0, bf04
, 1, (COND
), it
, t_it
),
13413 TUE(ittt
, 0, bf0e
, 1, (COND
), it
, t_it
),
13414 TUE(itet
, 0, bf06
, 1, (COND
), it
, t_it
),
13415 TUE(itte
, 0, bf0a
, 1, (COND
), it
, t_it
),
13416 TUE(itee
, 0, bf02
, 1, (COND
), it
, t_it
),
13417 TUE(itttt
, 0, bf0f
, 1, (COND
), it
, t_it
),
13418 TUE(itett
, 0, bf07
, 1, (COND
), it
, t_it
),
13419 TUE(ittet
, 0, bf0b
, 1, (COND
), it
, t_it
),
13420 TUE(iteet
, 0, bf03
, 1, (COND
), it
, t_it
),
13421 TUE(ittte
, 0, bf0d
, 1, (COND
), it
, t_it
),
13422 TUE(itete
, 0, bf05
, 1, (COND
), it
, t_it
),
13423 TUE(ittee
, 0, bf09
, 1, (COND
), it
, t_it
),
13424 TUE(iteee
, 0, bf01
, 1, (COND
), it
, t_it
),
13426 /* Thumb2 only instructions. */
13428 #define ARM_VARIANT NULL
13430 TCE(addw
, 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
13431 TCE(subw
, 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
13432 TCE(tbb
, 0, e8d0f000
, 1, (TB
), 0, t_tb
),
13433 TCE(tbh
, 0, e8d0f010
, 1, (TB
), 0, t_tb
),
13435 /* Thumb-2 hardware division instructions (R and M profiles only). */
13436 #undef THUMB_VARIANT
13437 #define THUMB_VARIANT &arm_ext_div
13438 TCE(sdiv
, 0, fb90f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
13439 TCE(udiv
, 0, fbb0f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
13441 /* ARM V7 instructions. */
13443 #define ARM_VARIANT &arm_ext_v7
13444 #undef THUMB_VARIANT
13445 #define THUMB_VARIANT &arm_ext_v7
13446 TUF(pli
, 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
13447 TCE(dbg
, 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
13448 TUF(dmb
, 57ff050
, f3bf8f50
, 1, (oBARRIER
), barrier
, t_barrier
),
13449 TUF(dsb
, 57ff040
, f3bf8f40
, 1, (oBARRIER
), barrier
, t_barrier
),
13450 TUF(isb
, 57ff060
, f3bf8f60
, 1, (oBARRIER
), barrier
, t_barrier
),
13453 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
13454 cCE(wfs
, e200110
, 1, (RR
), rd
),
13455 cCE(rfs
, e300110
, 1, (RR
), rd
),
13456 cCE(wfc
, e400110
, 1, (RR
), rd
),
13457 cCE(rfc
, e500110
, 1, (RR
), rd
),
13459 cCL(ldfs
, c100100
, 2, (RF
, ADDR
), rd_cpaddr
),
13460 cCL(ldfd
, c108100
, 2, (RF
, ADDR
), rd_cpaddr
),
13461 cCL(ldfe
, c500100
, 2, (RF
, ADDR
), rd_cpaddr
),
13462 cCL(ldfp
, c508100
, 2, (RF
, ADDR
), rd_cpaddr
),
13464 cCL(stfs
, c000100
, 2, (RF
, ADDR
), rd_cpaddr
),
13465 cCL(stfd
, c008100
, 2, (RF
, ADDR
), rd_cpaddr
),
13466 cCL(stfe
, c400100
, 2, (RF
, ADDR
), rd_cpaddr
),
13467 cCL(stfp
, c408100
, 2, (RF
, ADDR
), rd_cpaddr
),
13469 cCL(mvfs
, e008100
, 2, (RF
, RF_IF
), rd_rm
),
13470 cCL(mvfsp
, e008120
, 2, (RF
, RF_IF
), rd_rm
),
13471 cCL(mvfsm
, e008140
, 2, (RF
, RF_IF
), rd_rm
),
13472 cCL(mvfsz
, e008160
, 2, (RF
, RF_IF
), rd_rm
),
13473 cCL(mvfd
, e008180
, 2, (RF
, RF_IF
), rd_rm
),
13474 cCL(mvfdp
, e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
13475 cCL(mvfdm
, e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
13476 cCL(mvfdz
, e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
13477 cCL(mvfe
, e088100
, 2, (RF
, RF_IF
), rd_rm
),
13478 cCL(mvfep
, e088120
, 2, (RF
, RF_IF
), rd_rm
),
13479 cCL(mvfem
, e088140
, 2, (RF
, RF_IF
), rd_rm
),
13480 cCL(mvfez
, e088160
, 2, (RF
, RF_IF
), rd_rm
),
13482 cCL(mnfs
, e108100
, 2, (RF
, RF_IF
), rd_rm
),
13483 cCL(mnfsp
, e108120
, 2, (RF
, RF_IF
), rd_rm
),
13484 cCL(mnfsm
, e108140
, 2, (RF
, RF_IF
), rd_rm
),
13485 cCL(mnfsz
, e108160
, 2, (RF
, RF_IF
), rd_rm
),
13486 cCL(mnfd
, e108180
, 2, (RF
, RF_IF
), rd_rm
),
13487 cCL(mnfdp
, e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
13488 cCL(mnfdm
, e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
13489 cCL(mnfdz
, e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
13490 cCL(mnfe
, e188100
, 2, (RF
, RF_IF
), rd_rm
),
13491 cCL(mnfep
, e188120
, 2, (RF
, RF_IF
), rd_rm
),
13492 cCL(mnfem
, e188140
, 2, (RF
, RF_IF
), rd_rm
),
13493 cCL(mnfez
, e188160
, 2, (RF
, RF_IF
), rd_rm
),
13495 cCL(abss
, e208100
, 2, (RF
, RF_IF
), rd_rm
),
13496 cCL(abssp
, e208120
, 2, (RF
, RF_IF
), rd_rm
),
13497 cCL(abssm
, e208140
, 2, (RF
, RF_IF
), rd_rm
),
13498 cCL(abssz
, e208160
, 2, (RF
, RF_IF
), rd_rm
),
13499 cCL(absd
, e208180
, 2, (RF
, RF_IF
), rd_rm
),
13500 cCL(absdp
, e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
13501 cCL(absdm
, e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
13502 cCL(absdz
, e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
13503 cCL(abse
, e288100
, 2, (RF
, RF_IF
), rd_rm
),
13504 cCL(absep
, e288120
, 2, (RF
, RF_IF
), rd_rm
),
13505 cCL(absem
, e288140
, 2, (RF
, RF_IF
), rd_rm
),
13506 cCL(absez
, e288160
, 2, (RF
, RF_IF
), rd_rm
),
13508 cCL(rnds
, e308100
, 2, (RF
, RF_IF
), rd_rm
),
13509 cCL(rndsp
, e308120
, 2, (RF
, RF_IF
), rd_rm
),
13510 cCL(rndsm
, e308140
, 2, (RF
, RF_IF
), rd_rm
),
13511 cCL(rndsz
, e308160
, 2, (RF
, RF_IF
), rd_rm
),
13512 cCL(rndd
, e308180
, 2, (RF
, RF_IF
), rd_rm
),
13513 cCL(rnddp
, e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
13514 cCL(rnddm
, e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
13515 cCL(rnddz
, e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
13516 cCL(rnde
, e388100
, 2, (RF
, RF_IF
), rd_rm
),
13517 cCL(rndep
, e388120
, 2, (RF
, RF_IF
), rd_rm
),
13518 cCL(rndem
, e388140
, 2, (RF
, RF_IF
), rd_rm
),
13519 cCL(rndez
, e388160
, 2, (RF
, RF_IF
), rd_rm
),
13521 cCL(sqts
, e408100
, 2, (RF
, RF_IF
), rd_rm
),
13522 cCL(sqtsp
, e408120
, 2, (RF
, RF_IF
), rd_rm
),
13523 cCL(sqtsm
, e408140
, 2, (RF
, RF_IF
), rd_rm
),
13524 cCL(sqtsz
, e408160
, 2, (RF
, RF_IF
), rd_rm
),
13525 cCL(sqtd
, e408180
, 2, (RF
, RF_IF
), rd_rm
),
13526 cCL(sqtdp
, e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
13527 cCL(sqtdm
, e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
13528 cCL(sqtdz
, e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
13529 cCL(sqte
, e488100
, 2, (RF
, RF_IF
), rd_rm
),
13530 cCL(sqtep
, e488120
, 2, (RF
, RF_IF
), rd_rm
),
13531 cCL(sqtem
, e488140
, 2, (RF
, RF_IF
), rd_rm
),
13532 cCL(sqtez
, e488160
, 2, (RF
, RF_IF
), rd_rm
),
13534 cCL(logs
, e508100
, 2, (RF
, RF_IF
), rd_rm
),
13535 cCL(logsp
, e508120
, 2, (RF
, RF_IF
), rd_rm
),
13536 cCL(logsm
, e508140
, 2, (RF
, RF_IF
), rd_rm
),
13537 cCL(logsz
, e508160
, 2, (RF
, RF_IF
), rd_rm
),
13538 cCL(logd
, e508180
, 2, (RF
, RF_IF
), rd_rm
),
13539 cCL(logdp
, e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
13540 cCL(logdm
, e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
13541 cCL(logdz
, e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
13542 cCL(loge
, e588100
, 2, (RF
, RF_IF
), rd_rm
),
13543 cCL(logep
, e588120
, 2, (RF
, RF_IF
), rd_rm
),
13544 cCL(logem
, e588140
, 2, (RF
, RF_IF
), rd_rm
),
13545 cCL(logez
, e588160
, 2, (RF
, RF_IF
), rd_rm
),
13547 cCL(lgns
, e608100
, 2, (RF
, RF_IF
), rd_rm
),
13548 cCL(lgnsp
, e608120
, 2, (RF
, RF_IF
), rd_rm
),
13549 cCL(lgnsm
, e608140
, 2, (RF
, RF_IF
), rd_rm
),
13550 cCL(lgnsz
, e608160
, 2, (RF
, RF_IF
), rd_rm
),
13551 cCL(lgnd
, e608180
, 2, (RF
, RF_IF
), rd_rm
),
13552 cCL(lgndp
, e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
13553 cCL(lgndm
, e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
13554 cCL(lgndz
, e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
13555 cCL(lgne
, e688100
, 2, (RF
, RF_IF
), rd_rm
),
13556 cCL(lgnep
, e688120
, 2, (RF
, RF_IF
), rd_rm
),
13557 cCL(lgnem
, e688140
, 2, (RF
, RF_IF
), rd_rm
),
13558 cCL(lgnez
, e688160
, 2, (RF
, RF_IF
), rd_rm
),
13560 cCL(exps
, e708100
, 2, (RF
, RF_IF
), rd_rm
),
13561 cCL(expsp
, e708120
, 2, (RF
, RF_IF
), rd_rm
),
13562 cCL(expsm
, e708140
, 2, (RF
, RF_IF
), rd_rm
),
13563 cCL(expsz
, e708160
, 2, (RF
, RF_IF
), rd_rm
),
13564 cCL(expd
, e708180
, 2, (RF
, RF_IF
), rd_rm
),
13565 cCL(expdp
, e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
13566 cCL(expdm
, e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
13567 cCL(expdz
, e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
13568 cCL(expe
, e788100
, 2, (RF
, RF_IF
), rd_rm
),
13569 cCL(expep
, e788120
, 2, (RF
, RF_IF
), rd_rm
),
13570 cCL(expem
, e788140
, 2, (RF
, RF_IF
), rd_rm
),
13571 cCL(expdz
, e788160
, 2, (RF
, RF_IF
), rd_rm
),
13573 cCL(sins
, e808100
, 2, (RF
, RF_IF
), rd_rm
),
13574 cCL(sinsp
, e808120
, 2, (RF
, RF_IF
), rd_rm
),
13575 cCL(sinsm
, e808140
, 2, (RF
, RF_IF
), rd_rm
),
13576 cCL(sinsz
, e808160
, 2, (RF
, RF_IF
), rd_rm
),
13577 cCL(sind
, e808180
, 2, (RF
, RF_IF
), rd_rm
),
13578 cCL(sindp
, e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
13579 cCL(sindm
, e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
13580 cCL(sindz
, e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
13581 cCL(sine
, e888100
, 2, (RF
, RF_IF
), rd_rm
),
13582 cCL(sinep
, e888120
, 2, (RF
, RF_IF
), rd_rm
),
13583 cCL(sinem
, e888140
, 2, (RF
, RF_IF
), rd_rm
),
13584 cCL(sinez
, e888160
, 2, (RF
, RF_IF
), rd_rm
),
13586 cCL(coss
, e908100
, 2, (RF
, RF_IF
), rd_rm
),
13587 cCL(cossp
, e908120
, 2, (RF
, RF_IF
), rd_rm
),
13588 cCL(cossm
, e908140
, 2, (RF
, RF_IF
), rd_rm
),
13589 cCL(cossz
, e908160
, 2, (RF
, RF_IF
), rd_rm
),
13590 cCL(cosd
, e908180
, 2, (RF
, RF_IF
), rd_rm
),
13591 cCL(cosdp
, e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
13592 cCL(cosdm
, e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
13593 cCL(cosdz
, e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
13594 cCL(cose
, e988100
, 2, (RF
, RF_IF
), rd_rm
),
13595 cCL(cosep
, e988120
, 2, (RF
, RF_IF
), rd_rm
),
13596 cCL(cosem
, e988140
, 2, (RF
, RF_IF
), rd_rm
),
13597 cCL(cosez
, e988160
, 2, (RF
, RF_IF
), rd_rm
),
13599 cCL(tans
, ea08100
, 2, (RF
, RF_IF
), rd_rm
),
13600 cCL(tansp
, ea08120
, 2, (RF
, RF_IF
), rd_rm
),
13601 cCL(tansm
, ea08140
, 2, (RF
, RF_IF
), rd_rm
),
13602 cCL(tansz
, ea08160
, 2, (RF
, RF_IF
), rd_rm
),
13603 cCL(tand
, ea08180
, 2, (RF
, RF_IF
), rd_rm
),
13604 cCL(tandp
, ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
13605 cCL(tandm
, ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
13606 cCL(tandz
, ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
13607 cCL(tane
, ea88100
, 2, (RF
, RF_IF
), rd_rm
),
13608 cCL(tanep
, ea88120
, 2, (RF
, RF_IF
), rd_rm
),
13609 cCL(tanem
, ea88140
, 2, (RF
, RF_IF
), rd_rm
),
13610 cCL(tanez
, ea88160
, 2, (RF
, RF_IF
), rd_rm
),
13612 cCL(asns
, eb08100
, 2, (RF
, RF_IF
), rd_rm
),
13613 cCL(asnsp
, eb08120
, 2, (RF
, RF_IF
), rd_rm
),
13614 cCL(asnsm
, eb08140
, 2, (RF
, RF_IF
), rd_rm
),
13615 cCL(asnsz
, eb08160
, 2, (RF
, RF_IF
), rd_rm
),
13616 cCL(asnd
, eb08180
, 2, (RF
, RF_IF
), rd_rm
),
13617 cCL(asndp
, eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
13618 cCL(asndm
, eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
13619 cCL(asndz
, eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
13620 cCL(asne
, eb88100
, 2, (RF
, RF_IF
), rd_rm
),
13621 cCL(asnep
, eb88120
, 2, (RF
, RF_IF
), rd_rm
),
13622 cCL(asnem
, eb88140
, 2, (RF
, RF_IF
), rd_rm
),
13623 cCL(asnez
, eb88160
, 2, (RF
, RF_IF
), rd_rm
),
13625 cCL(acss
, ec08100
, 2, (RF
, RF_IF
), rd_rm
),
13626 cCL(acssp
, ec08120
, 2, (RF
, RF_IF
), rd_rm
),
13627 cCL(acssm
, ec08140
, 2, (RF
, RF_IF
), rd_rm
),
13628 cCL(acssz
, ec08160
, 2, (RF
, RF_IF
), rd_rm
),
13629 cCL(acsd
, ec08180
, 2, (RF
, RF_IF
), rd_rm
),
13630 cCL(acsdp
, ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
13631 cCL(acsdm
, ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
13632 cCL(acsdz
, ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
13633 cCL(acse
, ec88100
, 2, (RF
, RF_IF
), rd_rm
),
13634 cCL(acsep
, ec88120
, 2, (RF
, RF_IF
), rd_rm
),
13635 cCL(acsem
, ec88140
, 2, (RF
, RF_IF
), rd_rm
),
13636 cCL(acsez
, ec88160
, 2, (RF
, RF_IF
), rd_rm
),
13638 cCL(atns
, ed08100
, 2, (RF
, RF_IF
), rd_rm
),
13639 cCL(atnsp
, ed08120
, 2, (RF
, RF_IF
), rd_rm
),
13640 cCL(atnsm
, ed08140
, 2, (RF
, RF_IF
), rd_rm
),
13641 cCL(atnsz
, ed08160
, 2, (RF
, RF_IF
), rd_rm
),
13642 cCL(atnd
, ed08180
, 2, (RF
, RF_IF
), rd_rm
),
13643 cCL(atndp
, ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
13644 cCL(atndm
, ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
13645 cCL(atndz
, ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
13646 cCL(atne
, ed88100
, 2, (RF
, RF_IF
), rd_rm
),
13647 cCL(atnep
, ed88120
, 2, (RF
, RF_IF
), rd_rm
),
13648 cCL(atnem
, ed88140
, 2, (RF
, RF_IF
), rd_rm
),
13649 cCL(atnez
, ed88160
, 2, (RF
, RF_IF
), rd_rm
),
13651 cCL(urds
, ee08100
, 2, (RF
, RF_IF
), rd_rm
),
13652 cCL(urdsp
, ee08120
, 2, (RF
, RF_IF
), rd_rm
),
13653 cCL(urdsm
, ee08140
, 2, (RF
, RF_IF
), rd_rm
),
13654 cCL(urdsz
, ee08160
, 2, (RF
, RF_IF
), rd_rm
),
13655 cCL(urdd
, ee08180
, 2, (RF
, RF_IF
), rd_rm
),
13656 cCL(urddp
, ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
13657 cCL(urddm
, ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
13658 cCL(urddz
, ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
13659 cCL(urde
, ee88100
, 2, (RF
, RF_IF
), rd_rm
),
13660 cCL(urdep
, ee88120
, 2, (RF
, RF_IF
), rd_rm
),
13661 cCL(urdem
, ee88140
, 2, (RF
, RF_IF
), rd_rm
),
13662 cCL(urdez
, ee88160
, 2, (RF
, RF_IF
), rd_rm
),
13664 cCL(nrms
, ef08100
, 2, (RF
, RF_IF
), rd_rm
),
13665 cCL(nrmsp
, ef08120
, 2, (RF
, RF_IF
), rd_rm
),
13666 cCL(nrmsm
, ef08140
, 2, (RF
, RF_IF
), rd_rm
),
13667 cCL(nrmsz
, ef08160
, 2, (RF
, RF_IF
), rd_rm
),
13668 cCL(nrmd
, ef08180
, 2, (RF
, RF_IF
), rd_rm
),
13669 cCL(nrmdp
, ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
13670 cCL(nrmdm
, ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
13671 cCL(nrmdz
, ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
13672 cCL(nrme
, ef88100
, 2, (RF
, RF_IF
), rd_rm
),
13673 cCL(nrmep
, ef88120
, 2, (RF
, RF_IF
), rd_rm
),
13674 cCL(nrmem
, ef88140
, 2, (RF
, RF_IF
), rd_rm
),
13675 cCL(nrmez
, ef88160
, 2, (RF
, RF_IF
), rd_rm
),
13677 cCL(adfs
, e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13678 cCL(adfsp
, e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13679 cCL(adfsm
, e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13680 cCL(adfsz
, e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13681 cCL(adfd
, e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13682 cCL(adfdp
, e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13683 cCL(adfdm
, e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13684 cCL(adfdz
, e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13685 cCL(adfe
, e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13686 cCL(adfep
, e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13687 cCL(adfem
, e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13688 cCL(adfez
, e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13690 cCL(sufs
, e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13691 cCL(sufsp
, e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13692 cCL(sufsm
, e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13693 cCL(sufsz
, e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13694 cCL(sufd
, e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13695 cCL(sufdp
, e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13696 cCL(sufdm
, e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13697 cCL(sufdz
, e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13698 cCL(sufe
, e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13699 cCL(sufep
, e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13700 cCL(sufem
, e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13701 cCL(sufez
, e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13703 cCL(rsfs
, e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13704 cCL(rsfsp
, e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13705 cCL(rsfsm
, e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13706 cCL(rsfsz
, e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13707 cCL(rsfd
, e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13708 cCL(rsfdp
, e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13709 cCL(rsfdm
, e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13710 cCL(rsfdz
, e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13711 cCL(rsfe
, e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13712 cCL(rsfep
, e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13713 cCL(rsfem
, e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13714 cCL(rsfez
, e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13716 cCL(mufs
, e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13717 cCL(mufsp
, e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13718 cCL(mufsm
, e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13719 cCL(mufsz
, e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13720 cCL(mufd
, e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13721 cCL(mufdp
, e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13722 cCL(mufdm
, e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13723 cCL(mufdz
, e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13724 cCL(mufe
, e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13725 cCL(mufep
, e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13726 cCL(mufem
, e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13727 cCL(mufez
, e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13729 cCL(dvfs
, e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13730 cCL(dvfsp
, e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13731 cCL(dvfsm
, e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13732 cCL(dvfsz
, e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13733 cCL(dvfd
, e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13734 cCL(dvfdp
, e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13735 cCL(dvfdm
, e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13736 cCL(dvfdz
, e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13737 cCL(dvfe
, e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13738 cCL(dvfep
, e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13739 cCL(dvfem
, e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13740 cCL(dvfez
, e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13742 cCL(rdfs
, e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13743 cCL(rdfsp
, e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13744 cCL(rdfsm
, e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13745 cCL(rdfsz
, e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13746 cCL(rdfd
, e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13747 cCL(rdfdp
, e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13748 cCL(rdfdm
, e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13749 cCL(rdfdz
, e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13750 cCL(rdfe
, e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13751 cCL(rdfep
, e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13752 cCL(rdfem
, e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13753 cCL(rdfez
, e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13755 cCL(pows
, e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13756 cCL(powsp
, e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13757 cCL(powsm
, e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13758 cCL(powsz
, e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13759 cCL(powd
, e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13760 cCL(powdp
, e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13761 cCL(powdm
, e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13762 cCL(powdz
, e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13763 cCL(powe
, e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13764 cCL(powep
, e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13765 cCL(powem
, e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13766 cCL(powez
, e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13768 cCL(rpws
, e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13769 cCL(rpwsp
, e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13770 cCL(rpwsm
, e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13771 cCL(rpwsz
, e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13772 cCL(rpwd
, e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13773 cCL(rpwdp
, e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13774 cCL(rpwdm
, e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13775 cCL(rpwdz
, e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13776 cCL(rpwe
, e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13777 cCL(rpwep
, e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13778 cCL(rpwem
, e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13779 cCL(rpwez
, e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13781 cCL(rmfs
, e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13782 cCL(rmfsp
, e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13783 cCL(rmfsm
, e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13784 cCL(rmfsz
, e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13785 cCL(rmfd
, e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13786 cCL(rmfdp
, e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13787 cCL(rmfdm
, e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13788 cCL(rmfdz
, e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13789 cCL(rmfe
, e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13790 cCL(rmfep
, e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13791 cCL(rmfem
, e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13792 cCL(rmfez
, e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13794 cCL(fmls
, e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13795 cCL(fmlsp
, e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13796 cCL(fmlsm
, e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13797 cCL(fmlsz
, e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13798 cCL(fmld
, e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13799 cCL(fmldp
, e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13800 cCL(fmldm
, e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13801 cCL(fmldz
, e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13802 cCL(fmle
, e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13803 cCL(fmlep
, e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13804 cCL(fmlem
, e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13805 cCL(fmlez
, e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13807 cCL(fdvs
, ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13808 cCL(fdvsp
, ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13809 cCL(fdvsm
, ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13810 cCL(fdvsz
, ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13811 cCL(fdvd
, ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13812 cCL(fdvdp
, ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13813 cCL(fdvdm
, ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13814 cCL(fdvdz
, ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13815 cCL(fdve
, ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13816 cCL(fdvep
, ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13817 cCL(fdvem
, ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13818 cCL(fdvez
, ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13820 cCL(frds
, eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13821 cCL(frdsp
, eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13822 cCL(frdsm
, eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13823 cCL(frdsz
, eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13824 cCL(frdd
, eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13825 cCL(frddp
, eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13826 cCL(frddm
, eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13827 cCL(frddz
, eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13828 cCL(frde
, eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13829 cCL(frdep
, eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13830 cCL(frdem
, eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13831 cCL(frdez
, eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13833 cCL(pols
, ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13834 cCL(polsp
, ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13835 cCL(polsm
, ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13836 cCL(polsz
, ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13837 cCL(pold
, ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13838 cCL(poldp
, ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13839 cCL(poldm
, ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13840 cCL(poldz
, ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13841 cCL(pole
, ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13842 cCL(polep
, ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13843 cCL(polem
, ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13844 cCL(polez
, ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
13846 cCE(cmf
, e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
13847 C3E(cmfe
, ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
13848 cCE(cnf
, eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
13849 C3E(cnfe
, ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
13851 cCL(flts
, e000110
, 2, (RF
, RR
), rn_rd
),
13852 cCL(fltsp
, e000130
, 2, (RF
, RR
), rn_rd
),
13853 cCL(fltsm
, e000150
, 2, (RF
, RR
), rn_rd
),
13854 cCL(fltsz
, e000170
, 2, (RF
, RR
), rn_rd
),
13855 cCL(fltd
, e000190
, 2, (RF
, RR
), rn_rd
),
13856 cCL(fltdp
, e0001b0
, 2, (RF
, RR
), rn_rd
),
13857 cCL(fltdm
, e0001d0
, 2, (RF
, RR
), rn_rd
),
13858 cCL(fltdz
, e0001f0
, 2, (RF
, RR
), rn_rd
),
13859 cCL(flte
, e080110
, 2, (RF
, RR
), rn_rd
),
13860 cCL(fltep
, e080130
, 2, (RF
, RR
), rn_rd
),
13861 cCL(fltem
, e080150
, 2, (RF
, RR
), rn_rd
),
13862 cCL(fltez
, e080170
, 2, (RF
, RR
), rn_rd
),
13864 /* The implementation of the FIX instruction is broken on some
13865 assemblers, in that it accepts a precision specifier as well as a
13866 rounding specifier, despite the fact that this is meaningless.
13867 To be more compatible, we accept it as well, though of course it
13868 does not set any bits. */
13869 cCE(fix
, e100110
, 2, (RR
, RF
), rd_rm
),
13870 cCL(fixp
, e100130
, 2, (RR
, RF
), rd_rm
),
13871 cCL(fixm
, e100150
, 2, (RR
, RF
), rd_rm
),
13872 cCL(fixz
, e100170
, 2, (RR
, RF
), rd_rm
),
13873 cCL(fixsp
, e100130
, 2, (RR
, RF
), rd_rm
),
13874 cCL(fixsm
, e100150
, 2, (RR
, RF
), rd_rm
),
13875 cCL(fixsz
, e100170
, 2, (RR
, RF
), rd_rm
),
13876 cCL(fixdp
, e100130
, 2, (RR
, RF
), rd_rm
),
13877 cCL(fixdm
, e100150
, 2, (RR
, RF
), rd_rm
),
13878 cCL(fixdz
, e100170
, 2, (RR
, RF
), rd_rm
),
13879 cCL(fixep
, e100130
, 2, (RR
, RF
), rd_rm
),
13880 cCL(fixem
, e100150
, 2, (RR
, RF
), rd_rm
),
13881 cCL(fixez
, e100170
, 2, (RR
, RF
), rd_rm
),
13883 /* Instructions that were new with the real FPA, call them V2. */
13885 #define ARM_VARIANT &fpu_fpa_ext_v2
13886 cCE(lfm
, c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
13887 cCL(lfmfd
, c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
13888 cCL(lfmea
, d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
13889 cCE(sfm
, c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
13890 cCL(sfmfd
, d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
13891 cCL(sfmea
, c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
13894 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
13895 /* Moves and type conversions. */
13896 cCE(fcpys
, eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13897 cCE(fmrs
, e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
13898 cCE(fmsr
, e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
13899 cCE(fmstat
, ef1fa10
, 0, (), noargs
),
13900 cCE(fsitos
, eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13901 cCE(fuitos
, eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13902 cCE(ftosis
, ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13903 cCE(ftosizs
, ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13904 cCE(ftouis
, ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13905 cCE(ftouizs
, ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13906 cCE(fmrx
, ef00a10
, 2, (RR
, RVC
), rd_rn
),
13907 cCE(fmxr
, ee00a10
, 2, (RVC
, RR
), rn_rd
),
13909 /* Memory operations. */
13910 cCE(flds
, d100a00
, 2, (RVS
, ADDR
), vfp_sp_ldst
),
13911 cCE(fsts
, d000a00
, 2, (RVS
, ADDR
), vfp_sp_ldst
),
13912 cCE(fldmias
, c900a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
13913 cCE(fldmfds
, c900a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
13914 cCE(fldmdbs
, d300a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
13915 cCE(fldmeas
, d300a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
13916 cCE(fldmiax
, c900b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
13917 cCE(fldmfdx
, c900b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
13918 cCE(fldmdbx
, d300b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
13919 cCE(fldmeax
, d300b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
13920 cCE(fstmias
, c800a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
13921 cCE(fstmeas
, c800a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
13922 cCE(fstmdbs
, d200a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
13923 cCE(fstmfds
, d200a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
13924 cCE(fstmiax
, c800b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
13925 cCE(fstmeax
, c800b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
13926 cCE(fstmdbx
, d200b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
13927 cCE(fstmfdx
, d200b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
13929 /* Monadic operations. */
13930 cCE(fabss
, eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13931 cCE(fnegs
, eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13932 cCE(fsqrts
, eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13934 /* Dyadic operations. */
13935 cCE(fadds
, e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13936 cCE(fsubs
, e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13937 cCE(fmuls
, e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13938 cCE(fdivs
, e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13939 cCE(fmacs
, e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13940 cCE(fmscs
, e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13941 cCE(fnmuls
, e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13942 cCE(fnmacs
, e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13943 cCE(fnmscs
, e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
13946 cCE(fcmps
, eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13947 cCE(fcmpzs
, eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
13948 cCE(fcmpes
, eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
13949 cCE(fcmpezs
, eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
13952 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
13953 /* Moves and type conversions. */
13954 cCE(fcpyd
, eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
13955 cCE(fcvtds
, eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
13956 cCE(fcvtsd
, eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
13957 cCE(fmdhr
, e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
13958 cCE(fmdlr
, e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
13959 cCE(fmrdh
, e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
13960 cCE(fmrdl
, e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
13961 cCE(fsitod
, eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
13962 cCE(fuitod
, eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
13963 cCE(ftosid
, ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
13964 cCE(ftosizd
, ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
13965 cCE(ftouid
, ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
13966 cCE(ftouizd
, ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
13968 /* Memory operations. */
13969 cCE(fldd
, d100b00
, 2, (RVD
, ADDR
), vfp_dp_ldst
),
13970 cCE(fstd
, d000b00
, 2, (RVD
, ADDR
), vfp_dp_ldst
),
13971 cCE(fldmiad
, c900b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
13972 cCE(fldmfdd
, c900b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
13973 cCE(fldmdbd
, d300b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
13974 cCE(fldmead
, d300b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
13975 cCE(fstmiad
, c800b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
13976 cCE(fstmead
, c800b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
13977 cCE(fstmdbd
, d200b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
13978 cCE(fstmfdd
, d200b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
13980 /* Monadic operations. */
13981 cCE(fabsd
, eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
13982 cCE(fnegd
, eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
13983 cCE(fsqrtd
, eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
13985 /* Dyadic operations. */
13986 cCE(faddd
, e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13987 cCE(fsubd
, e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13988 cCE(fmuld
, e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13989 cCE(fdivd
, e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13990 cCE(fmacd
, e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13991 cCE(fmscd
, e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13992 cCE(fnmuld
, e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13993 cCE(fnmacd
, e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13994 cCE(fnmscd
, e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
13997 cCE(fcmpd
, eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
13998 cCE(fcmpzd
, eb50b40
, 1, (RVD
), vfp_dp_rd
),
13999 cCE(fcmped
, eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
14000 cCE(fcmpezd
, eb50bc0
, 1, (RVD
), vfp_dp_rd
),
14003 #define ARM_VARIANT &fpu_vfp_ext_v2
14004 cCE(fmsrr
, c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
14005 cCE(fmrrs
, c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
14006 cCE(fmdrr
, c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
14007 cCE(fmrrd
, c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
14009 #undef THUMB_VARIANT
14010 #define THUMB_VARIANT &fpu_neon_ext_v1
14012 #define ARM_VARIANT &fpu_neon_ext_v1
14013 /* Data processing with three registers of the same length. */
14014 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
14015 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
14016 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
14017 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
14018 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
14019 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
14020 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
14021 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
14022 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
14023 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
14024 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
14025 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
14026 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
14027 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
14028 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
14029 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
14030 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
14031 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
14032 /* If not immediate, fall back to neon_dyadic_i64_su.
14033 shl_imm should accept I8 I16 I32 I64,
14034 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
14035 nUF(vshl
, vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
14036 nUF(vshlq
, vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
14037 nUF(vqshl
, vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
14038 nUF(vqshlq
, vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
14039 /* Logic ops, types optional & ignored. */
14040 nUF(vand
, vand
, 2, (RNDQ
, NILO
), neon_logic
),
14041 nUF(vandq
, vand
, 2, (RNQ
, NILO
), neon_logic
),
14042 nUF(vbic
, vbic
, 2, (RNDQ
, NILO
), neon_logic
),
14043 nUF(vbicq
, vbic
, 2, (RNQ
, NILO
), neon_logic
),
14044 nUF(vorr
, vorr
, 2, (RNDQ
, NILO
), neon_logic
),
14045 nUF(vorrq
, vorr
, 2, (RNQ
, NILO
), neon_logic
),
14046 nUF(vorn
, vorn
, 2, (RNDQ
, NILO
), neon_logic
),
14047 nUF(vornq
, vorn
, 2, (RNQ
, NILO
), neon_logic
),
14048 nUF(veor
, veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
14049 nUF(veorq
, veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
14050 /* Bitfield ops, untyped. */
14051 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
14052 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
14053 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
14054 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
14055 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
14056 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
14057 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
14058 nUF(vabd
, vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
14059 nUF(vabdq
, vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
14060 nUF(vmax
, vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
14061 nUF(vmaxq
, vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
14062 nUF(vmin
, vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
14063 nUF(vminq
, vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
14064 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
14065 back to neon_dyadic_if_su. */
14066 nUF(vcge
, vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
14067 nUF(vcgeq
, vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
14068 nUF(vcgt
, vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
14069 nUF(vcgtq
, vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
14070 nUF(vclt
, vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
14071 nUF(vcltq
, vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
14072 nUF(vcle
, vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
14073 nUF(vcleq
, vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
14074 /* Comparison. Type I8 I16 I32 F32. Non-immediate -> neon_dyadic_if_i. */
14075 nUF(vceq
, vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
14076 nUF(vceqq
, vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
14077 /* As above, D registers only. */
14078 nUF(vpmax
, vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
14079 nUF(vpmin
, vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
14080 /* Int and float variants, signedness unimportant. */
14081 /* If not scalar, fall back to neon_dyadic_if_i. */
14082 nUF(vmla
, vmla
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
14083 nUF(vmlaq
, vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
14084 nUF(vmls
, vmls
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
14085 nUF(vmlsq
, vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
14086 nUF(vpadd
, vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
14087 /* Add/sub take types I8 I16 I32 I64 F32. */
14088 nUF(vadd
, vadd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_addsub_if_i
),
14089 nUF(vaddq
, vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
14090 nUF(vsub
, vsub
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_addsub_if_i
),
14091 nUF(vsubq
, vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
14092 /* vtst takes sizes 8, 16, 32. */
14093 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
14094 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
14095 /* VMUL takes I8 I16 I32 F32 P8. */
14096 nUF(vmul
, vmul
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_mul
),
14097 nUF(vmulq
, vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
14098 /* VQD{R}MULH takes S16 S32. */
14099 nUF(vqdmulh
, vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
14100 nUF(vqdmulhq
, vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
14101 nUF(vqrdmulh
, vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
14102 nUF(vqrdmulhq
, vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
14103 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
14104 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
14105 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
14106 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
14107 NUF(vaclt
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
14108 NUF(vacltq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
14109 NUF(vacle
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
14110 NUF(vacleq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
14111 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
14112 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
14113 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
14114 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
14116 /* Two address, int/float. Types S8 S16 S32 F32. */
14117 NUF(vabs
, 1b10300
, 2, (RNDQ
, RNDQ
), neon_abs_neg
),
14118 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
14119 NUF(vneg
, 1b10380
, 2, (RNDQ
, RNDQ
), neon_abs_neg
),
14120 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
14122 /* Data processing with two registers and a shift amount. */
14123 /* Right shifts, and variants with rounding.
14124 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
14125 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
14126 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
14127 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
14128 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
14129 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
14130 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
14131 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
14132 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
14133 /* Shift and insert. Sizes accepted 8 16 32 64. */
14134 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
14135 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
14136 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
14137 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
14138 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
14139 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
14140 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
14141 /* Right shift immediate, saturating & narrowing, with rounding variants.
14142 Types accepted S16 S32 S64 U16 U32 U64. */
14143 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
14144 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
14145 /* As above, unsigned. Types accepted S16 S32 S64. */
14146 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
14147 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
14148 /* Right shift narrowing. Types accepted I16 I32 I64. */
14149 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
14150 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
14151 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
14152 nUF(vshll
, vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
14153 /* CVT with optional immediate for fixed-point variant. */
14154 nUF(vcvt
, vcvt
, 3, (RNDQ
, RNDQ
, oI32b
), neon_cvt
),
14155 nUF(vcvtq
, vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
14157 /* One register and an immediate value. All encoding special-cased! */
14158 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
14159 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
14160 nUF(vmvn
, vmvn
, 2, (RNDQ
, RNDQ_IMVNb
), neon_mvn
),
14161 nUF(vmvnq
, vmvn
, 2, (RNQ
, RNDQ_IMVNb
), neon_mvn
),
14163 /* Data processing, three registers of different lengths. */
14164 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
14165 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
14166 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
14167 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
14168 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
14169 /* If not scalar, fall back to neon_dyadic_long.
14170 Vector types as above, scalar types S16 S32 U16 U32. */
14171 nUF(vmlal
, vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
14172 nUF(vmlsl
, vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
14173 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
14174 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
14175 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
14176 /* Dyadic, narrowing insns. Types I16 I32 I64. */
14177 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
14178 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
14179 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
14180 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
14181 /* Saturating doubling multiplies. Types S16 S32. */
14182 nUF(vqdmlal
, vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
14183 nUF(vqdmlsl
, vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
14184 nUF(vqdmull
, vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
14185 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
14186 S16 S32 U16 U32. */
14187 nUF(vmull
, vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
14189 /* Extract. Size 8. */
14190 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I7
), neon_ext
),
14191 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I7
), neon_ext
),
14193 /* Two registers, miscellaneous. */
14194 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
14195 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
14196 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
14197 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
14198 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
14199 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
14200 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
14201 /* Vector replicate. Sizes 8 16 32. */
14202 nCE(vdup
, vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
14203 nCE(vdupq
, vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
14204 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
14205 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
14206 /* VMOVN. Types I16 I32 I64. */
14207 nUF(vmovn
, vmovn
, 2, (RND
, RNQ
), neon_movn
),
14208 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
14209 nUF(vqmovn
, vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
14210 /* VQMOVUN. Types S16 S32 S64. */
14211 nUF(vqmovun
, vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
14212 /* VZIP / VUZP. Sizes 8 16 32. */
14213 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
14214 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
14215 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
14216 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
14217 /* VQABS / VQNEG. Types S8 S16 S32. */
14218 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
14219 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
14220 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
14221 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
14222 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
14223 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
14224 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
14225 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
14226 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
14227 /* Reciprocal estimates. Types U32 F32. */
14228 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
14229 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
14230 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
14231 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
14232 /* VCLS. Types S8 S16 S32. */
14233 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
14234 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
14235 /* VCLZ. Types I8 I16 I32. */
14236 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
14237 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
14238 /* VCNT. Size 8. */
14239 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
14240 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
14241 /* Two address, untyped. */
14242 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
14243 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
14244 /* VTRN. Sizes 8 16 32. */
14245 nUF(vtrn
, vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
14246 nUF(vtrnq
, vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
14248 /* Table lookup. Size 8. */
14249 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
14250 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
14252 #undef THUMB_VARIANT
14253 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
14255 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
14257 /* Load/store instructions. Available in Neon or VFPv3. */
14258 NCE(vldm
, c900b00
, 2, (RRw
, NRDLST
), neon_ldm_stm
),
14259 NCE(vldmia
, c900b00
, 2, (RRw
, NRDLST
), neon_ldm_stm
),
14260 NCE(vldmdb
, d100b00
, 2, (RRw
, NRDLST
), neon_ldm_stm
),
14261 NCE(vstm
, c800b00
, 2, (RRw
, NRDLST
), neon_ldm_stm
),
14262 NCE(vstmia
, c800b00
, 2, (RRw
, NRDLST
), neon_ldm_stm
),
14263 NCE(vstmdb
, d000b00
, 2, (RRw
, NRDLST
), neon_ldm_stm
),
14264 NCE(vldr
, d100b00
, 2, (RND
, ADDR
), neon_ldr_str
),
14265 NCE(vstr
, d000b00
, 2, (RND
, ADDR
), neon_ldr_str
),
14267 /* Neon element/structure load/store. */
14268 nUF(vld1
, vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
14269 nUF(vst1
, vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
14270 nUF(vld2
, vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
14271 nUF(vst2
, vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
14272 nUF(vld3
, vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
14273 nUF(vst3
, vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
14274 nUF(vld4
, vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
14275 nUF(vst4
, vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
14277 #undef THUMB_VARIANT
14278 #define THUMB_VARIANT &fpu_vfp_ext_v3
14280 #define ARM_VARIANT &fpu_vfp_ext_v3
14282 cCE(fconsts
, eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
14283 cCE(fconstd
, eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
14284 cCE(fshtos
, eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
14285 cCE(fshtod
, eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
14286 cCE(fsltos
, eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
14287 cCE(fsltod
, eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
14288 cCE(fuhtos
, ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
14289 cCE(fuhtod
, ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
14290 cCE(fultos
, ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
14291 cCE(fultod
, ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
14292 cCE(ftoshs
, ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
14293 cCE(ftoshd
, ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
14294 cCE(ftosls
, ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
14295 cCE(ftosld
, ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
14296 cCE(ftouhs
, ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
14297 cCE(ftouhd
, ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
14298 cCE(ftouls
, ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
14299 cCE(ftould
, ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
14301 #undef THUMB_VARIANT
14303 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
14304 cCE(mia
, e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
14305 cCE(miaph
, e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
14306 cCE(miabb
, e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
14307 cCE(miabt
, e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
14308 cCE(miatb
, e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
14309 cCE(miatt
, e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
14310 cCE(mar
, c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
14311 cCE(mra
, c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
14314 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
14315 cCE(tandcb
, e13f130
, 1, (RR
), iwmmxt_tandorc
),
14316 cCE(tandch
, e53f130
, 1, (RR
), iwmmxt_tandorc
),
14317 cCE(tandcw
, e93f130
, 1, (RR
), iwmmxt_tandorc
),
14318 cCE(tbcstb
, e400010
, 2, (RIWR
, RR
), rn_rd
),
14319 cCE(tbcsth
, e400050
, 2, (RIWR
, RR
), rn_rd
),
14320 cCE(tbcstw
, e400090
, 2, (RIWR
, RR
), rn_rd
),
14321 cCE(textrcb
, e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
14322 cCE(textrch
, e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
14323 cCE(textrcw
, e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
14324 cCE(textrmub
, e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
14325 cCE(textrmuh
, e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
14326 cCE(textrmuw
, e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
14327 cCE(textrmsb
, e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
14328 cCE(textrmsh
, e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
14329 cCE(textrmsw
, e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
14330 cCE(tinsrb
, e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
14331 cCE(tinsrh
, e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
14332 cCE(tinsrw
, e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
14333 cCE(tmcr
, e000110
, 2, (RIWC
, RR
), rn_rd
),
14334 cCE(tmcrr
, c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
14335 cCE(tmia
, e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
14336 cCE(tmiaph
, e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
14337 cCE(tmiabb
, e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
14338 cCE(tmiabt
, e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
14339 cCE(tmiatb
, e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
14340 cCE(tmiatt
, e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
14341 cCE(tmovmskb
, e100030
, 2, (RR
, RIWR
), rd_rn
),
14342 cCE(tmovmskh
, e500030
, 2, (RR
, RIWR
), rd_rn
),
14343 cCE(tmovmskw
, e900030
, 2, (RR
, RIWR
), rd_rn
),
14344 cCE(tmrc
, e100110
, 2, (RR
, RIWC
), rd_rn
),
14345 cCE(tmrrc
, c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
14346 cCE(torcb
, e13f150
, 1, (RR
), iwmmxt_tandorc
),
14347 cCE(torch
, e53f150
, 1, (RR
), iwmmxt_tandorc
),
14348 cCE(torcw
, e93f150
, 1, (RR
), iwmmxt_tandorc
),
14349 cCE(waccb
, e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
14350 cCE(wacch
, e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
14351 cCE(waccw
, e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
14352 cCE(waddbss
, e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14353 cCE(waddb
, e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14354 cCE(waddbus
, e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14355 cCE(waddhss
, e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14356 cCE(waddh
, e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14357 cCE(waddhus
, e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14358 cCE(waddwss
, eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14359 cCE(waddw
, e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14360 cCE(waddwus
, e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14361 cCE(waligni
, e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
14362 cCE(walignr0
, e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14363 cCE(walignr1
, e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14364 cCE(walignr2
, ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14365 cCE(walignr3
, eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14366 cCE(wand
, e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14367 cCE(wandn
, e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14368 cCE(wavg2b
, e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14369 cCE(wavg2br
, e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14370 cCE(wavg2h
, ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14371 cCE(wavg2hr
, ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14372 cCE(wcmpeqb
, e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14373 cCE(wcmpeqh
, e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14374 cCE(wcmpeqw
, e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14375 cCE(wcmpgtub
, e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14376 cCE(wcmpgtuh
, e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14377 cCE(wcmpgtuw
, e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14378 cCE(wcmpgtsb
, e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14379 cCE(wcmpgtsh
, e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14380 cCE(wcmpgtsw
, eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14381 cCE(wldrb
, c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
14382 cCE(wldrh
, c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
14383 cCE(wldrw
, c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
14384 cCE(wldrd
, c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
14385 cCE(wmacs
, e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14386 cCE(wmacsz
, e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14387 cCE(wmacu
, e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14388 cCE(wmacuz
, e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14389 cCE(wmadds
, ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14390 cCE(wmaddu
, e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14391 cCE(wmaxsb
, e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14392 cCE(wmaxsh
, e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14393 cCE(wmaxsw
, ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14394 cCE(wmaxub
, e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14395 cCE(wmaxuh
, e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14396 cCE(wmaxuw
, e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14397 cCE(wminsb
, e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14398 cCE(wminsh
, e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14399 cCE(wminsw
, eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14400 cCE(wminub
, e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14401 cCE(wminuh
, e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14402 cCE(wminuw
, e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14403 cCE(wmov
, e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
14404 cCE(wmulsm
, e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14405 cCE(wmulsl
, e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14406 cCE(wmulum
, e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14407 cCE(wmulul
, e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14408 cCE(wor
, e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14409 cCE(wpackhss
, e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14410 cCE(wpackhus
, e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14411 cCE(wpackwss
, eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14412 cCE(wpackwus
, e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14413 cCE(wpackdss
, ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14414 cCE(wpackdus
, ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14415 cCE(wrorh
, e700040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14416 cCE(wrorhg
, e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14417 cCE(wrorw
, eb00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14418 cCE(wrorwg
, eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14419 cCE(wrord
, ef00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14420 cCE(wrordg
, ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14421 cCE(wsadb
, e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14422 cCE(wsadbz
, e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14423 cCE(wsadh
, e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14424 cCE(wsadhz
, e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14425 cCE(wshufh
, e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
14426 cCE(wsllh
, e500040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14427 cCE(wsllhg
, e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14428 cCE(wsllw
, e900040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14429 cCE(wsllwg
, e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14430 cCE(wslld
, ed00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14431 cCE(wslldg
, ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14432 cCE(wsrah
, e400040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14433 cCE(wsrahg
, e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14434 cCE(wsraw
, e800040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14435 cCE(wsrawg
, e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14436 cCE(wsrad
, ec00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14437 cCE(wsradg
, ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14438 cCE(wsrlh
, e600040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14439 cCE(wsrlhg
, e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14440 cCE(wsrlw
, ea00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14441 cCE(wsrlwg
, ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14442 cCE(wsrld
, ee00040
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14443 cCE(wsrldg
, ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
14444 cCE(wstrb
, c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
14445 cCE(wstrh
, c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
14446 cCE(wstrw
, c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
14447 cCE(wstrd
, c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
14448 cCE(wsubbss
, e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14449 cCE(wsubb
, e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14450 cCE(wsubbus
, e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14451 cCE(wsubhss
, e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14452 cCE(wsubh
, e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14453 cCE(wsubhus
, e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14454 cCE(wsubwss
, eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14455 cCE(wsubw
, e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14456 cCE(wsubwus
, e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14457 cCE(wunpckehub
,e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14458 cCE(wunpckehuh
,e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14459 cCE(wunpckehuw
,e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14460 cCE(wunpckehsb
,e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14461 cCE(wunpckehsh
,e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14462 cCE(wunpckehsw
,ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
14463 cCE(wunpckihb
, e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14464 cCE(wunpckihh
, e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14465 cCE(wunpckihw
, e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14466 cCE(wunpckelub
,e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14467 cCE(wunpckeluh
,e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14468 cCE(wunpckeluw
,e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14469 cCE(wunpckelsb
,e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14470 cCE(wunpckelsh
,e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14471 cCE(wunpckelsw
,ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
14472 cCE(wunpckilb
, e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14473 cCE(wunpckilh
, e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14474 cCE(wunpckilw
, e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14475 cCE(wxor
, e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
14476 cCE(wzero
, e300000
, 1, (RIWR
), iwmmxt_wzero
),
14479 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
14480 cCE(cfldrs
, c100400
, 2, (RMF
, ADDR
), rd_cpaddr
),
14481 cCE(cfldrd
, c500400
, 2, (RMD
, ADDR
), rd_cpaddr
),
14482 cCE(cfldr32
, c100500
, 2, (RMFX
, ADDR
), rd_cpaddr
),
14483 cCE(cfldr64
, c500500
, 2, (RMDX
, ADDR
), rd_cpaddr
),
14484 cCE(cfstrs
, c000400
, 2, (RMF
, ADDR
), rd_cpaddr
),
14485 cCE(cfstrd
, c400400
, 2, (RMD
, ADDR
), rd_cpaddr
),
14486 cCE(cfstr32
, c000500
, 2, (RMFX
, ADDR
), rd_cpaddr
),
14487 cCE(cfstr64
, c400500
, 2, (RMDX
, ADDR
), rd_cpaddr
),
14488 cCE(cfmvsr
, e000450
, 2, (RMF
, RR
), rn_rd
),
14489 cCE(cfmvrs
, e100450
, 2, (RR
, RMF
), rd_rn
),
14490 cCE(cfmvdlr
, e000410
, 2, (RMD
, RR
), rn_rd
),
14491 cCE(cfmvrdl
, e100410
, 2, (RR
, RMD
), rd_rn
),
14492 cCE(cfmvdhr
, e000430
, 2, (RMD
, RR
), rn_rd
),
14493 cCE(cfmvrdh
, e100430
, 2, (RR
, RMD
), rd_rn
),
14494 cCE(cfmv64lr
, e000510
, 2, (RMDX
, RR
), rn_rd
),
14495 cCE(cfmvr64l
, e100510
, 2, (RR
, RMDX
), rd_rn
),
14496 cCE(cfmv64hr
, e000530
, 2, (RMDX
, RR
), rn_rd
),
14497 cCE(cfmvr64h
, e100530
, 2, (RR
, RMDX
), rd_rn
),
14498 cCE(cfmval32
, e200440
, 2, (RMAX
, RMFX
), rd_rn
),
14499 cCE(cfmv32al
, e100440
, 2, (RMFX
, RMAX
), rd_rn
),
14500 cCE(cfmvam32
, e200460
, 2, (RMAX
, RMFX
), rd_rn
),
14501 cCE(cfmv32am
, e100460
, 2, (RMFX
, RMAX
), rd_rn
),
14502 cCE(cfmvah32
, e200480
, 2, (RMAX
, RMFX
), rd_rn
),
14503 cCE(cfmv32ah
, e100480
, 2, (RMFX
, RMAX
), rd_rn
),
14504 cCE(cfmva32
, e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
14505 cCE(cfmv32a
, e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
14506 cCE(cfmva64
, e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
14507 cCE(cfmv64a
, e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
14508 cCE(cfmvsc32
, e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
14509 cCE(cfmv32sc
, e1004e0
, 2, (RMDX
, RMDS
), rd
),
14510 cCE(cfcpys
, e000400
, 2, (RMF
, RMF
), rd_rn
),
14511 cCE(cfcpyd
, e000420
, 2, (RMD
, RMD
), rd_rn
),
14512 cCE(cfcvtsd
, e000460
, 2, (RMD
, RMF
), rd_rn
),
14513 cCE(cfcvtds
, e000440
, 2, (RMF
, RMD
), rd_rn
),
14514 cCE(cfcvt32s
, e000480
, 2, (RMF
, RMFX
), rd_rn
),
14515 cCE(cfcvt32d
, e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
14516 cCE(cfcvt64s
, e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
14517 cCE(cfcvt64d
, e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
14518 cCE(cfcvts32
, e100580
, 2, (RMFX
, RMF
), rd_rn
),
14519 cCE(cfcvtd32
, e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
14520 cCE(cftruncs32
,e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
14521 cCE(cftruncd32
,e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
14522 cCE(cfrshl32
, e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
14523 cCE(cfrshl64
, e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
14524 cCE(cfsh32
, e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
14525 cCE(cfsh64
, e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
14526 cCE(cfcmps
, e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
14527 cCE(cfcmpd
, e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
14528 cCE(cfcmp32
, e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
14529 cCE(cfcmp64
, e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
14530 cCE(cfabss
, e300400
, 2, (RMF
, RMF
), rd_rn
),
14531 cCE(cfabsd
, e300420
, 2, (RMD
, RMD
), rd_rn
),
14532 cCE(cfnegs
, e300440
, 2, (RMF
, RMF
), rd_rn
),
14533 cCE(cfnegd
, e300460
, 2, (RMD
, RMD
), rd_rn
),
14534 cCE(cfadds
, e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
14535 cCE(cfaddd
, e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
14536 cCE(cfsubs
, e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
14537 cCE(cfsubd
, e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
14538 cCE(cfmuls
, e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
14539 cCE(cfmuld
, e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
14540 cCE(cfabs32
, e300500
, 2, (RMFX
, RMFX
), rd_rn
),
14541 cCE(cfabs64
, e300520
, 2, (RMDX
, RMDX
), rd_rn
),
14542 cCE(cfneg32
, e300540
, 2, (RMFX
, RMFX
), rd_rn
),
14543 cCE(cfneg64
, e300560
, 2, (RMDX
, RMDX
), rd_rn
),
14544 cCE(cfadd32
, e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
14545 cCE(cfadd64
, e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
14546 cCE(cfsub32
, e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
14547 cCE(cfsub64
, e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
14548 cCE(cfmul32
, e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
14549 cCE(cfmul64
, e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
14550 cCE(cfmac32
, e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
14551 cCE(cfmsc32
, e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
14552 cCE(cfmadd32
, e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
14553 cCE(cfmsub32
, e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
14554 cCE(cfmadda32
, e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
14555 cCE(cfmsuba32
, e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
14558 #undef THUMB_VARIANT
14585 /* MD interface: bits in the object file. */
14587 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
14588 for use in the a.out file, and store them in the array pointed to by buf.
14589 This knows about the endian-ness of the target machine and does
14590 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
14591 2 (short) and 4 (long). Floating numbers are put out as a series of
14592 LITTLENUMS (shorts, here at least). */
14595 md_number_to_chars (char * buf
, valueT val
, int n
)
14597 if (target_big_endian
)
14598 number_to_chars_bigendian (buf
, val
, n
);
14600 number_to_chars_littleendian (buf
, val
, n
);
14604 md_chars_to_number (char * buf
, int n
)
14607 unsigned char * where
= (unsigned char *) buf
;
14609 if (target_big_endian
)
14614 result
|= (*where
++ & 255);
14622 result
|= (where
[n
] & 255);
14629 /* MD interface: Sections. */
14631 /* Estimate the size of a frag before relaxing. Assume everything fits in
14635 md_estimate_size_before_relax (fragS
* fragp
,
14636 segT segtype ATTRIBUTE_UNUSED
)
14642 /* Convert a machine dependent frag. */
14645 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
14647 unsigned long insn
;
14648 unsigned long old_op
;
14656 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
14658 old_op
= bfd_get_16(abfd
, buf
);
14659 if (fragp
->fr_symbol
) {
14660 exp
.X_op
= O_symbol
;
14661 exp
.X_add_symbol
= fragp
->fr_symbol
;
14663 exp
.X_op
= O_constant
;
14665 exp
.X_add_number
= fragp
->fr_offset
;
14666 opcode
= fragp
->fr_subtype
;
14669 case T_MNEM_ldr_pc
:
14670 case T_MNEM_ldr_pc2
:
14671 case T_MNEM_ldr_sp
:
14672 case T_MNEM_str_sp
:
14679 if (fragp
->fr_var
== 4)
14681 insn
= THUMB_OP32(opcode
);
14682 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
14684 insn
|= (old_op
& 0x700) << 4;
14688 insn
|= (old_op
& 7) << 12;
14689 insn
|= (old_op
& 0x38) << 13;
14691 insn
|= 0x00000c00;
14692 put_thumb32_insn (buf
, insn
);
14693 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
14697 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
14699 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
14702 if (fragp
->fr_var
== 4)
14704 insn
= THUMB_OP32 (opcode
);
14705 insn
|= (old_op
& 0xf0) << 4;
14706 put_thumb32_insn (buf
, insn
);
14707 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
14711 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
14712 exp
.X_add_number
-= 4;
14720 if (fragp
->fr_var
== 4)
14722 int r0off
= (opcode
== T_MNEM_mov
14723 || opcode
== T_MNEM_movs
) ? 0 : 8;
14724 insn
= THUMB_OP32 (opcode
);
14725 insn
= (insn
& 0xe1ffffff) | 0x10000000;
14726 insn
|= (old_op
& 0x700) << r0off
;
14727 put_thumb32_insn (buf
, insn
);
14728 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
14732 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
14737 if (fragp
->fr_var
== 4)
14739 insn
= THUMB_OP32(opcode
);
14740 put_thumb32_insn (buf
, insn
);
14741 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
14744 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
14748 if (fragp
->fr_var
== 4)
14750 insn
= THUMB_OP32(opcode
);
14751 insn
|= (old_op
& 0xf00) << 14;
14752 put_thumb32_insn (buf
, insn
);
14753 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
14756 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
14759 case T_MNEM_add_sp
:
14760 case T_MNEM_add_pc
:
14761 case T_MNEM_inc_sp
:
14762 case T_MNEM_dec_sp
:
14763 if (fragp
->fr_var
== 4)
14765 /* ??? Choose between add and addw. */
14766 insn
= THUMB_OP32 (opcode
);
14767 insn
|= (old_op
& 0xf0) << 4;
14768 put_thumb32_insn (buf
, insn
);
14769 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
14772 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
14780 if (fragp
->fr_var
== 4)
14782 insn
= THUMB_OP32 (opcode
);
14783 insn
|= (old_op
& 0xf0) << 4;
14784 insn
|= (old_op
& 0xf) << 16;
14785 put_thumb32_insn (buf
, insn
);
14786 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
14789 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
14795 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
14797 fixp
->fx_file
= fragp
->fr_file
;
14798 fixp
->fx_line
= fragp
->fr_line
;
14799 fragp
->fr_fix
+= fragp
->fr_var
;
14802 /* Return the size of a relaxable immediate operand instruction.
14803 SHIFT and SIZE specify the form of the allowable immediate. */
14805 relax_immediate (fragS
*fragp
, int size
, int shift
)
14811 /* ??? Should be able to do better than this. */
14812 if (fragp
->fr_symbol
)
14815 low
= (1 << shift
) - 1;
14816 mask
= (1 << (shift
+ size
)) - (1 << shift
);
14817 offset
= fragp
->fr_offset
;
14818 /* Force misaligned offsets to 32-bit variant. */
14821 if (offset
& ~mask
)
14826 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
14829 relax_adr (fragS
*fragp
, asection
*sec
)
14834 /* Assume worst case for symbols not known to be in the same section. */
14835 if (!S_IS_DEFINED(fragp
->fr_symbol
)
14836 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
))
14839 val
= S_GET_VALUE(fragp
->fr_symbol
) + fragp
->fr_offset
;
14840 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
14841 addr
= (addr
+ 4) & ~3;
14842 /* Fix the insn as the 4-byte version if the target address is not
14843 sufficiently aligned. This prevents an infinite loop when two
14844 instructions have contradictory range/alignment requirements. */
14848 if (val
< 0 || val
> 1020)
14853 /* Return the size of a relaxable add/sub immediate instruction. */
14855 relax_addsub (fragS
*fragp
, asection
*sec
)
14860 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
14861 op
= bfd_get_16(sec
->owner
, buf
);
14862 if ((op
& 0xf) == ((op
>> 4) & 0xf))
14863 return relax_immediate (fragp
, 8, 0);
14865 return relax_immediate (fragp
, 3, 0);
14869 /* Return the size of a relaxable branch instruction. BITS is the
14870 size of the offset field in the narrow instruction. */
14873 relax_branch (fragS
*fragp
, asection
*sec
, int bits
)
14879 /* Assume worst case for symbols not known to be in the same section. */
14880 if (!S_IS_DEFINED(fragp
->fr_symbol
)
14881 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
))
14884 val
= S_GET_VALUE(fragp
->fr_symbol
) + fragp
->fr_offset
;
14885 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
14888 /* Offset is a signed value, multiplied by 2. */
14890 if (val
>= limit
|| val
< -limit
)
14896 /* Relax a machine dependent frag. This returns the amount by which
14897 the current size of the frag should change. */
14900 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch ATTRIBUTE_UNUSED
)
14905 oldsize
= fragp
->fr_var
;
14906 switch (fragp
->fr_subtype
)
14908 case T_MNEM_ldr_pc2
:
14909 newsize
= relax_adr(fragp
, sec
);
14911 case T_MNEM_ldr_pc
:
14912 case T_MNEM_ldr_sp
:
14913 case T_MNEM_str_sp
:
14914 newsize
= relax_immediate(fragp
, 8, 2);
14918 newsize
= relax_immediate(fragp
, 5, 2);
14922 newsize
= relax_immediate(fragp
, 5, 1);
14926 newsize
= relax_immediate(fragp
, 5, 0);
14929 newsize
= relax_adr(fragp
, sec
);
14935 newsize
= relax_immediate(fragp
, 8, 0);
14938 newsize
= relax_branch(fragp
, sec
, 11);
14941 newsize
= relax_branch(fragp
, sec
, 8);
14943 case T_MNEM_add_sp
:
14944 case T_MNEM_add_pc
:
14945 newsize
= relax_immediate (fragp
, 8, 2);
14947 case T_MNEM_inc_sp
:
14948 case T_MNEM_dec_sp
:
14949 newsize
= relax_immediate (fragp
, 7, 2);
14955 newsize
= relax_addsub (fragp
, sec
);
14962 fragp
->fr_var
= -newsize
;
14963 md_convert_frag (sec
->owner
, sec
, fragp
);
14965 return -(newsize
+ oldsize
);
14967 fragp
->fr_var
= newsize
;
14968 return newsize
- oldsize
;
14971 /* Round up a section size to the appropriate boundary. */
14974 md_section_align (segT segment ATTRIBUTE_UNUSED
,
14980 /* Round all sects to multiple of 4. */
14981 return (size
+ 3) & ~3;
14985 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
14986 of an rs_align_code fragment. */
14989 arm_handle_align (fragS
* fragP
)
14991 static char const arm_noop
[4] = { 0x00, 0x00, 0xa0, 0xe1 };
14992 static char const thumb_noop
[2] = { 0xc0, 0x46 };
14993 static char const arm_bigend_noop
[4] = { 0xe1, 0xa0, 0x00, 0x00 };
14994 static char const thumb_bigend_noop
[2] = { 0x46, 0xc0 };
14996 int bytes
, fix
, noop_size
;
15000 if (fragP
->fr_type
!= rs_align_code
)
15003 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
15004 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
15007 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
15008 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
15010 if (fragP
->tc_frag_data
)
15012 if (target_big_endian
)
15013 noop
= thumb_bigend_noop
;
15016 noop_size
= sizeof (thumb_noop
);
15020 if (target_big_endian
)
15021 noop
= arm_bigend_noop
;
15024 noop_size
= sizeof (arm_noop
);
15027 if (bytes
& (noop_size
- 1))
15029 fix
= bytes
& (noop_size
- 1);
15030 memset (p
, 0, fix
);
15035 while (bytes
>= noop_size
)
15037 memcpy (p
, noop
, noop_size
);
15039 bytes
-= noop_size
;
15043 fragP
->fr_fix
+= fix
;
15044 fragP
->fr_var
= noop_size
;
15047 /* Called from md_do_align. Used to create an alignment
15048 frag in a code section. */
15051 arm_frag_align_code (int n
, int max
)
15055 /* We assume that there will never be a requirement
15056 to support alignments greater than 32 bytes. */
15057 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
15058 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
15060 p
= frag_var (rs_align_code
,
15061 MAX_MEM_FOR_RS_ALIGN_CODE
,
15063 (relax_substateT
) max
,
15070 /* Perform target specific initialisation of a frag. */
15073 arm_init_frag (fragS
* fragP
)
15075 /* Record whether this frag is in an ARM or a THUMB area. */
15076 fragP
->tc_frag_data
= thumb_mode
;
15080 /* When we change sections we need to issue a new mapping symbol. */
15083 arm_elf_change_section (void)
15086 segment_info_type
*seginfo
;
15088 /* Link an unlinked unwind index table section to the .text section. */
15089 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
15090 && elf_linked_to_section (now_seg
) == NULL
)
15091 elf_linked_to_section (now_seg
) = text_section
;
15093 if (!SEG_NORMAL (now_seg
))
15096 flags
= bfd_get_section_flags (stdoutput
, now_seg
);
15098 /* We can ignore sections that only contain debug info. */
15099 if ((flags
& SEC_ALLOC
) == 0)
15102 seginfo
= seg_info (now_seg
);
15103 mapstate
= seginfo
->tc_segment_info_data
.mapstate
;
15104 marked_pr_dependency
= seginfo
->tc_segment_info_data
.marked_pr_dependency
;
15108 arm_elf_section_type (const char * str
, size_t len
)
15110 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
15111 return SHT_ARM_EXIDX
;
15116 /* Code to deal with unwinding tables. */
15118 static void add_unwind_adjustsp (offsetT
);
15120 /* Cenerate and deferred unwind frame offset. */
15123 flush_pending_unwind (void)
15127 offset
= unwind
.pending_offset
;
15128 unwind
.pending_offset
= 0;
15130 add_unwind_adjustsp (offset
);
15133 /* Add an opcode to this list for this function. Two-byte opcodes should
15134 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
15138 add_unwind_opcode (valueT op
, int length
)
15140 /* Add any deferred stack adjustment. */
15141 if (unwind
.pending_offset
)
15142 flush_pending_unwind ();
15144 unwind
.sp_restored
= 0;
15146 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
15148 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
15149 if (unwind
.opcodes
)
15150 unwind
.opcodes
= xrealloc (unwind
.opcodes
,
15151 unwind
.opcode_alloc
);
15153 unwind
.opcodes
= xmalloc (unwind
.opcode_alloc
);
15158 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
15160 unwind
.opcode_count
++;
15164 /* Add unwind opcodes to adjust the stack pointer. */
15167 add_unwind_adjustsp (offsetT offset
)
15171 if (offset
> 0x200)
15173 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
15178 /* Long form: 0xb2, uleb128. */
15179 /* This might not fit in a word so add the individual bytes,
15180 remembering the list is built in reverse order. */
15181 o
= (valueT
) ((offset
- 0x204) >> 2);
15183 add_unwind_opcode (0, 1);
15185 /* Calculate the uleb128 encoding of the offset. */
15189 bytes
[n
] = o
& 0x7f;
15195 /* Add the insn. */
15197 add_unwind_opcode (bytes
[n
- 1], 1);
15198 add_unwind_opcode (0xb2, 1);
15200 else if (offset
> 0x100)
15202 /* Two short opcodes. */
15203 add_unwind_opcode (0x3f, 1);
15204 op
= (offset
- 0x104) >> 2;
15205 add_unwind_opcode (op
, 1);
15207 else if (offset
> 0)
15209 /* Short opcode. */
15210 op
= (offset
- 4) >> 2;
15211 add_unwind_opcode (op
, 1);
15213 else if (offset
< 0)
15216 while (offset
> 0x100)
15218 add_unwind_opcode (0x7f, 1);
15221 op
= ((offset
- 4) >> 2) | 0x40;
15222 add_unwind_opcode (op
, 1);
15226 /* Finish the list of unwind opcodes for this function. */
15228 finish_unwind_opcodes (void)
15232 if (unwind
.fp_used
)
15234 /* Adjust sp as necessary. */
15235 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
15236 flush_pending_unwind ();
15238 /* After restoring sp from the frame pointer. */
15239 op
= 0x90 | unwind
.fp_reg
;
15240 add_unwind_opcode (op
, 1);
15243 flush_pending_unwind ();
15247 /* Start an exception table entry. If idx is nonzero this is an index table
15251 start_unwind_section (const segT text_seg
, int idx
)
15253 const char * text_name
;
15254 const char * prefix
;
15255 const char * prefix_once
;
15256 const char * group_name
;
15260 size_t sec_name_len
;
15267 prefix
= ELF_STRING_ARM_unwind
;
15268 prefix_once
= ELF_STRING_ARM_unwind_once
;
15269 type
= SHT_ARM_EXIDX
;
15273 prefix
= ELF_STRING_ARM_unwind_info
;
15274 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
15275 type
= SHT_PROGBITS
;
15278 text_name
= segment_name (text_seg
);
15279 if (streq (text_name
, ".text"))
15282 if (strncmp (text_name
, ".gnu.linkonce.t.",
15283 strlen (".gnu.linkonce.t.")) == 0)
15285 prefix
= prefix_once
;
15286 text_name
+= strlen (".gnu.linkonce.t.");
15289 prefix_len
= strlen (prefix
);
15290 text_len
= strlen (text_name
);
15291 sec_name_len
= prefix_len
+ text_len
;
15292 sec_name
= xmalloc (sec_name_len
+ 1);
15293 memcpy (sec_name
, prefix
, prefix_len
);
15294 memcpy (sec_name
+ prefix_len
, text_name
, text_len
);
15295 sec_name
[prefix_len
+ text_len
] = '\0';
15301 /* Handle COMDAT group. */
15302 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
15304 group_name
= elf_group_name (text_seg
);
15305 if (group_name
== NULL
)
15307 as_bad ("Group section `%s' has no group signature",
15308 segment_name (text_seg
));
15309 ignore_rest_of_line ();
15312 flags
|= SHF_GROUP
;
15316 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
15318 /* Set the section link for index tables. */
15320 elf_linked_to_section (now_seg
) = text_seg
;
15324 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
15325 personality routine data. Returns zero, or the index table value for
15326 an inline entry. */
15329 create_unwind_entry (int have_data
)
15334 /* The current word of data. */
15336 /* The number of bytes left in this word. */
15339 finish_unwind_opcodes ();
15341 /* Remember the current text section. */
15342 unwind
.saved_seg
= now_seg
;
15343 unwind
.saved_subseg
= now_subseg
;
15345 start_unwind_section (now_seg
, 0);
15347 if (unwind
.personality_routine
== NULL
)
15349 if (unwind
.personality_index
== -2)
15352 as_bad (_("handerdata in cantunwind frame"));
15353 return 1; /* EXIDX_CANTUNWIND. */
15356 /* Use a default personality routine if none is specified. */
15357 if (unwind
.personality_index
== -1)
15359 if (unwind
.opcode_count
> 3)
15360 unwind
.personality_index
= 1;
15362 unwind
.personality_index
= 0;
15365 /* Space for the personality routine entry. */
15366 if (unwind
.personality_index
== 0)
15368 if (unwind
.opcode_count
> 3)
15369 as_bad (_("too many unwind opcodes for personality routine 0"));
15373 /* All the data is inline in the index table. */
15376 while (unwind
.opcode_count
> 0)
15378 unwind
.opcode_count
--;
15379 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
15383 /* Pad with "finish" opcodes. */
15385 data
= (data
<< 8) | 0xb0;
15392 /* We get two opcodes "free" in the first word. */
15393 size
= unwind
.opcode_count
- 2;
15396 /* An extra byte is required for the opcode count. */
15397 size
= unwind
.opcode_count
+ 1;
15399 size
= (size
+ 3) >> 2;
15401 as_bad (_("too many unwind opcodes"));
15403 frag_align (2, 0, 0);
15404 record_alignment (now_seg
, 2);
15405 unwind
.table_entry
= expr_build_dot ();
15407 /* Allocate the table entry. */
15408 ptr
= frag_more ((size
<< 2) + 4);
15409 where
= frag_now_fix () - ((size
<< 2) + 4);
15411 switch (unwind
.personality_index
)
15414 /* ??? Should this be a PLT generating relocation? */
15415 /* Custom personality routine. */
15416 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
15417 BFD_RELOC_ARM_PREL31
);
15422 /* Set the first byte to the number of additional words. */
15427 /* ABI defined personality routines. */
15429 /* Three opcodes bytes are packed into the first word. */
15436 /* The size and first two opcode bytes go in the first word. */
15437 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
15442 /* Should never happen. */
15446 /* Pack the opcodes into words (MSB first), reversing the list at the same
15448 while (unwind
.opcode_count
> 0)
15452 md_number_to_chars (ptr
, data
, 4);
15457 unwind
.opcode_count
--;
15459 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
15462 /* Finish off the last word. */
15465 /* Pad with "finish" opcodes. */
15467 data
= (data
<< 8) | 0xb0;
15469 md_number_to_chars (ptr
, data
, 4);
15474 /* Add an empty descriptor if there is no user-specified data. */
15475 ptr
= frag_more (4);
15476 md_number_to_chars (ptr
, 0, 4);
15482 /* Convert REGNAME to a DWARF-2 register number. */
15485 tc_arm_regname_to_dw2regnum (const char *regname
)
15487 int reg
= arm_reg_parse ((char **) ®name
, REG_TYPE_RN
);
15495 /* Initialize the DWARF-2 unwind information for this procedure. */
15498 tc_arm_frame_initial_instructions (void)
15500 cfi_add_CFA_def_cfa (REG_SP
, 0);
15502 #endif /* OBJ_ELF */
15505 /* MD interface: Symbol and relocation handling. */
15507 /* Return the address within the segment that a PC-relative fixup is
15508 relative to. For ARM, PC-relative fixups applied to instructions
15509 are generally relative to the location of the fixup plus 8 bytes.
15510 Thumb branches are offset by 4, and Thumb loads relative to PC
15511 require special handling. */
15514 md_pcrel_from_section (fixS
* fixP
, segT seg
)
15516 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
15518 /* If this is pc-relative and we are going to emit a relocation
15519 then we just want to put out any pipeline compensation that the linker
15520 will need. Otherwise we want to use the calculated base. */
15522 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
15523 || arm_force_relocation (fixP
)))
15526 switch (fixP
->fx_r_type
)
15528 /* PC relative addressing on the Thumb is slightly odd as the
15529 bottom two bits of the PC are forced to zero for the
15530 calculation. This happens *after* application of the
15531 pipeline offset. However, Thumb adrl already adjusts for
15532 this, so we need not do it again. */
15533 case BFD_RELOC_ARM_THUMB_ADD
:
15536 case BFD_RELOC_ARM_THUMB_OFFSET
:
15537 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
15538 case BFD_RELOC_ARM_T32_ADD_PC12
:
15539 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
15540 return (base
+ 4) & ~3;
15542 /* Thumb branches are simply offset by +4. */
15543 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
15544 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
15545 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
15546 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
15547 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
15548 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
15549 case BFD_RELOC_THUMB_PCREL_BLX
:
15552 /* ARM mode branches are offset by +8. However, the Windows CE
15553 loader expects the relocation not to take this into account. */
15554 case BFD_RELOC_ARM_PCREL_BRANCH
:
15555 case BFD_RELOC_ARM_PCREL_CALL
:
15556 case BFD_RELOC_ARM_PCREL_JUMP
:
15557 case BFD_RELOC_ARM_PCREL_BLX
:
15558 case BFD_RELOC_ARM_PLT32
:
15565 /* ARM mode loads relative to PC are also offset by +8. Unlike
15566 branches, the Windows CE loader *does* expect the relocation
15567 to take this into account. */
15568 case BFD_RELOC_ARM_OFFSET_IMM
:
15569 case BFD_RELOC_ARM_OFFSET_IMM8
:
15570 case BFD_RELOC_ARM_HWLITERAL
:
15571 case BFD_RELOC_ARM_LITERAL
:
15572 case BFD_RELOC_ARM_CP_OFF_IMM
:
15576 /* Other PC-relative relocations are un-offset. */
15582 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
15583 Otherwise we have no need to default values of symbols. */
15586 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
15589 if (name
[0] == '_' && name
[1] == 'G'
15590 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
15594 if (symbol_find (name
))
15595 as_bad ("GOT already in the symbol table");
15597 GOT_symbol
= symbol_new (name
, undefined_section
,
15598 (valueT
) 0, & zero_address_frag
);
15608 /* Subroutine of md_apply_fix. Check to see if an immediate can be
15609 computed as two separate immediate values, added together. We
15610 already know that this value cannot be computed by just one ARM
15613 static unsigned int
15614 validate_immediate_twopart (unsigned int val
,
15615 unsigned int * highpart
)
15620 for (i
= 0; i
< 32; i
+= 2)
15621 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
15627 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
15629 else if (a
& 0xff0000)
15631 if (a
& 0xff000000)
15633 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
15637 assert (a
& 0xff000000);
15638 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
15641 return (a
& 0xff) | (i
<< 7);
15648 validate_offset_imm (unsigned int val
, int hwse
)
15650 if ((hwse
&& val
> 255) || val
> 4095)
15655 /* Subroutine of md_apply_fix. Do those data_ops which can take a
15656 negative immediate constant by altering the instruction. A bit of
15661 by inverting the second operand, and
15664 by negating the second operand. */
15667 negate_data_op (unsigned long * instruction
,
15668 unsigned long value
)
15671 unsigned long negated
, inverted
;
15673 negated
= encode_arm_immediate (-value
);
15674 inverted
= encode_arm_immediate (~value
);
15676 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
15679 /* First negates. */
15680 case OPCODE_SUB
: /* ADD <-> SUB */
15681 new_inst
= OPCODE_ADD
;
15686 new_inst
= OPCODE_SUB
;
15690 case OPCODE_CMP
: /* CMP <-> CMN */
15691 new_inst
= OPCODE_CMN
;
15696 new_inst
= OPCODE_CMP
;
15700 /* Now Inverted ops. */
15701 case OPCODE_MOV
: /* MOV <-> MVN */
15702 new_inst
= OPCODE_MVN
;
15707 new_inst
= OPCODE_MOV
;
15711 case OPCODE_AND
: /* AND <-> BIC */
15712 new_inst
= OPCODE_BIC
;
15717 new_inst
= OPCODE_AND
;
15721 case OPCODE_ADC
: /* ADC <-> SBC */
15722 new_inst
= OPCODE_SBC
;
15727 new_inst
= OPCODE_ADC
;
15731 /* We cannot do anything. */
15736 if (value
== (unsigned) FAIL
)
15739 *instruction
&= OPCODE_MASK
;
15740 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
15744 /* Like negate_data_op, but for Thumb-2. */
15746 static unsigned int
15747 thumb32_negate_data_op (offsetT
*instruction
, offsetT value
)
15751 offsetT negated
, inverted
;
15753 negated
= encode_thumb32_immediate (-value
);
15754 inverted
= encode_thumb32_immediate (~value
);
15756 rd
= (*instruction
>> 8) & 0xf;
15757 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
15760 /* ADD <-> SUB. Includes CMP <-> CMN. */
15761 case T2_OPCODE_SUB
:
15762 new_inst
= T2_OPCODE_ADD
;
15766 case T2_OPCODE_ADD
:
15767 new_inst
= T2_OPCODE_SUB
;
15771 /* ORR <-> ORN. Includes MOV <-> MVN. */
15772 case T2_OPCODE_ORR
:
15773 new_inst
= T2_OPCODE_ORN
;
15777 case T2_OPCODE_ORN
:
15778 new_inst
= T2_OPCODE_ORR
;
15782 /* AND <-> BIC. TST has no inverted equivalent. */
15783 case T2_OPCODE_AND
:
15784 new_inst
= T2_OPCODE_BIC
;
15791 case T2_OPCODE_BIC
:
15792 new_inst
= T2_OPCODE_AND
;
15797 case T2_OPCODE_ADC
:
15798 new_inst
= T2_OPCODE_SBC
;
15802 case T2_OPCODE_SBC
:
15803 new_inst
= T2_OPCODE_ADC
;
15807 /* We cannot do anything. */
15815 *instruction
&= T2_OPCODE_MASK
;
15816 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
15820 /* Read a 32-bit thumb instruction from buf. */
15821 static unsigned long
15822 get_thumb32_insn (char * buf
)
15824 unsigned long insn
;
15825 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
15826 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
15832 /* We usually want to set the low bit on the address of thumb function
15833 symbols. In particular .word foo - . should have the low bit set.
15834 Generic code tries to fold the difference of two symbols to
15835 a constant. Prevent this and force a relocation when the first symbols
15836 is a thumb function. */
15838 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
15840 if (op
== O_subtract
15841 && l
->X_op
== O_symbol
15842 && r
->X_op
== O_symbol
15843 && THUMB_IS_FUNC (l
->X_add_symbol
))
15845 l
->X_op
= O_subtract
;
15846 l
->X_op_symbol
= r
->X_add_symbol
;
15847 l
->X_add_number
-= r
->X_add_number
;
15850 /* Process as normal. */
15855 md_apply_fix (fixS
* fixP
,
15859 offsetT value
= * valP
;
15861 unsigned int newimm
;
15862 unsigned long temp
;
15864 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
15866 assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
15868 /* Note whether this will delete the relocation. */
15869 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
15872 /* On a 64-bit host, silently truncate 'value' to 32 bits for
15873 consistency with the behavior on 32-bit hosts. Remember value
15875 value
&= 0xffffffff;
15876 value
^= 0x80000000;
15877 value
-= 0x80000000;
15880 fixP
->fx_addnumber
= value
;
15882 /* Same treatment for fixP->fx_offset. */
15883 fixP
->fx_offset
&= 0xffffffff;
15884 fixP
->fx_offset
^= 0x80000000;
15885 fixP
->fx_offset
-= 0x80000000;
15887 switch (fixP
->fx_r_type
)
15889 case BFD_RELOC_NONE
:
15890 /* This will need to go in the object file. */
15894 case BFD_RELOC_ARM_IMMEDIATE
:
15895 /* We claim that this fixup has been processed here,
15896 even if in fact we generate an error because we do
15897 not have a reloc for it, so tc_gen_reloc will reject it. */
15901 && ! S_IS_DEFINED (fixP
->fx_addsy
))
15903 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
15904 _("undefined symbol %s used as an immediate value"),
15905 S_GET_NAME (fixP
->fx_addsy
));
15909 newimm
= encode_arm_immediate (value
);
15910 temp
= md_chars_to_number (buf
, INSN_SIZE
);
15912 /* If the instruction will fail, see if we can fix things up by
15913 changing the opcode. */
15914 if (newimm
== (unsigned int) FAIL
15915 && (newimm
= negate_data_op (&temp
, value
)) == (unsigned int) FAIL
)
15917 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
15918 _("invalid constant (%lx) after fixup"),
15919 (unsigned long) value
);
15923 newimm
|= (temp
& 0xfffff000);
15924 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
15927 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
15929 unsigned int highpart
= 0;
15930 unsigned int newinsn
= 0xe1a00000; /* nop. */
15932 newimm
= encode_arm_immediate (value
);
15933 temp
= md_chars_to_number (buf
, INSN_SIZE
);
15935 /* If the instruction will fail, see if we can fix things up by
15936 changing the opcode. */
15937 if (newimm
== (unsigned int) FAIL
15938 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
15940 /* No ? OK - try using two ADD instructions to generate
15942 newimm
= validate_immediate_twopart (value
, & highpart
);
15944 /* Yes - then make sure that the second instruction is
15946 if (newimm
!= (unsigned int) FAIL
)
15948 /* Still No ? Try using a negated value. */
15949 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
15950 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
15951 /* Otherwise - give up. */
15954 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
15955 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
15960 /* Replace the first operand in the 2nd instruction (which
15961 is the PC) with the destination register. We have
15962 already added in the PC in the first instruction and we
15963 do not want to do it again. */
15964 newinsn
&= ~ 0xf0000;
15965 newinsn
|= ((newinsn
& 0x0f000) << 4);
15968 newimm
|= (temp
& 0xfffff000);
15969 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
15971 highpart
|= (newinsn
& 0xfffff000);
15972 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
15976 case BFD_RELOC_ARM_OFFSET_IMM
:
15977 if (!fixP
->fx_done
&& seg
->use_rela_p
)
15980 case BFD_RELOC_ARM_LITERAL
:
15986 if (validate_offset_imm (value
, 0) == FAIL
)
15988 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
15989 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
15990 _("invalid literal constant: pool needs to be closer"));
15992 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
15993 _("bad immediate value for offset (%ld)"),
15998 newval
= md_chars_to_number (buf
, INSN_SIZE
);
15999 newval
&= 0xff7ff000;
16000 newval
|= value
| (sign
? INDEX_UP
: 0);
16001 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16004 case BFD_RELOC_ARM_OFFSET_IMM8
:
16005 case BFD_RELOC_ARM_HWLITERAL
:
16011 if (validate_offset_imm (value
, 1) == FAIL
)
16013 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
16014 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16015 _("invalid literal constant: pool needs to be closer"));
16017 as_bad (_("bad immediate value for half-word offset (%ld)"),
16022 newval
= md_chars_to_number (buf
, INSN_SIZE
);
16023 newval
&= 0xff7ff0f0;
16024 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
16025 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16028 case BFD_RELOC_ARM_T32_OFFSET_U8
:
16029 if (value
< 0 || value
> 1020 || value
% 4 != 0)
16030 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16031 _("bad immediate value for offset (%ld)"), (long) value
);
16034 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
16036 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
16039 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
16040 /* This is a complicated relocation used for all varieties of Thumb32
16041 load/store instruction with immediate offset:
16043 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
16044 *4, optional writeback(W)
16045 (doubleword load/store)
16047 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
16048 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
16049 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
16050 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
16051 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
16053 Uppercase letters indicate bits that are already encoded at
16054 this point. Lowercase letters are our problem. For the
16055 second block of instructions, the secondary opcode nybble
16056 (bits 8..11) is present, and bit 23 is zero, even if this is
16057 a PC-relative operation. */
16058 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16060 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
16062 if ((newval
& 0xf0000000) == 0xe0000000)
16064 /* Doubleword load/store: 8-bit offset, scaled by 4. */
16066 newval
|= (1 << 23);
16069 if (value
% 4 != 0)
16071 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16072 _("offset not a multiple of 4"));
16078 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16079 _("offset out of range"));
16084 else if ((newval
& 0x000f0000) == 0x000f0000)
16086 /* PC-relative, 12-bit offset. */
16088 newval
|= (1 << 23);
16093 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16094 _("offset out of range"));
16099 else if ((newval
& 0x00000100) == 0x00000100)
16101 /* Writeback: 8-bit, +/- offset. */
16103 newval
|= (1 << 9);
16108 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16109 _("offset out of range"));
16114 else if ((newval
& 0x00000f00) == 0x00000e00)
16116 /* T-instruction: positive 8-bit offset. */
16117 if (value
< 0 || value
> 0xff)
16119 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16120 _("offset out of range"));
16128 /* Positive 12-bit or negative 8-bit offset. */
16132 newval
|= (1 << 23);
16142 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16143 _("offset out of range"));
16150 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
16151 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
16154 case BFD_RELOC_ARM_SHIFT_IMM
:
16155 newval
= md_chars_to_number (buf
, INSN_SIZE
);
16156 if (((unsigned long) value
) > 32
16158 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
16160 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16161 _("shift expression is too large"));
16166 /* Shifts of zero must be done as lsl. */
16168 else if (value
== 32)
16170 newval
&= 0xfffff07f;
16171 newval
|= (value
& 0x1f) << 7;
16172 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16175 case BFD_RELOC_ARM_T32_IMMEDIATE
:
16176 case BFD_RELOC_ARM_T32_IMM12
:
16177 case BFD_RELOC_ARM_T32_ADD_PC12
:
16178 /* We claim that this fixup has been processed here,
16179 even if in fact we generate an error because we do
16180 not have a reloc for it, so tc_gen_reloc will reject it. */
16184 && ! S_IS_DEFINED (fixP
->fx_addsy
))
16186 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16187 _("undefined symbol %s used as an immediate value"),
16188 S_GET_NAME (fixP
->fx_addsy
));
16192 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16194 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
16196 /* FUTURE: Implement analogue of negate_data_op for T32. */
16197 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
)
16199 newimm
= encode_thumb32_immediate (value
);
16200 if (newimm
== (unsigned int) FAIL
)
16201 newimm
= thumb32_negate_data_op (&newval
, value
);
16205 /* 12 bit immediate for addw/subw. */
16209 newval
^= 0x00a00000;
16212 newimm
= (unsigned int) FAIL
;
16217 if (newimm
== (unsigned int)FAIL
)
16219 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16220 _("invalid constant (%lx) after fixup"),
16221 (unsigned long) value
);
16225 newval
|= (newimm
& 0x800) << 15;
16226 newval
|= (newimm
& 0x700) << 4;
16227 newval
|= (newimm
& 0x0ff);
16229 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
16230 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
16233 case BFD_RELOC_ARM_SMC
:
16234 if (((unsigned long) value
) > 0xffff)
16235 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16236 _("invalid smc expression"));
16237 newval
= md_chars_to_number (buf
, INSN_SIZE
);
16238 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
16239 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16242 case BFD_RELOC_ARM_SWI
:
16243 if (fixP
->tc_fix_data
!= 0)
16245 if (((unsigned long) value
) > 0xff)
16246 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16247 _("invalid swi expression"));
16248 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16250 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16254 if (((unsigned long) value
) > 0x00ffffff)
16255 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16256 _("invalid swi expression"));
16257 newval
= md_chars_to_number (buf
, INSN_SIZE
);
16259 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16263 case BFD_RELOC_ARM_MULTI
:
16264 if (((unsigned long) value
) > 0xffff)
16265 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16266 _("invalid expression in load/store multiple"));
16267 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
16268 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16272 case BFD_RELOC_ARM_PCREL_CALL
:
16273 newval
= md_chars_to_number (buf
, INSN_SIZE
);
16274 if ((newval
& 0xf0000000) == 0xf0000000)
16278 goto arm_branch_common
;
16280 case BFD_RELOC_ARM_PCREL_JUMP
:
16281 case BFD_RELOC_ARM_PLT32
:
16283 case BFD_RELOC_ARM_PCREL_BRANCH
:
16285 goto arm_branch_common
;
16287 case BFD_RELOC_ARM_PCREL_BLX
:
16290 /* We are going to store value (shifted right by two) in the
16291 instruction, in a 24 bit, signed field. Bits 26 through 32 either
16292 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
16293 also be clear. */
16295 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16296 _("misaligned branch destination"));
16297 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
16298 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
16299 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16300 _("branch out of range"));
16302 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16304 newval
= md_chars_to_number (buf
, INSN_SIZE
);
16305 newval
|= (value
>> 2) & 0x00ffffff;
16306 /* Set the H bit on BLX instructions. */
16310 newval
|= 0x01000000;
16312 newval
&= ~0x01000000;
16314 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16318 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CZB */
16319 /* CZB can only branch forward. */
16321 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16322 _("branch out of range"));
16324 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16326 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16327 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
16328 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16332 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
16333 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
16334 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16335 _("branch out of range"));
16337 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16339 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16340 newval
|= (value
& 0x1ff) >> 1;
16341 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16345 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
16346 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
16347 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16348 _("branch out of range"));
16350 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16352 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16353 newval
|= (value
& 0xfff) >> 1;
16354 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16358 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
16359 if ((value
& ~0x1fffff) && ((value
& ~0x1fffff) != ~0x1fffff))
16360 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16361 _("conditional branch out of range"));
16363 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16366 addressT S
, J1
, J2
, lo
, hi
;
16368 S
= (value
& 0x00100000) >> 20;
16369 J2
= (value
& 0x00080000) >> 19;
16370 J1
= (value
& 0x00040000) >> 18;
16371 hi
= (value
& 0x0003f000) >> 12;
16372 lo
= (value
& 0x00000ffe) >> 1;
16374 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16375 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
16376 newval
|= (S
<< 10) | hi
;
16377 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
16378 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16379 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
16383 case BFD_RELOC_THUMB_PCREL_BLX
:
16384 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
16385 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
16386 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16387 _("branch out of range"));
16389 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
16390 /* For a BLX instruction, make sure that the relocation is rounded up
16391 to a word boundary. This follows the semantics of the instruction
16392 which specifies that bit 1 of the target address will come from bit
16393 1 of the base address. */
16394 value
= (value
+ 1) & ~ 1;
16396 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16400 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16401 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
16402 newval
|= (value
& 0x7fffff) >> 12;
16403 newval2
|= (value
& 0xfff) >> 1;
16404 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16405 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
16409 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
16410 if ((value
& ~0x1ffffff) && ((value
& ~0x1ffffff) != ~0x1ffffff))
16411 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16412 _("branch out of range"));
16414 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16417 addressT S
, I1
, I2
, lo
, hi
;
16419 S
= (value
& 0x01000000) >> 24;
16420 I1
= (value
& 0x00800000) >> 23;
16421 I2
= (value
& 0x00400000) >> 22;
16422 hi
= (value
& 0x003ff000) >> 12;
16423 lo
= (value
& 0x00000ffe) >> 1;
16428 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16429 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
16430 newval
|= (S
<< 10) | hi
;
16431 newval2
|= (I1
<< 13) | (I2
<< 11) | lo
;
16432 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16433 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
16438 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16439 md_number_to_chars (buf
, value
, 1);
16443 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16444 md_number_to_chars (buf
, value
, 2);
16448 case BFD_RELOC_ARM_TLS_GD32
:
16449 case BFD_RELOC_ARM_TLS_LE32
:
16450 case BFD_RELOC_ARM_TLS_IE32
:
16451 case BFD_RELOC_ARM_TLS_LDM32
:
16452 case BFD_RELOC_ARM_TLS_LDO32
:
16453 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
16456 case BFD_RELOC_ARM_GOT32
:
16457 case BFD_RELOC_ARM_GOTOFF
:
16458 case BFD_RELOC_ARM_TARGET2
:
16459 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16460 md_number_to_chars (buf
, 0, 4);
16464 case BFD_RELOC_RVA
:
16466 case BFD_RELOC_ARM_TARGET1
:
16467 case BFD_RELOC_ARM_ROSEGREL32
:
16468 case BFD_RELOC_ARM_SBREL32
:
16469 case BFD_RELOC_32_PCREL
:
16470 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16471 md_number_to_chars (buf
, value
, 4);
16475 case BFD_RELOC_ARM_PREL31
:
16476 if (fixP
->fx_done
|| !seg
->use_rela_p
)
16478 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
16479 if ((value
^ (value
>> 1)) & 0x40000000)
16481 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16482 _("rel31 relocation overflow"));
16484 newval
|= value
& 0x7fffffff;
16485 md_number_to_chars (buf
, newval
, 4);
16490 case BFD_RELOC_ARM_CP_OFF_IMM
:
16491 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
16492 if (value
< -1023 || value
> 1023 || (value
& 3))
16493 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16494 _("co-processor offset out of range"));
16499 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
16500 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
16501 newval
= md_chars_to_number (buf
, INSN_SIZE
);
16503 newval
= get_thumb32_insn (buf
);
16504 newval
&= 0xff7fff00;
16505 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
16507 newval
&= ~WRITE_BACK
;
16508 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
16509 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
16510 md_number_to_chars (buf
, newval
, INSN_SIZE
);
16512 put_thumb32_insn (buf
, newval
);
16515 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
16516 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
16517 if (value
< -255 || value
> 255)
16518 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16519 _("co-processor offset out of range"));
16521 goto cp_off_common
;
16523 case BFD_RELOC_ARM_THUMB_OFFSET
:
16524 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16525 /* Exactly what ranges, and where the offset is inserted depends
16526 on the type of instruction, we can establish this from the
16528 switch (newval
>> 12)
16530 case 4: /* PC load. */
16531 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
16532 forced to zero for these loads; md_pcrel_from has already
16533 compensated for this. */
16535 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16536 _("invalid offset, target not word aligned (0x%08lX)"),
16537 (((unsigned long) fixP
->fx_frag
->fr_address
16538 + (unsigned long) fixP
->fx_where
) & ~3)
16539 + (unsigned long) value
);
16541 if (value
& ~0x3fc)
16542 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16543 _("invalid offset, value too big (0x%08lX)"),
16546 newval
|= value
>> 2;
16549 case 9: /* SP load/store. */
16550 if (value
& ~0x3fc)
16551 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16552 _("invalid offset, value too big (0x%08lX)"),
16554 newval
|= value
>> 2;
16557 case 6: /* Word load/store. */
16559 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16560 _("invalid offset, value too big (0x%08lX)"),
16562 newval
|= value
<< 4; /* 6 - 2. */
16565 case 7: /* Byte load/store. */
16567 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16568 _("invalid offset, value too big (0x%08lX)"),
16570 newval
|= value
<< 6;
16573 case 8: /* Halfword load/store. */
16575 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16576 _("invalid offset, value too big (0x%08lX)"),
16578 newval
|= value
<< 5; /* 6 - 1. */
16582 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16583 "Unable to process relocation for thumb opcode: %lx",
16584 (unsigned long) newval
);
16587 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16590 case BFD_RELOC_ARM_THUMB_ADD
:
16591 /* This is a complicated relocation, since we use it for all of
16592 the following immediate relocations:
16596 9bit ADD/SUB SP word-aligned
16597 10bit ADD PC/SP word-aligned
16599 The type of instruction being processed is encoded in the
16606 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16608 int rd
= (newval
>> 4) & 0xf;
16609 int rs
= newval
& 0xf;
16610 int subtract
= !!(newval
& 0x8000);
16612 /* Check for HI regs, only very restricted cases allowed:
16613 Adjusting SP, and using PC or SP to get an address. */
16614 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
16615 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
16616 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16617 _("invalid Hi register with immediate"));
16619 /* If value is negative, choose the opposite instruction. */
16623 subtract
= !subtract
;
16625 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16626 _("immediate value out of range"));
16631 if (value
& ~0x1fc)
16632 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16633 _("invalid immediate for stack address calculation"));
16634 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
16635 newval
|= value
>> 2;
16637 else if (rs
== REG_PC
|| rs
== REG_SP
)
16639 if (subtract
|| value
& ~0x3fc)
16640 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16641 _("invalid immediate for address calculation (value = 0x%08lX)"),
16642 (unsigned long) value
);
16643 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
16645 newval
|= value
>> 2;
16650 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16651 _("immediate value out of range"));
16652 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
16653 newval
|= (rd
<< 8) | value
;
16658 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16659 _("immediate value out of range"));
16660 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
16661 newval
|= rd
| (rs
<< 3) | (value
<< 6);
16664 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16667 case BFD_RELOC_ARM_THUMB_IMM
:
16668 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
16669 if (value
< 0 || value
> 255)
16670 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16671 _("invalid immediate: %ld is too large"),
16674 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16677 case BFD_RELOC_ARM_THUMB_SHIFT
:
16678 /* 5bit shift value (0..32). LSL cannot take 32. */
16679 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
16680 temp
= newval
& 0xf800;
16681 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
16682 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16683 _("invalid shift value: %ld"), (long) value
);
16684 /* Shifts of zero must be encoded as LSL. */
16686 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
16687 /* Shifts of 32 are encoded as zero. */
16688 else if (value
== 32)
16690 newval
|= value
<< 6;
16691 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
16694 case BFD_RELOC_VTABLE_INHERIT
:
16695 case BFD_RELOC_VTABLE_ENTRY
:
16699 case BFD_RELOC_UNUSED
:
16701 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
16702 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
16706 /* Translate internal representation of relocation info to BFD target
16710 tc_gen_reloc (asection
*section
, fixS
*fixp
)
16713 bfd_reloc_code_real_type code
;
16715 reloc
= xmalloc (sizeof (arelent
));
16717 reloc
->sym_ptr_ptr
= xmalloc (sizeof (asymbol
*));
16718 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
16719 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
16721 if (fixp
->fx_pcrel
)
16723 if (section
->use_rela_p
)
16724 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
16726 fixp
->fx_offset
= reloc
->address
;
16728 reloc
->addend
= fixp
->fx_offset
;
16730 switch (fixp
->fx_r_type
)
16733 if (fixp
->fx_pcrel
)
16735 code
= BFD_RELOC_8_PCREL
;
16740 if (fixp
->fx_pcrel
)
16742 code
= BFD_RELOC_16_PCREL
;
16747 if (fixp
->fx_pcrel
)
16749 code
= BFD_RELOC_32_PCREL
;
16753 case BFD_RELOC_NONE
:
16754 case BFD_RELOC_ARM_PCREL_BRANCH
:
16755 case BFD_RELOC_ARM_PCREL_BLX
:
16756 case BFD_RELOC_RVA
:
16757 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
16758 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
16759 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
16760 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
16761 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
16762 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
16763 case BFD_RELOC_THUMB_PCREL_BLX
:
16764 case BFD_RELOC_VTABLE_ENTRY
:
16765 case BFD_RELOC_VTABLE_INHERIT
:
16766 code
= fixp
->fx_r_type
;
16769 case BFD_RELOC_ARM_LITERAL
:
16770 case BFD_RELOC_ARM_HWLITERAL
:
16771 /* If this is called then the a literal has
16772 been referenced across a section boundary. */
16773 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
16774 _("literal referenced across section boundary"));
16778 case BFD_RELOC_ARM_GOT32
:
16779 case BFD_RELOC_ARM_GOTOFF
:
16780 case BFD_RELOC_ARM_PLT32
:
16781 case BFD_RELOC_ARM_TARGET1
:
16782 case BFD_RELOC_ARM_ROSEGREL32
:
16783 case BFD_RELOC_ARM_SBREL32
:
16784 case BFD_RELOC_ARM_PREL31
:
16785 case BFD_RELOC_ARM_TARGET2
:
16786 case BFD_RELOC_ARM_TLS_LE32
:
16787 case BFD_RELOC_ARM_TLS_LDO32
:
16788 case BFD_RELOC_ARM_PCREL_CALL
:
16789 case BFD_RELOC_ARM_PCREL_JUMP
:
16790 code
= fixp
->fx_r_type
;
16793 case BFD_RELOC_ARM_TLS_GD32
:
16794 case BFD_RELOC_ARM_TLS_IE32
:
16795 case BFD_RELOC_ARM_TLS_LDM32
:
16796 /* BFD will include the symbol's address in the addend.
16797 But we don't want that, so subtract it out again here. */
16798 if (!S_IS_COMMON (fixp
->fx_addsy
))
16799 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
16800 code
= fixp
->fx_r_type
;
16804 case BFD_RELOC_ARM_IMMEDIATE
:
16805 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
16806 _("internal relocation (type: IMMEDIATE) not fixed up"));
16809 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
16810 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
16811 _("ADRL used for a symbol not defined in the same file"));
16814 case BFD_RELOC_ARM_OFFSET_IMM
:
16815 if (section
->use_rela_p
)
16817 code
= fixp
->fx_r_type
;
16821 if (fixp
->fx_addsy
!= NULL
16822 && !S_IS_DEFINED (fixp
->fx_addsy
)
16823 && S_IS_LOCAL (fixp
->fx_addsy
))
16825 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
16826 _("undefined local label `%s'"),
16827 S_GET_NAME (fixp
->fx_addsy
));
16831 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
16832 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
16839 switch (fixp
->fx_r_type
)
16841 case BFD_RELOC_NONE
: type
= "NONE"; break;
16842 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
16843 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
16844 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
16845 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
16846 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
16847 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
16848 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
16849 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
16850 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
16851 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
16852 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
16853 default: type
= _("<unknown>"); break;
16855 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
16856 _("cannot represent %s relocation in this object file format"),
16863 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
16865 && fixp
->fx_addsy
== GOT_symbol
)
16867 code
= BFD_RELOC_ARM_GOTPC
;
16868 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
16872 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
16874 if (reloc
->howto
== NULL
)
16876 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
16877 _("cannot represent %s relocation in this object file format"),
16878 bfd_get_reloc_code_name (code
));
16882 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
16883 vtable entry to be used in the relocation's section offset. */
16884 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
16885 reloc
->address
= fixp
->fx_offset
;
16890 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
16893 cons_fix_new_arm (fragS
* frag
,
16898 bfd_reloc_code_real_type type
;
16902 FIXME: @@ Should look at CPU word size. */
16906 type
= BFD_RELOC_8
;
16909 type
= BFD_RELOC_16
;
16913 type
= BFD_RELOC_32
;
16916 type
= BFD_RELOC_64
;
16920 fix_new_exp (frag
, where
, (int) size
, exp
, pcrel
, type
);
#if defined OBJ_COFF || defined OBJ_ELF
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
      && fixP->fx_addsy != NULL
      && S_IS_DEFINED (fixP->fx_addsy)
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
    fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
16942 arm_force_relocation (struct fix
* fixp
)
16944 #if defined (OBJ_COFF) && defined (TE_PE)
16945 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
16949 /* Resolve these relocations even if the symbol is extern or weak. */
16950 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
16951 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
16952 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
16953 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
16954 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
16955 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
)
16958 return generic_force_reloc (fixp
);
16962 /* This is a little hack to help the gas/arm/adrl.s test. It prevents
16963 local labels from being added to the output symbol table when they
16964 are used with the ADRL pseudo op. The ADRL relocation should always
16965 be resolved before the binbary is emitted, so it is safe to say that
16966 it is adjustable. */
16969 arm_fix_adjustable (fixS
* fixP
)
16971 if (fixP
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
)
16978 /* Relocations against Thumb function names must be left unadjusted,
16979 so that the linker can use this information to correctly set the
16980 bottom bit of their addresses. The MIPS version of this function
16981 also prevents relocations that are mips-16 specific, but I do not
16982 know why it does this.
16985 There is one other problem that ought to be addressed here, but
16986 which currently is not: Taking the address of a label (rather
16987 than a function) and then later jumping to that address. Such
16988 addresses also ought to have their bottom bit set (assuming that
16989 they reside in Thumb code), but at the moment they will not. */
16992 arm_fix_adjustable (fixS
* fixP
)
16994 if (fixP
->fx_addsy
== NULL
)
16997 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
16998 && fixP
->fx_subsy
== NULL
)
17001 /* We need the symbol name for the VTABLE entries. */
17002 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
17003 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
17006 /* Don't allow symbols to be discarded on GOT related relocs. */
17007 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
17008 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
17009 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
17010 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
17011 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
17012 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
17013 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
17014 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
17015 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
17022 elf32_arm_target_format (void)
17025 return (target_big_endian
17026 ? "elf32-bigarm-symbian"
17027 : "elf32-littlearm-symbian");
17028 #elif defined (TE_VXWORKS)
17029 return (target_big_endian
17030 ? "elf32-bigarm-vxworks"
17031 : "elf32-littlearm-vxworks");
17033 if (target_big_endian
)
17034 return "elf32-bigarm";
17036 return "elf32-littlearm";
17041 armelf_frob_symbol (symbolS
* symp
,
17044 elf_frob_symbol (symp
, puntp
);
17048 /* MD interface: Finalization. */
17050 /* A good place to do this, although this was probably not intended
17051 for this kind of use. We need to dump the literal pool before
17052 references are made to a null symbol pointer. */
17057 literal_pool
* pool
;
17059 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
17061 /* Put it at the end of the relevent section. */
17062 subseg_set (pool
->section
, pool
->sub_section
);
17064 arm_elf_change_section ();
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
        {
          if (THUMB_IS_FUNC (sym))
            {
              /* Mark the symbol as a Thumb function.  */
              if (   S_GET_STORAGE_CLASS (sym) == C_STAT
                  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
                S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
              else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
                S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
              else
                as_bad (_("%s: unexpected function type: %d"),
                        S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
            }
          else switch (S_GET_STORAGE_CLASS (sym))
            {
            case C_EXT:
              S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
              break;
            case C_STAT:
              S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
              break;
            case C_LABEL:
              S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
              break;
            default:
              /* Do nothing.  */
              break;
            }
        }

      if (ARM_IS_INTERWORK (sym))
        coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char      bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
        {
          elf_symbol_type * elf_sym;

          elf_sym = elf_symbol (symbol_get_bfdsym (sym));
          bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

          /* Mapping symbols ($a, $t, $d) keep their generic type.  */
          if (! bfd_is_arm_mapping_symbol_name (elf_sym->symbol.name))
            {
              /* If it's a .thumb_func, declare it as so,
                 otherwise tag label as .code 16.  */
              if (THUMB_IS_FUNC (sym))
                elf_sym->internal_elf_sym.st_info =
                  ELF_ST_INFO (bind, STT_ARM_TFUNC);
              else
                elf_sym->internal_elf_sym.st_info =
                  ELF_ST_INFO (bind, STT_ARM_16BIT);
            }
        }
    }
#endif
}
17146 /* MD interface: Initialization. */
17149 set_constant_flonums (void)
17153 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
17154 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
17164 if ( (arm_ops_hsh
= hash_new ()) == NULL
17165 || (arm_cond_hsh
= hash_new ()) == NULL
17166 || (arm_shift_hsh
= hash_new ()) == NULL
17167 || (arm_psr_hsh
= hash_new ()) == NULL
17168 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
17169 || (arm_reg_hsh
= hash_new ()) == NULL
17170 || (arm_reloc_hsh
= hash_new ()) == NULL
17171 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
17172 as_fatal (_("virtual memory exhausted"));
17174 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
17175 hash_insert (arm_ops_hsh
, insns
[i
].template, (PTR
) (insns
+ i
));
17176 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
17177 hash_insert (arm_cond_hsh
, conds
[i
].template, (PTR
) (conds
+ i
));
17178 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
17179 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (PTR
) (shift_names
+ i
));
17180 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
17181 hash_insert (arm_psr_hsh
, psrs
[i
].template, (PTR
) (psrs
+ i
));
17182 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
17183 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template, (PTR
) (v7m_psrs
+ i
));
17184 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
17185 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (PTR
) (reg_names
+ i
));
17187 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
17189 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template,
17190 (PTR
) (barrier_opt_names
+ i
));
17192 for (i
= 0; i
< sizeof (reloc_names
) / sizeof (struct reloc_entry
); i
++)
17193 hash_insert (arm_reloc_hsh
, reloc_names
[i
].name
, (PTR
) (reloc_names
+ i
));
17196 set_constant_flonums ();
17198 /* Set the cpu variant based on the command-line options. We prefer
17199 -mcpu= over -march= if both are set (as for GCC); and we prefer
17200 -mfpu= over any other way of setting the floating point unit.
17201 Use of legacy options with new options are faulted. */
17204 if (mcpu_cpu_opt
|| march_cpu_opt
)
17205 as_bad (_("use of old and new-style options to set CPU type"));
17207 mcpu_cpu_opt
= legacy_cpu
;
17209 else if (!mcpu_cpu_opt
)
17210 mcpu_cpu_opt
= march_cpu_opt
;
17215 as_bad (_("use of old and new-style options to set FPU type"));
17217 mfpu_opt
= legacy_fpu
;
17219 else if (!mfpu_opt
)
17221 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
17222 /* Some environments specify a default FPU. If they don't, infer it
17223 from the processor. */
17225 mfpu_opt
= mcpu_fpu_opt
;
17227 mfpu_opt
= march_fpu_opt
;
17229 mfpu_opt
= &fpu_default
;
17236 mfpu_opt
= &fpu_default
;
17237 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
17238 mfpu_opt
= &fpu_arch_vfp_v2
;
17240 mfpu_opt
= &fpu_arch_fpa
;
17246 mcpu_cpu_opt
= &cpu_default
;
17247 selected_cpu
= cpu_default
;
17251 selected_cpu
= *mcpu_cpu_opt
;
17253 mcpu_cpu_opt
= &arm_arch_any
;
17256 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
17258 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
17260 #if defined OBJ_COFF || defined OBJ_ELF
17262 unsigned int flags
= 0;
17264 #if defined OBJ_ELF
17265 flags
= meabi_flags
;
17267 switch (meabi_flags
)
17269 case EF_ARM_EABI_UNKNOWN
:
17271 /* Set the flags in the private structure. */
17272 if (uses_apcs_26
) flags
|= F_APCS26
;
17273 if (support_interwork
) flags
|= F_INTERWORK
;
17274 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
17275 if (pic_code
) flags
|= F_PIC
;
17276 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
17277 flags
|= F_SOFT_FLOAT
;
17279 switch (mfloat_abi_opt
)
17281 case ARM_FLOAT_ABI_SOFT
:
17282 case ARM_FLOAT_ABI_SOFTFP
:
17283 flags
|= F_SOFT_FLOAT
;
17286 case ARM_FLOAT_ABI_HARD
:
17287 if (flags
& F_SOFT_FLOAT
)
17288 as_bad (_("hard-float conflicts with specified fpu"));
17292 /* Using pure-endian doubles (even if soft-float). */
17293 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
17294 flags
|= F_VFP_FLOAT
;
17296 #if defined OBJ_ELF
17297 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
17298 flags
|= EF_ARM_MAVERICK_FLOAT
;
17301 case EF_ARM_EABI_VER4
:
17302 case EF_ARM_EABI_VER5
:
17303 /* No additional flags to set. */
17310 bfd_set_private_flags (stdoutput
, flags
);
17312 /* We have run out flags in the COFF header to encode the
17313 status of ATPCS support, so instead we create a dummy,
17314 empty, debug section called .arm.atpcs. */
17319 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
17323 bfd_set_section_flags
17324 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
17325 bfd_set_section_size (stdoutput
, sec
, 0);
17326 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
17332 /* Record the CPU type as well. */
17333 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
17334 mach
= bfd_mach_arm_iWMMXt
;
17335 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
17336 mach
= bfd_mach_arm_XScale
;
17337 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
17338 mach
= bfd_mach_arm_ep9312
;
17339 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
17340 mach
= bfd_mach_arm_5TE
;
17341 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
17343 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
17344 mach
= bfd_mach_arm_5T
;
17346 mach
= bfd_mach_arm_5
;
17348 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
17350 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
17351 mach
= bfd_mach_arm_4T
;
17353 mach
= bfd_mach_arm_4
;
17355 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
17356 mach
= bfd_mach_arm_3M
;
17357 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
17358 mach
= bfd_mach_arm_3
;
17359 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
17360 mach
= bfd_mach_arm_2a
;
17361 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
17362 mach
= bfd_mach_arm_2
;
17364 mach
= bfd_mach_arm_unknown
;
17366 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
17369 /* Command line processing. */
17372 Invocation line includes a switch not recognized by the base assembler.
17373 See if it's a processor-specific option.
17375 This routine is somewhat complicated by the need for backwards
17376 compatibility (since older releases of gcc can't be changed).
17377 The new options try to make the interface as compatible as
17380 New options (supported) are:
17382 -mcpu=<cpu name> Assemble for selected processor
17383 -march=<architecture name> Assemble for selected architecture
17384 -mfpu=<fpu architecture> Assemble for selected FPU.
17385 -EB/-mbig-endian Big-endian
17386 -EL/-mlittle-endian Little-endian
17387 -k Generate PIC code
17388 -mthumb Start in Thumb mode
17389 -mthumb-interwork Code supports ARM/Thumb interworking
17391 For now we will also provide support for:
17393 -mapcs-32 32-bit Program counter
17394 -mapcs-26 26-bit Program counter
17395 -mapcs-float Floats passed in FP registers
17396 -mapcs-reentrant Reentrant code
17398 (sometime these will probably be replaced with -mapcs=<list of options>
17399 and -matpcs=<list of options>)
17401 The remaining options are only supported for backwards compatibility.
17402 Cpu variants, the arm part is optional:
17403 -m[arm]1 Currently not supported.
17404 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
17405 -m[arm]3 Arm 3 processor
17406 -m[arm]6[xx], Arm 6 processors
17407 -m[arm]7[xx][t][[d]m] Arm 7 processors
17408 -m[arm]8[10] Arm 8 processors
17409 -m[arm]9[20][tdmi] Arm 9 processors
17410 -mstrongarm[110[0]] StrongARM processors
17411 -mxscale XScale processors
17412 -m[arm]v[2345[t[e]]] Arm architectures
17413 -mall All (except the ARM1)
17415 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
17416 -mfpe-old (No float load/store multiples)
17417 -mvfpxd VFP Single precision
17419 -mno-fpu Disable all floating point instructions
17421 The following CPU names are recognized:
17422 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
17423 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
17424 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
17425 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
17426 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
17427 arm10t arm10e, arm1020t, arm1020e, arm10200e,
17428 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
17432 const char * md_shortopts
= "m:k";
17434 #ifdef ARM_BI_ENDIAN
17435 #define OPTION_EB (OPTION_MD_BASE + 0)
17436 #define OPTION_EL (OPTION_MD_BASE + 1)
17438 #if TARGET_BYTES_BIG_ENDIAN
17439 #define OPTION_EB (OPTION_MD_BASE + 0)
17441 #define OPTION_EL (OPTION_MD_BASE + 1)
17445 struct option md_longopts
[] =
17448 {"EB", no_argument
, NULL
, OPTION_EB
},
17451 {"EL", no_argument
, NULL
, OPTION_EL
},
17453 {NULL
, no_argument
, NULL
, 0}
17456 size_t md_longopts_size
= sizeof (md_longopts
);
/* One entry per simple flag-style command-line option.  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int  *var;		/* Variable to change.  */
  int   value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};
17467 struct arm_option_table arm_opts
[] =
17469 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
17470 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
17471 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
17472 &support_interwork
, 1, NULL
},
17473 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
17474 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
17475 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
17477 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
17478 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
17479 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
17480 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
17483 /* These are recognized by the assembler, but have no affect on code. */
17484 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
17485 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
17486 {NULL
, NULL
, NULL
, 0, NULL
}
17489 struct arm_legacy_option_table
17491 char *option
; /* Option name to match. */
17492 const arm_feature_set
**var
; /* Variable to change. */
17493 const arm_feature_set value
; /* What to change it to. */
17494 char *deprecated
; /* If non-null, print this message. */
17497 const struct arm_legacy_option_table arm_legacy_opts
[] =
17499 /* DON'T add any new processors to this list -- we want the whole list
17500 to go away... Add them to the processors table instead. */
17501 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
17502 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
17503 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
17504 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
17505 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
17506 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
17507 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
17508 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
17509 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
17510 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
17511 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
17512 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
17513 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
17514 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
17515 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
17516 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
17517 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
17518 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
17519 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
17520 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
17521 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
17522 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
17523 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
17524 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
17525 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
17526 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
17527 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
17528 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
17529 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
17530 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
17531 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
17532 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
17533 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
17534 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
17535 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
17536 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
17537 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
17538 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
17539 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
17540 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
17541 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
17542 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
17543 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
17544 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
17545 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
17546 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
17547 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
17548 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
17549 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
17550 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
17551 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
17552 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
17553 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
17554 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
17555 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
17556 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
17557 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
17558 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
17559 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
17560 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
17561 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
17562 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
17563 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
17564 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
17565 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
17566 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
17567 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
17568 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
17569 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
17570 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
17571 N_("use -mcpu=strongarm110")},
17572 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
17573 N_("use -mcpu=strongarm1100")},
17574 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
17575 N_("use -mcpu=strongarm1110")},
17576 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
17577 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
17578 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
17580 /* Architecture variants -- don't add any more to this list either. */
17581 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
17582 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
17583 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
17584 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
17585 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
17586 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
17587 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
17588 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
17589 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
17590 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
17591 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
17592 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
17593 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
17594 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
17595 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
17596 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
17597 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
17598 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
17600 /* Floating point variants -- don't add any more to this list either. */
17601 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
17602 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
17603 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
17604 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
17605 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
17607 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
17610 struct arm_cpu_option_table
17613 const arm_feature_set value
;
17614 /* For some CPUs we assume an FPU unless the user explicitly sets
17616 const arm_feature_set default_fpu
;
17617 /* The canonical name of the CPU, or NULL to use NAME converted to upper
17619 const char *canonical_name
;
17622 /* This list should, at a minimum, contain all the cpu names
17623 recognized by GCC. */
17624 static const struct arm_cpu_option_table arm_cpus
[] =
17626 {"all", ARM_ANY
, FPU_ARCH_FPA
, NULL
},
17627 {"arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
},
17628 {"arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
},
17629 {"arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
},
17630 {"arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
},
17631 {"arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17632 {"arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17633 {"arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17634 {"arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17635 {"arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17636 {"arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17637 {"arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
17638 {"arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17639 {"arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
17640 {"arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17641 {"arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
17642 {"arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17643 {"arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17644 {"arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17645 {"arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17646 {"arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17647 {"arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17648 {"arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17649 {"arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17650 {"arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17651 {"arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17652 {"arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17653 {"arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
17654 {"arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17655 {"arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17656 {"arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17657 {"arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17658 {"arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17659 {"strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17660 {"strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17661 {"strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17662 {"strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17663 {"strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
17664 {"arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17665 {"arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"},
17666 {"arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17667 {"arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17668 {"arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17669 {"arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
17670 /* For V5 or later processors we default to using VFP; but the user
17671 should really set the FPU type explicitly. */
17672 {"arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
17673 {"arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17674 {"arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"},
17675 {"arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"},
17676 {"arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
},
17677 {"arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
17678 {"arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"},
17679 {"arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17680 {"arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
17681 {"arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"},
17682 {"arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17683 {"arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17684 {"arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
17685 {"arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
17686 {"arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17687 {"arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"},
17688 {"arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
17689 {"arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17690 {"arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
17691 {"arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM1026EJ-S"},
17692 {"arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
},
17693 {"arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"},
17694 {"arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
},
17695 {"arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, "ARM1136JF-S"},
17696 {"arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
},
17697 {"mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, NULL
},
17698 {"mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, NULL
},
17699 {"arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
},
17700 {"arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
},
17701 {"arm1176jz-s", ARM_ARCH_V6ZK
, FPU_NONE
, NULL
},
17702 {"arm1176jzf-s", ARM_ARCH_V6ZK
, FPU_ARCH_VFP_V2
, NULL
},
17703 {"cortex-a8", ARM_ARCH_V7A
, ARM_FEATURE(0, FPU_VFP_V3
17704 | FPU_NEON_EXT_V1
),
17706 {"cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, NULL
},
17707 {"cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, NULL
},
17708 /* ??? XSCALE is really an architecture. */
17709 {"xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
},
17710 /* ??? iwmmxt is not a processor. */
17711 {"iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
},
17712 {"i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
},
17714 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
), FPU_ARCH_MAVERICK
, "ARM920T"},
17715 {NULL
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
17718 struct arm_arch_option_table
17721 const arm_feature_set value
;
17722 const arm_feature_set default_fpu
;
17725 /* This list should, at a minimum, contain all the architecture names
17726 recognized by GCC. */
17727 static const struct arm_arch_option_table arm_archs
[] =
17729 {"all", ARM_ANY
, FPU_ARCH_FPA
},
17730 {"armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
},
17731 {"armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
},
17732 {"armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
},
17733 {"armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
},
17734 {"armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
},
17735 {"armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
},
17736 {"armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
},
17737 {"armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
},
17738 {"armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
},
17739 {"armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
},
17740 {"armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
},
17741 {"armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
},
17742 {"armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
},
17743 {"armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
},
17744 {"armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
},
17745 {"armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
},
17746 {"armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
},
17747 {"armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
},
17748 {"armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
},
17749 {"armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
},
17750 {"armv6zk", ARM_ARCH_V6ZK
, FPU_ARCH_VFP
},
17751 {"armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
},
17752 {"armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
},
17753 {"armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
},
17754 {"armv6zkt2", ARM_ARCH_V6ZKT2
, FPU_ARCH_VFP
},
17755 {"armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
},
17756 {"armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
},
17757 {"armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
},
17758 {"armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
},
17759 {"xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
},
17760 {"iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
},
17761 {NULL
, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
17764 /* ISA extensions in the co-processor space. */
17765 struct arm_option_cpu_value_table
17768 const arm_feature_set value
;
17771 static const struct arm_option_cpu_value_table arm_extensions
[] =
17773 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK
)},
17774 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE
)},
17775 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT
)},
17776 {NULL
, ARM_ARCH_NONE
}
17779 /* This list should, at a minimum, contain all the fpu names
17780 recognized by GCC. */
17781 static const struct arm_option_cpu_value_table arm_fpus
[] =
17783 {"softfpa", FPU_NONE
},
17784 {"fpe", FPU_ARCH_FPE
},
17785 {"fpe2", FPU_ARCH_FPE
},
17786 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
17787 {"fpa", FPU_ARCH_FPA
},
17788 {"fpa10", FPU_ARCH_FPA
},
17789 {"fpa11", FPU_ARCH_FPA
},
17790 {"arm7500fe", FPU_ARCH_FPA
},
17791 {"softvfp", FPU_ARCH_VFP
},
17792 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
17793 {"vfp", FPU_ARCH_VFP_V2
},
17794 {"vfp9", FPU_ARCH_VFP_V2
},
17795 {"vfp3", FPU_ARCH_VFP_V3
},
17796 {"vfp10", FPU_ARCH_VFP_V2
},
17797 {"vfp10-r0", FPU_ARCH_VFP_V1
},
17798 {"vfpxd", FPU_ARCH_VFP_V1xD
},
17799 {"arm1020t", FPU_ARCH_VFP_V1
},
17800 {"arm1020e", FPU_ARCH_VFP_V2
},
17801 {"arm1136jfs", FPU_ARCH_VFP_V2
},
17802 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
17803 {"maverick", FPU_ARCH_MAVERICK
},
17804 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
17805 {NULL
, ARM_ARCH_NONE
}
/* Pairs a name with a plain integer value; used for the float-ABI and
   EABI-version tables below (opt->value is assigned directly to
   mfloat_abi_opt / meabi_flags).  */
struct arm_option_value_table
{
  char *name;	/* Option name.  */
  int value;	/* Value stored when the name matches.  */
};
17814 static const struct arm_option_value_table arm_float_abis
[] =
17816 {"hard", ARM_FLOAT_ABI_HARD
},
17817 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
17818 {"soft", ARM_FLOAT_ABI_SOFT
},
17823 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
17824 static const struct arm_option_value_table arm_eabis
[] =
17826 {"gnu", EF_ARM_EABI_UNKNOWN
},
17827 {"4", EF_ARM_EABI_VER4
},
17828 {"5", EF_ARM_EABI_VER5
},
/* One entry of the long-option table (arm_long_opts below): options
   that carry an argument, e.g. -mcpu=<name>.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
17842 arm_parse_extension (char * str
, const arm_feature_set
**opt_p
)
17844 arm_feature_set
*ext_set
= xmalloc (sizeof (arm_feature_set
));
17846 /* Copy the feature set, so that we can modify it. */
17847 *ext_set
= **opt_p
;
17850 while (str
!= NULL
&& *str
!= 0)
17852 const struct arm_option_cpu_value_table
* opt
;
17858 as_bad (_("invalid architectural extension"));
17863 ext
= strchr (str
, '+');
17866 optlen
= ext
- str
;
17868 optlen
= strlen (str
);
17872 as_bad (_("missing architectural extension"));
17876 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
17877 if (strncmp (opt
->name
, str
, optlen
) == 0)
17879 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->value
);
17883 if (opt
->name
== NULL
)
17885 as_bad (_("unknown architectural extnsion `%s'"), str
);
17896 arm_parse_cpu (char * str
)
17898 const struct arm_cpu_option_table
* opt
;
17899 char * ext
= strchr (str
, '+');
17903 optlen
= ext
- str
;
17905 optlen
= strlen (str
);
17909 as_bad (_("missing cpu name `%s'"), str
);
17913 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
17914 if (strncmp (opt
->name
, str
, optlen
) == 0)
17916 mcpu_cpu_opt
= &opt
->value
;
17917 mcpu_fpu_opt
= &opt
->default_fpu
;
17918 if (opt
->canonical_name
)
17919 strcpy(selected_cpu_name
, opt
->canonical_name
);
17923 for (i
= 0; i
< optlen
; i
++)
17924 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
17925 selected_cpu_name
[i
] = 0;
17929 return arm_parse_extension (ext
, &mcpu_cpu_opt
);
17934 as_bad (_("unknown cpu `%s'"), str
);
17939 arm_parse_arch (char * str
)
17941 const struct arm_arch_option_table
*opt
;
17942 char *ext
= strchr (str
, '+');
17946 optlen
= ext
- str
;
17948 optlen
= strlen (str
);
17952 as_bad (_("missing architecture name `%s'"), str
);
17956 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
17957 if (streq (opt
->name
, str
))
17959 march_cpu_opt
= &opt
->value
;
17960 march_fpu_opt
= &opt
->default_fpu
;
17961 strcpy(selected_cpu_name
, opt
->name
);
17964 return arm_parse_extension (ext
, &march_cpu_opt
);
17969 as_bad (_("unknown architecture `%s'\n"), str
);
17974 arm_parse_fpu (char * str
)
17976 const struct arm_option_cpu_value_table
* opt
;
17978 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
17979 if (streq (opt
->name
, str
))
17981 mfpu_opt
= &opt
->value
;
17985 as_bad (_("unknown floating point format `%s'\n"), str
);
17990 arm_parse_float_abi (char * str
)
17992 const struct arm_option_value_table
* opt
;
17994 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
17995 if (streq (opt
->name
, str
))
17997 mfloat_abi_opt
= opt
->value
;
18001 as_bad (_("unknown floating point abi `%s'\n"), str
);
18007 arm_parse_eabi (char * str
)
18009 const struct arm_option_value_table
*opt
;
18011 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
18012 if (streq (opt
->name
, str
))
18014 meabi_flags
= opt
->value
;
18017 as_bad (_("unknown EABI `%s'\n"), str
);
18022 struct arm_long_option_table arm_long_opts
[] =
18024 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
18025 arm_parse_cpu
, NULL
},
18026 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
18027 arm_parse_arch
, NULL
},
18028 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
18029 arm_parse_fpu
, NULL
},
18030 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
18031 arm_parse_float_abi
, NULL
},
18033 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
18034 arm_parse_eabi
, NULL
},
18036 {NULL
, NULL
, 0, NULL
}
18040 md_parse_option (int c
, char * arg
)
18042 struct arm_option_table
*opt
;
18043 const struct arm_legacy_option_table
*fopt
;
18044 struct arm_long_option_table
*lopt
;
18050 target_big_endian
= 1;
18056 target_big_endian
= 0;
18061 /* Listing option. Just ignore these, we don't support additional
18066 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
18068 if (c
== opt
->option
[0]
18069 && ((arg
== NULL
&& opt
->option
[1] == 0)
18070 || streq (arg
, opt
->option
+ 1)))
18072 #if WARN_DEPRECATED
18073 /* If the option is deprecated, tell the user. */
18074 if (opt
->deprecated
!= NULL
)
18075 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
18076 arg
? arg
: "", _(opt
->deprecated
));
18079 if (opt
->var
!= NULL
)
18080 *opt
->var
= opt
->value
;
18086 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
18088 if (c
== fopt
->option
[0]
18089 && ((arg
== NULL
&& fopt
->option
[1] == 0)
18090 || streq (arg
, fopt
->option
+ 1)))
18092 #if WARN_DEPRECATED
18093 /* If the option is deprecated, tell the user. */
18094 if (fopt
->deprecated
!= NULL
)
18095 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
18096 arg
? arg
: "", _(fopt
->deprecated
));
18099 if (fopt
->var
!= NULL
)
18100 *fopt
->var
= &fopt
->value
;
18106 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
18108 /* These options are expected to have an argument. */
18109 if (c
== lopt
->option
[0]
18111 && strncmp (arg
, lopt
->option
+ 1,
18112 strlen (lopt
->option
+ 1)) == 0)
18114 #if WARN_DEPRECATED
18115 /* If the option is deprecated, tell the user. */
18116 if (lopt
->deprecated
!= NULL
)
18117 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
18118 _(lopt
->deprecated
));
18121 /* Call the sup-option parser. */
18122 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
18133 md_show_usage (FILE * fp
)
18135 struct arm_option_table
*opt
;
18136 struct arm_long_option_table
*lopt
;
18138 fprintf (fp
, _(" ARM-specific assembler options:\n"));
18140 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
18141 if (opt
->help
!= NULL
)
18142 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
18144 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
18145 if (lopt
->help
!= NULL
)
18146 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
18150 -EB assemble code for a big-endian cpu\n"));
18155 -EL assemble code for a little-endian cpu\n"));
18164 arm_feature_set flags
;
18165 } cpu_arch_ver_table
;
18167 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
18168 least features first. */
18169 static const cpu_arch_ver_table cpu_arch_ver
[] =
18174 {4, ARM_ARCH_V5TE
},
18175 {5, ARM_ARCH_V5TEJ
},
18179 {9, ARM_ARCH_V6T2
},
18180 {10, ARM_ARCH_V7A
},
18181 {10, ARM_ARCH_V7R
},
18182 {10, ARM_ARCH_V7M
},
18186 /* Set the public EABI object attributes. */
18188 aeabi_set_public_attributes (void)
18191 arm_feature_set flags
;
18192 arm_feature_set tmp
;
18193 const cpu_arch_ver_table
*p
;
18195 /* Choose the architecture based on the capabilities of the requested cpu
18196 (if any) and/or the instructions actually used. */
18197 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
18198 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
18199 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
18203 for (p
= cpu_arch_ver
; p
->val
; p
++)
18205 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
18208 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
18212 /* Tag_CPU_name. */
18213 if (selected_cpu_name
[0])
18217 p
= selected_cpu_name
;
18218 if (strncmp(p
, "armv", 4) == 0)
18223 for (i
= 0; p
[i
]; i
++)
18224 p
[i
] = TOUPPER (p
[i
]);
18226 elf32_arm_add_eabi_attr_string (stdoutput
, 5, p
);
18228 /* Tag_CPU_arch. */
18229 elf32_arm_add_eabi_attr_int (stdoutput
, 6, arch
);
18230 /* Tag_CPU_arch_profile. */
18231 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
))
18232 elf32_arm_add_eabi_attr_int (stdoutput
, 7, 'A');
18233 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
18234 elf32_arm_add_eabi_attr_int (stdoutput
, 7, 'R');
18235 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
))
18236 elf32_arm_add_eabi_attr_int (stdoutput
, 7, 'M');
18237 /* Tag_ARM_ISA_use. */
18238 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_full
))
18239 elf32_arm_add_eabi_attr_int (stdoutput
, 8, 1);
18240 /* Tag_THUMB_ISA_use. */
18241 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_full
))
18242 elf32_arm_add_eabi_attr_int (stdoutput
, 9,
18243 ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
) ? 2 : 1);
18244 /* Tag_VFP_arch. */
18245 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v3
)
18246 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v3
))
18247 elf32_arm_add_eabi_attr_int (stdoutput
, 10, 3);
18248 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v2
)
18249 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v2
))
18250 elf32_arm_add_eabi_attr_int (stdoutput
, 10, 2);
18251 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v1
)
18252 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v1
)
18253 || ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_vfp_ext_v1xd
)
18254 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_vfp_ext_v1xd
))
18255 elf32_arm_add_eabi_attr_int (stdoutput
, 10, 1);
18256 /* Tag_WMMX_arch. */
18257 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_cext_iwmmxt
)
18258 || ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_cext_iwmmxt
))
18259 elf32_arm_add_eabi_attr_int (stdoutput
, 11, 1);
18260 /* Tag_NEON_arch. */
18261 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, fpu_neon_ext_v1
)
18262 || ARM_CPU_HAS_FEATURE (arm_arch_used
, fpu_neon_ext_v1
))
18263 elf32_arm_add_eabi_attr_int (stdoutput
, 12, 1);
18266 /* Add the .ARM.attributes section. */
18275 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
18278 aeabi_set_public_attributes ();
18279 size
= elf32_arm_eabi_attr_size (stdoutput
);
18280 s
= subseg_new (".ARM.attributes", 0);
18281 bfd_set_section_flags (stdoutput
, s
, SEC_READONLY
| SEC_DATA
);
18282 addr
= frag_now_fix ();
18283 p
= frag_more (size
);
18284 elf32_arm_set_eabi_attr_contents (stdoutput
, (bfd_byte
*)p
, size
);
18286 #endif /* OBJ_ELF */
18289 /* Parse a .cpu directive. */
18292 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
18294 const struct arm_cpu_option_table
*opt
;
18298 name
= input_line_pointer
;
18299 while (*input_line_pointer
&& !ISSPACE(*input_line_pointer
))
18300 input_line_pointer
++;
18301 saved_char
= *input_line_pointer
;
18302 *input_line_pointer
= 0;
18304 /* Skip the first "all" entry. */
18305 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
18306 if (streq (opt
->name
, name
))
18308 mcpu_cpu_opt
= &opt
->value
;
18309 selected_cpu
= opt
->value
;
18310 if (opt
->canonical_name
)
18311 strcpy(selected_cpu_name
, opt
->canonical_name
);
18315 for (i
= 0; opt
->name
[i
]; i
++)
18316 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
18317 selected_cpu_name
[i
] = 0;
18319 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
18320 *input_line_pointer
= saved_char
;
18321 demand_empty_rest_of_line ();
18324 as_bad (_("unknown cpu `%s'"), name
);
18325 *input_line_pointer
= saved_char
;
18326 ignore_rest_of_line ();
18330 /* Parse a .arch directive. */
18333 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
18335 const struct arm_arch_option_table
*opt
;
18339 name
= input_line_pointer
;
18340 while (*input_line_pointer
&& !ISSPACE(*input_line_pointer
))
18341 input_line_pointer
++;
18342 saved_char
= *input_line_pointer
;
18343 *input_line_pointer
= 0;
18345 /* Skip the first "all" entry. */
18346 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
18347 if (streq (opt
->name
, name
))
18349 mcpu_cpu_opt
= &opt
->value
;
18350 selected_cpu
= opt
->value
;
18351 strcpy(selected_cpu_name
, opt
->name
);
18352 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
18353 *input_line_pointer
= saved_char
;
18354 demand_empty_rest_of_line ();
18358 as_bad (_("unknown architecture `%s'\n"), name
);
18359 *input_line_pointer
= saved_char
;
18360 ignore_rest_of_line ();
18364 /* Parse a .fpu directive. */
18367 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
18369 const struct arm_option_cpu_value_table
*opt
;
18373 name
= input_line_pointer
;
18374 while (*input_line_pointer
&& !ISSPACE(*input_line_pointer
))
18375 input_line_pointer
++;
18376 saved_char
= *input_line_pointer
;
18377 *input_line_pointer
= 0;
18379 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
18380 if (streq (opt
->name
, name
))
18382 mfpu_opt
= &opt
->value
;
18383 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
18384 *input_line_pointer
= saved_char
;
18385 demand_empty_rest_of_line ();
18389 as_bad (_("unknown floating point format `%s'\n"), name
);
18390 *input_line_pointer
= saved_char
;
18391 ignore_rest_of_line ();