1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2015 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
80 /* Results from operand parsing worker functions. */
84 PARSE_OPERAND_SUCCESS
,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result
;
96 /* Types of processor to assemble for. */
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
122 #endif /* ifndef FPU_DEFAULT */
#define streq(a, b) (strcmp (a, b) == 0)

/* Feature set of the CPU/FPU we are assembling for; consulted for
   availability checks (e.g. via ARM_CPU_HAS_FEATURE).  */
static arm_feature_set cpu_variant;
/* Features actually used by ARM-state instructions seen so far.  */
static arm_feature_set arm_arch_used;
/* Features actually used by Thumb-state instructions seen so far.  */
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26      = FALSE;
static int atpcs	     = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float   = FALSE;
static int pic_code	     = FALSE;
static int fix_v4bx	     = FALSE;
/* Warn on using deprecated features.  */
static int warn_on_deprecated = TRUE;

/* Understand CodeComposer Studio assembly syntax.  */
bfd_boolean codecomposer_syntax = FALSE;
/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
/* Legacy (-m<cpu>/-mfpa style) option selections, if any.  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

/* Selections made by -mcpu= (CPU part and implied FPU part).  */
static const arm_feature_set *mcpu_cpu_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;
/* Selections made by -march= (architecture part and implied FPU part).  */
static const arm_feature_set *march_cpu_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;
/* Selection made by -mfpu=.  */
static const arm_feature_set *mfpu_opt = NULL;
/* Architecture override; presumably set by a .object_arch-style
   directive -- TODO confirm against the directive handlers.  */
static const arm_feature_set *object_arch = NULL;
/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
/* FPA-style (pure big-endian) word ordering for FP constants; tested
   in md_atof.  */
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

/* NOTE(review): upstream wraps this in #ifdef CPU_DEFAULT; the guard
   appears to have been lost in this copy -- confirm before building
   on a target with no CPU_DEFAULT.  */
static const arm_feature_set cpu_default = CPU_DEFAULT;
171 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
172 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
/* Individual architecture-extension feature sets.  Each names one
   ARM_EXT_* (low word) or ARM_EXT2_* (high word) bit from
   include/opcode/arm.h and is used to gate groups of instructions.  */
static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
/* Present on both v4T and v5.  */
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
static const arm_feature_set arm_ext_v6_notm =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
static const arm_feature_set arm_ext_v6_dsp =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
static const arm_feature_set arm_ext_barrier =
  ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
static const arm_feature_set arm_ext_msr =
  ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
/* Any M-profile architecture (v6-M, v7-M, v8-M and the OS extension).  */
static const arm_feature_set arm_ext_m =
  ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, ARM_EXT2_V8M);
static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
static const arm_feature_set arm_ext_v6t2_v8m =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
/* Instructions shared between ARMv8-A and ARMv8-M.  */
static const arm_feature_set arm_ext_atomics =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);

/* Whole-architecture convenience sets.  */
static const arm_feature_set arm_arch_any = ARM_ANY;
static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
/* Coprocessor extension feature sets (iWMMXt, XScale, Maverick).  */
static const arm_feature_set arm_cext_iwmmxt2 =
  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
/* Floating-point extension feature sets (FPA, VFP, Neon, crypto, CRC).  */
static const arm_feature_set fpu_fpa_ext_v1 =
  ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 =
  ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
static const arm_feature_set fpu_vfp_ext_v3 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_ext_d32 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
static const arm_feature_set fpu_neon_ext_v1 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
/* Either VFPv3 or Neon suffices.  */
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_fp16 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
static const arm_feature_set fpu_neon_ext_fma =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_fma =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
static const arm_feature_set fpu_vfp_ext_armv8xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
static const arm_feature_set fpu_neon_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
static const arm_feature_set fpu_crypto_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
static const arm_feature_set crc_ext_armv8 =
  ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
/* ARMv8.1 rounding-doubling multiply-accumulate (RDMA) Neon subset.  */
static const arm_feature_set fpu_neon_ext_v8_1 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8 | FPU_NEON_EXT_RDMA);
/* -mfloat-abi= selection; -1 means not specified on the command line.  */
static int mfloat_abi_opt = -1;
/* Record user cpu selection for object attributes.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[20];

/* Scratch FP number shared with the generic atof machinery.  */
extern FLONUM_TYPE generic_floating_point_number;
279 /* Return if no cpu was selected on command-line. */
281 no_cpu_selected (void)
283 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
288 static int meabi_flags
= EABI_DEFAULT
;
290 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
293 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
298 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
303 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
304 symbolS
* GOT_symbol
;
307 /* 0: assemble for ARM,
308 1: assemble for Thumb,
309 2: assemble for Thumb even though target CPU does not support thumb
311 static int thumb_mode
= 0;
312 /* A value distinct from the possible values for thumb_mode that we
313 can use to record whether thumb_mode has been copied into the
314 tc_frag_data field of a frag. */
315 #define MODE_RECORDED (1 << 4)
317 /* Specifies the intrinsic IT insn behavior mode. */
318 enum implicit_it_mode
320 IMPLICIT_IT_MODE_NEVER
= 0x00,
321 IMPLICIT_IT_MODE_ARM
= 0x01,
322 IMPLICIT_IT_MODE_THUMB
= 0x02,
323 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
325 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
327 /* If unified_syntax is true, we are processing the new unified
328 ARM/Thumb syntax. Important differences from the old ARM mode:
330 - Immediate operands do not require a # prefix.
331 - Conditional affixes always appear at the end of the
332 instruction. (For backward compatibility, those instructions
333 that formerly had them in the middle, continue to accept them
335 - The IT instruction may appear, and if it does is validated
336 against subsequent conditional affixes. It does not generate
339 Important differences from the old Thumb mode:
341 - Immediate operands do not require a # prefix.
342 - Most of the V6T2 instructions are only available in unified mode.
343 - The .N and .W suffixes are recognized and honored (it is an error
344 if they cannot be honored).
345 - All instructions set the flags if and only if they have an 's' affix.
346 - Conditional affixes may be used. They are validated against
347 preceding IT instructions. Unlike ARM mode, you cannot use a
348 conditional affix except in the scope of an IT instruction. */
350 static bfd_boolean unified_syntax
= FALSE
;
352 /* An immediate operand can start with #, and ld*, st*, pld operands
353 can contain [ and ]. We need to tell APP not to elide whitespace
354 before a [, which can appear as the first operand for pld.
355 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
356 const char arm_symbol_chars
[] = "#[]{}";
371 enum neon_el_type type
;
375 #define NEON_MAX_TYPE_ELS 4
379 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
383 enum it_instruction_type
388 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
389 if inside, should be the last one. */
390 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
391 i.e. BKPT and NOP. */
392 IT_INSN
/* The IT insn has been parsed. */
395 /* The maximum number of operands we need. */
396 #define ARM_IT_MAX_OPERANDS 6
401 unsigned long instruction
;
405 /* "uncond_value" is set to the value in place of the conditional field in
406 unconditional versions of the instruction, or -1 if nothing is
409 struct neon_type vectype
;
410 /* This does not indicate an actual NEON instruction, only that
411 the mnemonic accepts neon-style type suffixes. */
413 /* Set to the opcode if the instruction needs relaxation.
414 Zero if the instruction is not relaxed. */
418 bfd_reloc_code_real_type type
;
423 enum it_instruction_type it_insn_type
;
429 struct neon_type_el vectype
;
430 unsigned present
: 1; /* Operand present. */
431 unsigned isreg
: 1; /* Operand was a register. */
432 unsigned immisreg
: 1; /* .imm field is a second register. */
433 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
434 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
435 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
436 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
437 instructions. This allows us to disambiguate ARM <-> vector insns. */
438 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
439 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
440 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
441 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
442 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
443 unsigned writeback
: 1; /* Operand has trailing ! */
444 unsigned preind
: 1; /* Preindexed address. */
445 unsigned postind
: 1; /* Postindexed address. */
446 unsigned negative
: 1; /* Index register was negated. */
447 unsigned shifted
: 1; /* Shift applied to operation. */
448 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
449 } operands
[ARM_IT_MAX_OPERANDS
];
452 static struct arm_it inst
;
454 #define NUM_FLOAT_VALS 8
456 const char * fp_const
[] =
458 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
461 /* Number of littlenums required to hold an extended precision number. */
462 #define MAX_LITTLENUMS 6
464 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
474 #define CP_T_X 0x00008000
475 #define CP_T_Y 0x00400000
477 #define CONDS_BIT 0x00100000
478 #define LOAD_BIT 0x00100000
480 #define DOUBLE_LOAD_FLAG 0x00000001
484 const char * template_name
;
488 #define COND_ALWAYS 0xE
492 const char * template_name
;
496 struct asm_barrier_opt
498 const char * template_name
;
500 const arm_feature_set arch
;
/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT   (1 << 22)

/* The individual PSR flag bits (bits 16-19 of the encoding, i.e. the
   c/x/s/f field-mask positions).  */
#define PSR_c	(1 << 16)
#define PSR_x	(1 << 17)
#define PSR_s	(1 << 18)
#define PSR_f	(1 << 19)
515 bfd_reloc_code_real_type reloc
;
520 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
521 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
526 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
529 /* Bits for DEFINED field in neon_typed_alias. */
530 #define NTA_HASTYPE 1
531 #define NTA_HASINDEX 2
533 struct neon_typed_alias
535 unsigned char defined
;
537 struct neon_type_el eltype
;
540 /* ARM register categories. This includes coprocessor numbers and various
541 architecture extensions' registers. */
568 /* Structure for a hash table entry for a register.
569 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
570 information which states whether a vector type or index is specified (for a
571 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
577 unsigned char builtin
;
578 struct neon_typed_alias
* neon
;
581 /* Diagnostics used when we don't get a register of the expected type. */
582 const char * const reg_expected_msgs
[] =
584 N_("ARM register expected"),
585 N_("bad or missing co-processor number"),
586 N_("co-processor register expected"),
587 N_("FPA register expected"),
588 N_("VFP single precision register expected"),
589 N_("VFP/Neon double precision register expected"),
590 N_("Neon quad precision register expected"),
591 N_("VFP single or double precision register expected"),
592 N_("Neon double or quad precision register expected"),
593 N_("VFP single, double or Neon quad precision register expected"),
594 N_("VFP system register expected"),
595 N_("Maverick MVF register expected"),
596 N_("Maverick MVD register expected"),
597 N_("Maverick MVFX register expected"),
598 N_("Maverick MVDX register expected"),
599 N_("Maverick MVAX register expected"),
600 N_("Maverick DSPSC register expected"),
601 N_("iWMMXt data register expected"),
602 N_("iWMMXt control register expected"),
603 N_("iWMMXt scalar register expected"),
604 N_("XScale accumulator register expected"),
607 /* Some well known registers that we refer to directly elsewhere. */
613 /* ARM instructions take 4bytes in the object file, Thumb instructions
619 /* Basic string to match. */
620 const char * template_name
;
622 /* Parameters to instruction. */
623 unsigned int operands
[8];
625 /* Conditional tag - see opcode_lookup. */
626 unsigned int tag
: 4;
628 /* Basic instruction code. */
629 unsigned int avalue
: 28;
631 /* Thumb-format instruction code. */
634 /* Which architecture variant provides this instruction. */
635 const arm_feature_set
* avariant
;
636 const arm_feature_set
* tvariant
;
638 /* Function to call to encode instruction in ARM format. */
639 void (* aencode
) (void);
641 /* Function to call to encode instruction in Thumb format. */
642 void (* tencode
) (void);
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000
#define CPSI_MMOD	0x00020000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020
#define VLDR_VMOV_SAME	0x0040f000

/* Thumb-2 SUBS PC, LR encoding template.  */
#define T2_SUBS_PC_LR	0xf3de8f00

/* Position of the data-processing opcode field in ARM encodings.  */
#define DATA_OP_SHIFT	21

/* Same, for 32-bit Thumb-2 encodings.  */
#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21

#define A_COND_MASK         0xf0000000
#define A_PUSH_POP_OP_MASK  0x0fff0000

/* Opcodes for pushing/popping registers to/from the stack.  */
#define A1_OPCODE_PUSH    0x092d0000
#define A2_OPCODE_PUSH    0x052d0004
#define A2_OPCODE_POP     0x049d0004

/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

/* Thumb-2 data-processing opcode values.  */
#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14

/* 16-bit Thumb instruction templates.  */
#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R	0x40c0
#define T_OPCODE_ROR_R	0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
/* Canned diagnostic messages, assigned to inst.error on failure.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* NOTE(review): trailing semicolon is suspicious for a macro used as an
   expression -- verify no call site relies on it before removing.  */
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode");
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
/* Hash tables built at startup; names suggest the keys are mnemonics,
   condition codes, shift names, PSR names, v7-M special registers,
   register names, relocation names and barrier options respectively.  */
static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;
/* Stuff needed to resolve the label ambiguity.  */
/* Most recent label symbol; used when resolving label ambiguities.  */
symbolS *  last_label_seen;
/* Nonzero when the pending label names a Thumb function.  */
static int label_is_thumb_function_name = FALSE;
806 /* Literal pool structure. Held on a per-section
807 and per-sub-section basis. */
809 #define MAX_LITERAL_POOL_SIZE 1024
810 typedef struct literal_pool
812 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
813 unsigned int next_free_entry
;
819 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
821 struct literal_pool
* next
;
822 unsigned int alignment
;
825 /* Pointer to a linked list of literal pools. */
826 literal_pool
* list_of_pools
= NULL
;
828 typedef enum asmfunc_states
831 WAITING_ASMFUNC_NAME
,
835 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
838 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
840 static struct current_it now_it
;
844 now_it_compatible (int cond
)
846 return (cond
& ~1) == (now_it
.cc
& ~1);
850 conditional_insn (void)
852 return inst
.cond
!= COND_ALWAYS
;
855 static int in_it_block (void);
857 static int handle_it_state (void);
859 static void force_automatic_it_block_close (void);
861 static void it_fsm_post_encode (void);
863 #define set_it_insn_type(type) \
866 inst.it_insn_type = type; \
867 if (handle_it_state () == FAIL) \
872 #define set_it_insn_type_nonvoid(type, failret) \
875 inst.it_insn_type = type; \
876 if (handle_it_state () == FAIL) \
881 #define set_it_insn_type_last() \
884 if (inst.cond == COND_ALWAYS) \
885 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
887 set_it_insn_type (INSIDE_IT_LAST_INSN); \
893 /* This array holds the chars that always start a comment. If the
894 pre-processor is disabled, these aren't very useful. */
895 char arm_comment_chars
[] = "@";
897 /* This array holds the chars that only start a comment at the beginning of
898 a line. If the line seems to have the form '# 123 filename'
899 .line and .file directives will appear in the pre-processed output. */
900 /* Note that input_file.c hand checks for '#' at the beginning of the
901 first line of the input file. This is because the compiler outputs
902 #NO_APP at the beginning of its output. */
903 /* Also note that comments like this one will always work. */
904 const char line_comment_chars
[] = "#";
906 char arm_line_separator_chars
[] = ";";
908 /* Chars that can be used to separate mant
909 from exp in floating point numbers. */
910 const char EXP_CHARS
[] = "eE";
912 /* Chars that mean this number is a floating point constant. */
916 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
918 /* Prefix characters that indicate the start of an immediate
920 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
922 /* Separator character handling. */
924 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
927 skip_past_char (char ** str
, char c
)
929 /* PR gas/14987: Allow for whitespace before the expected character. */
930 skip_whitespace (*str
);
941 #define skip_past_comma(str) skip_past_char (str, ',')
943 /* Arithmetic expressions (possibly involving symbols). */
945 /* Return TRUE if anything in the expression is a bignum. */
948 walk_no_bignums (symbolS
* sp
)
950 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
953 if (symbol_get_value_expression (sp
)->X_add_symbol
)
955 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
956 || (symbol_get_value_expression (sp
)->X_op_symbol
957 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
963 static int in_my_get_expression
= 0;
965 /* Third argument to my_get_expression. */
966 #define GE_NO_PREFIX 0
967 #define GE_IMM_PREFIX 1
968 #define GE_OPT_PREFIX 2
969 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
970 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
971 #define GE_OPT_PREFIX_BIG 3
974 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
979 /* In unified syntax, all prefixes are optional. */
981 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
986 case GE_NO_PREFIX
: break;
988 if (!is_immediate_prefix (**str
))
990 inst
.error
= _("immediate expression requires a # prefix");
996 case GE_OPT_PREFIX_BIG
:
997 if (is_immediate_prefix (**str
))
1003 memset (ep
, 0, sizeof (expressionS
));
1005 save_in
= input_line_pointer
;
1006 input_line_pointer
= *str
;
1007 in_my_get_expression
= 1;
1008 seg
= expression (ep
);
1009 in_my_get_expression
= 0;
1011 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1013 /* We found a bad or missing expression in md_operand(). */
1014 *str
= input_line_pointer
;
1015 input_line_pointer
= save_in
;
1016 if (inst
.error
== NULL
)
1017 inst
.error
= (ep
->X_op
== O_absent
1018 ? _("missing expression") :_("bad expression"));
1023 if (seg
!= absolute_section
1024 && seg
!= text_section
1025 && seg
!= data_section
1026 && seg
!= bss_section
1027 && seg
!= undefined_section
)
1029 inst
.error
= _("bad segment");
1030 *str
= input_line_pointer
;
1031 input_line_pointer
= save_in
;
1038 /* Get rid of any bignums now, so that we don't generate an error for which
1039 we can't establish a line number later on. Big numbers are never valid
1040 in instructions, which is where this routine is always called. */
1041 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1042 && (ep
->X_op
== O_big
1043 || (ep
->X_add_symbol
1044 && (walk_no_bignums (ep
->X_add_symbol
)
1046 && walk_no_bignums (ep
->X_op_symbol
))))))
1048 inst
.error
= _("invalid constant");
1049 *str
= input_line_pointer
;
1050 input_line_pointer
= save_in
;
1054 *str
= input_line_pointer
;
1055 input_line_pointer
= save_in
;
1059 /* Turn a string in input_line_pointer into a floating point constant
1060 of type TYPE, and store the appropriate bytes in *LITP. The number
1061 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1062 returned, or NULL on OK.
1064 Note that fp constants aren't represent in the normal way on the ARM.
1065 In big endian mode, things are as expected. However, in little endian
1066 mode fp constants are big-endian word-wise, and little-endian byte-wise
1067 within the words. For example, (double) 1.1 in big endian mode is
1068 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1069 the byte sequence 99 99 f1 3f 9a 99 99 99.
1071 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1074 md_atof (int type
, char * litP
, int * sizeP
)
1077 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1109 return _("Unrecognized or unsupported floating point constant");
1112 t
= atof_ieee (input_line_pointer
, type
, words
);
1114 input_line_pointer
= t
;
1115 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1117 if (target_big_endian
)
1119 for (i
= 0; i
< prec
; i
++)
1121 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1122 litP
+= sizeof (LITTLENUM_TYPE
);
1127 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1128 for (i
= prec
- 1; i
>= 0; i
--)
1130 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1131 litP
+= sizeof (LITTLENUM_TYPE
);
1134 /* For a 4 byte float the order of elements in `words' is 1 0.
1135 For an 8 byte float the order is 1 0 3 2. */
1136 for (i
= 0; i
< prec
; i
+= 2)
1138 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1139 sizeof (LITTLENUM_TYPE
));
1140 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1141 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1142 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1149 /* We handle all bad expressions here, so that we can report the faulty
1150 instruction in the error message. */
1152 md_operand (expressionS
* exp
)
1154 if (in_my_get_expression
)
1155 exp
->X_op
= O_illegal
;
1158 /* Immediate values. */
1160 /* Generic immediate-value read function for use in directives.
1161 Accepts anything that 'expression' can fold to a constant.
1162 *val receives the number. */
1165 immediate_for_directive (int *val
)
1168 exp
.X_op
= O_illegal
;
1170 if (is_immediate_prefix (*input_line_pointer
))
1172 input_line_pointer
++;
1176 if (exp
.X_op
!= O_constant
)
1178 as_bad (_("expected #constant"));
1179 ignore_rest_of_line ();
1182 *val
= exp
.X_add_number
;
1187 /* Register parsing. */
1189 /* Generic register parser. CCP points to what should be the
1190 beginning of a register name. If it is indeed a valid register
1191 name, advance CCP over it and return the reg_entry structure;
1192 otherwise return NULL. Does not issue diagnostics. */
1194 static struct reg_entry
*
1195 arm_reg_parse_multi (char **ccp
)
1199 struct reg_entry
*reg
;
1201 skip_whitespace (start
);
1203 #ifdef REGISTER_PREFIX
1204 if (*start
!= REGISTER_PREFIX
)
1208 #ifdef OPTIONAL_REGISTER_PREFIX
1209 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1214 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1219 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1221 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1231 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1232 enum arm_reg_type type
)
1234 /* Alternative syntaxes are accepted for a few register classes. */
1241 /* Generic coprocessor register names are allowed for these. */
1242 if (reg
&& reg
->type
== REG_TYPE_CN
)
1247 /* For backward compatibility, a bare number is valid here. */
1249 unsigned long processor
= strtoul (start
, ccp
, 10);
1250 if (*ccp
!= start
&& processor
<= 15)
1254 case REG_TYPE_MMXWC
:
1255 /* WC includes WCG. ??? I'm not sure this is true for all
1256 instructions that take WC registers. */
1257 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1268 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1269 return value is the register number or FAIL. */
1272 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1275 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1278 /* Do not allow a scalar (reg+index) to parse as a register. */
1279 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1282 if (reg
&& reg
->type
== type
)
1285 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1292 /* Parse a Neon type specifier. *STR should point at the leading '.'
1293 character. Does no verification at this stage that the type fits the opcode
1300 Can all be legally parsed by this function.
1302 Fills in neon_type struct pointer with parsed information, and updates STR
1303 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1304 type, FAIL if not. */
1307 parse_neon_type (struct neon_type
*type
, char **str
)
1314 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1316 enum neon_el_type thistype
= NT_untyped
;
1317 unsigned thissize
= -1u;
1324 /* Just a size without an explicit type. */
1328 switch (TOLOWER (*ptr
))
1330 case 'i': thistype
= NT_integer
; break;
1331 case 'f': thistype
= NT_float
; break;
1332 case 'p': thistype
= NT_poly
; break;
1333 case 's': thistype
= NT_signed
; break;
1334 case 'u': thistype
= NT_unsigned
; break;
1336 thistype
= NT_float
;
1341 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1347 /* .f is an abbreviation for .f32. */
1348 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1353 thissize
= strtoul (ptr
, &ptr
, 10);
1355 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1358 as_bad (_("bad size %d in type specifier"), thissize
);
1366 type
->el
[type
->elems
].type
= thistype
;
1367 type
->el
[type
->elems
].size
= thissize
;
1372 /* Empty/missing type is not a successful parse. */
1373 if (type
->elems
== 0)
1381 /* Errors may be set multiple times during parsing or bit encoding
1382 (particularly in the Neon bits), but usually the earliest error which is set
1383 will be the most meaningful. Avoid overwriting it with later (cascading)
1384 errors by calling this function. */
1387 first_error (const char *err
)
1393 /* Parse a single type, e.g. ".s32", leading period included. */
1395 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1398 struct neon_type optype
;
1402 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1404 if (optype
.elems
== 1)
1405 *vectype
= optype
.el
[0];
1408 first_error (_("only one type should be specified for operand"));
1414 first_error (_("vector type expected"));
1426 /* Special meanings for indices (which have a range of 0-7), which will fit into
1429 #define NEON_ALL_LANES 15
1430 #define NEON_INTERLEAVE_LANES 14
1432 /* Parse either a register or a scalar, with an optional type. Return the
1433 register number, and optionally fill in the actual type of the register
1434 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1435 type/index information in *TYPEINFO. */
1438 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1439 enum arm_reg_type
*rtype
,
1440 struct neon_typed_alias
*typeinfo
)
1443 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1444 struct neon_typed_alias atype
;
1445 struct neon_type_el parsetype
;
1449 atype
.eltype
.type
= NT_invtype
;
1450 atype
.eltype
.size
= -1;
1452 /* Try alternate syntax for some types of register. Note these are mutually
1453 exclusive with the Neon syntax extensions. */
1456 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1464 /* Undo polymorphism when a set of register types may be accepted. */
1465 if ((type
== REG_TYPE_NDQ
1466 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1467 || (type
== REG_TYPE_VFSD
1468 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1469 || (type
== REG_TYPE_NSDQ
1470 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1471 || reg
->type
== REG_TYPE_NQ
))
1472 || (type
== REG_TYPE_MMXWC
1473 && (reg
->type
== REG_TYPE_MMXWCG
)))
1474 type
= (enum arm_reg_type
) reg
->type
;
1476 if (type
!= reg
->type
)
1482 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1484 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1486 first_error (_("can't redefine type for operand"));
1489 atype
.defined
|= NTA_HASTYPE
;
1490 atype
.eltype
= parsetype
;
1493 if (skip_past_char (&str
, '[') == SUCCESS
)
1495 if (type
!= REG_TYPE_VFD
)
1497 first_error (_("only D registers may be indexed"));
1501 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1503 first_error (_("can't change index for operand"));
1507 atype
.defined
|= NTA_HASINDEX
;
1509 if (skip_past_char (&str
, ']') == SUCCESS
)
1510 atype
.index
= NEON_ALL_LANES
;
1515 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1517 if (exp
.X_op
!= O_constant
)
1519 first_error (_("constant expression required"));
1523 if (skip_past_char (&str
, ']') == FAIL
)
1526 atype
.index
= exp
.X_add_number
;
1541 /* Like arm_reg_parse, but allow allow the following extra features:
1542 - If RTYPE is non-zero, return the (possibly restricted) type of the
1543 register (e.g. Neon double or quad reg when either has been requested).
1544 - If this is a Neon vector type with additional type information, fill
1545 in the struct pointed to by VECTYPE (if non-NULL).
1546 This function will fault on encountering a scalar. */
1549 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1550 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1552 struct neon_typed_alias atype
;
1554 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1559 /* Do not allow regname(... to parse as a register. */
1563 /* Do not allow a scalar (reg+index) to parse as a register. */
1564 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1566 first_error (_("register operand expected, but got scalar"));
1571 *vectype
= atype
.eltype
;
1578 #define NEON_SCALAR_REG(X) ((X) >> 4)
1579 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1581 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1582 have enough information to be able to do a good job bounds-checking. So, we
1583 just do easy checks here, and do further checks later. */
1586 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1590 struct neon_typed_alias atype
;
1592 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1594 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1597 if (atype
.index
== NEON_ALL_LANES
)
1599 first_error (_("scalar must have an index"));
1602 else if (atype
.index
>= 64 / elsize
)
1604 first_error (_("scalar index out of range"));
1609 *type
= atype
.eltype
;
1613 return reg
* 16 + atype
.index
;
1616 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1619 parse_reg_list (char ** strp
)
1621 char * str
= * strp
;
1625 /* We come back here if we get ranges concatenated by '+' or '|'. */
1628 skip_whitespace (str
);
1642 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1644 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1654 first_error (_("bad range in register list"));
1658 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1660 if (range
& (1 << i
))
1662 (_("Warning: duplicated register (r%d) in register list"),
1670 if (range
& (1 << reg
))
1671 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1673 else if (reg
<= cur_reg
)
1674 as_tsktsk (_("Warning: register range not in ascending order"));
1679 while (skip_past_comma (&str
) != FAIL
1680 || (in_range
= 1, *str
++ == '-'));
1683 if (skip_past_char (&str
, '}') == FAIL
)
1685 first_error (_("missing `}'"));
1693 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1696 if (exp
.X_op
== O_constant
)
1698 if (exp
.X_add_number
1699 != (exp
.X_add_number
& 0x0000ffff))
1701 inst
.error
= _("invalid register mask");
1705 if ((range
& exp
.X_add_number
) != 0)
1707 int regno
= range
& exp
.X_add_number
;
1710 regno
= (1 << regno
) - 1;
1712 (_("Warning: duplicated register (r%d) in register list"),
1716 range
|= exp
.X_add_number
;
1720 if (inst
.reloc
.type
!= 0)
1722 inst
.error
= _("expression too complex");
1726 memcpy (&inst
.reloc
.exp
, &exp
, sizeof (expressionS
));
1727 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1728 inst
.reloc
.pc_rel
= 0;
1732 if (*str
== '|' || *str
== '+')
1738 while (another_range
);
1744 /* Types of registers in a list. */
1753 /* Parse a VFP register list. If the string is invalid return FAIL.
1754 Otherwise return the number of registers, and set PBASE to the first
1755 register. Parses registers of type ETYPE.
1756 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1757 - Q registers can be used to specify pairs of D registers
1758 - { } can be omitted from around a singleton register list
1759 FIXME: This is not implemented, as it would require backtracking in
1762 This could be done (the meaning isn't really ambiguous), but doesn't
1763 fit in well with the current parsing framework.
1764 - 32 D registers may be used (also true for VFPv3).
1765 FIXME: Types are ignored in these register lists, which is probably a
1769 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1774 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1778 unsigned long mask
= 0;
1781 if (skip_past_char (&str
, '{') == FAIL
)
1783 inst
.error
= _("expecting {");
1790 regtype
= REG_TYPE_VFS
;
1795 regtype
= REG_TYPE_VFD
;
1798 case REGLIST_NEON_D
:
1799 regtype
= REG_TYPE_NDQ
;
1803 if (etype
!= REGLIST_VFP_S
)
1805 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1806 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1810 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1813 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1820 base_reg
= max_regs
;
1824 int setmask
= 1, addregs
= 1;
1826 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1828 if (new_base
== FAIL
)
1830 first_error (_(reg_expected_msgs
[regtype
]));
1834 if (new_base
>= max_regs
)
1836 first_error (_("register out of range in list"));
1840 /* Note: a value of 2 * n is returned for the register Q<n>. */
1841 if (regtype
== REG_TYPE_NQ
)
1847 if (new_base
< base_reg
)
1848 base_reg
= new_base
;
1850 if (mask
& (setmask
<< new_base
))
1852 first_error (_("invalid register list"));
1856 if ((mask
>> new_base
) != 0 && ! warned
)
1858 as_tsktsk (_("register list not in ascending order"));
1862 mask
|= setmask
<< new_base
;
1865 if (*str
== '-') /* We have the start of a range expression */
1871 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1874 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1878 if (high_range
>= max_regs
)
1880 first_error (_("register out of range in list"));
1884 if (regtype
== REG_TYPE_NQ
)
1885 high_range
= high_range
+ 1;
1887 if (high_range
<= new_base
)
1889 inst
.error
= _("register range not in ascending order");
1893 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1895 if (mask
& (setmask
<< new_base
))
1897 inst
.error
= _("invalid register list");
1901 mask
|= setmask
<< new_base
;
1906 while (skip_past_comma (&str
) != FAIL
);
1910 /* Sanity check -- should have raised a parse error above. */
1911 if (count
== 0 || count
> max_regs
)
1916 /* Final test -- the registers must be consecutive. */
1918 for (i
= 0; i
< count
; i
++)
1920 if ((mask
& (1u << i
)) == 0)
1922 inst
.error
= _("non-contiguous register range");
1932 /* True if two alias types are the same. */
1935 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1943 if (a
->defined
!= b
->defined
)
1946 if ((a
->defined
& NTA_HASTYPE
) != 0
1947 && (a
->eltype
.type
!= b
->eltype
.type
1948 || a
->eltype
.size
!= b
->eltype
.size
))
1951 if ((a
->defined
& NTA_HASINDEX
) != 0
1952 && (a
->index
!= b
->index
))
1958 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1959 The base register is put in *PBASE.
1960 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1962 The register stride (minus one) is put in bit 4 of the return value.
1963 Bits [6:5] encode the list length (minus one).
1964 The type of the list elements is put in *ELTYPE, if non-NULL. */
1966 #define NEON_LANE(X) ((X) & 0xf)
1967 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1968 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1971 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
1972 struct neon_type_el
*eltype
)
1979 int leading_brace
= 0;
1980 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
1981 const char *const incr_error
= _("register stride must be 1 or 2");
1982 const char *const type_error
= _("mismatched element/structure types in list");
1983 struct neon_typed_alias firsttype
;
1985 if (skip_past_char (&ptr
, '{') == SUCCESS
)
1990 struct neon_typed_alias atype
;
1991 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
1995 first_error (_(reg_expected_msgs
[rtype
]));
2002 if (rtype
== REG_TYPE_NQ
)
2008 else if (reg_incr
== -1)
2010 reg_incr
= getreg
- base_reg
;
2011 if (reg_incr
< 1 || reg_incr
> 2)
2013 first_error (_(incr_error
));
2017 else if (getreg
!= base_reg
+ reg_incr
* count
)
2019 first_error (_(incr_error
));
2023 if (! neon_alias_types_same (&atype
, &firsttype
))
2025 first_error (_(type_error
));
2029 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2033 struct neon_typed_alias htype
;
2034 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2036 lane
= NEON_INTERLEAVE_LANES
;
2037 else if (lane
!= NEON_INTERLEAVE_LANES
)
2039 first_error (_(type_error
));
2044 else if (reg_incr
!= 1)
2046 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2050 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2053 first_error (_(reg_expected_msgs
[rtype
]));
2056 if (! neon_alias_types_same (&htype
, &firsttype
))
2058 first_error (_(type_error
));
2061 count
+= hireg
+ dregs
- getreg
;
2065 /* If we're using Q registers, we can't use [] or [n] syntax. */
2066 if (rtype
== REG_TYPE_NQ
)
2072 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2076 else if (lane
!= atype
.index
)
2078 first_error (_(type_error
));
2082 else if (lane
== -1)
2083 lane
= NEON_INTERLEAVE_LANES
;
2084 else if (lane
!= NEON_INTERLEAVE_LANES
)
2086 first_error (_(type_error
));
2091 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2093 /* No lane set by [x]. We must be interleaving structures. */
2095 lane
= NEON_INTERLEAVE_LANES
;
2098 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2099 || (count
> 1 && reg_incr
== -1))
2101 first_error (_("error parsing element/structure list"));
2105 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2107 first_error (_("expected }"));
2115 *eltype
= firsttype
.eltype
;
2120 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2123 /* Parse an explicit relocation suffix on an expression. This is
2124 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2125 arm_reloc_hsh contains no entries, so this function can only
2126 succeed if there is no () after the word. Returns -1 on error,
2127 BFD_RELOC_UNUSED if there wasn't any suffix. */
2130 parse_reloc (char **str
)
2132 struct reloc_entry
*r
;
2136 return BFD_RELOC_UNUSED
;
2141 while (*q
&& *q
!= ')' && *q
!= ',')
2146 if ((r
= (struct reloc_entry
*)
2147 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2154 /* Directives: register aliases. */
2156 static struct reg_entry
*
2157 insert_reg_alias (char *str
, unsigned number
, int type
)
2159 struct reg_entry
*new_reg
;
2162 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2164 if (new_reg
->builtin
)
2165 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2167 /* Only warn about a redefinition if it's not defined as the
2169 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2170 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2175 name
= xstrdup (str
);
2176 new_reg
= (struct reg_entry
*) xmalloc (sizeof (struct reg_entry
));
2178 new_reg
->name
= name
;
2179 new_reg
->number
= number
;
2180 new_reg
->type
= type
;
2181 new_reg
->builtin
= FALSE
;
2182 new_reg
->neon
= NULL
;
2184 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2191 insert_neon_reg_alias (char *str
, int number
, int type
,
2192 struct neon_typed_alias
*atype
)
2194 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2198 first_error (_("attempt to redefine typed alias"));
2204 reg
->neon
= (struct neon_typed_alias
*)
2205 xmalloc (sizeof (struct neon_typed_alias
));
2206 *reg
->neon
= *atype
;
2210 /* Look for the .req directive. This is of the form:
2212 new_register_name .req existing_register_name
2214 If we find one, or if it looks sufficiently like one that we want to
2215 handle any error here, return TRUE. Otherwise return FALSE. */
2218 create_register_alias (char * newname
, char *p
)
2220 struct reg_entry
*old
;
2221 char *oldname
, *nbuf
;
2224 /* The input scrubber ensures that whitespace after the mnemonic is
2225 collapsed to single spaces. */
2227 if (strncmp (oldname
, " .req ", 6) != 0)
2231 if (*oldname
== '\0')
2234 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2237 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2241 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2242 the desired alias name, and p points to its end. If not, then
2243 the desired alias name is in the global original_case_string. */
2244 #ifdef TC_CASE_SENSITIVE
2247 newname
= original_case_string
;
2248 nlen
= strlen (newname
);
2251 nbuf
= (char *) alloca (nlen
+ 1);
2252 memcpy (nbuf
, newname
, nlen
);
2255 /* Create aliases under the new name as stated; an all-lowercase
2256 version of the new name; and an all-uppercase version of the new
2258 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2260 for (p
= nbuf
; *p
; p
++)
2263 if (strncmp (nbuf
, newname
, nlen
))
2265 /* If this attempt to create an additional alias fails, do not bother
2266 trying to create the all-lower case alias. We will fail and issue
2267 a second, duplicate error message. This situation arises when the
2268 programmer does something like:
2271 The second .req creates the "Foo" alias but then fails to create
2272 the artificial FOO alias because it has already been created by the
2274 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2278 for (p
= nbuf
; *p
; p
++)
2281 if (strncmp (nbuf
, newname
, nlen
))
2282 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2288 /* Create a Neon typed/indexed register alias using directives, e.g.:
2293 These typed registers can be used instead of the types specified after the
2294 Neon mnemonic, so long as all operands given have types. Types can also be
2295 specified directly, e.g.:
2296 vadd d0.s32, d1.s32, d2.s32 */
2299 create_neon_reg_alias (char *newname
, char *p
)
2301 enum arm_reg_type basetype
;
2302 struct reg_entry
*basereg
;
2303 struct reg_entry mybasereg
;
2304 struct neon_type ntype
;
2305 struct neon_typed_alias typeinfo
;
2306 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2309 typeinfo
.defined
= 0;
2310 typeinfo
.eltype
.type
= NT_invtype
;
2311 typeinfo
.eltype
.size
= -1;
2312 typeinfo
.index
= -1;
2316 if (strncmp (p
, " .dn ", 5) == 0)
2317 basetype
= REG_TYPE_VFD
;
2318 else if (strncmp (p
, " .qn ", 5) == 0)
2319 basetype
= REG_TYPE_NQ
;
2328 basereg
= arm_reg_parse_multi (&p
);
2330 if (basereg
&& basereg
->type
!= basetype
)
2332 as_bad (_("bad type for register"));
2336 if (basereg
== NULL
)
2339 /* Try parsing as an integer. */
2340 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2341 if (exp
.X_op
!= O_constant
)
2343 as_bad (_("expression must be constant"));
2346 basereg
= &mybasereg
;
2347 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2353 typeinfo
= *basereg
->neon
;
2355 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2357 /* We got a type. */
2358 if (typeinfo
.defined
& NTA_HASTYPE
)
2360 as_bad (_("can't redefine the type of a register alias"));
2364 typeinfo
.defined
|= NTA_HASTYPE
;
2365 if (ntype
.elems
!= 1)
2367 as_bad (_("you must specify a single type only"));
2370 typeinfo
.eltype
= ntype
.el
[0];
2373 if (skip_past_char (&p
, '[') == SUCCESS
)
2376 /* We got a scalar index. */
2378 if (typeinfo
.defined
& NTA_HASINDEX
)
2380 as_bad (_("can't redefine the index of a scalar alias"));
2384 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2386 if (exp
.X_op
!= O_constant
)
2388 as_bad (_("scalar index must be constant"));
2392 typeinfo
.defined
|= NTA_HASINDEX
;
2393 typeinfo
.index
= exp
.X_add_number
;
2395 if (skip_past_char (&p
, ']') == FAIL
)
2397 as_bad (_("expecting ]"));
2402 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2403 the desired alias name, and p points to its end. If not, then
2404 the desired alias name is in the global original_case_string. */
2405 #ifdef TC_CASE_SENSITIVE
2406 namelen
= nameend
- newname
;
2408 newname
= original_case_string
;
2409 namelen
= strlen (newname
);
2412 namebuf
= (char *) alloca (namelen
+ 1);
2413 strncpy (namebuf
, newname
, namelen
);
2414 namebuf
[namelen
] = '\0';
2416 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2417 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2419 /* Insert name in all uppercase. */
2420 for (p
= namebuf
; *p
; p
++)
2423 if (strncmp (namebuf
, newname
, namelen
))
2424 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2425 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2427 /* Insert name in all lowercase. */
2428 for (p
= namebuf
; *p
; p
++)
2431 if (strncmp (namebuf
, newname
, namelen
))
2432 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2433 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2438 /* Should never be called, as .req goes between the alias and the
2439 register name, not at the beginning of the line. */
2442 s_req (int a ATTRIBUTE_UNUSED
)
2444 as_bad (_("invalid syntax for .req directive"));
2448 s_dn (int a ATTRIBUTE_UNUSED
)
2450 as_bad (_("invalid syntax for .dn directive"));
2454 s_qn (int a ATTRIBUTE_UNUSED
)
2456 as_bad (_("invalid syntax for .qn directive"));
2459 /* The .unreq directive deletes an alias which was previously defined
2460 by .req. For example:
2466 s_unreq (int a ATTRIBUTE_UNUSED
)
2471 name
= input_line_pointer
;
2473 while (*input_line_pointer
!= 0
2474 && *input_line_pointer
!= ' '
2475 && *input_line_pointer
!= '\n')
2476 ++input_line_pointer
;
2478 saved_char
= *input_line_pointer
;
2479 *input_line_pointer
= 0;
2482 as_bad (_("invalid syntax for .unreq directive"));
2485 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2489 as_bad (_("unknown register alias '%s'"), name
);
2490 else if (reg
->builtin
)
2491 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2498 hash_delete (arm_reg_hsh
, name
, FALSE
);
2499 free ((char *) reg
->name
);
2504 /* Also locate the all upper case and all lower case versions.
2505 Do not complain if we cannot find one or the other as it
2506 was probably deleted above. */
2508 nbuf
= strdup (name
);
2509 for (p
= nbuf
; *p
; p
++)
2511 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2514 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2515 free ((char *) reg
->name
);
2521 for (p
= nbuf
; *p
; p
++)
2523 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2526 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2527 free ((char *) reg
->name
);
2537 *input_line_pointer
= saved_char
;
2538 demand_empty_rest_of_line ();
2541 /* Directives: Instruction set selection. */
2544 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2545 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2546 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2547 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2549 /* Create a new mapping symbol for the transition to STATE. */
2552 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2555 const char * symname
;
2562 type
= BSF_NO_FLAGS
;
2566 type
= BSF_NO_FLAGS
;
2570 type
= BSF_NO_FLAGS
;
2576 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2577 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2582 THUMB_SET_FUNC (symbolP
, 0);
2583 ARM_SET_THUMB (symbolP
, 0);
2584 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2588 THUMB_SET_FUNC (symbolP
, 1);
2589 ARM_SET_THUMB (symbolP
, 1);
2590 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2598 /* Save the mapping symbols for future reference. Also check that
2599 we do not place two mapping symbols at the same offset within a
2600 frag. We'll handle overlap between frags in
2601 check_mapping_symbols.
2603 If .fill or other data filling directive generates zero sized data,
2604 the mapping symbol for the following code will have the same value
2605 as the one generated for the data filling directive. In this case,
2606 we replace the old symbol with the new one at the same address. */
2609 if (frag
->tc_frag_data
.first_map
!= NULL
)
2611 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2612 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2614 frag
->tc_frag_data
.first_map
= symbolP
;
2616 if (frag
->tc_frag_data
.last_map
!= NULL
)
2618 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2619 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2620 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2622 frag
->tc_frag_data
.last_map
= symbolP
;
2625 /* We must sometimes convert a region marked as code to data during
2626 code alignment, if an odd number of bytes have to be padded. The
2627 code mapping symbol is pushed to an aligned address. */
2630 insert_data_mapping_symbol (enum mstate state
,
2631 valueT value
, fragS
*frag
, offsetT bytes
)
2633 /* If there was already a mapping symbol, remove it. */
2634 if (frag
->tc_frag_data
.last_map
!= NULL
2635 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2637 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2641 know (frag
->tc_frag_data
.first_map
== symp
);
2642 frag
->tc_frag_data
.first_map
= NULL
;
2644 frag
->tc_frag_data
.last_map
= NULL
;
2645 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2648 make_mapping_symbol (MAP_DATA
, value
, frag
);
2649 make_mapping_symbol (state
, value
+ bytes
, frag
);
2652 static void mapping_state_2 (enum mstate state
, int max_chars
);
2654 /* Set the mapping state to STATE. Only call this when about to
2655 emit some STATE bytes to the file. */
2657 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2659 mapping_state (enum mstate state
)
2661 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2663 if (mapstate
== state
)
2664 /* The mapping symbol has already been emitted.
2665 There is nothing else to do. */
2668 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2670 All ARM instructions require 4-byte alignment.
2671 (Almost) all Thumb instructions require 2-byte alignment.
2673 When emitting instructions into any section, mark the section
2676 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2677 but themselves require 2-byte alignment; this applies to some
2678 PC- relative forms. However, these cases will invovle implicit
2679 literal pool generation or an explicit .align >=2, both of
2680 which will cause the section to me marked with sufficient
2681 alignment. Thus, we don't handle those cases here. */
2682 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2684 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2685 /* This case will be evaluated later. */
2688 mapping_state_2 (state
, 0);
2691 /* Same as mapping_state, but MAX_CHARS bytes have already been
2692 allocated. Put the mapping symbol that far back. */
2695 mapping_state_2 (enum mstate state
, int max_chars
)
2697 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2699 if (!SEG_NORMAL (now_seg
))
2702 if (mapstate
== state
)
2703 /* The mapping symbol has already been emitted.
2704 There is nothing else to do. */
2707 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2708 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2710 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2711 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2714 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2717 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2718 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2722 #define mapping_state(x) ((void)0)
2723 #define mapping_state_2(x, y) ((void)0)
2726 /* Find the real, Thumb encoded start of a Thumb function. */
2730 find_real_start (symbolS
* symbolP
)
2733 const char * name
= S_GET_NAME (symbolP
);
2734 symbolS
* new_target
;
2736 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2737 #define STUB_NAME ".real_start_of"
2742 /* The compiler may generate BL instructions to local labels because
2743 it needs to perform a branch to a far away location. These labels
2744 do not have a corresponding ".real_start_of" label. We check
2745 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2746 the ".real_start_of" convention for nonlocal branches. */
2747 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2750 real_start
= ACONCAT ((STUB_NAME
, name
, NULL
));
2751 new_target
= symbol_find (real_start
);
2753 if (new_target
== NULL
)
2755 as_warn (_("Failed to find real start of function: %s\n"), name
);
2756 new_target
= symbolP
;
2764 opcode_select (int width
)
2771 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2772 as_bad (_("selected processor does not support THUMB opcodes"));
2775 /* No need to force the alignment, since we will have been
2776 coming from ARM mode, which is word-aligned. */
2777 record_alignment (now_seg
, 1);
2784 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2785 as_bad (_("selected processor does not support ARM opcodes"));
2790 frag_align (2, 0, 0);
2792 record_alignment (now_seg
, 1);
2797 as_bad (_("invalid instruction size selected (%d)"), width
);
2802 s_arm (int ignore ATTRIBUTE_UNUSED
)
2805 demand_empty_rest_of_line ();
2809 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2812 demand_empty_rest_of_line ();
2816 s_code (int unused ATTRIBUTE_UNUSED
)
2820 temp
= get_absolute_expression ();
2825 opcode_select (temp
);
2829 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2834 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2836 /* If we are not already in thumb mode go into it, EVEN if
2837 the target processor does not support thumb instructions.
2838 This is used by gcc/config/arm/lib1funcs.asm for example
2839 to compile interworking support functions even if the
2840 target processor should not support interworking. */
2844 record_alignment (now_seg
, 1);
2847 demand_empty_rest_of_line ();
2851 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2855 /* The following label is the name/address of the start of a Thumb function.
2856 We need to know this for the interworking support. */
2857 label_is_thumb_function_name
= TRUE
;
2860 /* Perform a .set directive, but also mark the alias as
2861 being a thumb function. */
2864 s_thumb_set (int equiv
)
2866 /* XXX the following is a duplicate of the code for s_set() in read.c
2867 We cannot just call that code as we need to get at the symbol that
2874 /* Especial apologies for the random logic:
2875 This just grew, and could be parsed much more simply!
2877 delim
= get_symbol_name (& name
);
2878 end_name
= input_line_pointer
;
2879 (void) restore_line_pointer (delim
);
2881 if (*input_line_pointer
!= ',')
2884 as_bad (_("expected comma after name \"%s\""), name
);
2886 ignore_rest_of_line ();
2890 input_line_pointer
++;
2893 if (name
[0] == '.' && name
[1] == '\0')
2895 /* XXX - this should not happen to .thumb_set. */
2899 if ((symbolP
= symbol_find (name
)) == NULL
2900 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2903 /* When doing symbol listings, play games with dummy fragments living
2904 outside the normal fragment chain to record the file and line info
2906 if (listing
& LISTING_SYMBOLS
)
2908 extern struct list_info_struct
* listing_tail
;
2909 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
2911 memset (dummy_frag
, 0, sizeof (fragS
));
2912 dummy_frag
->fr_type
= rs_fill
;
2913 dummy_frag
->line
= listing_tail
;
2914 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2915 dummy_frag
->fr_symbol
= symbolP
;
2919 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2922 /* "set" symbols are local unless otherwise specified. */
2923 SF_SET_LOCAL (symbolP
);
2924 #endif /* OBJ_COFF */
2925 } /* Make a new symbol. */
2927 symbol_table_insert (symbolP
);
2932 && S_IS_DEFINED (symbolP
)
2933 && S_GET_SEGMENT (symbolP
) != reg_section
)
2934 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2936 pseudo_set (symbolP
);
2938 demand_empty_rest_of_line ();
2940 /* XXX Now we come to the Thumb specific bit of code. */
2942 THUMB_SET_FUNC (symbolP
, 1);
2943 ARM_SET_THUMB (symbolP
, 1);
2944 #if defined OBJ_ELF || defined OBJ_COFF
2945 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2949 /* Directives: Mode selection. */
2951 /* .syntax [unified|divided] - choose the new unified syntax
2952 (same for Arm and Thumb encoding, modulo slight differences in what
2953 can be represented) or the old divergent syntax for each mode. */
2955 s_syntax (int unused ATTRIBUTE_UNUSED
)
2959 delim
= get_symbol_name (& name
);
2961 if (!strcasecmp (name
, "unified"))
2962 unified_syntax
= TRUE
;
2963 else if (!strcasecmp (name
, "divided"))
2964 unified_syntax
= FALSE
;
2967 as_bad (_("unrecognized syntax mode \"%s\""), name
);
2970 (void) restore_line_pointer (delim
);
2971 demand_empty_rest_of_line ();
2974 /* Directives: sectioning and alignment. */
2977 s_bss (int ignore ATTRIBUTE_UNUSED
)
2979 /* We don't support putting frags in the BSS segment, we fake it by
2980 marking in_bss, then looking at s_skip for clues. */
2981 subseg_set (bss_section
, 0);
2982 demand_empty_rest_of_line ();
2984 #ifdef md_elf_section_change_hook
2985 md_elf_section_change_hook ();
2990 s_even (int ignore ATTRIBUTE_UNUSED
)
2992 /* Never make frag if expect extra pass. */
2994 frag_align (1, 0, 0);
2996 record_alignment (now_seg
, 1);
2998 demand_empty_rest_of_line ();
3001 /* Directives: CodeComposer Studio. */
3003 /* .ref (for CodeComposer Studio syntax only). */
3005 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3007 if (codecomposer_syntax
)
3008 ignore_rest_of_line ();
3010 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3013 /* If name is not NULL, then it is used for marking the beginning of a
3014 function, wherease if it is NULL then it means the function end. */
3016 asmfunc_debug (const char * name
)
3018 static const char * last_name
= NULL
;
3022 gas_assert (last_name
== NULL
);
3025 if (debug_type
== DEBUG_STABS
)
3026 stabs_generate_asm_func (name
, name
);
3030 gas_assert (last_name
!= NULL
);
3032 if (debug_type
== DEBUG_STABS
)
3033 stabs_generate_asm_endfunc (last_name
, last_name
);
3040 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3042 if (codecomposer_syntax
)
3044 switch (asmfunc_state
)
3046 case OUTSIDE_ASMFUNC
:
3047 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3050 case WAITING_ASMFUNC_NAME
:
3051 as_bad (_(".asmfunc repeated."));
3054 case WAITING_ENDASMFUNC
:
3055 as_bad (_(".asmfunc without function."));
3058 demand_empty_rest_of_line ();
3061 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3065 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3067 if (codecomposer_syntax
)
3069 switch (asmfunc_state
)
3071 case OUTSIDE_ASMFUNC
:
3072 as_bad (_(".endasmfunc without a .asmfunc."));
3075 case WAITING_ASMFUNC_NAME
:
3076 as_bad (_(".endasmfunc without function."));
3079 case WAITING_ENDASMFUNC
:
3080 asmfunc_state
= OUTSIDE_ASMFUNC
;
3081 asmfunc_debug (NULL
);
3084 demand_empty_rest_of_line ();
3087 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3091 s_ccs_def (int name
)
3093 if (codecomposer_syntax
)
3096 as_bad (_(".def pseudo-op only available with -mccs flag."));
3099 /* Directives: Literal pools. */
3101 static literal_pool
*
3102 find_literal_pool (void)
3104 literal_pool
* pool
;
3106 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3108 if (pool
->section
== now_seg
3109 && pool
->sub_section
== now_subseg
)
3116 static literal_pool
*
3117 find_or_make_literal_pool (void)
3119 /* Next literal pool ID number. */
3120 static unsigned int latest_pool_num
= 1;
3121 literal_pool
* pool
;
3123 pool
= find_literal_pool ();
3127 /* Create a new pool. */
3128 pool
= (literal_pool
*) xmalloc (sizeof (* pool
));
3132 pool
->next_free_entry
= 0;
3133 pool
->section
= now_seg
;
3134 pool
->sub_section
= now_subseg
;
3135 pool
->next
= list_of_pools
;
3136 pool
->symbol
= NULL
;
3137 pool
->alignment
= 2;
3139 /* Add it to the list. */
3140 list_of_pools
= pool
;
3143 /* New pools, and emptied pools, will have a NULL symbol. */
3144 if (pool
->symbol
== NULL
)
3146 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3147 (valueT
) 0, &zero_address_frag
);
3148 pool
->id
= latest_pool_num
++;
3155 /* Add the literal in the global 'inst'
3156 structure to the relevant literal pool. */
3159 add_to_lit_pool (unsigned int nbytes
)
3161 #define PADDING_SLOT 0x1
3162 #define LIT_ENTRY_SIZE_MASK 0xFF
3163 literal_pool
* pool
;
3164 unsigned int entry
, pool_size
= 0;
3165 bfd_boolean padding_slot_p
= FALSE
;
3171 imm1
= inst
.operands
[1].imm
;
3172 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3173 : inst
.reloc
.exp
.X_unsigned
? 0
3174 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3175 if (target_big_endian
)
3178 imm2
= inst
.operands
[1].imm
;
3182 pool
= find_or_make_literal_pool ();
3184 /* Check if this literal value is already in the pool. */
3185 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3189 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3190 && (inst
.reloc
.exp
.X_op
== O_constant
)
3191 && (pool
->literals
[entry
].X_add_number
3192 == inst
.reloc
.exp
.X_add_number
)
3193 && (pool
->literals
[entry
].X_md
== nbytes
)
3194 && (pool
->literals
[entry
].X_unsigned
3195 == inst
.reloc
.exp
.X_unsigned
))
3198 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3199 && (inst
.reloc
.exp
.X_op
== O_symbol
)
3200 && (pool
->literals
[entry
].X_add_number
3201 == inst
.reloc
.exp
.X_add_number
)
3202 && (pool
->literals
[entry
].X_add_symbol
3203 == inst
.reloc
.exp
.X_add_symbol
)
3204 && (pool
->literals
[entry
].X_op_symbol
3205 == inst
.reloc
.exp
.X_op_symbol
)
3206 && (pool
->literals
[entry
].X_md
== nbytes
))
3209 else if ((nbytes
== 8)
3210 && !(pool_size
& 0x7)
3211 && ((entry
+ 1) != pool
->next_free_entry
)
3212 && (pool
->literals
[entry
].X_op
== O_constant
)
3213 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3214 && (pool
->literals
[entry
].X_unsigned
3215 == inst
.reloc
.exp
.X_unsigned
)
3216 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3217 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3218 && (pool
->literals
[entry
+ 1].X_unsigned
3219 == inst
.reloc
.exp
.X_unsigned
))
3222 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3223 if (padding_slot_p
&& (nbytes
== 4))
3229 /* Do we need to create a new entry? */
3230 if (entry
== pool
->next_free_entry
)
3232 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3234 inst
.error
= _("literal pool overflow");
3240 /* For 8-byte entries, we align to an 8-byte boundary,
3241 and split it into two 4-byte entries, because on 32-bit
3242 host, 8-byte constants are treated as big num, thus
3243 saved in "generic_bignum" which will be overwritten
3244 by later assignments.
3246 We also need to make sure there is enough space for
3249 We also check to make sure the literal operand is a
3251 if (!(inst
.reloc
.exp
.X_op
== O_constant
3252 || inst
.reloc
.exp
.X_op
== O_big
))
3254 inst
.error
= _("invalid type for literal pool");
3257 else if (pool_size
& 0x7)
3259 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3261 inst
.error
= _("literal pool overflow");
3265 pool
->literals
[entry
] = inst
.reloc
.exp
;
3266 pool
->literals
[entry
].X_add_number
= 0;
3267 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3268 pool
->next_free_entry
+= 1;
3271 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3273 inst
.error
= _("literal pool overflow");
3277 pool
->literals
[entry
] = inst
.reloc
.exp
;
3278 pool
->literals
[entry
].X_op
= O_constant
;
3279 pool
->literals
[entry
].X_add_number
= imm1
;
3280 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3281 pool
->literals
[entry
++].X_md
= 4;
3282 pool
->literals
[entry
] = inst
.reloc
.exp
;
3283 pool
->literals
[entry
].X_op
= O_constant
;
3284 pool
->literals
[entry
].X_add_number
= imm2
;
3285 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3286 pool
->literals
[entry
].X_md
= 4;
3287 pool
->alignment
= 3;
3288 pool
->next_free_entry
+= 1;
3292 pool
->literals
[entry
] = inst
.reloc
.exp
;
3293 pool
->literals
[entry
].X_md
= 4;
3297 /* PR ld/12974: Record the location of the first source line to reference
3298 this entry in the literal pool. If it turns out during linking that the
3299 symbol does not exist we will be able to give an accurate line number for
3300 the (first use of the) missing reference. */
3301 if (debug_type
== DEBUG_DWARF2
)
3302 dwarf2_where (pool
->locs
+ entry
);
3304 pool
->next_free_entry
+= 1;
3306 else if (padding_slot_p
)
3308 pool
->literals
[entry
] = inst
.reloc
.exp
;
3309 pool
->literals
[entry
].X_md
= nbytes
;
3312 inst
.reloc
.exp
.X_op
= O_symbol
;
3313 inst
.reloc
.exp
.X_add_number
= pool_size
;
3314 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
3320 tc_start_label_without_colon (void)
3322 bfd_boolean ret
= TRUE
;
3324 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3326 const char *label
= input_line_pointer
;
3328 while (!is_end_of_line
[(int) label
[-1]])
3333 as_bad (_("Invalid label '%s'"), label
);
3337 asmfunc_debug (label
);
3339 asmfunc_state
= WAITING_ENDASMFUNC
;
3345 /* Can't use symbol_new here, so have to create a symbol and then at
3346 a later date assign it a value. Thats what these functions do. */
3349 symbol_locate (symbolS
* symbolP
,
3350 const char * name
, /* It is copied, the caller can modify. */
3351 segT segment
, /* Segment identifier (SEG_<something>). */
3352 valueT valu
, /* Symbol value. */
3353 fragS
* frag
) /* Associated fragment. */
3356 char * preserved_copy_of_name
;
3358 name_length
= strlen (name
) + 1; /* +1 for \0. */
3359 obstack_grow (¬es
, name
, name_length
);
3360 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3362 #ifdef tc_canonicalize_symbol_name
3363 preserved_copy_of_name
=
3364 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3367 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3369 S_SET_SEGMENT (symbolP
, segment
);
3370 S_SET_VALUE (symbolP
, valu
);
3371 symbol_clear_list_pointers (symbolP
);
3373 symbol_set_frag (symbolP
, frag
);
3375 /* Link to end of symbol chain. */
3377 extern int symbol_table_frozen
;
3379 if (symbol_table_frozen
)
3383 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3385 obj_symbol_new_hook (symbolP
);
3387 #ifdef tc_symbol_new_hook
3388 tc_symbol_new_hook (symbolP
);
3392 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3393 #endif /* DEBUG_SYMS */
3397 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3400 literal_pool
* pool
;
3403 pool
= find_literal_pool ();
3405 || pool
->symbol
== NULL
3406 || pool
->next_free_entry
== 0)
3409 /* Align pool as you have word accesses.
3410 Only make a frag if we have to. */
3412 frag_align (pool
->alignment
, 0, 0);
3414 record_alignment (now_seg
, 2);
3417 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3418 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3420 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3422 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3423 (valueT
) frag_now_fix (), frag_now
);
3424 symbol_table_insert (pool
->symbol
);
3426 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3428 #if defined OBJ_COFF || defined OBJ_ELF
3429 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3432 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3435 if (debug_type
== DEBUG_DWARF2
)
3436 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3438 /* First output the expression in the instruction to the pool. */
3439 emit_expr (&(pool
->literals
[entry
]),
3440 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3443 /* Mark the pool as empty. */
3444 pool
->next_free_entry
= 0;
3445 pool
->symbol
= NULL
;
3449 /* Forward declarations for functions below, in the MD interface
3451 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3452 static valueT
create_unwind_entry (int);
3453 static void start_unwind_section (const segT
, int);
3454 static void add_unwind_opcode (valueT
, int);
3455 static void flush_pending_unwind (void);
3457 /* Directives: Data. */
3460 s_arm_elf_cons (int nbytes
)
3464 #ifdef md_flush_pending_output
3465 md_flush_pending_output ();
3468 if (is_it_end_of_statement ())
3470 demand_empty_rest_of_line ();
3474 #ifdef md_cons_align
3475 md_cons_align (nbytes
);
3478 mapping_state (MAP_DATA
);
3482 char *base
= input_line_pointer
;
3486 if (exp
.X_op
!= O_symbol
)
3487 emit_expr (&exp
, (unsigned int) nbytes
);
3490 char *before_reloc
= input_line_pointer
;
3491 reloc
= parse_reloc (&input_line_pointer
);
3494 as_bad (_("unrecognized relocation suffix"));
3495 ignore_rest_of_line ();
3498 else if (reloc
== BFD_RELOC_UNUSED
)
3499 emit_expr (&exp
, (unsigned int) nbytes
);
3502 reloc_howto_type
*howto
= (reloc_howto_type
*)
3503 bfd_reloc_type_lookup (stdoutput
,
3504 (bfd_reloc_code_real_type
) reloc
);
3505 int size
= bfd_get_reloc_size (howto
);
3507 if (reloc
== BFD_RELOC_ARM_PLT32
)
3509 as_bad (_("(plt) is only valid on branch targets"));
3510 reloc
= BFD_RELOC_UNUSED
;
3515 as_bad (_("%s relocations do not fit in %d bytes"),
3516 howto
->name
, nbytes
);
3519 /* We've parsed an expression stopping at O_symbol.
3520 But there may be more expression left now that we
3521 have parsed the relocation marker. Parse it again.
3522 XXX Surely there is a cleaner way to do this. */
3523 char *p
= input_line_pointer
;
3525 char *save_buf
= (char *) alloca (input_line_pointer
- base
);
3526 memcpy (save_buf
, base
, input_line_pointer
- base
);
3527 memmove (base
+ (input_line_pointer
- before_reloc
),
3528 base
, before_reloc
- base
);
3530 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3532 memcpy (base
, save_buf
, p
- base
);
3534 offset
= nbytes
- size
;
3535 p
= frag_more (nbytes
);
3536 memset (p
, 0, nbytes
);
3537 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3538 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3543 while (*input_line_pointer
++ == ',');
3545 /* Put terminator back into stream. */
3546 input_line_pointer
--;
3547 demand_empty_rest_of_line ();
3550 /* Emit an expression containing a 32-bit thumb instruction.
3551 Implementation based on put_thumb32_insn. */
3554 emit_thumb32_expr (expressionS
* exp
)
3556 expressionS exp_high
= *exp
;
3558 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3559 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3560 exp
->X_add_number
&= 0xffff;
3561 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
3564 /* Guess the instruction size based on the opcode. */
3567 thumb_insn_size (int opcode
)
3569 if ((unsigned int) opcode
< 0xe800u
)
3571 else if ((unsigned int) opcode
>= 0xe8000000u
)
3578 emit_insn (expressionS
*exp
, int nbytes
)
3582 if (exp
->X_op
== O_constant
)
3587 size
= thumb_insn_size (exp
->X_add_number
);
3591 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3593 as_bad (_(".inst.n operand too big. "\
3594 "Use .inst.w instead"));
3599 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3600 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3602 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3604 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3605 emit_thumb32_expr (exp
);
3607 emit_expr (exp
, (unsigned int) size
);
3609 it_fsm_post_encode ();
3613 as_bad (_("cannot determine Thumb instruction size. " \
3614 "Use .inst.n/.inst.w instead"));
3617 as_bad (_("constant expression required"));
3622 /* Like s_arm_elf_cons but do not use md_cons_align and
3623 set the mapping state to MAP_ARM/MAP_THUMB. */
3626 s_arm_elf_inst (int nbytes
)
3628 if (is_it_end_of_statement ())
3630 demand_empty_rest_of_line ();
3634 /* Calling mapping_state () here will not change ARM/THUMB,
3635 but will ensure not to be in DATA state. */
3638 mapping_state (MAP_THUMB
);
3643 as_bad (_("width suffixes are invalid in ARM mode"));
3644 ignore_rest_of_line ();
3650 mapping_state (MAP_ARM
);
3659 if (! emit_insn (& exp
, nbytes
))
3661 ignore_rest_of_line ();
3665 while (*input_line_pointer
++ == ',');
3667 /* Put terminator back into stream. */
3668 input_line_pointer
--;
3669 demand_empty_rest_of_line ();
3672 /* Parse a .rel31 directive. */
3675 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3682 if (*input_line_pointer
== '1')
3683 highbit
= 0x80000000;
3684 else if (*input_line_pointer
!= '0')
3685 as_bad (_("expected 0 or 1"));
3687 input_line_pointer
++;
3688 if (*input_line_pointer
!= ',')
3689 as_bad (_("missing comma"));
3690 input_line_pointer
++;
3692 #ifdef md_flush_pending_output
3693 md_flush_pending_output ();
3696 #ifdef md_cons_align
3700 mapping_state (MAP_DATA
);
3705 md_number_to_chars (p
, highbit
, 4);
3706 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3707 BFD_RELOC_ARM_PREL31
);
3709 demand_empty_rest_of_line ();
3712 /* Directives: AEABI stack-unwind tables. */
3714 /* Parse an unwind_fnstart directive. Simply records the current location. */
3717 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3719 demand_empty_rest_of_line ();
3720 if (unwind
.proc_start
)
3722 as_bad (_("duplicate .fnstart directive"));
3726 /* Mark the start of the function. */
3727 unwind
.proc_start
= expr_build_dot ();
3729 /* Reset the rest of the unwind info. */
3730 unwind
.opcode_count
= 0;
3731 unwind
.table_entry
= NULL
;
3732 unwind
.personality_routine
= NULL
;
3733 unwind
.personality_index
= -1;
3734 unwind
.frame_size
= 0;
3735 unwind
.fp_offset
= 0;
3736 unwind
.fp_reg
= REG_SP
;
3738 unwind
.sp_restored
= 0;
3742 /* Parse a handlerdata directive. Creates the exception handling table entry
3743 for the function. */
3746 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3748 demand_empty_rest_of_line ();
3749 if (!unwind
.proc_start
)
3750 as_bad (MISSING_FNSTART
);
3752 if (unwind
.table_entry
)
3753 as_bad (_("duplicate .handlerdata directive"));
3755 create_unwind_entry (1);
3758 /* Parse an unwind_fnend directive. Generates the index table entry. */
3761 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3766 unsigned int marked_pr_dependency
;
3768 demand_empty_rest_of_line ();
3770 if (!unwind
.proc_start
)
3772 as_bad (_(".fnend directive without .fnstart"));
3776 /* Add eh table entry. */
3777 if (unwind
.table_entry
== NULL
)
3778 val
= create_unwind_entry (0);
3782 /* Add index table entry. This is two words. */
3783 start_unwind_section (unwind
.saved_seg
, 1);
3784 frag_align (2, 0, 0);
3785 record_alignment (now_seg
, 2);
3787 ptr
= frag_more (8);
3789 where
= frag_now_fix () - 8;
3791 /* Self relative offset of the function start. */
3792 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3793 BFD_RELOC_ARM_PREL31
);
3795 /* Indicate dependency on EHABI-defined personality routines to the
3796 linker, if it hasn't been done already. */
3797 marked_pr_dependency
3798 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3799 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3800 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3802 static const char *const name
[] =
3804 "__aeabi_unwind_cpp_pr0",
3805 "__aeabi_unwind_cpp_pr1",
3806 "__aeabi_unwind_cpp_pr2"
3808 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3809 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3810 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3811 |= 1 << unwind
.personality_index
;
3815 /* Inline exception table entry. */
3816 md_number_to_chars (ptr
+ 4, val
, 4);
3818 /* Self relative offset of the table entry. */
3819 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3820 BFD_RELOC_ARM_PREL31
);
3822 /* Restore the original section. */
3823 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3825 unwind
.proc_start
= NULL
;
3829 /* Parse an unwind_cantunwind directive. */
3832 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3834 demand_empty_rest_of_line ();
3835 if (!unwind
.proc_start
)
3836 as_bad (MISSING_FNSTART
);
3838 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3839 as_bad (_("personality routine specified for cantunwind frame"));
3841 unwind
.personality_index
= -2;
3845 /* Parse a personalityindex directive. */
3848 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3852 if (!unwind
.proc_start
)
3853 as_bad (MISSING_FNSTART
);
3855 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3856 as_bad (_("duplicate .personalityindex directive"));
3860 if (exp
.X_op
!= O_constant
3861 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3863 as_bad (_("bad personality routine number"));
3864 ignore_rest_of_line ();
3868 unwind
.personality_index
= exp
.X_add_number
;
3870 demand_empty_rest_of_line ();
3874 /* Parse a personality directive. */
3877 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3881 if (!unwind
.proc_start
)
3882 as_bad (MISSING_FNSTART
);
3884 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3885 as_bad (_("duplicate .personality directive"));
3887 c
= get_symbol_name (& name
);
3888 p
= input_line_pointer
;
3890 ++ input_line_pointer
;
3891 unwind
.personality_routine
= symbol_find_or_make (name
);
3893 demand_empty_rest_of_line ();
3897 /* Parse a directive saving core registers. */
3900 s_arm_unwind_save_core (void)
3906 range
= parse_reg_list (&input_line_pointer
);
3909 as_bad (_("expected register list"));
3910 ignore_rest_of_line ();
3914 demand_empty_rest_of_line ();
3916 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3917 into .unwind_save {..., sp...}. We aren't bothered about the value of
3918 ip because it is clobbered by calls. */
3919 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3920 && (range
& 0x3000) == 0x1000)
3922 unwind
.opcode_count
--;
3923 unwind
.sp_restored
= 0;
3924 range
= (range
| 0x2000) & ~0x1000;
3925 unwind
.pending_offset
= 0;
3931 /* See if we can use the short opcodes. These pop a block of up to 8
3932 registers starting with r4, plus maybe r14. */
3933 for (n
= 0; n
< 8; n
++)
3935 /* Break at the first non-saved register. */
3936 if ((range
& (1 << (n
+ 4))) == 0)
3939 /* See if there are any other bits set. */
3940 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3942 /* Use the long form. */
3943 op
= 0x8000 | ((range
>> 4) & 0xfff);
3944 add_unwind_opcode (op
, 2);
3948 /* Use the short form. */
3950 op
= 0xa8; /* Pop r14. */
3952 op
= 0xa0; /* Do not pop r14. */
3954 add_unwind_opcode (op
, 1);
3961 op
= 0xb100 | (range
& 0xf);
3962 add_unwind_opcode (op
, 2);
3965 /* Record the number of bytes pushed. */
3966 for (n
= 0; n
< 16; n
++)
3968 if (range
& (1 << n
))
3969 unwind
.frame_size
+= 4;
3974 /* Parse a directive saving FPA registers. */
3977 s_arm_unwind_save_fpa (int reg
)
3983 /* Get Number of registers to transfer. */
3984 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3987 exp
.X_op
= O_illegal
;
3989 if (exp
.X_op
!= O_constant
)
3991 as_bad (_("expected , <constant>"));
3992 ignore_rest_of_line ();
3996 num_regs
= exp
.X_add_number
;
3998 if (num_regs
< 1 || num_regs
> 4)
4000 as_bad (_("number of registers must be in the range [1:4]"));
4001 ignore_rest_of_line ();
4005 demand_empty_rest_of_line ();
4010 op
= 0xb4 | (num_regs
- 1);
4011 add_unwind_opcode (op
, 1);
4016 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4017 add_unwind_opcode (op
, 2);
4019 unwind
.frame_size
+= num_regs
* 12;
4023 /* Parse a directive saving VFP registers for ARMv6 and above. */
4026 s_arm_unwind_save_vfp_armv6 (void)
4031 int num_vfpv3_regs
= 0;
4032 int num_regs_below_16
;
4034 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
4037 as_bad (_("expected register list"));
4038 ignore_rest_of_line ();
4042 demand_empty_rest_of_line ();
4044 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4045 than FSTMX/FLDMX-style ones). */
4047 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4049 num_vfpv3_regs
= count
;
4050 else if (start
+ count
> 16)
4051 num_vfpv3_regs
= start
+ count
- 16;
4053 if (num_vfpv3_regs
> 0)
4055 int start_offset
= start
> 16 ? start
- 16 : 0;
4056 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4057 add_unwind_opcode (op
, 2);
4060 /* Generate opcode for registers numbered in the range 0 .. 15. */
4061 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4062 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4063 if (num_regs_below_16
> 0)
4065 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4066 add_unwind_opcode (op
, 2);
4069 unwind
.frame_size
+= count
* 8;
4073 /* Parse a directive saving VFP registers for pre-ARMv6. */
4076 s_arm_unwind_save_vfp (void)
4082 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
4085 as_bad (_("expected register list"));
4086 ignore_rest_of_line ();
4090 demand_empty_rest_of_line ();
4095 op
= 0xb8 | (count
- 1);
4096 add_unwind_opcode (op
, 1);
4101 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4102 add_unwind_opcode (op
, 2);
4104 unwind
.frame_size
+= count
* 8 + 4;
4108 /* Parse a directive saving iWMMXt data registers. */
4111 s_arm_unwind_save_mmxwr (void)
4119 if (*input_line_pointer
== '{')
4120 input_line_pointer
++;
4124 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4128 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4133 as_tsktsk (_("register list not in ascending order"));
4136 if (*input_line_pointer
== '-')
4138 input_line_pointer
++;
4139 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4142 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4145 else if (reg
>= hi_reg
)
4147 as_bad (_("bad register range"));
4150 for (; reg
< hi_reg
; reg
++)
4154 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4156 skip_past_char (&input_line_pointer
, '}');
4158 demand_empty_rest_of_line ();
4160 /* Generate any deferred opcodes because we're going to be looking at
4162 flush_pending_unwind ();
4164 for (i
= 0; i
< 16; i
++)
4166 if (mask
& (1 << i
))
4167 unwind
.frame_size
+= 8;
4170 /* Attempt to combine with a previous opcode. We do this because gcc
4171 likes to output separate unwind directives for a single block of
4173 if (unwind
.opcode_count
> 0)
4175 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4176 if ((i
& 0xf8) == 0xc0)
4179 /* Only merge if the blocks are contiguous. */
4182 if ((mask
& 0xfe00) == (1 << 9))
4184 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4185 unwind
.opcode_count
--;
4188 else if (i
== 6 && unwind
.opcode_count
>= 2)
4190 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4194 op
= 0xffff << (reg
- 1);
4196 && ((mask
& op
) == (1u << (reg
- 1))))
4198 op
= (1 << (reg
+ i
+ 1)) - 1;
4199 op
&= ~((1 << reg
) - 1);
4201 unwind
.opcode_count
-= 2;
4208 /* We want to generate opcodes in the order the registers have been
4209 saved, ie. descending order. */
4210 for (reg
= 15; reg
>= -1; reg
--)
4212 /* Save registers in blocks. */
4214 || !(mask
& (1 << reg
)))
4216 /* We found an unsaved reg. Generate opcodes to save the
4223 op
= 0xc0 | (hi_reg
- 10);
4224 add_unwind_opcode (op
, 1);
4229 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4230 add_unwind_opcode (op
, 2);
4239 ignore_rest_of_line ();
4243 s_arm_unwind_save_mmxwcg (void)
4250 if (*input_line_pointer
== '{')
4251 input_line_pointer
++;
4253 skip_whitespace (input_line_pointer
);
4257 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4261 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4267 as_tsktsk (_("register list not in ascending order"));
4270 if (*input_line_pointer
== '-')
4272 input_line_pointer
++;
4273 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4276 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4279 else if (reg
>= hi_reg
)
4281 as_bad (_("bad register range"));
4284 for (; reg
< hi_reg
; reg
++)
4288 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4290 skip_past_char (&input_line_pointer
, '}');
4292 demand_empty_rest_of_line ();
4294 /* Generate any deferred opcodes because we're going to be looking at
4296 flush_pending_unwind ();
4298 for (reg
= 0; reg
< 16; reg
++)
4300 if (mask
& (1 << reg
))
4301 unwind
.frame_size
+= 4;
4304 add_unwind_opcode (op
, 2);
4307 ignore_rest_of_line ();
4311 /* Parse an unwind_save directive.
4312 If the argument is non-zero, this is a .vsave directive. */
4315 s_arm_unwind_save (int arch_v6
)
4318 struct reg_entry
*reg
;
4319 bfd_boolean had_brace
= FALSE
;
4321 if (!unwind
.proc_start
)
4322 as_bad (MISSING_FNSTART
);
4324 /* Figure out what sort of save we have. */
4325 peek
= input_line_pointer
;
4333 reg
= arm_reg_parse_multi (&peek
);
4337 as_bad (_("register expected"));
4338 ignore_rest_of_line ();
4347 as_bad (_("FPA .unwind_save does not take a register list"));
4348 ignore_rest_of_line ();
4351 input_line_pointer
= peek
;
4352 s_arm_unwind_save_fpa (reg
->number
);
4356 s_arm_unwind_save_core ();
4361 s_arm_unwind_save_vfp_armv6 ();
4363 s_arm_unwind_save_vfp ();
4366 case REG_TYPE_MMXWR
:
4367 s_arm_unwind_save_mmxwr ();
4370 case REG_TYPE_MMXWCG
:
4371 s_arm_unwind_save_mmxwcg ();
4375 as_bad (_(".unwind_save does not support this kind of register"));
4376 ignore_rest_of_line ();
4381 /* Parse an unwind_movsp directive. */
4384 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4390 if (!unwind
.proc_start
)
4391 as_bad (MISSING_FNSTART
);
4393 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4396 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4397 ignore_rest_of_line ();
4401 /* Optional constant. */
4402 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4404 if (immediate_for_directive (&offset
) == FAIL
)
4410 demand_empty_rest_of_line ();
4412 if (reg
== REG_SP
|| reg
== REG_PC
)
4414 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4418 if (unwind
.fp_reg
!= REG_SP
)
4419 as_bad (_("unexpected .unwind_movsp directive"));
4421 /* Generate opcode to restore the value. */
4423 add_unwind_opcode (op
, 1);
4425 /* Record the information for later. */
4426 unwind
.fp_reg
= reg
;
4427 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4428 unwind
.sp_restored
= 1;
4431 /* Parse an unwind_pad directive. */
4434 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4438 if (!unwind
.proc_start
)
4439 as_bad (MISSING_FNSTART
);
4441 if (immediate_for_directive (&offset
) == FAIL
)
4446 as_bad (_("stack increment must be multiple of 4"));
4447 ignore_rest_of_line ();
4451 /* Don't generate any opcodes, just record the details for later. */
4452 unwind
.frame_size
+= offset
;
4453 unwind
.pending_offset
+= offset
;
4455 demand_empty_rest_of_line ();
4458 /* Parse an unwind_setfp directive. */
4461 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4467 if (!unwind
.proc_start
)
4468 as_bad (MISSING_FNSTART
);
4470 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4471 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4474 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4476 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4478 as_bad (_("expected <reg>, <reg>"));
4479 ignore_rest_of_line ();
4483 /* Optional constant. */
4484 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4486 if (immediate_for_directive (&offset
) == FAIL
)
4492 demand_empty_rest_of_line ();
4494 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4496 as_bad (_("register must be either sp or set by a previous"
4497 "unwind_movsp directive"));
4501 /* Don't generate any opcodes, just record the information for later. */
4502 unwind
.fp_reg
= fp_reg
;
4504 if (sp_reg
== REG_SP
)
4505 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4507 unwind
.fp_offset
-= offset
;
4510 /* Parse an unwind_raw directive. */
4513 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4516 /* This is an arbitrary limit. */
4517 unsigned char op
[16];
4520 if (!unwind
.proc_start
)
4521 as_bad (MISSING_FNSTART
);
4524 if (exp
.X_op
== O_constant
4525 && skip_past_comma (&input_line_pointer
) != FAIL
)
4527 unwind
.frame_size
+= exp
.X_add_number
;
4531 exp
.X_op
= O_illegal
;
4533 if (exp
.X_op
!= O_constant
)
4535 as_bad (_("expected <offset>, <opcode>"));
4536 ignore_rest_of_line ();
4542 /* Parse the opcode. */
4547 as_bad (_("unwind opcode too long"));
4548 ignore_rest_of_line ();
4550 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4552 as_bad (_("invalid unwind opcode"));
4553 ignore_rest_of_line ();
4556 op
[count
++] = exp
.X_add_number
;
4558 /* Parse the next byte. */
4559 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4565 /* Add the opcode bytes in reverse order. */
4567 add_unwind_opcode (op
[count
], 1);
4569 demand_empty_rest_of_line ();
4573 /* Parse a .eabi_attribute directive. */
4576 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4578 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4580 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4581 attributes_set_explicitly
[tag
] = 1;
4584 /* Emit a tls fix for the symbol. */
4587 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4591 #ifdef md_flush_pending_output
4592 md_flush_pending_output ();
4595 #ifdef md_cons_align
4599 /* Since we're just labelling the code, there's no need to define a
4602 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4603 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4604 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4605 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4607 #endif /* OBJ_ELF */
4609 static void s_arm_arch (int);
4610 static void s_arm_object_arch (int);
4611 static void s_arm_cpu (int);
4612 static void s_arm_fpu (int);
4613 static void s_arm_arch_extension (int);
4618 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4625 if (exp
.X_op
== O_symbol
)
4626 exp
.X_op
= O_secrel
;
4628 emit_expr (&exp
, 4);
4630 while (*input_line_pointer
++ == ',');
4632 input_line_pointer
--;
4633 demand_empty_rest_of_line ();
4637 /* This table describes all the machine specific pseudo-ops the assembler
4638 has to support. The fields are:
4639 pseudo-op name without dot
4640 function to call to execute this pseudo-op
4641 Integer arg to pass to the function. */
4643 const pseudo_typeS md_pseudo_table
[] =
4645 /* Never called because '.req' does not start a line. */
4646 { "req", s_req
, 0 },
4647 /* Following two are likewise never called. */
4650 { "unreq", s_unreq
, 0 },
4651 { "bss", s_bss
, 0 },
4652 { "align", s_align_ptwo
, 2 },
4653 { "arm", s_arm
, 0 },
4654 { "thumb", s_thumb
, 0 },
4655 { "code", s_code
, 0 },
4656 { "force_thumb", s_force_thumb
, 0 },
4657 { "thumb_func", s_thumb_func
, 0 },
4658 { "thumb_set", s_thumb_set
, 0 },
4659 { "even", s_even
, 0 },
4660 { "ltorg", s_ltorg
, 0 },
4661 { "pool", s_ltorg
, 0 },
4662 { "syntax", s_syntax
, 0 },
4663 { "cpu", s_arm_cpu
, 0 },
4664 { "arch", s_arm_arch
, 0 },
4665 { "object_arch", s_arm_object_arch
, 0 },
4666 { "fpu", s_arm_fpu
, 0 },
4667 { "arch_extension", s_arm_arch_extension
, 0 },
4669 { "word", s_arm_elf_cons
, 4 },
4670 { "long", s_arm_elf_cons
, 4 },
4671 { "inst.n", s_arm_elf_inst
, 2 },
4672 { "inst.w", s_arm_elf_inst
, 4 },
4673 { "inst", s_arm_elf_inst
, 0 },
4674 { "rel31", s_arm_rel31
, 0 },
4675 { "fnstart", s_arm_unwind_fnstart
, 0 },
4676 { "fnend", s_arm_unwind_fnend
, 0 },
4677 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4678 { "personality", s_arm_unwind_personality
, 0 },
4679 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4680 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4681 { "save", s_arm_unwind_save
, 0 },
4682 { "vsave", s_arm_unwind_save
, 1 },
4683 { "movsp", s_arm_unwind_movsp
, 0 },
4684 { "pad", s_arm_unwind_pad
, 0 },
4685 { "setfp", s_arm_unwind_setfp
, 0 },
4686 { "unwind_raw", s_arm_unwind_raw
, 0 },
4687 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4688 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4692 /* These are used for dwarf. */
4696 /* These are used for dwarf2. */
4697 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
4698 { "loc", dwarf2_directive_loc
, 0 },
4699 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4701 { "extend", float_cons
, 'x' },
4702 { "ldouble", float_cons
, 'x' },
4703 { "packed", float_cons
, 'p' },
4705 {"secrel32", pe_directive_secrel
, 0},
4708 /* These are for compatibility with CodeComposer Studio. */
4709 {"ref", s_ccs_ref
, 0},
4710 {"def", s_ccs_def
, 0},
4711 {"asmfunc", s_ccs_asmfunc
, 0},
4712 {"endasmfunc", s_ccs_endasmfunc
, 0},
4717 /* Parser functions used exclusively in instruction operands. */
4719 /* Generic immediate-value read function for use in insn parsing.
4720 STR points to the beginning of the immediate (the leading #);
4721 VAL receives the value; if the value is outside [MIN, MAX]
4722 issue an error. PREFIX_OPT is true if the immediate prefix is
4726 parse_immediate (char **str
, int *val
, int min
, int max
,
4727 bfd_boolean prefix_opt
)
4730 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4731 if (exp
.X_op
!= O_constant
)
4733 inst
.error
= _("constant expression required");
4737 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4739 inst
.error
= _("immediate value out of range");
4743 *val
= exp
.X_add_number
;
4747 /* Less-generic immediate-value read function with the possibility of loading a
4748 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4749 instructions. Puts the result directly in inst.operands[i]. */
4752 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
4753 bfd_boolean allow_symbol_p
)
4756 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
4759 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
4761 if (exp_p
->X_op
== O_constant
)
4763 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
4764 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4765 O_constant. We have to be careful not to break compilation for
4766 32-bit X_add_number, though. */
4767 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
4769 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4770 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
4772 inst
.operands
[i
].regisimm
= 1;
4775 else if (exp_p
->X_op
== O_big
4776 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
4778 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4780 /* Bignums have their least significant bits in
4781 generic_bignum[0]. Make sure we put 32 bits in imm and
4782 32 bits in reg, in a (hopefully) portable way. */
4783 gas_assert (parts
!= 0);
4785 /* Make sure that the number is not too big.
4786 PR 11972: Bignums can now be sign-extended to the
4787 size of a .octa so check that the out of range bits
4788 are all zero or all one. */
4789 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
4791 LITTLENUM_TYPE m
= -1;
4793 if (generic_bignum
[parts
* 2] != 0
4794 && generic_bignum
[parts
* 2] != m
)
4797 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
4798 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4802 inst
.operands
[i
].imm
= 0;
4803 for (j
= 0; j
< parts
; j
++, idx
++)
4804 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4805 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4806 inst
.operands
[i
].reg
= 0;
4807 for (j
= 0; j
< parts
; j
++, idx
++)
4808 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4809 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4810 inst
.operands
[i
].regisimm
= 1;
4812 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
4820 /* Returns the pseudo-register number of an FPA immediate constant,
4821 or FAIL if there isn't a valid constant here. */
4824 parse_fpa_immediate (char ** str
)
4826 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4832 /* First try and match exact strings, this is to guarantee
4833 that some formats will work even for cross assembly. */
4835 for (i
= 0; fp_const
[i
]; i
++)
4837 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4841 *str
+= strlen (fp_const
[i
]);
4842 if (is_end_of_line
[(unsigned char) **str
])
4848 /* Just because we didn't get a match doesn't mean that the constant
4849 isn't valid, just that it is in a format that we don't
4850 automatically recognize. Try parsing it with the standard
4851 expression routines. */
4853 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4855 /* Look for a raw floating point number. */
4856 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4857 && is_end_of_line
[(unsigned char) *save_in
])
4859 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4861 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4863 if (words
[j
] != fp_values
[i
][j
])
4867 if (j
== MAX_LITTLENUMS
)
4875 /* Try and parse a more complex expression, this will probably fail
4876 unless the code uses a floating point prefix (eg "0f"). */
4877 save_in
= input_line_pointer
;
4878 input_line_pointer
= *str
;
4879 if (expression (&exp
) == absolute_section
4880 && exp
.X_op
== O_big
4881 && exp
.X_add_number
< 0)
4883 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4885 #define X_PRECISION 5
4886 #define E_PRECISION 15L
4887 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
4889 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4891 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4893 if (words
[j
] != fp_values
[i
][j
])
4897 if (j
== MAX_LITTLENUMS
)
4899 *str
= input_line_pointer
;
4900 input_line_pointer
= save_in
;
4907 *str
= input_line_pointer
;
4908 input_line_pointer
= save_in
;
4909 inst
.error
= _("invalid FPA immediate expression");
4913 /* Returns 1 if a number has "quarter-precision" float format
4914 0baBbbbbbc defgh000 00000000 00000000. */
4917 is_quarter_float (unsigned imm
)
4919 int bs
= (imm
& 0x20000000) ? 0x3e000000 : 0x40000000;
4920 return (imm
& 0x7ffff) == 0 && ((imm
& 0x7e000000) ^ bs
) == 0;
4924 /* Detect the presence of a floating point or integer zero constant,
4928 parse_ifimm_zero (char **in
)
4932 if (!is_immediate_prefix (**in
))
4937 /* Accept #0x0 as a synonym for #0. */
4938 if (strncmp (*in
, "0x", 2) == 0)
4941 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
4946 error_code
= atof_generic (in
, ".", EXP_CHARS
,
4947 &generic_floating_point_number
);
4950 && generic_floating_point_number
.sign
== '+'
4951 && (generic_floating_point_number
.low
4952 > generic_floating_point_number
.leader
))
4958 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4959 0baBbbbbbc defgh000 00000000 00000000.
4960 The zero and minus-zero cases need special handling, since they can't be
4961 encoded in the "quarter-precision" float format, but can nonetheless be
4962 loaded as integer constants. */
4965 parse_qfloat_immediate (char **ccp
, int *immed
)
4969 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4970 int found_fpchar
= 0;
4972 skip_past_char (&str
, '#');
4974 /* We must not accidentally parse an integer as a floating-point number. Make
4975 sure that the value we parse is not an integer by checking for special
4976 characters '.' or 'e'.
4977 FIXME: This is a horrible hack, but doing better is tricky because type
4978 information isn't in a very usable state at parse time. */
4980 skip_whitespace (fpnum
);
4982 if (strncmp (fpnum
, "0x", 2) == 0)
4986 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
4987 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
4997 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
4999 unsigned fpword
= 0;
5002 /* Our FP word must be 32 bits (single-precision FP). */
5003 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5005 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5009 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5022 /* Shift operands. */
5025 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5028 struct asm_shift_name
5031 enum shift_kind kind
;
5034 /* Third argument to parse_shift. */
5035 enum parse_shift_mode
5037 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5038 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5039 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5040 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5041 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5044 /* Parse a <shift> specifier on an ARM data processing instruction.
5045 This has three forms:
5047 (LSL|LSR|ASL|ASR|ROR) Rs
5048 (LSL|LSR|ASL|ASR|ROR) #imm
5051 Note that ASL is assimilated to LSL in the instruction encoding, and
5052 RRX to ROR #0 (which cannot be written as such). */
5055 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5057 const struct asm_shift_name
*shift_name
;
5058 enum shift_kind shift
;
5063 for (p
= *str
; ISALPHA (*p
); p
++)
5068 inst
.error
= _("shift expression expected");
5072 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5075 if (shift_name
== NULL
)
5077 inst
.error
= _("shift expression expected");
5081 shift
= shift_name
->kind
;
5085 case NO_SHIFT_RESTRICT
:
5086 case SHIFT_IMMEDIATE
: break;
5088 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5089 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5091 inst
.error
= _("'LSL' or 'ASR' required");
5096 case SHIFT_LSL_IMMEDIATE
:
5097 if (shift
!= SHIFT_LSL
)
5099 inst
.error
= _("'LSL' required");
5104 case SHIFT_ASR_IMMEDIATE
:
5105 if (shift
!= SHIFT_ASR
)
5107 inst
.error
= _("'ASR' required");
5115 if (shift
!= SHIFT_RRX
)
5117 /* Whitespace can appear here if the next thing is a bare digit. */
5118 skip_whitespace (p
);
5120 if (mode
== NO_SHIFT_RESTRICT
5121 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5123 inst
.operands
[i
].imm
= reg
;
5124 inst
.operands
[i
].immisreg
= 1;
5126 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5129 inst
.operands
[i
].shift_kind
= shift
;
5130 inst
.operands
[i
].shifted
= 1;
5135 /* Parse a <shifter_operand> for an ARM data processing instruction:
5138 #<immediate>, <rotate>
5142 where <shift> is defined by parse_shift above, and <rotate> is a
5143 multiple of 2 between 0 and 30. Validation of immediate operands
5144 is deferred to md_apply_fix. */
5147 parse_shifter_operand (char **str
, int i
)
5152 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5154 inst
.operands
[i
].reg
= value
;
5155 inst
.operands
[i
].isreg
= 1;
5157 /* parse_shift will override this if appropriate */
5158 inst
.reloc
.exp
.X_op
= O_constant
;
5159 inst
.reloc
.exp
.X_add_number
= 0;
5161 if (skip_past_comma (str
) == FAIL
)
5164 /* Shift operation on register. */
5165 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5168 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
5171 if (skip_past_comma (str
) == SUCCESS
)
5173 /* #x, y -- ie explicit rotation by Y. */
5174 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5177 if (exp
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
5179 inst
.error
= _("constant expression expected");
5183 value
= exp
.X_add_number
;
5184 if (value
< 0 || value
> 30 || value
% 2 != 0)
5186 inst
.error
= _("invalid rotation");
5189 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
5191 inst
.error
= _("invalid constant");
5195 /* Encode as specified. */
5196 inst
.operands
[i
].imm
= inst
.reloc
.exp
.X_add_number
| value
<< 7;
5200 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
5201 inst
.reloc
.pc_rel
= 0;
5205 /* Group relocation information. Each entry in the table contains the
5206 textual name of the relocation as may appear in assembler source
5207 and must end with a colon.
5208 Along with this textual name are the relocation codes to be used if
5209 the corresponding instruction is an ALU instruction (ADD or SUB only),
5210 an LDR, an LDRS, or an LDC. */
5212 struct group_reloc_table_entry
5223 /* Varieties of non-ALU group relocation. */
5230 static struct group_reloc_table_entry group_reloc_table
[] =
5231 { /* Program counter relative: */
5233 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5238 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5239 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5240 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5241 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5243 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5248 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5249 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5250 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5251 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5253 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5254 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5255 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5256 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5257 /* Section base relative */
5259 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5264 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5265 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5266 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5267 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5269 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5274 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5275 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5276 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5277 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5279 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5280 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5281 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5282 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5283 /* Absolute thumb alu relocations. */
5285 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5290 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5295 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5300 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5305 /* Given the address of a pointer pointing to the textual name of a group
5306 relocation as may appear in assembler source, attempt to find its details
5307 in group_reloc_table. The pointer will be updated to the character after
5308 the trailing colon. On failure, FAIL will be returned; SUCCESS
5309 otherwise. On success, *entry will be updated to point at the relevant
5310 group_reloc_table entry. */
5313 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5316 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5318 int length
= strlen (group_reloc_table
[i
].name
);
5320 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5321 && (*str
)[length
] == ':')
5323 *out
= &group_reloc_table
[i
];
5324 *str
+= (length
+ 1);
5332 /* Parse a <shifter_operand> for an ARM data processing instruction
5333 (as for parse_shifter_operand) where group relocations are allowed:
5336 #<immediate>, <rotate>
5337 #:<group_reloc>:<expression>
5341 where <group_reloc> is one of the strings defined in group_reloc_table.
5342 The hashes are optional.
5344 Everything else is as for parse_shifter_operand. */
5346 static parse_operand_result
5347 parse_shifter_operand_group_reloc (char **str
, int i
)
5349 /* Determine if we have the sequence of characters #: or just :
5350 coming next. If we do, then we check for a group relocation.
5351 If we don't, punt the whole lot to parse_shifter_operand. */
5353 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5354 || (*str
)[0] == ':')
5356 struct group_reloc_table_entry
*entry
;
5358 if ((*str
)[0] == '#')
5363 /* Try to parse a group relocation. Anything else is an error. */
5364 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5366 inst
.error
= _("unknown group relocation");
5367 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5370 /* We now have the group relocation table entry corresponding to
5371 the name in the assembler source. Next, we parse the expression. */
5372 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
5373 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5375 /* Record the relocation type (always the ALU variant here). */
5376 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5377 gas_assert (inst
.reloc
.type
!= 0);
5379 return PARSE_OPERAND_SUCCESS
;
5382 return parse_shifter_operand (str
, i
) == SUCCESS
5383 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5385 /* Never reached. */
5388 /* Parse a Neon alignment expression. Information is written to
5389 inst.operands[i]. We assume the initial ':' has been skipped.
5391 align .imm = align << 8, .immisalign=1, .preind=0 */
5392 static parse_operand_result
5393 parse_neon_alignment (char **str
, int i
)
5398 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5400 if (exp
.X_op
!= O_constant
)
5402 inst
.error
= _("alignment must be constant");
5403 return PARSE_OPERAND_FAIL
;
5406 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5407 inst
.operands
[i
].immisalign
= 1;
5408 /* Alignments are not pre-indexes. */
5409 inst
.operands
[i
].preind
= 0;
5412 return PARSE_OPERAND_SUCCESS
;
5415 /* Parse all forms of an ARM address expression. Information is written
5416 to inst.operands[i] and/or inst.reloc.
5418 Preindexed addressing (.preind=1):
5420 [Rn, #offset] .reg=Rn .reloc.exp=offset
5421 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5422 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5423 .shift_kind=shift .reloc.exp=shift_imm
5425 These three may have a trailing ! which causes .writeback to be set also.
5427 Postindexed addressing (.postind=1, .writeback=1):
5429 [Rn], #offset .reg=Rn .reloc.exp=offset
5430 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5431 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5432 .shift_kind=shift .reloc.exp=shift_imm
5434 Unindexed addressing (.preind=0, .postind=0):
5436 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5440 [Rn]{!} shorthand for [Rn,#0]{!}
5441 =immediate .isreg=0 .reloc.exp=immediate
5442 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5444 It is the caller's responsibility to check for addressing modes not
5445 supported by the instruction, and to set inst.reloc.type. */
5447 static parse_operand_result
5448 parse_address_main (char **str
, int i
, int group_relocations
,
5449 group_reloc_type group_type
)
5454 if (skip_past_char (&p
, '[') == FAIL
)
5456 if (skip_past_char (&p
, '=') == FAIL
)
5458 /* Bare address - translate to PC-relative offset. */
5459 inst
.reloc
.pc_rel
= 1;
5460 inst
.operands
[i
].reg
= REG_PC
;
5461 inst
.operands
[i
].isreg
= 1;
5462 inst
.operands
[i
].preind
= 1;
5464 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_OPT_PREFIX_BIG
))
5465 return PARSE_OPERAND_FAIL
;
5467 else if (parse_big_immediate (&p
, i
, &inst
.reloc
.exp
,
5468 /*allow_symbol_p=*/TRUE
))
5469 return PARSE_OPERAND_FAIL
;
5472 return PARSE_OPERAND_SUCCESS
;
5475 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5476 skip_whitespace (p
);
5478 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5480 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5481 return PARSE_OPERAND_FAIL
;
5483 inst
.operands
[i
].reg
= reg
;
5484 inst
.operands
[i
].isreg
= 1;
5486 if (skip_past_comma (&p
) == SUCCESS
)
5488 inst
.operands
[i
].preind
= 1;
5491 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5493 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5495 inst
.operands
[i
].imm
= reg
;
5496 inst
.operands
[i
].immisreg
= 1;
5498 if (skip_past_comma (&p
) == SUCCESS
)
5499 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5500 return PARSE_OPERAND_FAIL
;
5502 else if (skip_past_char (&p
, ':') == SUCCESS
)
5504 /* FIXME: '@' should be used here, but it's filtered out by generic
5505 code before we get to see it here. This may be subject to
5507 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5509 if (result
!= PARSE_OPERAND_SUCCESS
)
5514 if (inst
.operands
[i
].negative
)
5516 inst
.operands
[i
].negative
= 0;
5520 if (group_relocations
5521 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5523 struct group_reloc_table_entry
*entry
;
5525 /* Skip over the #: or : sequence. */
5531 /* Try to parse a group relocation. Anything else is an
5533 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5535 inst
.error
= _("unknown group relocation");
5536 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5539 /* We now have the group relocation table entry corresponding to
5540 the name in the assembler source. Next, we parse the
5542 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5543 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5545 /* Record the relocation type. */
5549 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldr_code
;
5553 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5557 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldc_code
;
5564 if (inst
.reloc
.type
== 0)
5566 inst
.error
= _("this group relocation is not allowed on this instruction");
5567 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5573 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5574 return PARSE_OPERAND_FAIL
;
5575 /* If the offset is 0, find out if it's a +0 or -0. */
5576 if (inst
.reloc
.exp
.X_op
== O_constant
5577 && inst
.reloc
.exp
.X_add_number
== 0)
5579 skip_whitespace (q
);
5583 skip_whitespace (q
);
5586 inst
.operands
[i
].negative
= 1;
5591 else if (skip_past_char (&p
, ':') == SUCCESS
)
5593 /* FIXME: '@' should be used here, but it's filtered out by generic code
5594 before we get to see it here. This may be subject to change. */
5595 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5597 if (result
!= PARSE_OPERAND_SUCCESS
)
5601 if (skip_past_char (&p
, ']') == FAIL
)
5603 inst
.error
= _("']' expected");
5604 return PARSE_OPERAND_FAIL
;
5607 if (skip_past_char (&p
, '!') == SUCCESS
)
5608 inst
.operands
[i
].writeback
= 1;
5610 else if (skip_past_comma (&p
) == SUCCESS
)
5612 if (skip_past_char (&p
, '{') == SUCCESS
)
5614 /* [Rn], {expr} - unindexed, with option */
5615 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5616 0, 255, TRUE
) == FAIL
)
5617 return PARSE_OPERAND_FAIL
;
5619 if (skip_past_char (&p
, '}') == FAIL
)
5621 inst
.error
= _("'}' expected at end of 'option' field");
5622 return PARSE_OPERAND_FAIL
;
5624 if (inst
.operands
[i
].preind
)
5626 inst
.error
= _("cannot combine index with option");
5627 return PARSE_OPERAND_FAIL
;
5630 return PARSE_OPERAND_SUCCESS
;
5634 inst
.operands
[i
].postind
= 1;
5635 inst
.operands
[i
].writeback
= 1;
5637 if (inst
.operands
[i
].preind
)
5639 inst
.error
= _("cannot combine pre- and post-indexing");
5640 return PARSE_OPERAND_FAIL
;
5644 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5646 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5648 /* We might be using the immediate for alignment already. If we
5649 are, OR the register number into the low-order bits. */
5650 if (inst
.operands
[i
].immisalign
)
5651 inst
.operands
[i
].imm
|= reg
;
5653 inst
.operands
[i
].imm
= reg
;
5654 inst
.operands
[i
].immisreg
= 1;
5656 if (skip_past_comma (&p
) == SUCCESS
)
5657 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5658 return PARSE_OPERAND_FAIL
;
5663 if (inst
.operands
[i
].negative
)
5665 inst
.operands
[i
].negative
= 0;
5668 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5669 return PARSE_OPERAND_FAIL
;
5670 /* If the offset is 0, find out if it's a +0 or -0. */
5671 if (inst
.reloc
.exp
.X_op
== O_constant
5672 && inst
.reloc
.exp
.X_add_number
== 0)
5674 skip_whitespace (q
);
5678 skip_whitespace (q
);
5681 inst
.operands
[i
].negative
= 1;
5687 /* If at this point neither .preind nor .postind is set, we have a
5688 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5689 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5691 inst
.operands
[i
].preind
= 1;
5692 inst
.reloc
.exp
.X_op
= O_constant
;
5693 inst
.reloc
.exp
.X_add_number
= 0;
5696 return PARSE_OPERAND_SUCCESS
;
5700 parse_address (char **str
, int i
)
5702 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5706 static parse_operand_result
5707 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5709 return parse_address_main (str
, i
, 1, type
);
5712 /* Parse an operand for a MOVW or MOVT instruction. */
5714 parse_half (char **str
)
5719 skip_past_char (&p
, '#');
5720 if (strncasecmp (p
, ":lower16:", 9) == 0)
5721 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
5722 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5723 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
5725 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
5728 skip_whitespace (p
);
5731 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5734 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5736 if (inst
.reloc
.exp
.X_op
!= O_constant
)
5738 inst
.error
= _("constant expression expected");
5741 if (inst
.reloc
.exp
.X_add_number
< 0
5742 || inst
.reloc
.exp
.X_add_number
> 0xffff)
5744 inst
.error
= _("immediate value out of range");
5752 /* Miscellaneous. */
5754 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5755 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5757 parse_psr (char **str
, bfd_boolean lhs
)
5760 unsigned long psr_field
;
5761 const struct asm_psr
*psr
;
5763 bfd_boolean is_apsr
= FALSE
;
5764 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
5766 /* PR gas/12698: If the user has specified -march=all then m_profile will
5767 be TRUE, but we want to ignore it in this case as we are building for any
5768 CPU type, including non-m variants. */
5769 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
5772 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5773 feature for ease of use and backwards compatibility. */
5775 if (strncasecmp (p
, "SPSR", 4) == 0)
5778 goto unsupported_psr
;
5780 psr_field
= SPSR_BIT
;
5782 else if (strncasecmp (p
, "CPSR", 4) == 0)
5785 goto unsupported_psr
;
5789 else if (strncasecmp (p
, "APSR", 4) == 0)
5791 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5792 and ARMv7-R architecture CPUs. */
5801 while (ISALNUM (*p
) || *p
== '_');
5803 if (strncasecmp (start
, "iapsr", 5) == 0
5804 || strncasecmp (start
, "eapsr", 5) == 0
5805 || strncasecmp (start
, "xpsr", 4) == 0
5806 || strncasecmp (start
, "psr", 3) == 0)
5807 p
= start
+ strcspn (start
, "rR") + 1;
5809 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5815 /* If APSR is being written, a bitfield may be specified. Note that
5816 APSR itself is handled above. */
5817 if (psr
->field
<= 3)
5819 psr_field
= psr
->field
;
5825 /* M-profile MSR instructions have the mask field set to "10", except
5826 *PSR variants which modify APSR, which may use a different mask (and
5827 have been handled already). Do that by setting the PSR_f field
5829 return psr
->field
| (lhs
? PSR_f
: 0);
5832 goto unsupported_psr
;
5838 /* A suffix follows. */
5844 while (ISALNUM (*p
) || *p
== '_');
5848 /* APSR uses a notation for bits, rather than fields. */
5849 unsigned int nzcvq_bits
= 0;
5850 unsigned int g_bit
= 0;
5853 for (bit
= start
; bit
!= p
; bit
++)
5855 switch (TOLOWER (*bit
))
5858 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
5862 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
5866 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
5870 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
5874 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
5878 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
5882 inst
.error
= _("unexpected bit specified after APSR");
5887 if (nzcvq_bits
== 0x1f)
5892 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
5894 inst
.error
= _("selected processor does not "
5895 "support DSP extension");
5902 if ((nzcvq_bits
& 0x20) != 0
5903 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
5904 || (g_bit
& 0x2) != 0)
5906 inst
.error
= _("bad bitmask specified after APSR");
5912 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
5917 psr_field
|= psr
->field
;
5923 goto error
; /* Garbage after "[CS]PSR". */
5925 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
5926 is deprecated, but allow it anyway. */
5930 as_tsktsk (_("writing to APSR without specifying a bitmask is "
5933 else if (!m_profile
)
5934 /* These bits are never right for M-profile devices: don't set them
5935 (only code paths which read/write APSR reach here). */
5936 psr_field
|= (PSR_c
| PSR_f
);
5942 inst
.error
= _("selected processor does not support requested special "
5943 "purpose register");
5947 inst
.error
= _("flag for {c}psr instruction expected");
5951 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5952 value suitable for splatting into the AIF field of the instruction. */
5955 parse_cps_flags (char **str
)
5964 case '\0': case ',':
5967 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
5968 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
5969 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
5972 inst
.error
= _("unrecognized CPS flag");
5977 if (saw_a_flag
== 0)
5979 inst
.error
= _("missing CPS flags");
5987 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5988 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5991 parse_endian_specifier (char **str
)
5996 if (strncasecmp (s
, "BE", 2))
5998 else if (strncasecmp (s
, "LE", 2))
6002 inst
.error
= _("valid endian specifiers are be or le");
6006 if (ISALNUM (s
[2]) || s
[2] == '_')
6008 inst
.error
= _("valid endian specifiers are be or le");
6013 return little_endian
;
6016 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6017 value suitable for poking into the rotate field of an sxt or sxta
6018 instruction, or FAIL on error. */
6021 parse_ror (char **str
)
6026 if (strncasecmp (s
, "ROR", 3) == 0)
6030 inst
.error
= _("missing rotation field after comma");
6034 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6039 case 0: *str
= s
; return 0x0;
6040 case 8: *str
= s
; return 0x1;
6041 case 16: *str
= s
; return 0x2;
6042 case 24: *str
= s
; return 0x3;
6045 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6050 /* Parse a conditional code (from conds[] below). The value returned is in the
6051 range 0 .. 14, or FAIL. */
6053 parse_cond (char **str
)
6056 const struct asm_cond
*c
;
6058 /* Condition codes are always 2 characters, so matching up to
6059 3 characters is sufficient. */
6064 while (ISALPHA (*q
) && n
< 3)
6066 cond
[n
] = TOLOWER (*q
);
6071 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6074 inst
.error
= _("condition required");
6082 /* If the given feature available in the selected CPU, mark it as used.
6083 Returns TRUE iff feature is available. */
6085 mark_feature_used (const arm_feature_set
*feature
)
6087 /* Ensure the option is valid on the current architecture. */
6088 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
6091 /* Add the appropriate architecture feature for the barrier option used.
6094 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
6096 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
6101 /* Parse an option for a barrier instruction. Returns the encoding for the
6104 parse_barrier (char **str
)
6107 const struct asm_barrier_opt
*o
;
6110 while (ISALPHA (*q
))
6113 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6118 if (!mark_feature_used (&o
->arch
))
6125 /* Parse the operands of a table branch instruction. Similar to a memory
6128 parse_tb (char **str
)
6133 if (skip_past_char (&p
, '[') == FAIL
)
6135 inst
.error
= _("'[' expected");
6139 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6141 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6144 inst
.operands
[0].reg
= reg
;
6146 if (skip_past_comma (&p
) == FAIL
)
6148 inst
.error
= _("',' expected");
6152 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6154 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6157 inst
.operands
[0].imm
= reg
;
6159 if (skip_past_comma (&p
) == SUCCESS
)
6161 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6163 if (inst
.reloc
.exp
.X_add_number
!= 1)
6165 inst
.error
= _("invalid shift");
6168 inst
.operands
[0].shifted
= 1;
6171 if (skip_past_char (&p
, ']') == FAIL
)
6173 inst
.error
= _("']' expected");
6180 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6181 information on the types the operands can take and how they are encoded.
6182 Up to four operands may be read; this function handles setting the
6183 ".present" field for each read operand itself.
6184 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6185 else returns FAIL. */
6188 parse_neon_mov (char **str
, int *which_operand
)
6190 int i
= *which_operand
, val
;
6191 enum arm_reg_type rtype
;
6193 struct neon_type_el optype
;
6195 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6197 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6198 inst
.operands
[i
].reg
= val
;
6199 inst
.operands
[i
].isscalar
= 1;
6200 inst
.operands
[i
].vectype
= optype
;
6201 inst
.operands
[i
++].present
= 1;
6203 if (skip_past_comma (&ptr
) == FAIL
)
6206 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6209 inst
.operands
[i
].reg
= val
;
6210 inst
.operands
[i
].isreg
= 1;
6211 inst
.operands
[i
].present
= 1;
6213 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6216 /* Cases 0, 1, 2, 3, 5 (D only). */
6217 if (skip_past_comma (&ptr
) == FAIL
)
6220 inst
.operands
[i
].reg
= val
;
6221 inst
.operands
[i
].isreg
= 1;
6222 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6223 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6224 inst
.operands
[i
].isvec
= 1;
6225 inst
.operands
[i
].vectype
= optype
;
6226 inst
.operands
[i
++].present
= 1;
6228 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6230 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6231 Case 13: VMOV <Sd>, <Rm> */
6232 inst
.operands
[i
].reg
= val
;
6233 inst
.operands
[i
].isreg
= 1;
6234 inst
.operands
[i
].present
= 1;
6236 if (rtype
== REG_TYPE_NQ
)
6238 first_error (_("can't use Neon quad register here"));
6241 else if (rtype
!= REG_TYPE_VFS
)
6244 if (skip_past_comma (&ptr
) == FAIL
)
6246 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6248 inst
.operands
[i
].reg
= val
;
6249 inst
.operands
[i
].isreg
= 1;
6250 inst
.operands
[i
].present
= 1;
6253 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6256 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6257 Case 1: VMOV<c><q> <Dd>, <Dm>
6258 Case 8: VMOV.F32 <Sd>, <Sm>
6259 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6261 inst
.operands
[i
].reg
= val
;
6262 inst
.operands
[i
].isreg
= 1;
6263 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6264 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6265 inst
.operands
[i
].isvec
= 1;
6266 inst
.operands
[i
].vectype
= optype
;
6267 inst
.operands
[i
].present
= 1;
6269 if (skip_past_comma (&ptr
) == SUCCESS
)
6274 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6277 inst
.operands
[i
].reg
= val
;
6278 inst
.operands
[i
].isreg
= 1;
6279 inst
.operands
[i
++].present
= 1;
6281 if (skip_past_comma (&ptr
) == FAIL
)
6284 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6287 inst
.operands
[i
].reg
= val
;
6288 inst
.operands
[i
].isreg
= 1;
6289 inst
.operands
[i
].present
= 1;
6292 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6293 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6294 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6295 Case 10: VMOV.F32 <Sd>, #<imm>
6296 Case 11: VMOV.F64 <Dd>, #<imm> */
6297 inst
.operands
[i
].immisfloat
= 1;
6298 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6300 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6301 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6305 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6309 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6312 inst
.operands
[i
].reg
= val
;
6313 inst
.operands
[i
].isreg
= 1;
6314 inst
.operands
[i
++].present
= 1;
6316 if (skip_past_comma (&ptr
) == FAIL
)
6319 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6321 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6322 inst
.operands
[i
].reg
= val
;
6323 inst
.operands
[i
].isscalar
= 1;
6324 inst
.operands
[i
].present
= 1;
6325 inst
.operands
[i
].vectype
= optype
;
6327 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6329 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6330 inst
.operands
[i
].reg
= val
;
6331 inst
.operands
[i
].isreg
= 1;
6332 inst
.operands
[i
++].present
= 1;
6334 if (skip_past_comma (&ptr
) == FAIL
)
6337 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6340 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6344 inst
.operands
[i
].reg
= val
;
6345 inst
.operands
[i
].isreg
= 1;
6346 inst
.operands
[i
].isvec
= 1;
6347 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6348 inst
.operands
[i
].vectype
= optype
;
6349 inst
.operands
[i
].present
= 1;
6351 if (rtype
== REG_TYPE_VFS
)
6355 if (skip_past_comma (&ptr
) == FAIL
)
6357 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6360 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6363 inst
.operands
[i
].reg
= val
;
6364 inst
.operands
[i
].isreg
= 1;
6365 inst
.operands
[i
].isvec
= 1;
6366 inst
.operands
[i
].issingle
= 1;
6367 inst
.operands
[i
].vectype
= optype
;
6368 inst
.operands
[i
].present
= 1;
6371 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6375 inst
.operands
[i
].reg
= val
;
6376 inst
.operands
[i
].isreg
= 1;
6377 inst
.operands
[i
].isvec
= 1;
6378 inst
.operands
[i
].issingle
= 1;
6379 inst
.operands
[i
].vectype
= optype
;
6380 inst
.operands
[i
].present
= 1;
6385 first_error (_("parse error"));
6389 /* Successfully parsed the operands. Update args. */
6395 first_error (_("expected comma"));
6399 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
6403 /* Use this macro when the operand constraints are different
6404 for ARM and THUMB (e.g. ldrd). */
6405 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6406 ((arm_operand) | ((thumb_operand) << 16))
6408 /* Matcher codes for parse_operands. */
6409 enum operand_parse_code
6411 OP_stop
, /* end of line */
6413 OP_RR
, /* ARM register */
6414 OP_RRnpc
, /* ARM register, not r15 */
6415 OP_RRnpcsp
, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6416 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
6417 OP_RRnpctw
, /* ARM register, not r15 in Thumb-state or with writeback,
6418 optional trailing ! */
6419 OP_RRw
, /* ARM register, not r15, optional trailing ! */
6420 OP_RCP
, /* Coprocessor number */
6421 OP_RCN
, /* Coprocessor register */
6422 OP_RF
, /* FPA register */
6423 OP_RVS
, /* VFP single precision register */
6424 OP_RVD
, /* VFP double precision register (0..15) */
6425 OP_RND
, /* Neon double precision register (0..31) */
6426 OP_RNQ
, /* Neon quad precision register */
6427 OP_RVSD
, /* VFP single or double precision register */
6428 OP_RNDQ
, /* Neon double or quad precision register */
6429 OP_RNSDQ
, /* Neon single, double or quad precision register */
6430 OP_RNSC
, /* Neon scalar D[X] */
6431 OP_RVC
, /* VFP control register */
6432 OP_RMF
, /* Maverick F register */
6433 OP_RMD
, /* Maverick D register */
6434 OP_RMFX
, /* Maverick FX register */
6435 OP_RMDX
, /* Maverick DX register */
6436 OP_RMAX
, /* Maverick AX register */
6437 OP_RMDS
, /* Maverick DSPSC register */
6438 OP_RIWR
, /* iWMMXt wR register */
6439 OP_RIWC
, /* iWMMXt wC register */
6440 OP_RIWG
, /* iWMMXt wCG register */
6441 OP_RXA
, /* XScale accumulator register */
6443 OP_REGLST
, /* ARM register list */
6444 OP_VRSLST
, /* VFP single-precision register list */
6445 OP_VRDLST
, /* VFP double-precision register list */
6446 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
6447 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
6448 OP_NSTRLST
, /* Neon element/structure list */
6450 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
6451 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
6452 OP_RSVD_FI0
, /* VFP S or D reg, or floating point immediate zero. */
6453 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
6454 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
6455 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
6456 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
6457 OP_VMOV
, /* Neon VMOV operands. */
6458 OP_RNDQ_Ibig
, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6459 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
6460 OP_RIWR_I32z
, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6462 OP_I0
, /* immediate zero */
6463 OP_I7
, /* immediate value 0 .. 7 */
6464 OP_I15
, /* 0 .. 15 */
6465 OP_I16
, /* 1 .. 16 */
6466 OP_I16z
, /* 0 .. 16 */
6467 OP_I31
, /* 0 .. 31 */
6468 OP_I31w
, /* 0 .. 31, optional trailing ! */
6469 OP_I32
, /* 1 .. 32 */
6470 OP_I32z
, /* 0 .. 32 */
6471 OP_I63
, /* 0 .. 63 */
6472 OP_I63s
, /* -64 .. 63 */
6473 OP_I64
, /* 1 .. 64 */
6474 OP_I64z
, /* 0 .. 64 */
6475 OP_I255
, /* 0 .. 255 */
6477 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
6478 OP_I7b
, /* 0 .. 7 */
6479 OP_I15b
, /* 0 .. 15 */
6480 OP_I31b
, /* 0 .. 31 */
6482 OP_SH
, /* shifter operand */
6483 OP_SHG
, /* shifter operand with possible group relocation */
6484 OP_ADDR
, /* Memory address expression (any mode) */
6485 OP_ADDRGLDR
, /* Mem addr expr (any mode) with possible LDR group reloc */
6486 OP_ADDRGLDRS
, /* Mem addr expr (any mode) with possible LDRS group reloc */
6487 OP_ADDRGLDC
, /* Mem addr expr (any mode) with possible LDC group reloc */
6488 OP_EXP
, /* arbitrary expression */
6489 OP_EXPi
, /* same, with optional immediate prefix */
6490 OP_EXPr
, /* same, with optional relocation suffix */
6491 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
6493 OP_CPSF
, /* CPS flags */
6494 OP_ENDI
, /* Endianness specifier */
6495 OP_wPSR
, /* CPSR/SPSR/APSR mask for msr (writing). */
6496 OP_rPSR
, /* CPSR/SPSR/APSR mask for msr (reading). */
6497 OP_COND
, /* conditional code */
6498 OP_TB
, /* Table branch. */
6500 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
6502 OP_RRnpc_I0
, /* ARM register or literal 0 */
6503 OP_RR_EXr
, /* ARM register or expression with opt. reloc suff. */
6504 OP_RR_EXi
, /* ARM register or expression with imm prefix */
6505 OP_RF_IF
, /* FPA register or immediate */
6506 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
6507 OP_RIWC_RIWG
, /* iWMMXt wC or wCG reg */
6509 /* Optional operands. */
6510 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
6511 OP_oI31b
, /* 0 .. 31 */
6512 OP_oI32b
, /* 1 .. 32 */
6513 OP_oI32z
, /* 0 .. 32 */
6514 OP_oIffffb
, /* 0 .. 65535 */
6515 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
6517 OP_oRR
, /* ARM register */
6518 OP_oRRnpc
, /* ARM register, not the PC */
6519 OP_oRRnpcsp
, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6520 OP_oRRw
, /* ARM register, not r15, optional trailing ! */
6521 OP_oRND
, /* Optional Neon double precision register */
6522 OP_oRNQ
, /* Optional Neon quad precision register */
6523 OP_oRNDQ
, /* Optional Neon double or quad precision register */
6524 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
6525 OP_oSHll
, /* LSL immediate */
6526 OP_oSHar
, /* ASR immediate */
6527 OP_oSHllar
, /* LSL or ASR immediate */
6528 OP_oROR
, /* ROR 0/8/16/24 */
6529 OP_oBARRIER_I15
, /* Option argument for a barrier instruction. */
6531 /* Some pre-defined mixed (ARM/THUMB) operands. */
6532 OP_RR_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RR
, OP_RRnpcsp
),
6533 OP_RRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RRnpc
, OP_RRnpcsp
),
6534 OP_oRRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc
, OP_oRRnpcsp
),
6536 OP_FIRST_OPTIONAL
= OP_oI7b
6539 /* Generic instruction operand parser. This does no encoding and no
6540 semantic validation; it merely squirrels values away in the inst
6541 structure. Returns SUCCESS or FAIL depending on whether the
6542 specified grammar matched. */
6544 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6546 unsigned const int *upat
= pattern
;
6547 char *backtrack_pos
= 0;
6548 const char *backtrack_error
= 0;
6549 int i
, val
= 0, backtrack_index
= 0;
6550 enum arm_reg_type rtype
;
6551 parse_operand_result result
;
6552 unsigned int op_parse_code
;
6554 #define po_char_or_fail(chr) \
6557 if (skip_past_char (&str, chr) == FAIL) \
6562 #define po_reg_or_fail(regtype) \
6565 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6566 & inst.operands[i].vectype); \
6569 first_error (_(reg_expected_msgs[regtype])); \
6572 inst.operands[i].reg = val; \
6573 inst.operands[i].isreg = 1; \
6574 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6575 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6576 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6577 || rtype == REG_TYPE_VFD \
6578 || rtype == REG_TYPE_NQ); \
6582 #define po_reg_or_goto(regtype, label) \
6585 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6586 & inst.operands[i].vectype); \
6590 inst.operands[i].reg = val; \
6591 inst.operands[i].isreg = 1; \
6592 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6593 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6594 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6595 || rtype == REG_TYPE_VFD \
6596 || rtype == REG_TYPE_NQ); \
6600 #define po_imm_or_fail(min, max, popt) \
6603 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6605 inst.operands[i].imm = val; \
6609 #define po_scalar_or_goto(elsz, label) \
6612 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6615 inst.operands[i].reg = val; \
6616 inst.operands[i].isscalar = 1; \
6620 #define po_misc_or_fail(expr) \
6628 #define po_misc_or_fail_no_backtrack(expr) \
6632 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6633 backtrack_pos = 0; \
6634 if (result != PARSE_OPERAND_SUCCESS) \
6639 #define po_barrier_or_imm(str) \
6642 val = parse_barrier (&str); \
6643 if (val == FAIL && ! ISALPHA (*str)) \
6646 /* ISB can only take SY as an option. */ \
6647 || ((inst.instruction & 0xf0) == 0x60 \
6650 inst.error = _("invalid barrier type"); \
6651 backtrack_pos = 0; \
6657 skip_whitespace (str
);
6659 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6661 op_parse_code
= upat
[i
];
6662 if (op_parse_code
>= 1<<16)
6663 op_parse_code
= thumb
? (op_parse_code
>> 16)
6664 : (op_parse_code
& ((1<<16)-1));
6666 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6668 /* Remember where we are in case we need to backtrack. */
6669 gas_assert (!backtrack_pos
);
6670 backtrack_pos
= str
;
6671 backtrack_error
= inst
.error
;
6672 backtrack_index
= i
;
6675 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6676 po_char_or_fail (',');
6678 switch (op_parse_code
)
6686 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6687 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6688 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6689 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6690 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6691 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6693 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6695 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6697 /* Also accept generic coprocessor regs for unknown registers. */
6699 po_reg_or_fail (REG_TYPE_CN
);
6701 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6702 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6703 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6704 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6705 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6706 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6707 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6708 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6709 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6710 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6712 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6714 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6715 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6717 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6719 /* Neon scalar. Using an element size of 8 means that some invalid
6720 scalars are accepted here, so deal with those in later code. */
6721 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6725 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6728 po_imm_or_fail (0, 0, TRUE
);
6733 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6738 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
6741 if (parse_ifimm_zero (&str
))
6742 inst
.operands
[i
].imm
= 0;
6746 = _("only floating point zero is allowed as immediate value");
6754 po_scalar_or_goto (8, try_rr
);
6757 po_reg_or_fail (REG_TYPE_RN
);
6763 po_scalar_or_goto (8, try_nsdq
);
6766 po_reg_or_fail (REG_TYPE_NSDQ
);
6772 po_scalar_or_goto (8, try_ndq
);
6775 po_reg_or_fail (REG_TYPE_NDQ
);
6781 po_scalar_or_goto (8, try_vfd
);
6784 po_reg_or_fail (REG_TYPE_VFD
);
6789 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6790 not careful then bad things might happen. */
6791 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
6796 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
6799 /* There's a possibility of getting a 64-bit immediate here, so
6800 we need special handling. */
6801 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6804 inst
.error
= _("immediate value is out of range");
6812 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
6815 po_imm_or_fail (0, 63, TRUE
);
6820 po_char_or_fail ('[');
6821 po_reg_or_fail (REG_TYPE_RN
);
6822 po_char_or_fail (']');
6828 po_reg_or_fail (REG_TYPE_RN
);
6829 if (skip_past_char (&str
, '!') == SUCCESS
)
6830 inst
.operands
[i
].writeback
= 1;
6834 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
6835 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
6836 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
6837 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
6838 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
6839 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
6840 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
6841 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
6842 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
6843 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
6844 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
6845 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
6847 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
6849 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
6850 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
6852 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
6853 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
6854 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
6855 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
6857 /* Immediate variants */
6859 po_char_or_fail ('{');
6860 po_imm_or_fail (0, 255, TRUE
);
6861 po_char_or_fail ('}');
6865 /* The expression parser chokes on a trailing !, so we have
6866 to find it first and zap it. */
6869 while (*s
&& *s
!= ',')
6874 inst
.operands
[i
].writeback
= 1;
6876 po_imm_or_fail (0, 31, TRUE
);
6884 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6889 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6894 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6896 if (inst
.reloc
.exp
.X_op
== O_symbol
)
6898 val
= parse_reloc (&str
);
6901 inst
.error
= _("unrecognized relocation suffix");
6904 else if (val
!= BFD_RELOC_UNUSED
)
6906 inst
.operands
[i
].imm
= val
;
6907 inst
.operands
[i
].hasreloc
= 1;
6912 /* Operand for MOVW or MOVT. */
6914 po_misc_or_fail (parse_half (&str
));
6917 /* Register or expression. */
6918 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
6919 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
6921 /* Register or immediate. */
6922 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
6923 I0
: po_imm_or_fail (0, 0, FALSE
); break;
6925 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
6927 if (!is_immediate_prefix (*str
))
6930 val
= parse_fpa_immediate (&str
);
6933 /* FPA immediates are encoded as registers 8-15.
6934 parse_fpa_immediate has already applied the offset. */
6935 inst
.operands
[i
].reg
= val
;
6936 inst
.operands
[i
].isreg
= 1;
6939 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
6940 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
6942 /* Two kinds of register. */
6945 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6947 || (rege
->type
!= REG_TYPE_MMXWR
6948 && rege
->type
!= REG_TYPE_MMXWC
6949 && rege
->type
!= REG_TYPE_MMXWCG
))
6951 inst
.error
= _("iWMMXt data or control register expected");
6954 inst
.operands
[i
].reg
= rege
->number
;
6955 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
6961 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6963 || (rege
->type
!= REG_TYPE_MMXWC
6964 && rege
->type
!= REG_TYPE_MMXWCG
))
6966 inst
.error
= _("iWMMXt control register expected");
6969 inst
.operands
[i
].reg
= rege
->number
;
6970 inst
.operands
[i
].isreg
= 1;
6975 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
6976 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
6977 case OP_oROR
: val
= parse_ror (&str
); break;
6978 case OP_COND
: val
= parse_cond (&str
); break;
6979 case OP_oBARRIER_I15
:
6980 po_barrier_or_imm (str
); break;
6982 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
6988 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
6989 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
6991 inst
.error
= _("Banked registers are not available with this "
6997 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7001 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7004 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7006 if (strncasecmp (str
, "APSR_", 5) == 0)
7013 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7014 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7015 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7016 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7017 default: found
= 16;
7021 inst
.operands
[i
].isvec
= 1;
7022 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7023 inst
.operands
[i
].reg
= REG_PC
;
7030 po_misc_or_fail (parse_tb (&str
));
7033 /* Register lists. */
7035 val
= parse_reg_list (&str
);
7038 inst
.operands
[i
].writeback
= 1;
7044 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
7048 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
7052 /* Allow Q registers too. */
7053 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7058 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7060 inst
.operands
[i
].issingle
= 1;
7065 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7070 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7071 &inst
.operands
[i
].vectype
);
7074 /* Addressing modes */
7076 po_misc_or_fail (parse_address (&str
, i
));
7080 po_misc_or_fail_no_backtrack (
7081 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7085 po_misc_or_fail_no_backtrack (
7086 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7090 po_misc_or_fail_no_backtrack (
7091 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7095 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7099 po_misc_or_fail_no_backtrack (
7100 parse_shifter_operand_group_reloc (&str
, i
));
7104 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7108 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7112 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7116 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7119 /* Various value-based sanity checks and shared operations. We
7120 do not signal immediate failures for the register constraints;
7121 this allows a syntax error to take precedence. */
7122 switch (op_parse_code
)
7130 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7131 inst
.error
= BAD_PC
;
7136 if (inst
.operands
[i
].isreg
)
7138 if (inst
.operands
[i
].reg
== REG_PC
)
7139 inst
.error
= BAD_PC
;
7140 else if (inst
.operands
[i
].reg
== REG_SP
)
7141 inst
.error
= BAD_SP
;
7146 if (inst
.operands
[i
].isreg
7147 && inst
.operands
[i
].reg
== REG_PC
7148 && (inst
.operands
[i
].writeback
|| thumb
))
7149 inst
.error
= BAD_PC
;
7158 case OP_oBARRIER_I15
:
7167 inst
.operands
[i
].imm
= val
;
7174 /* If we get here, this operand was successfully parsed. */
7175 inst
.operands
[i
].present
= 1;
7179 inst
.error
= BAD_ARGS
;
7184 /* The parse routine should already have set inst.error, but set a
7185 default here just in case. */
7187 inst
.error
= _("syntax error");
7191 /* Do not backtrack over a trailing optional argument that
7192 absorbed some text. We will only fail again, with the
7193 'garbage following instruction' error message, which is
7194 probably less helpful than the current one. */
7195 if (backtrack_index
== i
&& backtrack_pos
!= str
7196 && upat
[i
+1] == OP_stop
)
7199 inst
.error
= _("syntax error");
7203 /* Try again, skipping the optional argument at backtrack_pos. */
7204 str
= backtrack_pos
;
7205 inst
.error
= backtrack_error
;
7206 inst
.operands
[backtrack_index
].present
= 0;
7207 i
= backtrack_index
;
7211 /* Check that we have parsed all the arguments. */
7212 if (*str
!= '\0' && !inst
.error
)
7213 inst
.error
= _("garbage following instruction");
7215 return inst
.error
? FAIL
: SUCCESS
;
7218 #undef po_char_or_fail
7219 #undef po_reg_or_fail
7220 #undef po_reg_or_goto
7221 #undef po_imm_or_fail
7222 #undef po_scalar_or_fail
7223 #undef po_barrier_or_imm
7225 /* Shorthand macro for instruction encoding functions issuing errors. */
7226 #define constraint(expr, err) \
7237 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7238 instructions are unpredictable if these registers are used. This
7239 is the BadReg predicate in ARM's Thumb-2 documentation. */
7240 #define reject_bad_reg(reg) \
7242 if (reg == REG_SP || reg == REG_PC) \
7244 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
7249 /* If REG is R13 (the stack pointer), warn that its use is
7251 #define warn_deprecated_sp(reg) \
7253 if (warn_on_deprecated && reg == REG_SP) \
7254 as_tsktsk (_("use of r13 is deprecated")); \
7257 /* Functions for operand encoding. ARM, then Thumb. */
7259 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7261 /* If VAL can be encoded in the immediate field of an ARM instruction,
7262 return the encoded form. Otherwise, return FAIL. */
7265 encode_arm_immediate (unsigned int val
)
7269 for (i
= 0; i
< 32; i
+= 2)
7270 if ((a
= rotate_left (val
, i
)) <= 0xff)
7271 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7276 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7277 return the encoded form. Otherwise, return FAIL. */
7279 encode_thumb32_immediate (unsigned int val
)
7286 for (i
= 1; i
<= 24; i
++)
7289 if ((val
& ~(0xff << i
)) == 0)
7290 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7294 if (val
== ((a
<< 16) | a
))
7296 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7300 if (val
== ((a
<< 16) | a
))
7301 return 0x200 | (a
>> 8);
7305 /* Encode a VFP SP or DP register number into inst.instruction. */
7308 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7310 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7313 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7316 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7319 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7324 first_error (_("D register out of range for selected VFP version"));
7332 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7336 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7340 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7344 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7348 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7352 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7360 /* Encode a <shift> in an ARM-format instruction. The immediate,
7361 if any, is handled by md_apply_fix. */
7363 encode_arm_shift (int i
)
7365 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7366 inst
.instruction
|= SHIFT_ROR
<< 5;
7369 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7370 if (inst
.operands
[i
].immisreg
)
7372 inst
.instruction
|= SHIFT_BY_REG
;
7373 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7376 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7381 encode_arm_shifter_operand (int i
)
7383 if (inst
.operands
[i
].isreg
)
7385 inst
.instruction
|= inst
.operands
[i
].reg
;
7386 encode_arm_shift (i
);
7390 inst
.instruction
|= INST_IMMEDIATE
;
7391 if (inst
.reloc
.type
!= BFD_RELOC_ARM_IMMEDIATE
)
7392 inst
.instruction
|= inst
.operands
[i
].imm
;
7396 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7398 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7401 Generate an error if the operand is not a register. */
7402 constraint (!inst
.operands
[i
].isreg
,
7403 _("Instruction does not support =N addresses"));
7405 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7407 if (inst
.operands
[i
].preind
)
7411 inst
.error
= _("instruction does not accept preindexed addressing");
7414 inst
.instruction
|= PRE_INDEX
;
7415 if (inst
.operands
[i
].writeback
)
7416 inst
.instruction
|= WRITE_BACK
;
7419 else if (inst
.operands
[i
].postind
)
7421 gas_assert (inst
.operands
[i
].writeback
);
7423 inst
.instruction
|= WRITE_BACK
;
7425 else /* unindexed - only for coprocessor */
7427 inst
.error
= _("instruction does not accept unindexed addressing");
7431 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7432 && (((inst
.instruction
& 0x000f0000) >> 16)
7433 == ((inst
.instruction
& 0x0000f000) >> 12)))
7434 as_warn ((inst
.instruction
& LOAD_BIT
)
7435 ? _("destination register same as write-back base")
7436 : _("source register same as write-back base"));
7439 /* inst.operands[i] was set up by parse_address. Encode it into an
7440 ARM-format mode 2 load or store instruction. If is_t is true,
7441 reject forms that cannot be used with a T instruction (i.e. not
7444 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7446 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7448 encode_arm_addr_mode_common (i
, is_t
);
7450 if (inst
.operands
[i
].immisreg
)
7452 constraint ((inst
.operands
[i
].imm
== REG_PC
7453 || (is_pc
&& inst
.operands
[i
].writeback
)),
7455 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7456 inst
.instruction
|= inst
.operands
[i
].imm
;
7457 if (!inst
.operands
[i
].negative
)
7458 inst
.instruction
|= INDEX_UP
;
7459 if (inst
.operands
[i
].shifted
)
7461 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7462 inst
.instruction
|= SHIFT_ROR
<< 5;
7465 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7466 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7470 else /* immediate offset in inst.reloc */
7472 if (is_pc
&& !inst
.reloc
.pc_rel
)
7474 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7476 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7477 cannot use PC in addressing.
7478 PC cannot be used in writeback addressing, either. */
7479 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7482 /* Use of PC in str is deprecated for ARMv7. */
7483 if (warn_on_deprecated
7485 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7486 as_tsktsk (_("use of PC in this instruction is deprecated"));
7489 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7491 /* Prefer + for zero encoded value. */
7492 if (!inst
.operands
[i
].negative
)
7493 inst
.instruction
|= INDEX_UP
;
7494 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
7499 /* inst.operands[i] was set up by parse_address. Encode it into an
7500 ARM-format mode 3 load or store instruction. Reject forms that
7501 cannot be used with such instructions. If is_t is true, reject
7502 forms that cannot be used with a T instruction (i.e. not
7505 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7507 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7509 inst
.error
= _("instruction does not accept scaled register index");
7513 encode_arm_addr_mode_common (i
, is_t
);
7515 if (inst
.operands
[i
].immisreg
)
7517 constraint ((inst
.operands
[i
].imm
== REG_PC
7518 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
7520 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
7522 inst
.instruction
|= inst
.operands
[i
].imm
;
7523 if (!inst
.operands
[i
].negative
)
7524 inst
.instruction
|= INDEX_UP
;
7526 else /* immediate offset in inst.reloc */
7528 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.reloc
.pc_rel
7529 && inst
.operands
[i
].writeback
),
7531 inst
.instruction
|= HWOFFSET_IMM
;
7532 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7534 /* Prefer + for zero encoded value. */
7535 if (!inst
.operands
[i
].negative
)
7536 inst
.instruction
|= INDEX_UP
;
7538 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
7543 /* Write immediate bits [7:0] to the following locations:
7545 |28/24|23 19|18 16|15 4|3 0|
7546 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7548 This function is used by VMOV/VMVN/VORR/VBIC. */
7551 neon_write_immbits (unsigned immbits
)
7553 inst
.instruction
|= immbits
& 0xf;
7554 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
7555 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
7558 /* Invert low-order SIZE bits of XHI:XLO. */
7561 neon_invert_size (unsigned *xlo
, unsigned *xhi
, int size
)
7563 unsigned immlo
= xlo
? *xlo
: 0;
7564 unsigned immhi
= xhi
? *xhi
: 0;
7569 immlo
= (~immlo
) & 0xff;
7573 immlo
= (~immlo
) & 0xffff;
7577 immhi
= (~immhi
) & 0xffffffff;
7581 immlo
= (~immlo
) & 0xffffffff;
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  /* Every byte of IMM must be homogeneous: either all-zeros or
     all-ones.  */
  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;

      if (field != 0 && field != 0xff)
	return 0;
    }

  return 1;
}
/* For immediate of above form, return 0bABCD.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned squashed = 0;
  int byte;

  /* Collect bit 0 of each byte; byte N contributes bit N of the
     result.  */
  for (byte = 0; byte < 4; byte++)
    squashed |= ((imm >> (byte * 8)) & 1) << byte;

  return squashed;
}
7616 /* Compress quarter-float representation to 0b...000 abcdefgh. */
7619 neon_qfloat_bits (unsigned imm
)
7621 return ((imm
>> 19) & 0x7f) | ((imm
>> 24) & 0x80);
7624 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7625 the instruction. *OP is passed as the initial value of the op field, and
7626 may be set to a different value depending on the constant (i.e.
7627 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7628 MVN). If the immediate looks like a repeated pattern then also
7629 try smaller element sizes. */
7632 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
7633 unsigned *immbits
, int *op
, int size
,
7634 enum neon_el_type type
)
7636 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7638 if (type
== NT_float
&& !float_p
)
7641 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
7643 if (size
!= 32 || *op
== 1)
7645 *immbits
= neon_qfloat_bits (immlo
);
7651 if (neon_bits_same_in_bytes (immhi
)
7652 && neon_bits_same_in_bytes (immlo
))
7656 *immbits
= (neon_squash_bits (immhi
) << 4)
7657 | neon_squash_bits (immlo
);
7668 if (immlo
== (immlo
& 0x000000ff))
7673 else if (immlo
== (immlo
& 0x0000ff00))
7675 *immbits
= immlo
>> 8;
7678 else if (immlo
== (immlo
& 0x00ff0000))
7680 *immbits
= immlo
>> 16;
7683 else if (immlo
== (immlo
& 0xff000000))
7685 *immbits
= immlo
>> 24;
7688 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
7690 *immbits
= (immlo
>> 8) & 0xff;
7693 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
7695 *immbits
= (immlo
>> 16) & 0xff;
7699 if ((immlo
& 0xffff) != (immlo
>> 16))
7706 if (immlo
== (immlo
& 0x000000ff))
7711 else if (immlo
== (immlo
& 0x0000ff00))
7713 *immbits
= immlo
>> 8;
7717 if ((immlo
& 0xff) != (immlo
>> 8))
7722 if (immlo
== (immlo
& 0x000000ff))
7724 /* Don't allow MVN with 8-bit immediate. */
7734 #if defined BFD_HOST_64_BIT
7735 /* Returns TRUE if double precision value V may be cast
7736 to single precision without loss of accuracy. */
7739 is_double_a_single (bfd_int64_t v
)
7741 int exp
= (int)((v
>> 52) & 0x7FF);
7742 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7744 return (exp
== 0 || exp
== 0x7FF
7745 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
7746 && (mantissa
& 0x1FFFFFFFl
) == 0;
7749 /* Returns a double precision value casted to single precision
7750 (ignoring the least significant bits in exponent and mantissa). */
7753 double_to_single (bfd_int64_t v
)
7755 int sign
= (int) ((v
>> 63) & 1l);
7756 int exp
= (int) ((v
>> 52) & 0x7FF);
7757 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7763 exp
= exp
- 1023 + 127;
7772 /* No denormalized numbers. */
7778 return (sign
<< 31) | (exp
<< 23) | mantissa
;
7780 #endif /* BFD_HOST_64_BIT */
7789 static void do_vfp_nsyn_opcode (const char *);
7791 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7792 Determine whether it can be performed with a move instruction; if
7793 it can, convert inst.instruction to that move instruction and
7794 return TRUE; if it can't, convert inst.instruction to a literal-pool
7795 load and return FALSE. If this is not a valid thing to do in the
7796 current context, set inst.error and return TRUE.
7798 inst.operands[i] describes the destination register. */
7801 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
7804 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
7805 bfd_boolean arm_p
= (t
== CONST_ARM
);
7808 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
7812 if ((inst
.instruction
& tbit
) == 0)
7814 inst
.error
= _("invalid pseudo operation");
7818 if (inst
.reloc
.exp
.X_op
!= O_constant
7819 && inst
.reloc
.exp
.X_op
!= O_symbol
7820 && inst
.reloc
.exp
.X_op
!= O_big
)
7822 inst
.error
= _("constant expression expected");
7826 if (inst
.reloc
.exp
.X_op
== O_constant
7827 || inst
.reloc
.exp
.X_op
== O_big
)
7829 #if defined BFD_HOST_64_BIT
7834 if (inst
.reloc
.exp
.X_op
== O_big
)
7836 LITTLENUM_TYPE w
[X_PRECISION
];
7839 if (inst
.reloc
.exp
.X_add_number
== -1)
7841 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
7843 /* FIXME: Should we check words w[2..5] ? */
7848 #if defined BFD_HOST_64_BIT
7850 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
7851 << LITTLENUM_NUMBER_OF_BITS
)
7852 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
7853 << LITTLENUM_NUMBER_OF_BITS
)
7854 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
7855 << LITTLENUM_NUMBER_OF_BITS
)
7856 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
7858 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
7859 | (l
[0] & LITTLENUM_MASK
);
7863 v
= inst
.reloc
.exp
.X_add_number
;
7865 if (!inst
.operands
[i
].issingle
)
7869 /* This can be encoded only for a low register. */
7870 if ((v
& ~0xFF) == 0 && (inst
.operands
[i
].reg
< 8))
7872 /* This can be done with a mov(1) instruction. */
7873 inst
.instruction
= T_OPCODE_MOV_I8
| (inst
.operands
[i
].reg
<< 8);
7874 inst
.instruction
|= v
;
7878 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
7879 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7881 /* Check if on thumb2 it can be done with a mov.w, mvn or
7882 movw instruction. */
7883 unsigned int newimm
;
7884 bfd_boolean isNegated
;
7886 newimm
= encode_thumb32_immediate (v
);
7887 if (newimm
!= (unsigned int) FAIL
)
7891 newimm
= encode_thumb32_immediate (~v
);
7892 if (newimm
!= (unsigned int) FAIL
)
7896 /* The number can be loaded with a mov.w or mvn
7898 if (newimm
!= (unsigned int) FAIL
7899 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
7901 inst
.instruction
= (0xf04f0000 /* MOV.W. */
7902 | (inst
.operands
[i
].reg
<< 8));
7903 /* Change to MOVN. */
7904 inst
.instruction
|= (isNegated
? 0x200000 : 0);
7905 inst
.instruction
|= (newimm
& 0x800) << 15;
7906 inst
.instruction
|= (newimm
& 0x700) << 4;
7907 inst
.instruction
|= (newimm
& 0x0ff);
7910 /* The number can be loaded with a movw instruction. */
7911 else if ((v
& ~0xFFFF) == 0
7912 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7914 int imm
= v
& 0xFFFF;
7916 inst
.instruction
= 0xf2400000; /* MOVW. */
7917 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
7918 inst
.instruction
|= (imm
& 0xf000) << 4;
7919 inst
.instruction
|= (imm
& 0x0800) << 15;
7920 inst
.instruction
|= (imm
& 0x0700) << 4;
7921 inst
.instruction
|= (imm
& 0x00ff);
7928 int value
= encode_arm_immediate (v
);
7932 /* This can be done with a mov instruction. */
7933 inst
.instruction
&= LITERAL_MASK
;
7934 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
7935 inst
.instruction
|= value
& 0xfff;
7939 value
= encode_arm_immediate (~ v
);
7942 /* This can be done with a mvn instruction. */
7943 inst
.instruction
&= LITERAL_MASK
;
7944 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
7945 inst
.instruction
|= value
& 0xfff;
7949 else if (t
== CONST_VEC
)
7952 unsigned immbits
= 0;
7953 unsigned immlo
= inst
.operands
[1].imm
;
7954 unsigned immhi
= inst
.operands
[1].regisimm
7955 ? inst
.operands
[1].reg
7956 : inst
.reloc
.exp
.X_unsigned
7958 : ((bfd_int64_t
)((int) immlo
)) >> 32;
7959 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
7960 &op
, 64, NT_invtype
);
7964 neon_invert_size (&immlo
, &immhi
, 64);
7966 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
7967 &op
, 64, NT_invtype
);
7972 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
7978 /* Fill other bits in vmov encoding for both thumb and arm. */
7980 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
7982 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
7983 neon_write_immbits (immbits
);
7991 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
7992 if (inst
.operands
[i
].issingle
7993 && is_quarter_float (inst
.operands
[1].imm
)
7994 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
7996 inst
.operands
[1].imm
=
7997 neon_qfloat_bits (v
);
7998 do_vfp_nsyn_opcode ("fconsts");
8002 /* If our host does not support a 64-bit type then we cannot perform
8003 the following optimization. This mean that there will be a
8004 discrepancy between the output produced by an assembler built for
8005 a 32-bit-only host and the output produced from a 64-bit host, but
8006 this cannot be helped. */
8007 #if defined BFD_HOST_64_BIT
8008 else if (!inst
.operands
[1].issingle
8009 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8011 if (is_double_a_single (v
)
8012 && is_quarter_float (double_to_single (v
)))
8014 inst
.operands
[1].imm
=
8015 neon_qfloat_bits (double_to_single (v
));
8016 do_vfp_nsyn_opcode ("fconstd");
8024 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8025 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8028 inst
.operands
[1].reg
= REG_PC
;
8029 inst
.operands
[1].isreg
= 1;
8030 inst
.operands
[1].preind
= 1;
8031 inst
.reloc
.pc_rel
= 1;
8032 inst
.reloc
.type
= (thumb_p
8033 ? BFD_RELOC_ARM_THUMB_OFFSET
8035 ? BFD_RELOC_ARM_HWLITERAL
8036 : BFD_RELOC_ARM_LITERAL
));
8040 /* inst.operands[i] was set up by parse_address. Encode it into an
8041 ARM-format instruction. Reject all forms which cannot be encoded
8042 into a coprocessor load/store instruction. If wb_ok is false,
8043 reject use of writeback; if unind_ok is false, reject use of
8044 unindexed addressing. If reloc_override is not 0, use it instead
8045 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8046 (in which case it is preserved). */
8049 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8051 if (!inst
.operands
[i
].isreg
)
8054 if (! inst
.operands
[0].isvec
)
8056 inst
.error
= _("invalid co-processor operand");
8059 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8063 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8065 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8067 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8069 gas_assert (!inst
.operands
[i
].writeback
);
8072 inst
.error
= _("instruction does not support unindexed addressing");
8075 inst
.instruction
|= inst
.operands
[i
].imm
;
8076 inst
.instruction
|= INDEX_UP
;
8080 if (inst
.operands
[i
].preind
)
8081 inst
.instruction
|= PRE_INDEX
;
8083 if (inst
.operands
[i
].writeback
)
8085 if (inst
.operands
[i
].reg
== REG_PC
)
8087 inst
.error
= _("pc may not be used with write-back");
8092 inst
.error
= _("instruction does not support writeback");
8095 inst
.instruction
|= WRITE_BACK
;
8099 inst
.reloc
.type
= (bfd_reloc_code_real_type
) reloc_override
;
8100 else if ((inst
.reloc
.type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8101 || inst
.reloc
.type
> BFD_RELOC_ARM_LDC_SB_G2
)
8102 && inst
.reloc
.type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8105 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8107 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8110 /* Prefer + for zero encoded value. */
8111 if (!inst
.operands
[i
].negative
)
8112 inst
.instruction
|= INDEX_UP
;
8117 /* Functions for instruction encoding, sorted by sub-architecture.
8118 First some generics; their names are taken from the conventional
8119 bit positions for register arguments in ARM format instructions. */
8129 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8135 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8136 inst
.instruction
|= inst
.operands
[1].reg
;
8142 inst
.instruction
|= inst
.operands
[0].reg
;
8143 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8149 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8150 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8156 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8157 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8163 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8164 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8168 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8170 if (ARM_CPU_IS_ANY (cpu_variant
))
8172 as_tsktsk ("%s", msg
);
8175 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8187 unsigned Rn
= inst
.operands
[2].reg
;
8188 /* Enforce restrictions on SWP instruction. */
8189 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8191 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8192 _("Rn must not overlap other operands"));
8194 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8196 if (!check_obsolete (&arm_ext_v8
,
8197 _("swp{b} use is obsoleted for ARMv8 and later"))
8198 && warn_on_deprecated
8199 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8200 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8203 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8204 inst
.instruction
|= inst
.operands
[1].reg
;
8205 inst
.instruction
|= Rn
<< 16;
8211 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8212 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8213 inst
.instruction
|= inst
.operands
[2].reg
;
8219 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8220 constraint (((inst
.reloc
.exp
.X_op
!= O_constant
8221 && inst
.reloc
.exp
.X_op
!= O_illegal
)
8222 || inst
.reloc
.exp
.X_add_number
!= 0),
8224 inst
.instruction
|= inst
.operands
[0].reg
;
8225 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8226 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8232 inst
.instruction
|= inst
.operands
[0].imm
;
8238 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8239 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8242 /* ARM instructions, in alphabetical order by function name (except
8243 that wrapper functions appear immediately after the function they
8246 /* This is a pseudo-op of the form "adr rd, label" to be converted
8247 into a relative address of the form "add rd, pc, #label-.-8". */
8252 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8254 /* Frag hacking will turn this into a sub instruction if the offset turns
8255 out to be negative. */
8256 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
8257 inst
.reloc
.pc_rel
= 1;
8258 inst
.reloc
.exp
.X_add_number
-= 8;
8261 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8262 into a relative address of the form:
8263 add rd, pc, #low(label-.-8)"
8264 add rd, rd, #high(label-.-8)" */
8269 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8271 /* Frag hacking will turn this into a sub instruction if the offset turns
8272 out to be negative. */
8273 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8274 inst
.reloc
.pc_rel
= 1;
8275 inst
.size
= INSN_SIZE
* 2;
8276 inst
.reloc
.exp
.X_add_number
-= 8;
8282 if (!inst
.operands
[1].present
)
8283 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8284 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8285 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8286 encode_arm_shifter_operand (2);
8292 if (inst
.operands
[0].present
)
8293 inst
.instruction
|= inst
.operands
[0].imm
;
8295 inst
.instruction
|= 0xf;
8301 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8302 constraint (msb
> 32, _("bit-field extends past end of register"));
8303 /* The instruction encoding stores the LSB and MSB,
8304 not the LSB and width. */
8305 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8306 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8307 inst
.instruction
|= (msb
- 1) << 16;
8315 /* #0 in second position is alternative syntax for bfc, which is
8316 the same instruction but with REG_PC in the Rm field. */
8317 if (!inst
.operands
[1].isreg
)
8318 inst
.operands
[1].reg
= REG_PC
;
8320 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8321 constraint (msb
> 32, _("bit-field extends past end of register"));
8322 /* The instruction encoding stores the LSB and MSB,
8323 not the LSB and width. */
8324 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8325 inst
.instruction
|= inst
.operands
[1].reg
;
8326 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8327 inst
.instruction
|= (msb
- 1) << 16;
8333 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8334 _("bit-field extends past end of register"));
8335 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8336 inst
.instruction
|= inst
.operands
[1].reg
;
8337 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8338 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8341 /* ARM V5 breakpoint instruction (argument parse)
8342 BKPT <16 bit unsigned immediate>
8343 Instruction is not conditional.
8344 The bit pattern given in insns[] has the COND_ALWAYS condition,
8345 and it is an error if the caller tried to override that. */
8350 /* Top 12 of 16 bits to bits 19:8. */
8351 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8353 /* Bottom 4 of 16 bits to bits 3:0. */
8354 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8358 encode_branch (int default_reloc
)
8360 if (inst
.operands
[0].hasreloc
)
8362 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8363 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8364 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8365 inst
.reloc
.type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8366 ? BFD_RELOC_ARM_PLT32
8367 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8370 inst
.reloc
.type
= (bfd_reloc_code_real_type
) default_reloc
;
8371 inst
.reloc
.pc_rel
= 1;
8378 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8379 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8382 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8389 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8391 if (inst
.cond
== COND_ALWAYS
)
8392 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8394 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8398 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8401 /* ARM V5 branch-link-exchange instruction (argument parse)
8402 BLX <target_addr> ie BLX(1)
8403 BLX{<condition>} <Rm> ie BLX(2)
8404 Unfortunately, there are two different opcodes for this mnemonic.
8405 So, the insns[].value is not used, and the code here zaps values
8406 into inst.instruction.
8407 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8412 if (inst
.operands
[0].isreg
)
8414 /* Arg is a register; the opcode provided by insns[] is correct.
8415 It is not illegal to do "blx pc", just useless. */
8416 if (inst
.operands
[0].reg
== REG_PC
)
8417 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8419 inst
.instruction
|= inst
.operands
[0].reg
;
8423 /* Arg is an address; this instruction cannot be executed
8424 conditionally, and the opcode must be adjusted.
8425 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8426 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8427 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8428 inst
.instruction
= 0xfa000000;
8429 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8436 bfd_boolean want_reloc
;
8438 if (inst
.operands
[0].reg
== REG_PC
)
8439 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8441 inst
.instruction
|= inst
.operands
[0].reg
;
8442 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8443 it is for ARMv4t or earlier. */
8444 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8445 if (object_arch
&& !ARM_CPU_HAS_FEATURE (*object_arch
, arm_ext_v5
))
8449 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8454 inst
.reloc
.type
= BFD_RELOC_ARM_V4BX
;
8458 /* ARM v5TEJ. Jump to Jazelle code. */
8463 if (inst
.operands
[0].reg
== REG_PC
)
8464 as_tsktsk (_("use of r15 in bxj is not really useful"));
8466 inst
.instruction
|= inst
.operands
[0].reg
;
8469 /* Co-processor data operation:
8470 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8471 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8475 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8476 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
8477 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8478 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8479 inst
.instruction
|= inst
.operands
[4].reg
;
8480 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8486 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8487 encode_arm_shifter_operand (1);
8490 /* Transfer between coprocessor and ARM registers.
8491 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8496 No special properties. */
8498 struct deprecated_coproc_regs_s
8505 arm_feature_set deprecated
;
8506 arm_feature_set obsoleted
;
8507 const char *dep_msg
;
8508 const char *obs_msg
;
8511 #define DEPR_ACCESS_V8 \
8512 N_("This coprocessor register access is deprecated in ARMv8")
8514 /* Table of all deprecated coprocessor registers. */
8515 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
8517 {15, 0, 7, 10, 5, /* CP15DMB. */
8518 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8519 DEPR_ACCESS_V8
, NULL
},
8520 {15, 0, 7, 10, 4, /* CP15DSB. */
8521 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8522 DEPR_ACCESS_V8
, NULL
},
8523 {15, 0, 7, 5, 4, /* CP15ISB. */
8524 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8525 DEPR_ACCESS_V8
, NULL
},
8526 {14, 6, 1, 0, 0, /* TEEHBR. */
8527 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8528 DEPR_ACCESS_V8
, NULL
},
8529 {14, 6, 0, 0, 0, /* TEECR. */
8530 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8531 DEPR_ACCESS_V8
, NULL
},
8534 #undef DEPR_ACCESS_V8
8536 static const size_t deprecated_coproc_reg_count
=
8537 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
8545 Rd
= inst
.operands
[2].reg
;
8548 if (inst
.instruction
== 0xee000010
8549 || inst
.instruction
== 0xfe000010)
8551 reject_bad_reg (Rd
);
8554 constraint (Rd
== REG_SP
, BAD_SP
);
8559 if (inst
.instruction
== 0xe000010)
8560 constraint (Rd
== REG_PC
, BAD_PC
);
8563 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
8565 const struct deprecated_coproc_regs_s
*r
=
8566 deprecated_coproc_regs
+ i
;
8568 if (inst
.operands
[0].reg
== r
->cp
8569 && inst
.operands
[1].imm
== r
->opc1
8570 && inst
.operands
[3].reg
== r
->crn
8571 && inst
.operands
[4].reg
== r
->crm
8572 && inst
.operands
[5].imm
== r
->opc2
)
8574 if (! ARM_CPU_IS_ANY (cpu_variant
)
8575 && warn_on_deprecated
8576 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
8577 as_tsktsk ("%s", r
->dep_msg
);
8581 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8582 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
8583 inst
.instruction
|= Rd
<< 12;
8584 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8585 inst
.instruction
|= inst
.operands
[4].reg
;
8586 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8589 /* Transfer between coprocessor register and pair of ARM registers.
8590 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8595 Two XScale instructions are special cases of these:
8597 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8598 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8600 Result unpredictable if Rd or Rn is R15. */
8607 Rd
= inst
.operands
[2].reg
;
8608 Rn
= inst
.operands
[3].reg
;
8612 reject_bad_reg (Rd
);
8613 reject_bad_reg (Rn
);
8617 constraint (Rd
== REG_PC
, BAD_PC
);
8618 constraint (Rn
== REG_PC
, BAD_PC
);
8621 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8622 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
8623 inst
.instruction
|= Rd
<< 12;
8624 inst
.instruction
|= Rn
<< 16;
8625 inst
.instruction
|= inst
.operands
[4].reg
;
8631 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
8632 if (inst
.operands
[1].present
)
8634 inst
.instruction
|= CPSI_MMOD
;
8635 inst
.instruction
|= inst
.operands
[1].imm
;
8642 inst
.instruction
|= inst
.operands
[0].imm
;
8648 unsigned Rd
, Rn
, Rm
;
8650 Rd
= inst
.operands
[0].reg
;
8651 Rn
= (inst
.operands
[1].present
8652 ? inst
.operands
[1].reg
: Rd
);
8653 Rm
= inst
.operands
[2].reg
;
8655 constraint ((Rd
== REG_PC
), BAD_PC
);
8656 constraint ((Rn
== REG_PC
), BAD_PC
);
8657 constraint ((Rm
== REG_PC
), BAD_PC
);
8659 inst
.instruction
|= Rd
<< 16;
8660 inst
.instruction
|= Rn
<< 0;
8661 inst
.instruction
|= Rm
<< 8;
8667 /* There is no IT instruction in ARM mode. We
8668 process it to do the validation as if in
8669 thumb mode, just in case the code gets
8670 assembled for thumb using the unified syntax. */
8675 set_it_insn_type (IT_INSN
);
8676 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
8677 now_it
.cc
= inst
.operands
[0].imm
;
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* Guard against an empty list: ffs (0) is 0, so i would be -1 and
     the shift below (1 << -1) would be undefined behavior.  */
  return (i < 0 || i > 15 || range != (1 << i)) ? -1 : i;
}
8691 encode_ldmstm(int from_push_pop_mnem
)
8693 int base_reg
= inst
.operands
[0].reg
;
8694 int range
= inst
.operands
[1].imm
;
8697 inst
.instruction
|= base_reg
<< 16;
8698 inst
.instruction
|= range
;
8700 if (inst
.operands
[1].writeback
)
8701 inst
.instruction
|= LDM_TYPE_2_OR_3
;
8703 if (inst
.operands
[0].writeback
)
8705 inst
.instruction
|= WRITE_BACK
;
8706 /* Check for unpredictable uses of writeback. */
8707 if (inst
.instruction
& LOAD_BIT
)
8709 /* Not allowed in LDM type 2. */
8710 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
8711 && ((range
& (1 << REG_PC
)) == 0))
8712 as_warn (_("writeback of base register is UNPREDICTABLE"));
8713 /* Only allowed if base reg not in list for other types. */
8714 else if (range
& (1 << base_reg
))
8715 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8719 /* Not allowed for type 2. */
8720 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
8721 as_warn (_("writeback of base register is UNPREDICTABLE"));
8722 /* Only allowed if base reg not in list, or first in list. */
8723 else if ((range
& (1 << base_reg
))
8724 && (range
& ((1 << base_reg
) - 1)))
8725 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8729 /* If PUSH/POP has only one register, then use the A2 encoding. */
8730 one_reg
= only_one_reg_in_list (range
);
8731 if (from_push_pop_mnem
&& one_reg
>= 0)
8733 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
8735 inst
.instruction
&= A_COND_MASK
;
8736 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
8737 inst
.instruction
|= one_reg
<< 12;
8744 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
8747 /* ARMv5TE load-consecutive (argument parse)
8756 constraint (inst
.operands
[0].reg
% 2 != 0,
8757 _("first transfer register must be even"));
8758 constraint (inst
.operands
[1].present
8759 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8760 _("can only transfer two consecutive registers"));
8761 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8762 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
8764 if (!inst
.operands
[1].present
)
8765 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8767 /* encode_arm_addr_mode_3 will diagnose overlap between the base
8768 register and the first register written; we have to diagnose
8769 overlap between the base and the second register written here. */
8771 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
8772 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
8773 as_warn (_("base register written back, and overlaps "
8774 "second transfer register"));
8776 if (!(inst
.instruction
& V4_STR_BIT
))
8778 /* For an index-register load, the index register must not overlap the
8779 destination (even if not write-back). */
8780 if (inst
.operands
[2].immisreg
8781 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
8782 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
8783 as_warn (_("index register overlaps transfer register"));
8785 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8786 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
8792 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
8793 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
8794 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
8795 || inst
.operands
[1].negative
8796 /* This can arise if the programmer has written
8798 or if they have mistakenly used a register name as the last
8801 It is very difficult to distinguish between these two cases
8802 because "rX" might actually be a label. ie the register
8803 name has been occluded by a symbol of the same name. So we
8804 just generate a general 'bad addressing mode' type error
8805 message and leave it up to the programmer to discover the
8806 true cause and fix their mistake. */
8807 || (inst
.operands
[1].reg
== REG_PC
),
8810 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8811 || inst
.reloc
.exp
.X_add_number
!= 0,
8812 _("offset must be zero in ARM encoding"));
8814 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
8816 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8817 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8818 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8824 constraint (inst
.operands
[0].reg
% 2 != 0,
8825 _("even register required"));
8826 constraint (inst
.operands
[1].present
8827 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8828 _("can only load two consecutive registers"));
8829 /* If op 1 were present and equal to PC, this function wouldn't
8830 have been called in the first place. */
8831 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8833 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8834 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8837 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8838 which is not a multiple of four is UNPREDICTABLE. */
8840 check_ldr_r15_aligned (void)
8842 constraint (!(inst
.operands
[1].immisreg
)
8843 && (inst
.operands
[0].reg
== REG_PC
8844 && inst
.operands
[1].reg
== REG_PC
8845 && (inst
.reloc
.exp
.X_add_number
& 0x3)),
8846 _("ldr to register 15 must be 4-byte alligned"));
8852 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8853 if (!inst
.operands
[1].isreg
)
8854 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
8856 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
8857 check_ldr_r15_aligned ();
8863 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8865 if (inst
.operands
[1].preind
)
8867 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8868 || inst
.reloc
.exp
.X_add_number
!= 0,
8869 _("this instruction requires a post-indexed address"));
8871 inst
.operands
[1].preind
= 0;
8872 inst
.operands
[1].postind
= 1;
8873 inst
.operands
[1].writeback
= 1;
8875 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8876 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
8879 /* Halfword and signed-byte load/store operations. */
8884 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
8885 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8886 if (!inst
.operands
[1].isreg
)
8887 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
8889 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
8895 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8897 if (inst
.operands
[1].preind
)
8899 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8900 || inst
.reloc
.exp
.X_add_number
!= 0,
8901 _("this instruction requires a post-indexed address"));
8903 inst
.operands
[1].preind
= 0;
8904 inst
.operands
[1].postind
= 1;
8905 inst
.operands
[1].writeback
= 1;
8907 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8908 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
8911 /* Co-processor register load/store.
8912 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8916 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8917 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8918 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
8924 /* This restriction does not apply to mls (nor to mla in v6 or later). */
8925 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
8926 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
8927 && !(inst
.instruction
& 0x00400000))
8928 as_tsktsk (_("Rd and Rm should be different in mla"));
8930 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8931 inst
.instruction
|= inst
.operands
[1].reg
;
8932 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
8933 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
8939 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8940 encode_arm_shifter_operand (1);
8943 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8950 top
= (inst
.instruction
& 0x00400000) != 0;
8951 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
8952 _(":lower16: not allowed this instruction"));
8953 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
8954 _(":upper16: not allowed instruction"));
8955 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8956 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
8958 imm
= inst
.reloc
.exp
.X_add_number
;
8959 /* The value is in two pieces: 0:11, 16:19. */
8960 inst
.instruction
|= (imm
& 0x00000fff);
8961 inst
.instruction
|= (imm
& 0x0000f000) << 4;
8966 do_vfp_nsyn_mrs (void)
8968 if (inst
.operands
[0].isvec
)
8970 if (inst
.operands
[1].reg
!= 1)
8971 first_error (_("operand 1 must be FPSCR"));
8972 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
8973 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
8974 do_vfp_nsyn_opcode ("fmstat");
8976 else if (inst
.operands
[1].isvec
)
8977 do_vfp_nsyn_opcode ("fmrx");
8985 do_vfp_nsyn_msr (void)
8987 if (inst
.operands
[0].isvec
)
8988 do_vfp_nsyn_opcode ("fmxr");
8998 unsigned Rt
= inst
.operands
[0].reg
;
9000 if (thumb_mode
&& Rt
== REG_SP
)
9002 inst
.error
= BAD_SP
;
9006 /* APSR_ sets isvec. All other refs to PC are illegal. */
9007 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9009 inst
.error
= BAD_PC
;
9013 /* If we get through parsing the register name, we just insert the number
9014 generated into the instruction without further validation. */
9015 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9016 inst
.instruction
|= (Rt
<< 12);
9022 unsigned Rt
= inst
.operands
[1].reg
;
9025 reject_bad_reg (Rt
);
9026 else if (Rt
== REG_PC
)
9028 inst
.error
= BAD_PC
;
9032 /* If we get through parsing the register name, we just insert the number
9033 generated into the instruction without further validation. */
9034 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9035 inst
.instruction
|= (Rt
<< 12);
9043 if (do_vfp_nsyn_mrs () == SUCCESS
)
9046 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9047 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9049 if (inst
.operands
[1].isreg
)
9051 br
= inst
.operands
[1].reg
;
9052 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf000))
9053 as_bad (_("bad register for mrs"));
9057 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9058 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9060 _("'APSR', 'CPSR' or 'SPSR' expected"));
9061 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9064 inst
.instruction
|= br
;
9067 /* Two possible forms:
9068 "{C|S}PSR_<field>, Rm",
9069 "{C|S}PSR_f, #expression". */
9074 if (do_vfp_nsyn_msr () == SUCCESS
)
9077 inst
.instruction
|= inst
.operands
[0].imm
;
9078 if (inst
.operands
[1].isreg
)
9079 inst
.instruction
|= inst
.operands
[1].reg
;
9082 inst
.instruction
|= INST_IMMEDIATE
;
9083 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
9084 inst
.reloc
.pc_rel
= 0;
9091 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9093 if (!inst
.operands
[2].present
)
9094 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9095 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9096 inst
.instruction
|= inst
.operands
[1].reg
;
9097 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9099 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9100 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9101 as_tsktsk (_("Rd and Rm should be different in mul"));
9104 /* Long Multiply Parser
9105 UMULL RdLo, RdHi, Rm, Rs
9106 SMULL RdLo, RdHi, Rm, Rs
9107 UMLAL RdLo, RdHi, Rm, Rs
9108 SMLAL RdLo, RdHi, Rm, Rs. */
9113 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9114 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9115 inst
.instruction
|= inst
.operands
[2].reg
;
9116 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9118 /* rdhi and rdlo must be different. */
9119 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9120 as_tsktsk (_("rdhi and rdlo must be different"));
9122 /* rdhi, rdlo and rm must all be different before armv6. */
9123 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9124 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9125 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9126 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9132 if (inst
.operands
[0].present
9133 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9135 /* Architectural NOP hints are CPSR sets with no bits selected. */
9136 inst
.instruction
&= 0xf0000000;
9137 inst
.instruction
|= 0x0320f000;
9138 if (inst
.operands
[0].present
)
9139 inst
.instruction
|= inst
.operands
[0].imm
;
9143 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9144 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9145 Condition defaults to COND_ALWAYS.
9146 Error if Rd, Rn or Rm are R15. */
9151 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9152 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9153 inst
.instruction
|= inst
.operands
[2].reg
;
9154 if (inst
.operands
[3].present
)
9155 encode_arm_shift (3);
9158 /* ARM V6 PKHTB (Argument Parse). */
9163 if (!inst
.operands
[3].present
)
9165 /* If the shift specifier is omitted, turn the instruction
9166 into pkhbt rd, rm, rn. */
9167 inst
.instruction
&= 0xfff00010;
9168 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9169 inst
.instruction
|= inst
.operands
[1].reg
;
9170 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9174 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9175 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9176 inst
.instruction
|= inst
.operands
[2].reg
;
9177 encode_arm_shift (3);
9181 /* ARMv5TE: Preload-Cache
9182 MP Extensions: Preload for write
9186 Syntactically, like LDR with B=1, W=0, L=1. */
9191 constraint (!inst
.operands
[0].isreg
,
9192 _("'[' expected after PLD mnemonic"));
9193 constraint (inst
.operands
[0].postind
,
9194 _("post-indexed expression used in preload instruction"));
9195 constraint (inst
.operands
[0].writeback
,
9196 _("writeback used in preload instruction"));
9197 constraint (!inst
.operands
[0].preind
,
9198 _("unindexed addressing used in preload instruction"));
9199 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9202 /* ARMv7: PLI <addr_mode> */
9206 constraint (!inst
.operands
[0].isreg
,
9207 _("'[' expected after PLI mnemonic"));
9208 constraint (inst
.operands
[0].postind
,
9209 _("post-indexed expression used in preload instruction"));
9210 constraint (inst
.operands
[0].writeback
,
9211 _("writeback used in preload instruction"));
9212 constraint (!inst
.operands
[0].preind
,
9213 _("unindexed addressing used in preload instruction"));
9214 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9215 inst
.instruction
&= ~PRE_INDEX
;
9221 constraint (inst
.operands
[0].writeback
,
9222 _("push/pop do not support {reglist}^"));
9223 inst
.operands
[1] = inst
.operands
[0];
9224 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9225 inst
.operands
[0].isreg
= 1;
9226 inst
.operands
[0].writeback
= 1;
9227 inst
.operands
[0].reg
= REG_SP
;
9228 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9231 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9232 word at the specified address and the following word
9234 Unconditionally executed.
9235 Error if Rn is R15. */
9240 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9241 if (inst
.operands
[0].writeback
)
9242 inst
.instruction
|= WRITE_BACK
;
9245 /* ARM V6 ssat (argument parse). */
9250 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9251 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9252 inst
.instruction
|= inst
.operands
[2].reg
;
9254 if (inst
.operands
[3].present
)
9255 encode_arm_shift (3);
9258 /* ARM V6 usat (argument parse). */
9263 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9264 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9265 inst
.instruction
|= inst
.operands
[2].reg
;
9267 if (inst
.operands
[3].present
)
9268 encode_arm_shift (3);
9271 /* ARM V6 ssat16 (argument parse). */
9276 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9277 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9278 inst
.instruction
|= inst
.operands
[2].reg
;
9284 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9285 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9286 inst
.instruction
|= inst
.operands
[2].reg
;
9289 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9290 preserving the other bits.
9292 setend <endian_specifier>, where <endian_specifier> is either
9298 if (warn_on_deprecated
9299 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9300 as_tsktsk (_("setend use is deprecated for ARMv8"));
9302 if (inst
.operands
[0].imm
)
9303 inst
.instruction
|= 0x200;
9309 unsigned int Rm
= (inst
.operands
[1].present
9310 ? inst
.operands
[1].reg
9311 : inst
.operands
[0].reg
);
9313 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9314 inst
.instruction
|= Rm
;
9315 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9317 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9318 inst
.instruction
|= SHIFT_BY_REG
;
9319 /* PR 12854: Error on extraneous shifts. */
9320 constraint (inst
.operands
[2].shifted
,
9321 _("extraneous shift as part of operand to shift insn"));
9324 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
9330 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
9331 inst
.reloc
.pc_rel
= 0;
9337 inst
.reloc
.type
= BFD_RELOC_ARM_HVC
;
9338 inst
.reloc
.pc_rel
= 0;
9344 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
9345 inst
.reloc
.pc_rel
= 0;
9351 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9352 _("selected processor does not support SETPAN instruction"));
9354 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9360 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9361 _("selected processor does not support SETPAN instruction"));
9363 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9366 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9367 SMLAxy{cond} Rd,Rm,Rs,Rn
9368 SMLAWy{cond} Rd,Rm,Rs,Rn
9369 Error if any register is R15. */
9374 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9375 inst
.instruction
|= inst
.operands
[1].reg
;
9376 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9377 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9380 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9381 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9382 Error if any register is R15.
9383 Warning if Rdlo == Rdhi. */
9388 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9389 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9390 inst
.instruction
|= inst
.operands
[2].reg
;
9391 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9393 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9394 as_tsktsk (_("rdhi and rdlo must be different"));
9397 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9398 SMULxy{cond} Rd,Rm,Rs
9399 Error if any register is R15. */
9404 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9405 inst
.instruction
|= inst
.operands
[1].reg
;
9406 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9409 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9410 the same for both ARM and Thumb-2. */
9417 if (inst
.operands
[0].present
)
9419 reg
= inst
.operands
[0].reg
;
9420 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9425 inst
.instruction
|= reg
<< 16;
9426 inst
.instruction
|= inst
.operands
[1].imm
;
9427 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9428 inst
.instruction
|= WRITE_BACK
;
9431 /* ARM V6 strex (argument parse). */
9436 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9437 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9438 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9439 || inst
.operands
[2].negative
9440 /* See comment in do_ldrex(). */
9441 || (inst
.operands
[2].reg
== REG_PC
),
9444 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9445 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9447 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9448 || inst
.reloc
.exp
.X_add_number
!= 0,
9449 _("offset must be zero in ARM encoding"));
9451 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9452 inst
.instruction
|= inst
.operands
[1].reg
;
9453 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9454 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9460 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9461 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9462 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9463 || inst
.operands
[2].negative
,
9466 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9467 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9475 constraint (inst
.operands
[1].reg
% 2 != 0,
9476 _("even register required"));
9477 constraint (inst
.operands
[2].present
9478 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
9479 _("can only store two consecutive registers"));
9480 /* If op 2 were present and equal to PC, this function wouldn't
9481 have been called in the first place. */
9482 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
9484 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9485 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
9486 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
9489 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9490 inst
.instruction
|= inst
.operands
[1].reg
;
9491 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9498 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9499 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9507 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9508 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9513 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9514 extends it to 32-bits, and adds the result to a value in another
9515 register. You can specify a rotation by 0, 8, 16, or 24 bits
9516 before extracting the 16-bit value.
9517 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9518 Condition defaults to COND_ALWAYS.
9519 Error if any register uses R15. */
9524 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9525 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9526 inst
.instruction
|= inst
.operands
[2].reg
;
9527 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
9532 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9533 Condition defaults to COND_ALWAYS.
9534 Error if any register uses R15. */
9539 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9540 inst
.instruction
|= inst
.operands
[1].reg
;
9541 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
9544 /* VFP instructions. In a logical order: SP variant first, monad
9545 before dyad, arithmetic then move then load/store. */
9548 do_vfp_sp_monadic (void)
9550 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9551 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9555 do_vfp_sp_dyadic (void)
9557 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9558 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9559 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9563 do_vfp_sp_compare_z (void)
9565 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9569 do_vfp_dp_sp_cvt (void)
9571 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9572 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9576 do_vfp_sp_dp_cvt (void)
9578 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9579 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9583 do_vfp_reg_from_sp (void)
9585 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9586 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9590 do_vfp_reg2_from_sp2 (void)
9592 constraint (inst
.operands
[2].imm
!= 2,
9593 _("only two consecutive VFP SP registers allowed here"));
9594 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9595 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9596 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9600 do_vfp_sp_from_reg (void)
9602 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
9603 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9607 do_vfp_sp2_from_reg2 (void)
9609 constraint (inst
.operands
[0].imm
!= 2,
9610 _("only two consecutive VFP SP registers allowed here"));
9611 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
9612 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9613 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9617 do_vfp_sp_ldst (void)
9619 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9620 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9624 do_vfp_dp_ldst (void)
9626 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9627 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9632 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
9634 if (inst
.operands
[0].writeback
)
9635 inst
.instruction
|= WRITE_BACK
;
9637 constraint (ldstm_type
!= VFP_LDSTMIA
,
9638 _("this addressing mode requires base-register writeback"));
9639 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9640 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
9641 inst
.instruction
|= inst
.operands
[1].imm
;
9645 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
9649 if (inst
.operands
[0].writeback
)
9650 inst
.instruction
|= WRITE_BACK
;
9652 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
9653 _("this addressing mode requires base-register writeback"));
9655 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9656 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9658 count
= inst
.operands
[1].imm
<< 1;
9659 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
9662 inst
.instruction
|= count
;
9666 do_vfp_sp_ldstmia (void)
9668 vfp_sp_ldstm (VFP_LDSTMIA
);
9672 do_vfp_sp_ldstmdb (void)
9674 vfp_sp_ldstm (VFP_LDSTMDB
);
9678 do_vfp_dp_ldstmia (void)
9680 vfp_dp_ldstm (VFP_LDSTMIA
);
9684 do_vfp_dp_ldstmdb (void)
9686 vfp_dp_ldstm (VFP_LDSTMDB
);
9690 do_vfp_xp_ldstmia (void)
9692 vfp_dp_ldstm (VFP_LDSTMIAX
);
9696 do_vfp_xp_ldstmdb (void)
9698 vfp_dp_ldstm (VFP_LDSTMDBX
);
9702 do_vfp_dp_rd_rm (void)
9704 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9705 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9709 do_vfp_dp_rn_rd (void)
9711 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
9712 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9716 do_vfp_dp_rd_rn (void)
9718 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9719 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9723 do_vfp_dp_rd_rn_rm (void)
9725 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9726 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9727 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
9733 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9737 do_vfp_dp_rm_rd_rn (void)
9739 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
9740 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9741 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
9744 /* VFPv3 instructions. */
9746 do_vfp_sp_const (void)
9748 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9749 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9750 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9754 do_vfp_dp_const (void)
9756 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9757 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9758 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9762 vfp_conv (int srcsize
)
9764 int immbits
= srcsize
- inst
.operands
[1].imm
;
9766 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
9768 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9769 i.e. immbits must be in range 0 - 16. */
9770 inst
.error
= _("immediate value out of range, expected range [0, 16]");
9773 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
9775 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9776 i.e. immbits must be in range 0 - 31. */
9777 inst
.error
= _("immediate value out of range, expected range [1, 32]");
9781 inst
.instruction
|= (immbits
& 1) << 5;
9782 inst
.instruction
|= (immbits
>> 1);
9786 do_vfp_sp_conv_16 (void)
9788 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9793 do_vfp_dp_conv_16 (void)
9795 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9800 do_vfp_sp_conv_32 (void)
9802 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9807 do_vfp_dp_conv_32 (void)
9809 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9813 /* FPA instructions. Also in a logical order. */
9818 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9819 inst
.instruction
|= inst
.operands
[1].reg
;
9823 do_fpa_ldmstm (void)
9825 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9826 switch (inst
.operands
[1].imm
)
9828 case 1: inst
.instruction
|= CP_T_X
; break;
9829 case 2: inst
.instruction
|= CP_T_Y
; break;
9830 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
9835 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
9837 /* The instruction specified "ea" or "fd", so we can only accept
9838 [Rn]{!}. The instruction does not really support stacking or
9839 unstacking, so we have to emulate these by setting appropriate
9840 bits and offsets. */
9841 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9842 || inst
.reloc
.exp
.X_add_number
!= 0,
9843 _("this instruction does not support indexing"));
9845 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
9846 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
9848 if (!(inst
.instruction
& INDEX_UP
))
9849 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
9851 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
9853 inst
.operands
[2].preind
= 0;
9854 inst
.operands
[2].postind
= 1;
9858 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9861 /* iWMMXt instructions: strictly in alphabetical order. */
9864 do_iwmmxt_tandorc (void)
9866 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
9870 do_iwmmxt_textrc (void)
9872 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9873 inst
.instruction
|= inst
.operands
[1].imm
;
9877 do_iwmmxt_textrm (void)
9879 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9880 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9881 inst
.instruction
|= inst
.operands
[2].imm
;
9885 do_iwmmxt_tinsr (void)
9887 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9888 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9889 inst
.instruction
|= inst
.operands
[2].imm
;
9893 do_iwmmxt_tmia (void)
9895 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
9896 inst
.instruction
|= inst
.operands
[1].reg
;
9897 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
9901 do_iwmmxt_waligni (void)
9903 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9904 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9905 inst
.instruction
|= inst
.operands
[2].reg
;
9906 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
9910 do_iwmmxt_wmerge (void)
9912 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9913 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9914 inst
.instruction
|= inst
.operands
[2].reg
;
9915 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
9919 do_iwmmxt_wmov (void)
9921 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
9922 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9923 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9924 inst
.instruction
|= inst
.operands
[1].reg
;
9928 do_iwmmxt_wldstbh (void)
9931 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9933 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
9935 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
9936 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
9940 do_iwmmxt_wldstw (void)
9942 /* RIWR_RIWC clears .isreg for a control register. */
9943 if (!inst
.operands
[0].isreg
)
9945 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
9946 inst
.instruction
|= 0xf0000000;
9949 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9950 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
9954 do_iwmmxt_wldstd (void)
9956 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9957 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
9958 && inst
.operands
[1].immisreg
)
9960 inst
.instruction
&= ~0x1a000ff;
9961 inst
.instruction
|= (0xfU
<< 28);
9962 if (inst
.operands
[1].preind
)
9963 inst
.instruction
|= PRE_INDEX
;
9964 if (!inst
.operands
[1].negative
)
9965 inst
.instruction
|= INDEX_UP
;
9966 if (inst
.operands
[1].writeback
)
9967 inst
.instruction
|= WRITE_BACK
;
9968 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9969 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
9970 inst
.instruction
|= inst
.operands
[1].imm
;
9973 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
9977 do_iwmmxt_wshufh (void)
9979 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9980 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9981 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
9982 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
9986 do_iwmmxt_wzero (void)
9988 /* WZERO reg is an alias for WANDN reg, reg, reg. */
9989 inst
.instruction
|= inst
.operands
[0].reg
;
9990 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9991 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9995 do_iwmmxt_wrwrwr_or_imm5 (void)
9997 if (inst
.operands
[2].isreg
)
10000 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
10001 _("immediate operand requires iWMMXt2"));
10003 if (inst
.operands
[2].imm
== 0)
10005 switch ((inst
.instruction
>> 20) & 0xf)
10011 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10012 inst
.operands
[2].imm
= 16;
10013 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
10019 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10020 inst
.operands
[2].imm
= 32;
10021 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
10028 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10030 wrn
= (inst
.instruction
>> 16) & 0xf;
10031 inst
.instruction
&= 0xff0fff0f;
10032 inst
.instruction
|= wrn
;
10033 /* Bail out here; the instruction is now assembled. */
10038 /* Map 32 -> 0, etc. */
10039 inst
.operands
[2].imm
&= 0x1f;
10040 inst
.instruction
|= (0xfU
<< 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
10044 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10045 operations first, then control, shift, and load/store. */
10047 /* Insns like "foo X,Y,Z". */
10050 do_mav_triple (void)
10052 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10053 inst
.instruction
|= inst
.operands
[1].reg
;
10054 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10057 /* Insns like "foo W,X,Y,Z".
10058 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10063 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10064 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10065 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10066 inst
.instruction
|= inst
.operands
[3].reg
;
10069 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10071 do_mav_dspsc (void)
10073 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10076 /* Maverick shift immediate instructions.
10077 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10078 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10081 do_mav_shift (void)
10083 int imm
= inst
.operands
[2].imm
;
10085 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10086 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10088 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10089 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10090 Bit 4 should be 0. */
10091 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
10093 inst
.instruction
|= imm
;
10096 /* XScale instructions. Also sorted arithmetic before move. */
10098 /* Xscale multiply-accumulate (argument parse)
10101 MIAxycc acc0,Rm,Rs. */
10106 inst
.instruction
|= inst
.operands
[1].reg
;
10107 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10110 /* Xscale move-accumulator-register (argument parse)
10112 MARcc acc0,RdLo,RdHi. */
10117 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10118 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10121 /* Xscale move-register-accumulator (argument parse)
10123 MRAcc RdLo,RdHi,acc0. */
10128 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10129 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10130 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10133 /* Encoding functions relevant only to Thumb. */
10135 /* inst.operands[i] is a shifted-register operand; encode
10136 it into inst.instruction in the format used by Thumb32. */
10139 encode_thumb32_shifted_operand (int i
)
10141 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10142 unsigned int shift
= inst
.operands
[i
].shift_kind
;
10144 constraint (inst
.operands
[i
].immisreg
,
10145 _("shift by register not allowed in thumb mode"));
10146 inst
.instruction
|= inst
.operands
[i
].reg
;
10147 if (shift
== SHIFT_RRX
)
10148 inst
.instruction
|= SHIFT_ROR
<< 4;
10151 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10152 _("expression too complex"));
10154 constraint (value
> 32
10155 || (value
== 32 && (shift
== SHIFT_LSL
10156 || shift
== SHIFT_ROR
)),
10157 _("shift expression is too large"));
10161 else if (value
== 32)
10164 inst
.instruction
|= shift
<< 4;
10165 inst
.instruction
|= (value
& 0x1c) << 10;
10166 inst
.instruction
|= (value
& 0x03) << 6;
10171 /* inst.operands[i] was set up by parse_address. Encode it into a
10172 Thumb32 format load or store instruction. Reject forms that cannot
10173 be used with such instructions. If is_t is true, reject forms that
10174 cannot be used with a T instruction; if is_d is true, reject forms
10175 that cannot be used with a D instruction. If it is a store insn,
10176 reject PC in Rn. */
10179 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
10181 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
10183 constraint (!inst
.operands
[i
].isreg
,
10184 _("Instruction does not support =N addresses"));
10186 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
10187 if (inst
.operands
[i
].immisreg
)
10189 constraint (is_pc
, BAD_PC_ADDRESSING
);
10190 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
10191 constraint (inst
.operands
[i
].negative
,
10192 _("Thumb does not support negative register indexing"));
10193 constraint (inst
.operands
[i
].postind
,
10194 _("Thumb does not support register post-indexing"));
10195 constraint (inst
.operands
[i
].writeback
,
10196 _("Thumb does not support register indexing with writeback"));
10197 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
10198 _("Thumb supports only LSL in shifted register indexing"));
10200 inst
.instruction
|= inst
.operands
[i
].imm
;
10201 if (inst
.operands
[i
].shifted
)
10203 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10204 _("expression too complex"));
10205 constraint (inst
.reloc
.exp
.X_add_number
< 0
10206 || inst
.reloc
.exp
.X_add_number
> 3,
10207 _("shift out of range"));
10208 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
10210 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10212 else if (inst
.operands
[i
].preind
)
10214 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
10215 constraint (is_t
&& inst
.operands
[i
].writeback
,
10216 _("cannot use writeback with this instruction"));
10217 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
10218 BAD_PC_ADDRESSING
);
10222 inst
.instruction
|= 0x01000000;
10223 if (inst
.operands
[i
].writeback
)
10224 inst
.instruction
|= 0x00200000;
10228 inst
.instruction
|= 0x00000c00;
10229 if (inst
.operands
[i
].writeback
)
10230 inst
.instruction
|= 0x00000100;
10232 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10234 else if (inst
.operands
[i
].postind
)
10236 gas_assert (inst
.operands
[i
].writeback
);
10237 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
10238 constraint (is_t
, _("cannot use post-indexing with this instruction"));
10241 inst
.instruction
|= 0x00200000;
10243 inst
.instruction
|= 0x00000900;
10244 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10246 else /* unindexed - only for coprocessor */
10247 inst
.error
= _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
10356 /* Thumb instruction encoders, in alphabetical order. */
10358 /* ADDW or SUBW. */
10361 do_t_add_sub_w (void)
10365 Rd
= inst
.operands
[0].reg
;
10366 Rn
= inst
.operands
[1].reg
;
10368 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10369 is the SP-{plus,minus}-immediate form of the instruction. */
10371 constraint (Rd
== REG_PC
, BAD_PC
);
10373 reject_bad_reg (Rd
);
10375 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
10376 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10379 /* Parse an add or subtract instruction. We get here with inst.instruction
10380 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10383 do_t_add_sub (void)
10387 Rd
= inst
.operands
[0].reg
;
10388 Rs
= (inst
.operands
[1].present
10389 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10390 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10393 set_it_insn_type_last ();
10395 if (unified_syntax
)
10398 bfd_boolean narrow
;
10401 flags
= (inst
.instruction
== T_MNEM_adds
10402 || inst
.instruction
== T_MNEM_subs
);
10404 narrow
= !in_it_block ();
10406 narrow
= in_it_block ();
10407 if (!inst
.operands
[2].isreg
)
10411 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10413 add
= (inst
.instruction
== T_MNEM_add
10414 || inst
.instruction
== T_MNEM_adds
);
10416 if (inst
.size_req
!= 4)
10418 /* Attempt to use a narrow opcode, with relaxation if
10420 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
10421 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
10422 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
10423 opcode
= T_MNEM_add_sp
;
10424 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
10425 opcode
= T_MNEM_add_pc
;
10426 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
10429 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
10431 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
10435 inst
.instruction
= THUMB_OP16(opcode
);
10436 inst
.instruction
|= (Rd
<< 4) | Rs
;
10437 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10438 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
10439 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10440 if (inst
.size_req
!= 2)
10441 inst
.relax
= opcode
;
10444 constraint (inst
.size_req
== 2, BAD_HIREG
);
10446 if (inst
.size_req
== 4
10447 || (inst
.size_req
!= 2 && !opcode
))
10451 constraint (add
, BAD_PC
);
10452 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
10453 _("only SUBS PC, LR, #const allowed"));
10454 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10455 _("expression too complex"));
10456 constraint (inst
.reloc
.exp
.X_add_number
< 0
10457 || inst
.reloc
.exp
.X_add_number
> 0xff,
10458 _("immediate value out of range"));
10459 inst
.instruction
= T2_SUBS_PC_LR
10460 | inst
.reloc
.exp
.X_add_number
;
10461 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10464 else if (Rs
== REG_PC
)
10466 /* Always use addw/subw. */
10467 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
10468 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10472 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10473 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
10476 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10478 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
10480 inst
.instruction
|= Rd
<< 8;
10481 inst
.instruction
|= Rs
<< 16;
10486 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10487 unsigned int shift
= inst
.operands
[2].shift_kind
;
10489 Rn
= inst
.operands
[2].reg
;
10490 /* See if we can do this with a 16-bit instruction. */
10491 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
10493 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10498 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
10499 || inst
.instruction
== T_MNEM_add
)
10501 : T_OPCODE_SUB_R3
);
10502 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10506 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
10508 /* Thumb-1 cores (except v6-M) require at least one high
10509 register in a narrow non flag setting add. */
10510 if (Rd
> 7 || Rn
> 7
10511 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
10512 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
10519 inst
.instruction
= T_OPCODE_ADD_HI
;
10520 inst
.instruction
|= (Rd
& 8) << 4;
10521 inst
.instruction
|= (Rd
& 7);
10522 inst
.instruction
|= Rn
<< 3;
10528 constraint (Rd
== REG_PC
, BAD_PC
);
10529 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10530 constraint (Rs
== REG_PC
, BAD_PC
);
10531 reject_bad_reg (Rn
);
10533 /* If we get here, it can't be done in 16 bits. */
10534 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
10535 _("shift must be constant"));
10536 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10537 inst
.instruction
|= Rd
<< 8;
10538 inst
.instruction
|= Rs
<< 16;
10539 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
10540 _("shift value over 3 not allowed in thumb mode"));
10541 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
10542 _("only LSL shift allowed in thumb mode"));
10543 encode_thumb32_shifted_operand (2);
10548 constraint (inst
.instruction
== T_MNEM_adds
10549 || inst
.instruction
== T_MNEM_subs
,
10552 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
10554 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
10555 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
10558 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10559 ? 0x0000 : 0x8000);
10560 inst
.instruction
|= (Rd
<< 4) | Rs
;
10561 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10565 Rn
= inst
.operands
[2].reg
;
10566 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
10568 /* We now have Rd, Rs, and Rn set to registers. */
10569 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10571 /* Can't do this for SUB. */
10572 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
10573 inst
.instruction
= T_OPCODE_ADD_HI
;
10574 inst
.instruction
|= (Rd
& 8) << 4;
10575 inst
.instruction
|= (Rd
& 7);
10577 inst
.instruction
|= Rn
<< 3;
10579 inst
.instruction
|= Rs
<< 3;
10581 constraint (1, _("dest must overlap one source register"));
10585 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10586 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
10587 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10597 Rd
= inst
.operands
[0].reg
;
10598 reject_bad_reg (Rd
);
10600 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
10602 /* Defer to section relaxation. */
10603 inst
.relax
= inst
.instruction
;
10604 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10605 inst
.instruction
|= Rd
<< 4;
10607 else if (unified_syntax
&& inst
.size_req
!= 2)
10609 /* Generate a 32-bit opcode. */
10610 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10611 inst
.instruction
|= Rd
<< 8;
10612 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
10613 inst
.reloc
.pc_rel
= 1;
10617 /* Generate a 16-bit opcode. */
10618 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10619 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10620 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
10621 inst
.reloc
.pc_rel
= 1;
10623 inst
.instruction
|= Rd
<< 4;
10627 /* Arithmetic instructions for which there is just one 16-bit
10628 instruction encoding, and it allows only two low registers.
10629 For maximal compatibility with ARM syntax, we allow three register
10630 operands even when Thumb-32 instructions are not available, as long
10631 as the first two are identical. For instance, both "sbc r0,r1" and
10632 "sbc r0,r0,r1" are allowed. */
10638 Rd
= inst
.operands
[0].reg
;
10639 Rs
= (inst
.operands
[1].present
10640 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10641 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10642 Rn
= inst
.operands
[2].reg
;
10644 reject_bad_reg (Rd
);
10645 reject_bad_reg (Rs
);
10646 if (inst
.operands
[2].isreg
)
10647 reject_bad_reg (Rn
);
10649 if (unified_syntax
)
10651 if (!inst
.operands
[2].isreg
)
10653 /* For an immediate, we always generate a 32-bit opcode;
10654 section relaxation will shrink it later if possible. */
10655 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10656 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10657 inst
.instruction
|= Rd
<< 8;
10658 inst
.instruction
|= Rs
<< 16;
10659 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10663 bfd_boolean narrow
;
10665 /* See if we can do this with a 16-bit instruction. */
10666 if (THUMB_SETS_FLAGS (inst
.instruction
))
10667 narrow
= !in_it_block ();
10669 narrow
= in_it_block ();
10671 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10673 if (inst
.operands
[2].shifted
)
10675 if (inst
.size_req
== 4)
10681 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10682 inst
.instruction
|= Rd
;
10683 inst
.instruction
|= Rn
<< 3;
10687 /* If we get here, it can't be done in 16 bits. */
10688 constraint (inst
.operands
[2].shifted
10689 && inst
.operands
[2].immisreg
,
10690 _("shift must be constant"));
10691 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10692 inst
.instruction
|= Rd
<< 8;
10693 inst
.instruction
|= Rs
<< 16;
10694 encode_thumb32_shifted_operand (2);
10699 /* On its face this is a lie - the instruction does set the
10700 flags. However, the only supported mnemonic in this mode
10701 says it doesn't. */
10702 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10704 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10705 _("unshifted register required"));
10706 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10707 constraint (Rd
!= Rs
,
10708 _("dest and source1 must be the same register"));
10710 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10711 inst
.instruction
|= Rd
;
10712 inst
.instruction
|= Rn
<< 3;
10716 /* Similarly, but for instructions where the arithmetic operation is
10717 commutative, so we can allow either of them to be different from
10718 the destination operand in a 16-bit instruction. For instance, all
10719 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10726 Rd
= inst
.operands
[0].reg
;
10727 Rs
= (inst
.operands
[1].present
10728 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10729 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10730 Rn
= inst
.operands
[2].reg
;
10732 reject_bad_reg (Rd
);
10733 reject_bad_reg (Rs
);
10734 if (inst
.operands
[2].isreg
)
10735 reject_bad_reg (Rn
);
10737 if (unified_syntax
)
10739 if (!inst
.operands
[2].isreg
)
10741 /* For an immediate, we always generate a 32-bit opcode;
10742 section relaxation will shrink it later if possible. */
10743 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10744 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10745 inst
.instruction
|= Rd
<< 8;
10746 inst
.instruction
|= Rs
<< 16;
10747 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10751 bfd_boolean narrow
;
10753 /* See if we can do this with a 16-bit instruction. */
10754 if (THUMB_SETS_FLAGS (inst
.instruction
))
10755 narrow
= !in_it_block ();
10757 narrow
= in_it_block ();
10759 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10761 if (inst
.operands
[2].shifted
)
10763 if (inst
.size_req
== 4)
10770 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10771 inst
.instruction
|= Rd
;
10772 inst
.instruction
|= Rn
<< 3;
10777 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10778 inst
.instruction
|= Rd
;
10779 inst
.instruction
|= Rs
<< 3;
10784 /* If we get here, it can't be done in 16 bits. */
10785 constraint (inst
.operands
[2].shifted
10786 && inst
.operands
[2].immisreg
,
10787 _("shift must be constant"));
10788 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10789 inst
.instruction
|= Rd
<< 8;
10790 inst
.instruction
|= Rs
<< 16;
10791 encode_thumb32_shifted_operand (2);
10796 /* On its face this is a lie - the instruction does set the
10797 flags. However, the only supported mnemonic in this mode
10798 says it doesn't. */
10799 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10801 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10802 _("unshifted register required"));
10803 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10805 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10806 inst
.instruction
|= Rd
;
10809 inst
.instruction
|= Rn
<< 3;
10811 inst
.instruction
|= Rs
<< 3;
10813 constraint (1, _("dest must overlap one source register"));
10821 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
10822 constraint (msb
> 32, _("bit-field extends past end of register"));
10823 /* The instruction encoding stores the LSB and MSB,
10824 not the LSB and width. */
10825 Rd
= inst
.operands
[0].reg
;
10826 reject_bad_reg (Rd
);
10827 inst
.instruction
|= Rd
<< 8;
10828 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
10829 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
10830 inst
.instruction
|= msb
- 1;
10839 Rd
= inst
.operands
[0].reg
;
10840 reject_bad_reg (Rd
);
10842 /* #0 in second position is alternative syntax for bfc, which is
10843 the same instruction but with REG_PC in the Rm field. */
10844 if (!inst
.operands
[1].isreg
)
10848 Rn
= inst
.operands
[1].reg
;
10849 reject_bad_reg (Rn
);
10852 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
10853 constraint (msb
> 32, _("bit-field extends past end of register"));
10854 /* The instruction encoding stores the LSB and MSB,
10855 not the LSB and width. */
10856 inst
.instruction
|= Rd
<< 8;
10857 inst
.instruction
|= Rn
<< 16;
10858 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10859 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10860 inst
.instruction
|= msb
- 1;
10868 Rd
= inst
.operands
[0].reg
;
10869 Rn
= inst
.operands
[1].reg
;
10871 reject_bad_reg (Rd
);
10872 reject_bad_reg (Rn
);
10874 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
10875 _("bit-field extends past end of register"));
10876 inst
.instruction
|= Rd
<< 8;
10877 inst
.instruction
|= Rn
<< 16;
10878 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10879 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10880 inst
.instruction
|= inst
.operands
[3].imm
- 1;
10883 /* ARM V5 Thumb BLX (argument parse)
10884 BLX <target_addr> which is BLX(1)
10885 BLX <Rm> which is BLX(2)
10886 Unfortunately, there are two different opcodes for this mnemonic.
10887 So, the insns[].value is not used, and the code here zaps values
10888 into inst.instruction.
10890 ??? How to take advantage of the additional two bits of displacement
10891 available in Thumb32 mode? Need new relocation? */
10896 set_it_insn_type_last ();
10898 if (inst
.operands
[0].isreg
)
10900 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
10901 /* We have a register, so this is BLX(2). */
10902 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
10906 /* No register. This must be BLX(1). */
10907 inst
.instruction
= 0xf000e800;
10908 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
10920 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
10922 if (in_it_block ())
10924 /* Conditional branches inside IT blocks are encoded as unconditional
10926 cond
= COND_ALWAYS
;
10931 if (cond
!= COND_ALWAYS
)
10932 opcode
= T_MNEM_bcond
;
10934 opcode
= inst
.instruction
;
10937 && (inst
.size_req
== 4
10938 || (inst
.size_req
!= 2
10939 && (inst
.operands
[0].hasreloc
10940 || inst
.reloc
.exp
.X_op
== O_constant
))))
10942 inst
.instruction
= THUMB_OP32(opcode
);
10943 if (cond
== COND_ALWAYS
)
10944 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
10947 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
10948 _("selected architecture does not support "
10949 "wide conditional branch instruction"));
10951 gas_assert (cond
!= 0xF);
10952 inst
.instruction
|= cond
<< 22;
10953 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
10958 inst
.instruction
= THUMB_OP16(opcode
);
10959 if (cond
== COND_ALWAYS
)
10960 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
10963 inst
.instruction
|= cond
<< 8;
10964 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
10966 /* Allow section relaxation. */
10967 if (unified_syntax
&& inst
.size_req
!= 2)
10968 inst
.relax
= opcode
;
10970 inst
.reloc
.type
= reloc
;
10971 inst
.reloc
.pc_rel
= 1;
10974 /* Actually do the work for Thumb state bkpt and hlt. The only difference
10975 between the two is the maximum immediate allowed - which is passed in
10978 do_t_bkpt_hlt1 (int range
)
10980 constraint (inst
.cond
!= COND_ALWAYS
,
10981 _("instruction is always unconditional"));
10982 if (inst
.operands
[0].present
)
10984 constraint (inst
.operands
[0].imm
> range
,
10985 _("immediate value out of range"));
10986 inst
.instruction
|= inst
.operands
[0].imm
;
10989 set_it_insn_type (NEUTRAL_IT_INSN
);
/* HLT: immediate limited to 63.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
/* BKPT: immediate limited to 255.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11005 do_t_branch23 (void)
11007 set_it_insn_type_last ();
11008 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11010 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11011 this file. We used to simply ignore the PLT reloc type here --
11012 the branch encoding is now needed to deal with TLSCALL relocs.
11013 So if we see a PLT reloc now, put it back to how it used to be to
11014 keep the preexisting behaviour. */
11015 if (inst
.reloc
.type
== BFD_RELOC_ARM_PLT32
)
11016 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11018 #if defined(OBJ_COFF)
11019 /* If the destination of the branch is a defined symbol which does not have
11020 the THUMB_FUNC attribute, then we must be calling a function which has
11021 the (interfacearm) attribute. We look for the Thumb entry point to that
11022 function and change the branch to refer to that function instead. */
11023 if ( inst
.reloc
.exp
.X_op
== O_symbol
11024 && inst
.reloc
.exp
.X_add_symbol
!= NULL
11025 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
11026 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
11027 inst
.reloc
.exp
.X_add_symbol
=
11028 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
11035 set_it_insn_type_last ();
11036 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11037 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11038 should cause the alignment to be checked once it is known. This is
11039 because BX PC only works if the instruction is word aligned. */
11047 set_it_insn_type_last ();
11048 Rm
= inst
.operands
[0].reg
;
11049 reject_bad_reg (Rm
);
11050 inst
.instruction
|= Rm
<< 16;
11059 Rd
= inst
.operands
[0].reg
;
11060 Rm
= inst
.operands
[1].reg
;
11062 reject_bad_reg (Rd
);
11063 reject_bad_reg (Rm
);
11065 inst
.instruction
|= Rd
<< 8;
11066 inst
.instruction
|= Rm
<< 16;
11067 inst
.instruction
|= Rm
;
11073 set_it_insn_type (OUTSIDE_IT_INSN
);
11074 inst
.instruction
|= inst
.operands
[0].imm
;
11080 set_it_insn_type (OUTSIDE_IT_INSN
);
11082 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11083 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11085 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11086 inst
.instruction
= 0xf3af8000;
11087 inst
.instruction
|= imod
<< 9;
11088 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11089 if (inst
.operands
[1].present
)
11090 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11094 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11095 && (inst
.operands
[0].imm
& 4),
11096 _("selected processor does not support 'A' form "
11097 "of this instruction"));
11098 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11099 _("Thumb does not support the 2-argument "
11100 "form of this instruction"));
11101 inst
.instruction
|= inst
.operands
[0].imm
;
11105 /* THUMB CPY instruction (argument parse). */
11110 if (inst
.size_req
== 4)
11112 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11113 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11114 inst
.instruction
|= inst
.operands
[1].reg
;
11118 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11119 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11120 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11127 set_it_insn_type (OUTSIDE_IT_INSN
);
11128 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11129 inst
.instruction
|= inst
.operands
[0].reg
;
11130 inst
.reloc
.pc_rel
= 1;
11131 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11137 inst
.instruction
|= inst
.operands
[0].imm
;
11143 unsigned Rd
, Rn
, Rm
;
11145 Rd
= inst
.operands
[0].reg
;
11146 Rn
= (inst
.operands
[1].present
11147 ? inst
.operands
[1].reg
: Rd
);
11148 Rm
= inst
.operands
[2].reg
;
11150 reject_bad_reg (Rd
);
11151 reject_bad_reg (Rn
);
11152 reject_bad_reg (Rm
);
11154 inst
.instruction
|= Rd
<< 8;
11155 inst
.instruction
|= Rn
<< 16;
11156 inst
.instruction
|= Rm
;
11162 if (unified_syntax
&& inst
.size_req
== 4)
11163 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11165 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11171 unsigned int cond
= inst
.operands
[0].imm
;
11173 set_it_insn_type (IT_INSN
);
11174 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
11176 now_it
.warn_deprecated
= FALSE
;
11178 /* If the condition is a negative condition, invert the mask. */
11179 if ((cond
& 0x1) == 0x0)
11181 unsigned int mask
= inst
.instruction
& 0x000f;
11183 if ((mask
& 0x7) == 0)
11185 /* No conversion needed. */
11186 now_it
.block_length
= 1;
11188 else if ((mask
& 0x3) == 0)
11191 now_it
.block_length
= 2;
11193 else if ((mask
& 0x1) == 0)
11196 now_it
.block_length
= 3;
11201 now_it
.block_length
= 4;
11204 inst
.instruction
&= 0xfff0;
11205 inst
.instruction
|= mask
;
11208 inst
.instruction
|= cond
<< 4;
11211 /* Helper function used for both push/pop and ldm/stm. */
11213 encode_thumb2_ldmstm (int base
, unsigned mask
, bfd_boolean writeback
)
11217 load
= (inst
.instruction
& (1 << 20)) != 0;
11219 if (mask
& (1 << 13))
11220 inst
.error
= _("SP not allowed in register list");
11222 if ((mask
& (1 << base
)) != 0
11224 inst
.error
= _("having the base register in the register list when "
11225 "using write back is UNPREDICTABLE");
11229 if (mask
& (1 << 15))
11231 if (mask
& (1 << 14))
11232 inst
.error
= _("LR and PC should not both be in register list");
11234 set_it_insn_type_last ();
11239 if (mask
& (1 << 15))
11240 inst
.error
= _("PC not allowed in register list");
11243 if ((mask
& (mask
- 1)) == 0)
11245 /* Single register transfers implemented as str/ldr. */
11248 if (inst
.instruction
& (1 << 23))
11249 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11251 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11255 if (inst
.instruction
& (1 << 23))
11256 inst
.instruction
= 0x00800000; /* ia -> [base] */
11258 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11261 inst
.instruction
|= 0xf8400000;
11263 inst
.instruction
|= 0x00100000;
11265 mask
= ffs (mask
) - 1;
11268 else if (writeback
)
11269 inst
.instruction
|= WRITE_BACK
;
11271 inst
.instruction
|= mask
;
11272 inst
.instruction
|= base
<< 16;
11278 /* This really doesn't seem worth it. */
11279 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
11280 _("expression too complex"));
11281 constraint (inst
.operands
[1].writeback
,
11282 _("Thumb load/store multiple does not support {reglist}^"));
11284 if (unified_syntax
)
11286 bfd_boolean narrow
;
11290 /* See if we can use a 16-bit instruction. */
11291 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11292 && inst
.size_req
!= 4
11293 && !(inst
.operands
[1].imm
& ~0xff))
11295 mask
= 1 << inst
.operands
[0].reg
;
11297 if (inst
.operands
[0].reg
<= 7)
11299 if (inst
.instruction
== T_MNEM_stmia
11300 ? inst
.operands
[0].writeback
11301 : (inst
.operands
[0].writeback
11302 == !(inst
.operands
[1].imm
& mask
)))
11304 if (inst
.instruction
== T_MNEM_stmia
11305 && (inst
.operands
[1].imm
& mask
)
11306 && (inst
.operands
[1].imm
& (mask
- 1)))
11307 as_warn (_("value stored for r%d is UNKNOWN"),
11308 inst
.operands
[0].reg
);
11310 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11311 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11312 inst
.instruction
|= inst
.operands
[1].imm
;
11315 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11317 /* This means 1 register in reg list one of 3 situations:
11318 1. Instruction is stmia, but without writeback.
11319 2. lmdia without writeback, but with Rn not in
11321 3. ldmia with writeback, but with Rn in reglist.
11322 Case 3 is UNPREDICTABLE behaviour, so we handle
11323 case 1 and 2 which can be converted into a 16-bit
11324 str or ldr. The SP cases are handled below. */
11325 unsigned long opcode
;
11326 /* First, record an error for Case 3. */
11327 if (inst
.operands
[1].imm
& mask
11328 && inst
.operands
[0].writeback
)
11330 _("having the base register in the register list when "
11331 "using write back is UNPREDICTABLE");
11333 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11335 inst
.instruction
= THUMB_OP16 (opcode
);
11336 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11337 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11341 else if (inst
.operands
[0] .reg
== REG_SP
)
11343 if (inst
.operands
[0].writeback
)
11346 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11347 ? T_MNEM_push
: T_MNEM_pop
);
11348 inst
.instruction
|= inst
.operands
[1].imm
;
11351 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11354 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11355 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11356 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11364 if (inst
.instruction
< 0xffff)
11365 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11367 encode_thumb2_ldmstm (inst
.operands
[0].reg
, inst
.operands
[1].imm
,
11368 inst
.operands
[0].writeback
);
11373 constraint (inst
.operands
[0].reg
> 7
11374 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11375 constraint (inst
.instruction
!= T_MNEM_ldmia
11376 && inst
.instruction
!= T_MNEM_stmia
,
11377 _("Thumb-2 instruction only valid in unified syntax"));
11378 if (inst
.instruction
== T_MNEM_stmia
)
11380 if (!inst
.operands
[0].writeback
)
11381 as_warn (_("this instruction will write back the base register"));
11382 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11383 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11384 as_warn (_("value stored for r%d is UNKNOWN"),
11385 inst
.operands
[0].reg
);
11389 if (!inst
.operands
[0].writeback
11390 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11391 as_warn (_("this instruction will write back the base register"));
11392 else if (inst
.operands
[0].writeback
11393 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11394 as_warn (_("this instruction will not write back the base register"));
11397 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11398 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11399 inst
.instruction
|= inst
.operands
[1].imm
;
11406 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11407 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11408 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
11409 || inst
.operands
[1].negative
,
11412 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
11414 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11415 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11416 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11422 if (!inst
.operands
[1].present
)
11424 constraint (inst
.operands
[0].reg
== REG_LR
,
11425 _("r14 not allowed as first register "
11426 "when second register is omitted"));
11427 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11429 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11432 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11433 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11434 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11440 unsigned long opcode
;
11443 if (inst
.operands
[0].isreg
11444 && !inst
.operands
[0].preind
11445 && inst
.operands
[0].reg
== REG_PC
)
11446 set_it_insn_type_last ();
11448 opcode
= inst
.instruction
;
11449 if (unified_syntax
)
11451 if (!inst
.operands
[1].isreg
)
11453 if (opcode
<= 0xffff)
11454 inst
.instruction
= THUMB_OP32 (opcode
);
11455 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11458 if (inst
.operands
[1].isreg
11459 && !inst
.operands
[1].writeback
11460 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
11461 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
11462 && opcode
<= 0xffff
11463 && inst
.size_req
!= 4)
11465 /* Insn may have a 16-bit form. */
11466 Rn
= inst
.operands
[1].reg
;
11467 if (inst
.operands
[1].immisreg
)
11469 inst
.instruction
= THUMB_OP16 (opcode
);
11471 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
11473 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
11474 reject_bad_reg (inst
.operands
[1].imm
);
11476 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
11477 && opcode
!= T_MNEM_ldrsb
)
11478 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
11479 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
11486 if (inst
.reloc
.pc_rel
)
11487 opcode
= T_MNEM_ldr_pc2
;
11489 opcode
= T_MNEM_ldr_pc
;
11493 if (opcode
== T_MNEM_ldr
)
11494 opcode
= T_MNEM_ldr_sp
;
11496 opcode
= T_MNEM_str_sp
;
11498 inst
.instruction
= inst
.operands
[0].reg
<< 8;
11502 inst
.instruction
= inst
.operands
[0].reg
;
11503 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11505 inst
.instruction
|= THUMB_OP16 (opcode
);
11506 if (inst
.size_req
== 2)
11507 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11509 inst
.relax
= opcode
;
11513 /* Definitely a 32-bit variant. */
11515 /* Warning for Erratum 752419. */
11516 if (opcode
== T_MNEM_ldr
11517 && inst
.operands
[0].reg
== REG_SP
11518 && inst
.operands
[1].writeback
== 1
11519 && !inst
.operands
[1].immisreg
)
11521 if (no_cpu_selected ()
11522 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
11523 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
11524 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
11525 as_warn (_("This instruction may be unpredictable "
11526 "if executed on M-profile cores "
11527 "with interrupts enabled."));
11530 /* Do some validations regarding addressing modes. */
11531 if (inst
.operands
[1].immisreg
)
11532 reject_bad_reg (inst
.operands
[1].imm
);
11534 constraint (inst
.operands
[1].writeback
== 1
11535 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11538 inst
.instruction
= THUMB_OP32 (opcode
);
11539 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11540 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
11541 check_ldr_r15_aligned ();
11545 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11547 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
11549 /* Only [Rn,Rm] is acceptable. */
11550 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
11551 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
11552 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
11553 || inst
.operands
[1].negative
,
11554 _("Thumb does not support this addressing mode"));
11555 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11559 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11560 if (!inst
.operands
[1].isreg
)
11561 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11564 constraint (!inst
.operands
[1].preind
11565 || inst
.operands
[1].shifted
11566 || inst
.operands
[1].writeback
,
11567 _("Thumb does not support this addressing mode"));
11568 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
11570 constraint (inst
.instruction
& 0x0600,
11571 _("byte or halfword not valid for base register"));
11572 constraint (inst
.operands
[1].reg
== REG_PC
11573 && !(inst
.instruction
& THUMB_LOAD_BIT
),
11574 _("r15 based store not allowed"));
11575 constraint (inst
.operands
[1].immisreg
,
11576 _("invalid base register for register offset"));
11578 if (inst
.operands
[1].reg
== REG_PC
)
11579 inst
.instruction
= T_OPCODE_LDR_PC
;
11580 else if (inst
.instruction
& THUMB_LOAD_BIT
)
11581 inst
.instruction
= T_OPCODE_LDR_SP
;
11583 inst
.instruction
= T_OPCODE_STR_SP
;
11585 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11586 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11590 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
11591 if (!inst
.operands
[1].immisreg
)
11593 /* Immediate offset. */
11594 inst
.instruction
|= inst
.operands
[0].reg
;
11595 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11596 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11600 /* Register offset. */
11601 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
11602 constraint (inst
.operands
[1].negative
,
11603 _("Thumb does not support this addressing mode"));
11606 switch (inst
.instruction
)
11608 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
11609 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
11610 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
11611 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
11612 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
11613 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
11614 case 0x5600 /* ldrsb */:
11615 case 0x5e00 /* ldrsh */: break;
11619 inst
.instruction
|= inst
.operands
[0].reg
;
11620 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11621 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
11627 if (!inst
.operands
[1].present
)
11629 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11630 constraint (inst
.operands
[0].reg
== REG_LR
,
11631 _("r14 not allowed here"));
11632 constraint (inst
.operands
[0].reg
== REG_R12
,
11633 _("r12 not allowed here"));
11636 if (inst
.operands
[2].writeback
11637 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
11638 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
11639 as_warn (_("base register written back, and overlaps "
11640 "one of transfer registers"));
11642 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11643 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11644 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
11650 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11651 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
11657 unsigned Rd
, Rn
, Rm
, Ra
;
11659 Rd
= inst
.operands
[0].reg
;
11660 Rn
= inst
.operands
[1].reg
;
11661 Rm
= inst
.operands
[2].reg
;
11662 Ra
= inst
.operands
[3].reg
;
11664 reject_bad_reg (Rd
);
11665 reject_bad_reg (Rn
);
11666 reject_bad_reg (Rm
);
11667 reject_bad_reg (Ra
);
11669 inst
.instruction
|= Rd
<< 8;
11670 inst
.instruction
|= Rn
<< 16;
11671 inst
.instruction
|= Rm
;
11672 inst
.instruction
|= Ra
<< 12;
11678 unsigned RdLo
, RdHi
, Rn
, Rm
;
11680 RdLo
= inst
.operands
[0].reg
;
11681 RdHi
= inst
.operands
[1].reg
;
11682 Rn
= inst
.operands
[2].reg
;
11683 Rm
= inst
.operands
[3].reg
;
11685 reject_bad_reg (RdLo
);
11686 reject_bad_reg (RdHi
);
11687 reject_bad_reg (Rn
);
11688 reject_bad_reg (Rm
);
11690 inst
.instruction
|= RdLo
<< 12;
11691 inst
.instruction
|= RdHi
<< 8;
11692 inst
.instruction
|= Rn
<< 16;
11693 inst
.instruction
|= Rm
;
11697 do_t_mov_cmp (void)
11701 Rn
= inst
.operands
[0].reg
;
11702 Rm
= inst
.operands
[1].reg
;
11705 set_it_insn_type_last ();
11707 if (unified_syntax
)
11709 int r0off
= (inst
.instruction
== T_MNEM_mov
11710 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
11711 unsigned long opcode
;
11712 bfd_boolean narrow
;
11713 bfd_boolean low_regs
;
11715 low_regs
= (Rn
<= 7 && Rm
<= 7);
11716 opcode
= inst
.instruction
;
11717 if (in_it_block ())
11718 narrow
= opcode
!= T_MNEM_movs
;
11720 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
11721 if (inst
.size_req
== 4
11722 || inst
.operands
[1].shifted
)
11725 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
11726 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
11727 && !inst
.operands
[1].shifted
11731 inst
.instruction
= T2_SUBS_PC_LR
;
11735 if (opcode
== T_MNEM_cmp
)
11737 constraint (Rn
== REG_PC
, BAD_PC
);
11740 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11742 warn_deprecated_sp (Rm
);
11743 /* R15 was documented as a valid choice for Rm in ARMv6,
11744 but as UNPREDICTABLE in ARMv7. ARM's proprietary
11745 tools reject R15, so we do too. */
11746 constraint (Rm
== REG_PC
, BAD_PC
);
11749 reject_bad_reg (Rm
);
11751 else if (opcode
== T_MNEM_mov
11752 || opcode
== T_MNEM_movs
)
11754 if (inst
.operands
[1].isreg
)
11756 if (opcode
== T_MNEM_movs
)
11758 reject_bad_reg (Rn
);
11759 reject_bad_reg (Rm
);
11763 /* This is mov.n. */
11764 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
11765 && (Rm
== REG_SP
|| Rm
== REG_PC
))
11767 as_tsktsk (_("Use of r%u as a source register is "
11768 "deprecated when r%u is the destination "
11769 "register."), Rm
, Rn
);
11774 /* This is mov.w. */
11775 constraint (Rn
== REG_PC
, BAD_PC
);
11776 constraint (Rm
== REG_PC
, BAD_PC
);
11777 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
11781 reject_bad_reg (Rn
);
11784 if (!inst
.operands
[1].isreg
)
11786 /* Immediate operand. */
11787 if (!in_it_block () && opcode
== T_MNEM_mov
)
11789 if (low_regs
&& narrow
)
11791 inst
.instruction
= THUMB_OP16 (opcode
);
11792 inst
.instruction
|= Rn
<< 8;
11793 if (inst
.size_req
== 2)
11795 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11796 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
11797 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
11800 inst
.relax
= opcode
;
11804 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11805 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11806 inst
.instruction
|= Rn
<< r0off
;
11807 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11810 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
11811 && (inst
.instruction
== T_MNEM_mov
11812 || inst
.instruction
== T_MNEM_movs
))
11814 /* Register shifts are encoded as separate shift instructions. */
11815 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
11817 if (in_it_block ())
11822 if (inst
.size_req
== 4)
11825 if (!low_regs
|| inst
.operands
[1].imm
> 7)
11831 switch (inst
.operands
[1].shift_kind
)
11834 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
11837 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
11840 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
11843 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
11849 inst
.instruction
= opcode
;
11852 inst
.instruction
|= Rn
;
11853 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
11858 inst
.instruction
|= CONDS_BIT
;
11860 inst
.instruction
|= Rn
<< 8;
11861 inst
.instruction
|= Rm
<< 16;
11862 inst
.instruction
|= inst
.operands
[1].imm
;
11867 /* Some mov with immediate shift have narrow variants.
11868 Register shifts are handled above. */
11869 if (low_regs
&& inst
.operands
[1].shifted
11870 && (inst
.instruction
== T_MNEM_mov
11871 || inst
.instruction
== T_MNEM_movs
))
11873 if (in_it_block ())
11874 narrow
= (inst
.instruction
== T_MNEM_mov
);
11876 narrow
= (inst
.instruction
== T_MNEM_movs
);
11881 switch (inst
.operands
[1].shift_kind
)
11883 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
11884 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
11885 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
11886 default: narrow
= FALSE
; break;
11892 inst
.instruction
|= Rn
;
11893 inst
.instruction
|= Rm
<< 3;
11894 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
11898 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11899 inst
.instruction
|= Rn
<< r0off
;
11900 encode_thumb32_shifted_operand (1);
11904 switch (inst
.instruction
)
11907 /* In v4t or v5t a move of two lowregs produces unpredictable
11908 results. Don't allow this. */
11911 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
11912 "MOV Rd, Rs with two low registers is not "
11913 "permitted on this architecture");
11914 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
11918 inst
.instruction
= T_OPCODE_MOV_HR
;
11919 inst
.instruction
|= (Rn
& 0x8) << 4;
11920 inst
.instruction
|= (Rn
& 0x7);
11921 inst
.instruction
|= Rm
<< 3;
11925 /* We know we have low registers at this point.
11926 Generate LSLS Rd, Rs, #0. */
11927 inst
.instruction
= T_OPCODE_LSL_I
;
11928 inst
.instruction
|= Rn
;
11929 inst
.instruction
|= Rm
<< 3;
11935 inst
.instruction
= T_OPCODE_CMP_LR
;
11936 inst
.instruction
|= Rn
;
11937 inst
.instruction
|= Rm
<< 3;
11941 inst
.instruction
= T_OPCODE_CMP_HR
;
11942 inst
.instruction
|= (Rn
& 0x8) << 4;
11943 inst
.instruction
|= (Rn
& 0x7);
11944 inst
.instruction
|= Rm
<< 3;
11951 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11953 /* PR 10443: Do not silently ignore shifted operands. */
11954 constraint (inst
.operands
[1].shifted
,
11955 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
11957 if (inst
.operands
[1].isreg
)
11959 if (Rn
< 8 && Rm
< 8)
11961 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
11962 since a MOV instruction produces unpredictable results. */
11963 if (inst
.instruction
== T_OPCODE_MOV_I8
)
11964 inst
.instruction
= T_OPCODE_ADD_I3
;
11966 inst
.instruction
= T_OPCODE_CMP_LR
;
11968 inst
.instruction
|= Rn
;
11969 inst
.instruction
|= Rm
<< 3;
11973 if (inst
.instruction
== T_OPCODE_MOV_I8
)
11974 inst
.instruction
= T_OPCODE_MOV_HR
;
11976 inst
.instruction
= T_OPCODE_CMP_HR
;
11982 constraint (Rn
> 7,
11983 _("only lo regs allowed with immediate"));
11984 inst
.instruction
|= Rn
<< 8;
11985 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
11996 top
= (inst
.instruction
& 0x00800000) != 0;
11997 if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
)
11999 constraint (top
, _(":lower16: not allowed this instruction"));
12000 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVW
;
12002 else if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
)
12004 constraint (!top
, _(":upper16: not allowed this instruction"));
12005 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVT
;
12008 Rd
= inst
.operands
[0].reg
;
12009 reject_bad_reg (Rd
);
12011 inst
.instruction
|= Rd
<< 8;
12012 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
12014 imm
= inst
.reloc
.exp
.X_add_number
;
12015 inst
.instruction
|= (imm
& 0xf000) << 4;
12016 inst
.instruction
|= (imm
& 0x0800) << 15;
12017 inst
.instruction
|= (imm
& 0x0700) << 4;
12018 inst
.instruction
|= (imm
& 0x00ff);
12023 do_t_mvn_tst (void)
12027 Rn
= inst
.operands
[0].reg
;
12028 Rm
= inst
.operands
[1].reg
;
12030 if (inst
.instruction
== T_MNEM_cmp
12031 || inst
.instruction
== T_MNEM_cmn
)
12032 constraint (Rn
== REG_PC
, BAD_PC
);
12034 reject_bad_reg (Rn
);
12035 reject_bad_reg (Rm
);
12037 if (unified_syntax
)
12039 int r0off
= (inst
.instruction
== T_MNEM_mvn
12040 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12041 bfd_boolean narrow
;
12043 if (inst
.size_req
== 4
12044 || inst
.instruction
> 0xffff
12045 || inst
.operands
[1].shifted
12046 || Rn
> 7 || Rm
> 7)
12048 else if (inst
.instruction
== T_MNEM_cmn
12049 || inst
.instruction
== T_MNEM_tst
)
12051 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12052 narrow
= !in_it_block ();
12054 narrow
= in_it_block ();
12056 if (!inst
.operands
[1].isreg
)
12058 /* For an immediate, we always generate a 32-bit opcode;
12059 section relaxation will shrink it later if possible. */
12060 if (inst
.instruction
< 0xffff)
12061 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12062 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12063 inst
.instruction
|= Rn
<< r0off
;
12064 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12068 /* See if we can do this with a 16-bit instruction. */
12071 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12072 inst
.instruction
|= Rn
;
12073 inst
.instruction
|= Rm
<< 3;
12077 constraint (inst
.operands
[1].shifted
12078 && inst
.operands
[1].immisreg
,
12079 _("shift must be constant"));
12080 if (inst
.instruction
< 0xffff)
12081 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12082 inst
.instruction
|= Rn
<< r0off
;
12083 encode_thumb32_shifted_operand (1);
12089 constraint (inst
.instruction
> 0xffff
12090 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12091 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12092 _("unshifted register required"));
12093 constraint (Rn
> 7 || Rm
> 7,
12096 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12097 inst
.instruction
|= Rn
;
12098 inst
.instruction
|= Rm
<< 3;
12107 if (do_vfp_nsyn_mrs () == SUCCESS
)
12110 Rd
= inst
.operands
[0].reg
;
12111 reject_bad_reg (Rd
);
12112 inst
.instruction
|= Rd
<< 8;
12114 if (inst
.operands
[1].isreg
)
12116 unsigned br
= inst
.operands
[1].reg
;
12117 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12118 as_bad (_("bad register for mrs"));
12120 inst
.instruction
|= br
& (0xf << 16);
12121 inst
.instruction
|= (br
& 0x300) >> 4;
12122 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12126 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12128 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12130 /* PR gas/12698: The constraint is only applied for m_profile.
12131 If the user has specified -march=all, we want to ignore it as
12132 we are building for any CPU type, including non-m variants. */
12133 bfd_boolean m_profile
=
12134 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12135 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12136 "not support requested special purpose register"));
12139 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12141 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12142 _("'APSR', 'CPSR' or 'SPSR' expected"));
12144 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12145 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12146 inst
.instruction
|= 0xf0000;
12156 if (do_vfp_nsyn_msr () == SUCCESS
)
12159 constraint (!inst
.operands
[1].isreg
,
12160 _("Thumb encoding does not support an immediate here"));
12162 if (inst
.operands
[0].isreg
)
12163 flags
= (int)(inst
.operands
[0].reg
);
12165 flags
= inst
.operands
[0].imm
;
12167 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12169 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12171 /* PR gas/12698: The constraint is only applied for m_profile.
12172 If the user has specified -march=all, we want to ignore it as
12173 we are building for any CPU type, including non-m variants. */
12174 bfd_boolean m_profile
=
12175 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12176 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12177 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12178 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12179 && bits
!= PSR_f
)) && m_profile
,
12180 _("selected processor does not support requested special "
12181 "purpose register"));
12184 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12185 "requested special purpose register"));
12187 Rn
= inst
.operands
[1].reg
;
12188 reject_bad_reg (Rn
);
12190 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12191 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12192 inst
.instruction
|= (flags
& 0x300) >> 4;
12193 inst
.instruction
|= (flags
& 0xff);
12194 inst
.instruction
|= Rn
<< 16;
12200 bfd_boolean narrow
;
12201 unsigned Rd
, Rn
, Rm
;
12203 if (!inst
.operands
[2].present
)
12204 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12206 Rd
= inst
.operands
[0].reg
;
12207 Rn
= inst
.operands
[1].reg
;
12208 Rm
= inst
.operands
[2].reg
;
12210 if (unified_syntax
)
12212 if (inst
.size_req
== 4
12218 else if (inst
.instruction
== T_MNEM_muls
)
12219 narrow
= !in_it_block ();
12221 narrow
= in_it_block ();
12225 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12226 constraint (Rn
> 7 || Rm
> 7,
12233 /* 16-bit MULS/Conditional MUL. */
12234 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12235 inst
.instruction
|= Rd
;
12238 inst
.instruction
|= Rm
<< 3;
12240 inst
.instruction
|= Rn
<< 3;
12242 constraint (1, _("dest must overlap one source register"));
12246 constraint (inst
.instruction
!= T_MNEM_mul
,
12247 _("Thumb-2 MUL must not set flags"));
12249 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12250 inst
.instruction
|= Rd
<< 8;
12251 inst
.instruction
|= Rn
<< 16;
12252 inst
.instruction
|= Rm
<< 0;
12254 reject_bad_reg (Rd
);
12255 reject_bad_reg (Rn
);
12256 reject_bad_reg (Rm
);
12263 unsigned RdLo
, RdHi
, Rn
, Rm
;
12265 RdLo
= inst
.operands
[0].reg
;
12266 RdHi
= inst
.operands
[1].reg
;
12267 Rn
= inst
.operands
[2].reg
;
12268 Rm
= inst
.operands
[3].reg
;
12270 reject_bad_reg (RdLo
);
12271 reject_bad_reg (RdHi
);
12272 reject_bad_reg (Rn
);
12273 reject_bad_reg (Rm
);
12275 inst
.instruction
|= RdLo
<< 12;
12276 inst
.instruction
|= RdHi
<< 8;
12277 inst
.instruction
|= Rn
<< 16;
12278 inst
.instruction
|= Rm
;
12281 as_tsktsk (_("rdhi and rdlo must be different"));
12287 set_it_insn_type (NEUTRAL_IT_INSN
);
12289 if (unified_syntax
)
12291 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12293 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12294 inst
.instruction
|= inst
.operands
[0].imm
;
12298 /* PR9722: Check for Thumb2 availability before
12299 generating a thumb2 nop instruction. */
12300 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12302 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12303 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12306 inst
.instruction
= 0x46c0;
12311 constraint (inst
.operands
[0].present
,
12312 _("Thumb does not support NOP with hints"));
12313 inst
.instruction
= 0x46c0;
12320 if (unified_syntax
)
12322 bfd_boolean narrow
;
12324 if (THUMB_SETS_FLAGS (inst
.instruction
))
12325 narrow
= !in_it_block ();
12327 narrow
= in_it_block ();
12328 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12330 if (inst
.size_req
== 4)
12335 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12336 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12337 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12341 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12342 inst
.instruction
|= inst
.operands
[0].reg
;
12343 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12348 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12350 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12352 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12353 inst
.instruction
|= inst
.operands
[0].reg
;
12354 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12363 Rd
= inst
.operands
[0].reg
;
12364 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12366 reject_bad_reg (Rd
);
12367 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12368 reject_bad_reg (Rn
);
12370 inst
.instruction
|= Rd
<< 8;
12371 inst
.instruction
|= Rn
<< 16;
12373 if (!inst
.operands
[2].isreg
)
12375 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12376 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12382 Rm
= inst
.operands
[2].reg
;
12383 reject_bad_reg (Rm
);
12385 constraint (inst
.operands
[2].shifted
12386 && inst
.operands
[2].immisreg
,
12387 _("shift must be constant"));
12388 encode_thumb32_shifted_operand (2);
12395 unsigned Rd
, Rn
, Rm
;
12397 Rd
= inst
.operands
[0].reg
;
12398 Rn
= inst
.operands
[1].reg
;
12399 Rm
= inst
.operands
[2].reg
;
12401 reject_bad_reg (Rd
);
12402 reject_bad_reg (Rn
);
12403 reject_bad_reg (Rm
);
12405 inst
.instruction
|= Rd
<< 8;
12406 inst
.instruction
|= Rn
<< 16;
12407 inst
.instruction
|= Rm
;
12408 if (inst
.operands
[3].present
)
12410 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
12411 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12412 _("expression too complex"));
12413 inst
.instruction
|= (val
& 0x1c) << 10;
12414 inst
.instruction
|= (val
& 0x03) << 6;
12421 if (!inst
.operands
[3].present
)
12425 inst
.instruction
&= ~0x00000020;
12427 /* PR 10168. Swap the Rm and Rn registers. */
12428 Rtmp
= inst
.operands
[1].reg
;
12429 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
12430 inst
.operands
[2].reg
= Rtmp
;
12438 if (inst
.operands
[0].immisreg
)
12439 reject_bad_reg (inst
.operands
[0].imm
);
12441 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12445 do_t_push_pop (void)
12449 constraint (inst
.operands
[0].writeback
,
12450 _("push/pop do not support {reglist}^"));
12451 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
12452 _("expression too complex"));
12454 mask
= inst
.operands
[0].imm
;
12455 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
12456 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
12457 else if (inst
.size_req
!= 4
12458 && (mask
& ~0xff) == (1 << (inst
.instruction
== T_MNEM_push
12459 ? REG_LR
: REG_PC
)))
12461 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12462 inst
.instruction
|= THUMB_PP_PC_LR
;
12463 inst
.instruction
|= mask
& 0xff;
12465 else if (unified_syntax
)
12467 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12468 encode_thumb2_ldmstm (13, mask
, TRUE
);
12472 inst
.error
= _("invalid register list to push/pop instruction");
12482 Rd
= inst
.operands
[0].reg
;
12483 Rm
= inst
.operands
[1].reg
;
12485 reject_bad_reg (Rd
);
12486 reject_bad_reg (Rm
);
12488 inst
.instruction
|= Rd
<< 8;
12489 inst
.instruction
|= Rm
<< 16;
12490 inst
.instruction
|= Rm
;
12498 Rd
= inst
.operands
[0].reg
;
12499 Rm
= inst
.operands
[1].reg
;
12501 reject_bad_reg (Rd
);
12502 reject_bad_reg (Rm
);
12504 if (Rd
<= 7 && Rm
<= 7
12505 && inst
.size_req
!= 4)
12507 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12508 inst
.instruction
|= Rd
;
12509 inst
.instruction
|= Rm
<< 3;
12511 else if (unified_syntax
)
12513 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12514 inst
.instruction
|= Rd
<< 8;
12515 inst
.instruction
|= Rm
<< 16;
12516 inst
.instruction
|= Rm
;
12519 inst
.error
= BAD_HIREG
;
12527 Rd
= inst
.operands
[0].reg
;
12528 Rm
= inst
.operands
[1].reg
;
12530 reject_bad_reg (Rd
);
12531 reject_bad_reg (Rm
);
12533 inst
.instruction
|= Rd
<< 8;
12534 inst
.instruction
|= Rm
;
12542 Rd
= inst
.operands
[0].reg
;
12543 Rs
= (inst
.operands
[1].present
12544 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
12545 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
12547 reject_bad_reg (Rd
);
12548 reject_bad_reg (Rs
);
12549 if (inst
.operands
[2].isreg
)
12550 reject_bad_reg (inst
.operands
[2].reg
);
12552 inst
.instruction
|= Rd
<< 8;
12553 inst
.instruction
|= Rs
<< 16;
12554 if (!inst
.operands
[2].isreg
)
12556 bfd_boolean narrow
;
12558 if ((inst
.instruction
& 0x00100000) != 0)
12559 narrow
= !in_it_block ();
12561 narrow
= in_it_block ();
12563 if (Rd
> 7 || Rs
> 7)
12566 if (inst
.size_req
== 4 || !unified_syntax
)
12569 if (inst
.reloc
.exp
.X_op
!= O_constant
12570 || inst
.reloc
.exp
.X_add_number
!= 0)
12573 /* Turn rsb #0 into 16-bit neg. We should probably do this via
12574 relaxation, but it doesn't seem worth the hassle. */
12577 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12578 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
12579 inst
.instruction
|= Rs
<< 3;
12580 inst
.instruction
|= Rd
;
12584 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12585 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12589 encode_thumb32_shifted_operand (2);
12595 if (warn_on_deprecated
12596 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12597 as_tsktsk (_("setend use is deprecated for ARMv8"));
12599 set_it_insn_type (OUTSIDE_IT_INSN
);
12600 if (inst
.operands
[0].imm
)
12601 inst
.instruction
|= 0x8;
12607 if (!inst
.operands
[1].present
)
12608 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
12610 if (unified_syntax
)
12612 bfd_boolean narrow
;
12615 switch (inst
.instruction
)
12618 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
12620 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
12622 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
12624 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
12628 if (THUMB_SETS_FLAGS (inst
.instruction
))
12629 narrow
= !in_it_block ();
12631 narrow
= in_it_block ();
12632 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12634 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
12636 if (inst
.operands
[2].isreg
12637 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
12638 || inst
.operands
[2].reg
> 7))
12640 if (inst
.size_req
== 4)
12643 reject_bad_reg (inst
.operands
[0].reg
);
12644 reject_bad_reg (inst
.operands
[1].reg
);
12648 if (inst
.operands
[2].isreg
)
12650 reject_bad_reg (inst
.operands
[2].reg
);
12651 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12652 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12653 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12654 inst
.instruction
|= inst
.operands
[2].reg
;
12656 /* PR 12854: Error on extraneous shifts. */
12657 constraint (inst
.operands
[2].shifted
,
12658 _("extraneous shift as part of operand to shift insn"));
12662 inst
.operands
[1].shifted
= 1;
12663 inst
.operands
[1].shift_kind
= shift_kind
;
12664 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
12665 ? T_MNEM_movs
: T_MNEM_mov
);
12666 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12667 encode_thumb32_shifted_operand (1);
12668 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
12669 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12674 if (inst
.operands
[2].isreg
)
12676 switch (shift_kind
)
12678 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12679 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12680 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12681 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12685 inst
.instruction
|= inst
.operands
[0].reg
;
12686 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12688 /* PR 12854: Error on extraneous shifts. */
12689 constraint (inst
.operands
[2].shifted
,
12690 _("extraneous shift as part of operand to shift insn"));
12694 switch (shift_kind
)
12696 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12697 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12698 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12701 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12702 inst
.instruction
|= inst
.operands
[0].reg
;
12703 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12709 constraint (inst
.operands
[0].reg
> 7
12710 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
12711 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12713 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
12715 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
12716 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
12717 _("source1 and dest must be same register"));
12719 switch (inst
.instruction
)
12721 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12722 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12723 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12724 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12728 inst
.instruction
|= inst
.operands
[0].reg
;
12729 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12731 /* PR 12854: Error on extraneous shifts. */
12732 constraint (inst
.operands
[2].shifted
,
12733 _("extraneous shift as part of operand to shift insn"));
12737 switch (inst
.instruction
)
12739 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12740 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12741 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12742 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
12745 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12746 inst
.instruction
|= inst
.operands
[0].reg
;
12747 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12755 unsigned Rd
, Rn
, Rm
;
12757 Rd
= inst
.operands
[0].reg
;
12758 Rn
= inst
.operands
[1].reg
;
12759 Rm
= inst
.operands
[2].reg
;
12761 reject_bad_reg (Rd
);
12762 reject_bad_reg (Rn
);
12763 reject_bad_reg (Rm
);
12765 inst
.instruction
|= Rd
<< 8;
12766 inst
.instruction
|= Rn
<< 16;
12767 inst
.instruction
|= Rm
;
12773 unsigned Rd
, Rn
, Rm
;
12775 Rd
= inst
.operands
[0].reg
;
12776 Rm
= inst
.operands
[1].reg
;
12777 Rn
= inst
.operands
[2].reg
;
12779 reject_bad_reg (Rd
);
12780 reject_bad_reg (Rn
);
12781 reject_bad_reg (Rm
);
12783 inst
.instruction
|= Rd
<< 8;
12784 inst
.instruction
|= Rn
<< 16;
12785 inst
.instruction
|= Rm
;
12791 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12792 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
12793 _("SMC is not permitted on this architecture"));
12794 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12795 _("expression too complex"));
12796 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12797 inst
.instruction
|= (value
& 0xf000) >> 12;
12798 inst
.instruction
|= (value
& 0x0ff0);
12799 inst
.instruction
|= (value
& 0x000f) << 16;
12800 /* PR gas/15623: SMC instructions must be last in an IT block. */
12801 set_it_insn_type_last ();
12807 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12809 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12810 inst
.instruction
|= (value
& 0x0fff);
12811 inst
.instruction
|= (value
& 0xf000) << 4;
12815 do_t_ssat_usat (int bias
)
12819 Rd
= inst
.operands
[0].reg
;
12820 Rn
= inst
.operands
[2].reg
;
12822 reject_bad_reg (Rd
);
12823 reject_bad_reg (Rn
);
12825 inst
.instruction
|= Rd
<< 8;
12826 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
12827 inst
.instruction
|= Rn
<< 16;
12829 if (inst
.operands
[3].present
)
12831 offsetT shift_amount
= inst
.reloc
.exp
.X_add_number
;
12833 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12835 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12836 _("expression too complex"));
12838 if (shift_amount
!= 0)
12840 constraint (shift_amount
> 31,
12841 _("shift expression is too large"));
12843 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
12844 inst
.instruction
|= 0x00200000; /* sh bit. */
12846 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
12847 inst
.instruction
|= (shift_amount
& 0x03) << 6;
/* Encode Thumb-2 SSAT: the saturate position is 1-biased.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
12863 Rd
= inst
.operands
[0].reg
;
12864 Rn
= inst
.operands
[2].reg
;
12866 reject_bad_reg (Rd
);
12867 reject_bad_reg (Rn
);
12869 inst
.instruction
|= Rd
<< 8;
12870 inst
.instruction
|= inst
.operands
[1].imm
- 1;
12871 inst
.instruction
|= Rn
<< 16;
12877 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
12878 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
12879 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
12880 || inst
.operands
[2].negative
,
12883 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
12885 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12886 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12887 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12888 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
12894 if (!inst
.operands
[2].present
)
12895 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
12897 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
12898 || inst
.operands
[0].reg
== inst
.operands
[2].reg
12899 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
12902 inst
.instruction
|= inst
.operands
[0].reg
;
12903 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12904 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
12905 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
12911 unsigned Rd
, Rn
, Rm
;
12913 Rd
= inst
.operands
[0].reg
;
12914 Rn
= inst
.operands
[1].reg
;
12915 Rm
= inst
.operands
[2].reg
;
12917 reject_bad_reg (Rd
);
12918 reject_bad_reg (Rn
);
12919 reject_bad_reg (Rm
);
12921 inst
.instruction
|= Rd
<< 8;
12922 inst
.instruction
|= Rn
<< 16;
12923 inst
.instruction
|= Rm
;
12924 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
12932 Rd
= inst
.operands
[0].reg
;
12933 Rm
= inst
.operands
[1].reg
;
12935 reject_bad_reg (Rd
);
12936 reject_bad_reg (Rm
);
12938 if (inst
.instruction
<= 0xffff
12939 && inst
.size_req
!= 4
12940 && Rd
<= 7 && Rm
<= 7
12941 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
12943 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12944 inst
.instruction
|= Rd
;
12945 inst
.instruction
|= Rm
<< 3;
12947 else if (unified_syntax
)
12949 if (inst
.instruction
<= 0xffff)
12950 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12951 inst
.instruction
|= Rd
<< 8;
12952 inst
.instruction
|= Rm
;
12953 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
12957 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
12958 _("Thumb encoding does not support rotation"));
12959 constraint (1, BAD_HIREG
);
12966 /* We have to do the following check manually as ARM_EXT_OS only applies
12968 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6m
))
12970 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_os
)
12971 /* This only applies to the v6m howver, not later architectures. */
12972 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
))
12973 as_bad (_("SVC is not permitted on this architecture"));
12974 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, arm_ext_os
);
12977 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
12986 half
= (inst
.instruction
& 0x10) != 0;
12987 set_it_insn_type_last ();
12988 constraint (inst
.operands
[0].immisreg
,
12989 _("instruction requires register index"));
12991 Rn
= inst
.operands
[0].reg
;
12992 Rm
= inst
.operands
[0].imm
;
12994 constraint (Rn
== REG_SP
, BAD_SP
);
12995 reject_bad_reg (Rm
);
12997 constraint (!half
&& inst
.operands
[0].shifted
,
12998 _("instruction does not allow shifted index"));
12999 inst
.instruction
|= (Rn
<< 16) | Rm
;
13005 if (!inst
.operands
[0].present
)
13006 inst
.operands
[0].imm
= 0;
13008 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13010 constraint (inst
.size_req
== 2,
13011 _("immediate value out of range"));
13012 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13013 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13014 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13018 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13019 inst
.instruction
|= inst
.operands
[0].imm
;
13022 set_it_insn_type (NEUTRAL_IT_INSN
);
/* Encode Thumb-2 USAT: the saturate position is unbiased.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13037 Rd
= inst
.operands
[0].reg
;
13038 Rn
= inst
.operands
[2].reg
;
13040 reject_bad_reg (Rd
);
13041 reject_bad_reg (Rn
);
13043 inst
.instruction
|= Rd
<< 8;
13044 inst
.instruction
|= inst
.operands
[1].imm
;
13045 inst
.instruction
|= Rn
<< 16;
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};

/* Map overloaded Neon opcodes to their respective encodings.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
13142 #define X(OPC,I,F,S) N_MNEM_##OPC
13147 static const struct neon_tab_entry neon_enc_tab
[] =
13149 #define X(OPC,I,F,S) { (I), (F), (S) }
13154 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
13155 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13156 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13157 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13158 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13159 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13160 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13161 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13162 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13163 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13164 #define NEON_ENC_SINGLE_(X) \
13165 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13166 #define NEON_ENC_DOUBLE_(X) \
13167 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13168 #define NEON_ENC_FPV8_(X) \
13169 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13171 #define NEON_ENCODE(type, inst) \
13174 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13175 inst.is_neon = 1; \
13179 #define check_neon_suffixes \
13182 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13184 as_bad (_("invalid neon suffix for non neon instruction")); \
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE)
13249 #define S2(A,B) NS_##A##B
13250 #define S3(A,B,C) NS_##A##B##C
13251 #define S4(A,B,C,D) NS_##A##B##C##D
13253 #define X(N, L, C) S##N L
13266 enum neon_shape_class
13274 #define X(N, L, C) SC_##C
13276 static enum neon_shape_class neon_shape_class
[] =
13294 /* Register widths of above. */
13295 static unsigned neon_shape_el_size
[] =
13306 struct neon_shape_info
13309 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
13312 #define S2(A,B) { SE_##A, SE_##B }
13313 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13314 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13316 #define X(N, L, C) { N, S##N L }
13318 static struct neon_shape_info neon_shape_tab
[] =
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.
   NOTE(review): the N_S8..N_P64 enumerators were lost in extraction and
   have been reconstructed from the visible N_MAX_NONSPECIAL = N_P64 and
   the modifier-bit values; verify against the upstream definition.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64  = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  /* The following overlap the type bits: they are only meaningful
     alongside N_EQK.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13386 /* Select a "shape" for the current instruction (describing register types or
13387 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13388 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13389 function of operand parsing, so this function doesn't need to be called.
13390 Shapes should be listed in order of decreasing length. */
13392 static enum neon_shape
13393 neon_select_shape (enum neon_shape shape
, ...)
13396 enum neon_shape first_shape
= shape
;
13398 /* Fix missing optional operands. FIXME: we don't know at this point how
13399 many arguments we should have, so this makes the assumption that we have
13400 > 1. This is true of all current Neon opcodes, I think, but may not be
13401 true in the future. */
13402 if (!inst
.operands
[1].present
)
13403 inst
.operands
[1] = inst
.operands
[0];
13405 va_start (ap
, shape
);
13407 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
13412 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
13414 if (!inst
.operands
[j
].present
)
13420 switch (neon_shape_tab
[shape
].el
[j
])
13423 if (!(inst
.operands
[j
].isreg
13424 && inst
.operands
[j
].isvec
13425 && inst
.operands
[j
].issingle
13426 && !inst
.operands
[j
].isquad
))
13431 if (!(inst
.operands
[j
].isreg
13432 && inst
.operands
[j
].isvec
13433 && !inst
.operands
[j
].isquad
13434 && !inst
.operands
[j
].issingle
))
13439 if (!(inst
.operands
[j
].isreg
13440 && !inst
.operands
[j
].isvec
))
13445 if (!(inst
.operands
[j
].isreg
13446 && inst
.operands
[j
].isvec
13447 && inst
.operands
[j
].isquad
13448 && !inst
.operands
[j
].issingle
))
13453 if (!(!inst
.operands
[j
].isreg
13454 && !inst
.operands
[j
].isscalar
))
13459 if (!(!inst
.operands
[j
].isreg
13460 && inst
.operands
[j
].isscalar
))
13470 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
13471 /* We've matched all the entries in the shape table, and we don't
13472 have any left over operands which have not been matched. */
13478 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
13479 first_error (_("invalid instruction shape"));
13484 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13485 means the Q bit should be set). */
13488 neon_quad (enum neon_shape shape
)
13490 return neon_shape_class
[shape
] == SC_QUAD
;
13494 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
13497 /* Allow modification to be made to types which are constrained to be
13498 based on the key element, based on bits set alongside N_EQK. */
13499 if ((typebits
& N_EQK
) != 0)
13501 if ((typebits
& N_HLF
) != 0)
13503 else if ((typebits
& N_DBL
) != 0)
13505 if ((typebits
& N_SGN
) != 0)
13506 *g_type
= NT_signed
;
13507 else if ((typebits
& N_UNS
) != 0)
13508 *g_type
= NT_unsigned
;
13509 else if ((typebits
& N_INT
) != 0)
13510 *g_type
= NT_integer
;
13511 else if ((typebits
& N_FLT
) != 0)
13512 *g_type
= NT_float
;
13513 else if ((typebits
& N_SIZ
) != 0)
13514 *g_type
= NT_untyped
;
13518 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13519 operand type, i.e. the single type specified in a Neon instruction when it
13520 is the only one given. */
13522 static struct neon_type_el
13523 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
13525 struct neon_type_el dest
= *key
;
13527 gas_assert ((thisarg
& N_EQK
) != 0);
13529 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
13534 /* Convert Neon type and size into compact bitmask representation. */
13536 static enum neon_type_mask
13537 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
13544 case 8: return N_8
;
13545 case 16: return N_16
;
13546 case 32: return N_32
;
13547 case 64: return N_64
;
13555 case 8: return N_I8
;
13556 case 16: return N_I16
;
13557 case 32: return N_I32
;
13558 case 64: return N_I64
;
13566 case 16: return N_F16
;
13567 case 32: return N_F32
;
13568 case 64: return N_F64
;
13576 case 8: return N_P8
;
13577 case 16: return N_P16
;
13578 case 64: return N_P64
;
13586 case 8: return N_S8
;
13587 case 16: return N_S16
;
13588 case 32: return N_S32
;
13589 case 64: return N_S64
;
13597 case 8: return N_U8
;
13598 case 16: return N_U16
;
13599 case 32: return N_U32
;
13600 case 64: return N_U64
;
13611 /* Convert compact Neon bitmask type representation to a type and size. Only
13612 handles the case where a single bit is set in the mask. */
13615 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
13616 enum neon_type_mask mask
)
13618 if ((mask
& N_EQK
) != 0)
13621 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
13623 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
13625 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
13627 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
13632 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
13634 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
13635 *type
= NT_unsigned
;
13636 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
13637 *type
= NT_integer
;
13638 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
13639 *type
= NT_untyped
;
13640 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
13642 else if ((mask
& (N_F16
| N_F32
| N_F64
)) != 0)
13650 /* Modify a bitmask of allowed types.  This is only needed for type
/* NOTE(review): extraction-mangled; declaration lines (return type, locals i,
   size, destmask init, return) are missing.  Comments only; code unchanged.
   Walks each single-bit type mask in ALLOWED, applies MODS via
   neon_modify_type_size, and ORs the transformed mask into destmask.  */
13654 modify_types_allowed (unsigned allowed
, unsigned mods
)
13657 enum neon_el_type type
;
/* Iterate over every single-bit mask up to N_MAX_NONSPECIAL.  */
13663 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
/* Decode the single-bit mask back to a (type, size) pair, if valid.  */
13665 if (el_type_of_type_chk (&type
, &size
,
13666 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
13668 neon_modify_type_size (mods
, &type
, &size
);
/* Re-encode the modified pair and accumulate it.  */
13669 destmask
|= type_chk_of_el_type (type
, size
);
13676 /* Check type and return type classification.
13677 The manual states (paraphrase): If one datatype is given, it indicates the
13679 - the second operand, if there is one
13680 - the operand, if there is no second operand
13681 - the result, if there are no operands.
13682 This isn't quite good enough though, so we use a concept of a "key" datatype
13683 which is set on a per-instruction basis, which is the one which matters when
13684 only one data type is written.
13685 Note: this function has side-effects (e.g. filling in missing operands). All
13686 Neon instructions should call it before performing bit encoding. */
13688 static struct neon_type_el
13689 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
13692 unsigned i
, pass
, key_el
= 0;
13693 unsigned types
[NEON_MAX_TYPE_ELS
];
13694 enum neon_el_type k_type
= NT_invtype
;
13695 unsigned k_size
= -1u;
13696 struct neon_type_el badtype
= {NT_invtype
, -1};
13697 unsigned key_allowed
= 0;
13699 /* Optional registers in Neon instructions are always (not) in operand 1.
13700 Fill in the missing operand here, if it was omitted. */
13701 if (els
> 1 && !inst
.operands
[1].present
)
13702 inst
.operands
[1] = inst
.operands
[0];
13704 /* Suck up all the varargs. */
13706 for (i
= 0; i
< els
; i
++)
13708 unsigned thisarg
= va_arg (ap
, unsigned);
13709 if (thisarg
== N_IGNORE_TYPE
)
13714 types
[i
] = thisarg
;
13715 if ((thisarg
& N_KEY
) != 0)
13720 if (inst
.vectype
.elems
> 0)
13721 for (i
= 0; i
< els
; i
++)
13722 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
13724 first_error (_("types specified in both the mnemonic and operands"));
13728 /* Duplicate inst.vectype elements here as necessary.
13729 FIXME: No idea if this is exactly the same as the ARM assembler,
13730 particularly when an insn takes one register and one non-register
13732 if (inst
.vectype
.elems
== 1 && els
> 1)
13735 inst
.vectype
.elems
= els
;
13736 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
13737 for (j
= 0; j
< els
; j
++)
13739 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13742 else if (inst
.vectype
.elems
== 0 && els
> 0)
13745 /* No types were given after the mnemonic, so look for types specified
13746 after each operand. We allow some flexibility here; as long as the
13747 "key" operand has a type, we can infer the others. */
13748 for (j
= 0; j
< els
; j
++)
13749 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
13750 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
13752 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
13754 for (j
= 0; j
< els
; j
++)
13755 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
13756 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13761 first_error (_("operand types can't be inferred"));
13765 else if (inst
.vectype
.elems
!= els
)
13767 first_error (_("type specifier has the wrong number of parts"));
13771 for (pass
= 0; pass
< 2; pass
++)
13773 for (i
= 0; i
< els
; i
++)
13775 unsigned thisarg
= types
[i
];
13776 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
13777 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
13778 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
13779 unsigned g_size
= inst
.vectype
.el
[i
].size
;
13781 /* Decay more-specific signed & unsigned types to sign-insensitive
13782 integer types if sign-specific variants are unavailable. */
13783 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
13784 && (types_allowed
& N_SU_ALL
) == 0)
13785 g_type
= NT_integer
;
13787 /* If only untyped args are allowed, decay any more specific types to
13788 them. Some instructions only care about signs for some element
13789 sizes, so handle that properly. */
13790 if (((types_allowed
& N_UNT
) == 0)
13791 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
13792 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
13793 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
13794 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
13795 g_type
= NT_untyped
;
13799 if ((thisarg
& N_KEY
) != 0)
13803 key_allowed
= thisarg
& ~N_KEY
;
13808 if ((thisarg
& N_VFP
) != 0)
13810 enum neon_shape_el regshape
;
13811 unsigned regwidth
, match
;
13813 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
13816 first_error (_("invalid instruction shape"));
13819 regshape
= neon_shape_tab
[ns
].el
[i
];
13820 regwidth
= neon_shape_el_size
[regshape
];
13822 /* In VFP mode, operands must match register widths. If we
13823 have a key operand, use its width, else use the width of
13824 the current operand. */
13830 if (regwidth
!= match
)
13832 first_error (_("operand size must match register width"));
13837 if ((thisarg
& N_EQK
) == 0)
13839 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
13841 if ((given_type
& types_allowed
) == 0)
13843 first_error (_("bad type in Neon instruction"));
13849 enum neon_el_type mod_k_type
= k_type
;
13850 unsigned mod_k_size
= k_size
;
13851 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
13852 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
13854 first_error (_("inconsistent types in Neon instruction"));
13862 return inst
.vectype
.el
[key_el
];
13865 /* Neon-style VFP instruction forwarding. */
13867 /* Thumb VFP instructions have 0xE in the condition field. */
/* NOTE(review): mangled extraction — the "static void" line, braces, and the
   thumb_mode if/else are missing.  First store appears to be the Thumb arm
   (condition 0xE), second the ARM arm (inst.cond << 28).  Code unchanged.  */
13870 do_vfp_cond_or_thumb (void)
13875 inst
.instruction
|= 0xe0000000;
13877 inst
.instruction
|= inst
.cond
<< 28;
13880 /* Look up and encode a simple mnemonic, for use as a helper function for the
13881 Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
13882 etc.  It is assumed that operand parsing has already been done, and that the
13883 operands are in the form expected by the given opcode (this isn't necessarily
13884 the same as the form in which they were parsed, hence some massaging must
13885 take place before this function is called).
13886 Checks current arch version against that in the looked-up opcode. */
/* NOTE(review): extraction-mangled (missing "static void", braces, the
   thumb_mode if/else split).  Looks up OPNAME in arm_ops_hsh, checks the CPU
   feature set, then emits either the Thumb (tvalue/tencode) or ARM
   (avalue/aencode, with condition in bits 28-31) encoding.  Code unchanged.  */
13889 do_vfp_nsyn_opcode (const char *opname
)
13891 const struct asm_opcode
*opcode
;
13893 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
/* Reject the mnemonic if the selected CPU lacks the required feature.  */
13898 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
13899 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
/* Thumb encoding path.  */
13906 inst
.instruction
= opcode
->tvalue
;
13907 opcode
->tencode ();
/* ARM encoding path: condition code goes in the top nibble.  */
13911 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
13912 opcode
->aencode ();
/* NOTE(review): mangled; the is_add/single-vs-double dispatch structure was
   lost.  Forwards VADD/VSUB in VFP form to fadds/fsubs (single precision) or
   faddd/fsubd (double), keyed off N_MNEM_vadd.  Code unchanged.  */
13917 do_vfp_nsyn_add_sub (enum neon_shape rs
)
13919 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
13924 do_vfp_nsyn_opcode ("fadds");
13926 do_vfp_nsyn_opcode ("fsubs");
13931 do_vfp_nsyn_opcode ("faddd");
13933 do_vfp_nsyn_opcode ("fsubd");
13937 /* Check operand types to see if this is a VFP instruction, and if so call
/* NOTE(review): mangled; return type ("static int"?), the args==2/args==3
   branch and the final call through PFN are missing from the extraction.
   Selects an FF/DD (2-operand) or FFF/DDD (3-operand) VFP shape, type-checks
   for F32/F64, and on a valid VFP type dispatches to PFN.  Code unchanged.  */
13941 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
13943 enum neon_shape rs
;
13944 struct neon_type_el et
;
/* Two-operand variant: Sd,Sm or Dd,Dm.  */
13949 rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
13950 et
= neon_check_type (2, rs
,
13951 N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
/* Three-operand variant: Sd,Sn,Sm or Dd,Dn,Dm.  */
13955 rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
13956 et
= neon_check_type (3, rs
,
13957 N_EQK
| N_VFP
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
/* A valid (non-invtype) result means this really is a VFP insn.  */
13964 if (et
.type
!= NT_invtype
)
/* NOTE(review): mangled; dispatch structure lost.  Forwards VMLA/VMLS (VFP
   form) to fmacs/fnmacs (single) or fmacd/fnmacd (double), keyed off
   N_MNEM_vmla.  Code unchanged.  */
13975 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
13977 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
13982 do_vfp_nsyn_opcode ("fmacs");
13984 do_vfp_nsyn_opcode ("fnmacs");
13989 do_vfp_nsyn_opcode ("fmacd");
13991 do_vfp_nsyn_opcode ("fnmacd");
/* NOTE(review): mangled; dispatch structure lost.  Forwards VFMA/VFMS (VFP
   form) to ffmas/ffnmas (single) or ffmad/ffnmad (double), keyed off
   N_MNEM_vfma.  Code unchanged.  */
13996 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
13998 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14003 do_vfp_nsyn_opcode ("ffmas");
14005 do_vfp_nsyn_opcode ("ffnmas");
14010 do_vfp_nsyn_opcode ("ffmad");
14012 do_vfp_nsyn_opcode ("ffnmad");
/* NOTE(review): mangled; the rs==NS_FFF test is missing.  Forwards VMUL (VFP
   form) to fmuls (single) or fmuld (double).  Code unchanged.  */
14017 do_vfp_nsyn_mul (enum neon_shape rs
)
14020 do_vfp_nsyn_opcode ("fmuls");
14022 do_vfp_nsyn_opcode ("fmuld");
/* NOTE(review): mangled; shape/is_neg dispatch lost.  Forwards VABS/VNEG (VFP
   form) to fnegs/fabss (single) or fnegd/fabsd (double); is_neg comes from
   bit 7 of the encoding.  Code unchanged.  */
14026 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14028 int is_neg
= (inst
.instruction
& 0x80) != 0;
14029 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_VFP
| N_KEY
);
14034 do_vfp_nsyn_opcode ("fnegs");
14036 do_vfp_nsyn_opcode ("fabss");
14041 do_vfp_nsyn_opcode ("fnegd");
14043 do_vfp_nsyn_opcode ("fabsd");
14047 /* Encode single-precision (only!) VFP fldm/fstm instructions.  Double precision
14048 insns belong to Neon, and are handled elsewhere. */
/* NOTE(review): mangled; the is_ldm/is_dbmode branch structure is missing.
   is_ldm comes from the load bit (bit 20); IS_DBMODE selects decrement-before
   vs. increment-after addressing.  Code unchanged.  */
14051 do_vfp_nsyn_ldm_stm (int is_dbmode
)
14053 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
14057 do_vfp_nsyn_opcode ("fldmdbs");
14059 do_vfp_nsyn_opcode ("fldmias");
14064 do_vfp_nsyn_opcode ("fstmdbs");
14066 do_vfp_nsyn_opcode ("fstmias");
/* NOTE(review): mangled; shape dispatch lost.  VSQRT in VFP form: selects
   FF/DD shape, checks F32/F64 types, forwards to fsqrts or fsqrtd.
   Code unchanged.  */
14071 do_vfp_nsyn_sqrt (void)
14073 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
14074 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
14077 do_vfp_nsyn_opcode ("fsqrts");
14079 do_vfp_nsyn_opcode ("fsqrtd");
/* NOTE(review): mangled; shape dispatch lost.  VDIV in VFP form: FFF/DDD
   shape, F32/F64 types, forwards to fdivs or fdivd.  Code unchanged.  */
14083 do_vfp_nsyn_div (void)
14085 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
14086 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14087 N_F32
| N_F64
| N_KEY
| N_VFP
);
14090 do_vfp_nsyn_opcode ("fdivs");
14092 do_vfp_nsyn_opcode ("fdivd");
/* NOTE(review): mangled; the single/double branch is missing.  VNMUL in VFP
   form: encodes directly (no named-mnemonic forward) via the single-precision
   dyadic or double-precision Rd,Rn,Rm encoders, then fixes the condition
   field.  Code unchanged.  */
14096 do_vfp_nsyn_nmul (void)
14098 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
14099 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14100 N_F32
| N_F64
| N_KEY
| N_VFP
);
/* Single-precision arm.  */
14104 NEON_ENCODE (SINGLE
, inst
);
14105 do_vfp_sp_dyadic ();
/* Double-precision arm.  */
14109 NEON_ENCODE (DOUBLE
, inst
);
14110 do_vfp_dp_rd_rn_rm ();
14112 do_vfp_cond_or_thumb ();
/* NOTE(review): mangled; braces, else arms and switch case labels are missing.
   VCMP/VCMPE in VFP form.  Register-register compare uses FF/DD shapes;
   compare-against-zero (operand 1 an immediate) uses FI/DI shapes and rewrites
   the mnemonic index to the "z" variant.  Code unchanged.  */
14116 do_vfp_nsyn_cmp (void)
14118 if (inst
.operands
[1].isreg
)
/* Register-register form.  */
14120 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
14121 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
14125 NEON_ENCODE (SINGLE
, inst
);
14126 do_vfp_sp_monadic ();
14130 NEON_ENCODE (DOUBLE
, inst
);
14131 do_vfp_dp_rd_rm ();
/* Compare-with-zero form.  */
14136 enum neon_shape rs
= neon_select_shape (NS_FI
, NS_DI
, NS_NULL
);
14137 neon_check_type (2, rs
, N_F32
| N_F64
| N_KEY
| N_VFP
, N_EQK
);
/* Map vcmp -> vcmpz, vcmpe -> vcmpez (case labels lost in extraction).  */
14139 switch (inst
.instruction
& 0x0fffffff)
14142 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
14145 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
14153 NEON_ENCODE (SINGLE
, inst
);
14154 do_vfp_sp_compare_z ();
14158 NEON_ENCODE (DOUBLE
, inst
);
14162 do_vfp_cond_or_thumb ();
/* NOTE(review): mangled but statements intact.  Shifts operand 0 into slot 1
   and synthesizes a writeback SP base register as operand 0 — used so VPUSH/
   VPOP can reuse the fldm/fstm encoders.  Code unchanged.  */
14166 nsyn_insert_sp (void)
14168 inst
.operands
[1] = inst
.operands
[0];
14169 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
/* Operand 0 becomes SP with writeback (SP!).  */
14170 inst
.operands
[0].reg
= REG_SP
;
14171 inst
.operands
[0].isreg
= 1;
14172 inst
.operands
[0].writeback
= 1;
14173 inst
.operands
[0].present
= 1;
/* NOTE(review): mangled; the nsyn_insert_sp () call and else arm are missing.
   VPUSH: forwards to fstmdbs (single) or fstmdbd (double) depending on the
   register list's precision.  Code unchanged.  */
14177 do_vfp_nsyn_push (void)
14180 if (inst
.operands
[1].issingle
)
14181 do_vfp_nsyn_opcode ("fstmdbs");
14183 do_vfp_nsyn_opcode ("fstmdbd");
/* NOTE(review): mangled; the nsyn_insert_sp () call and else arm are missing.
   VPOP: forwards to fldmias (single) or fldmiad (double).  Code unchanged.  */
14187 do_vfp_nsyn_pop (void)
14190 if (inst
.operands
[1].issingle
)
14191 do_vfp_nsyn_opcode ("fldmias");
14193 do_vfp_nsyn_opcode ("fldmiad");
14196 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14197 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
/* NOTE(review): mangled; the thumb_mode/ARM adjustment statements between
   lines 14207 and 14218 were lost in extraction.  Caches the encoded word in
   i, adjusts it, and writes it back.  Code unchanged.  */
14200 neon_dp_fixup (struct arm_it
* insn
)
14202 unsigned int i
= insn
->instruction
;
14207 /* The U bit is at bit 24 by default.  Move to bit 28 in Thumb mode. */
14218 insn
->instruction
= i
;
14221 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
/* NOTE(review): mangled; return type line missing.  ffs(8)==4, so this maps
   8->0, 16->1, 32->2, 64->3.  Code unchanged.  */
14225 neon_logbits (unsigned x
)
14227 return ffs (x
) - 4;
/* Split a 5-bit Neon register number into its encoding fields:
   LOW4 = the low 4 bits (Rd/Rn/Rm field), HI1 = bit 4 (D/N/M bit).  */
14230 #define LOW4(R) ((R) & 0xf)
14231 #define HI1(R) (((R) >> 4) & 1)
14233 /* Encode insns with bit pattern:
14235 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14236 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14238 SIZE is passed in bits.  -1 means size field isn't changed, in case it has a
14239 different meaning for some instruction. */
/* NOTE(review): mangled; "static void", braces and the size != -1 guard before
   the size-field store are missing.  ORs Rd, Rn, Rm (split via LOW4/HI1), the
   Q bit and the U bit into inst.instruction, then neon_dp_fixup.  Unchanged.  */
14242 neon_three_same (int isquad
, int ubit
, int size
)
14244 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14245 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14246 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14247 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14248 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14249 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14250 inst
.instruction
|= (isquad
!= 0) << 6;
14251 inst
.instruction
|= (ubit
!= 0) << 24;
/* Size field at bits 21:20 (log2(size) - 3).  */
14253 inst
.instruction
|= neon_logbits (size
) << 20;
14255 neon_dp_fixup (&inst
);
14258 /* Encode instructions of the form:
14260 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14261 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14263 Don't write size if SIZE == -1. */
/* NOTE(review): mangled; "static void", braces and the size != -1 guard are
   missing.  Two-register form: Rd at 15:12/22, Rm at 3:0/5, Q bit, U bit,
   size at 19:18; ends with neon_dp_fixup.  Code unchanged.  */
14266 neon_two_same (int qbit
, int ubit
, int size
)
14268 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14269 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14270 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14271 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14272 inst
.instruction
|= (qbit
!= 0) << 6;
14273 inst
.instruction
|= (ubit
!= 0) << 24;
/* Size field at bits 19:18 for two-register insns.  */
14276 inst
.instruction
|= neon_logbits (size
) << 18;
14278 neon_dp_fixup (&inst
);
14281 /* Neon instruction encoders, in approximate order of appearance. */
/* NOTE(review): mangled but statements intact.  Dyadic integer op accepting
   signed/unsigned 8/16/32-bit elements (N_SU_32); the U bit is set for
   unsigned types.  Code unchanged.  */
14284 do_neon_dyadic_i_su (void)
14286 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14287 struct neon_type_el et
= neon_check_type (3, rs
,
14288 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
14289 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
/* NOTE(review): mangled but statements intact.  As do_neon_dyadic_i_su but
   also allows 64-bit elements (N_SU_ALL).  Code unchanged.  */
14293 do_neon_dyadic_i64_su (void)
14295 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14296 struct neon_type_el et
= neon_check_type (3, rs
,
14297 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14298 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14302 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
14305 unsigned size
= et
.size
>> 3;
14306 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14307 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14308 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14309 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14310 inst
.instruction
|= (isquad
!= 0) << 6;
14311 inst
.instruction
|= immbits
<< 16;
14312 inst
.instruction
|= (size
>> 3) << 7;
14313 inst
.instruction
|= (size
& 0x7) << 19;
14315 inst
.instruction
|= (uval
!= 0) << 24;
14317 neon_dp_fixup (&inst
);
14321 do_neon_shl_imm (void)
14323 if (!inst
.operands
[2].isreg
)
14325 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14326 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
14327 int imm
= inst
.operands
[2].imm
;
14329 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14330 _("immediate out of range for shift"));
14331 NEON_ENCODE (IMMED
, inst
);
14332 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14336 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14337 struct neon_type_el et
= neon_check_type (3, rs
,
14338 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14341 /* VSHL/VQSHL 3-register variants have syntax such as:
14343 whereas other 3-register operations encoded by neon_three_same have
14346 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14348 tmp
= inst
.operands
[2].reg
;
14349 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14350 inst
.operands
[1].reg
= tmp
;
14351 NEON_ENCODE (INTEGER
, inst
);
14352 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14357 do_neon_qshl_imm (void)
14359 if (!inst
.operands
[2].isreg
)
14361 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14362 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
14363 int imm
= inst
.operands
[2].imm
;
14365 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14366 _("immediate out of range for shift"));
14367 NEON_ENCODE (IMMED
, inst
);
14368 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
14372 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14373 struct neon_type_el et
= neon_check_type (3, rs
,
14374 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14377 /* See note in do_neon_shl_imm. */
14378 tmp
= inst
.operands
[2].reg
;
14379 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14380 inst
.operands
[1].reg
= tmp
;
14381 NEON_ENCODE (INTEGER
, inst
);
14382 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14387 do_neon_rshl (void)
14389 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14390 struct neon_type_el et
= neon_check_type (3, rs
,
14391 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14394 tmp
= inst
.operands
[2].reg
;
14395 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14396 inst
.operands
[1].reg
= tmp
;
14397 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14401 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
14403 /* Handle .I8 pseudo-instructions. */
14406 /* Unfortunately, this will make everything apart from zero out-of-range.
14407 FIXME is this the intended semantics? There doesn't seem much point in
14408 accepting .I8 if so. */
14409 immediate
|= immediate
<< 8;
14415 if (immediate
== (immediate
& 0x000000ff))
14417 *immbits
= immediate
;
14420 else if (immediate
== (immediate
& 0x0000ff00))
14422 *immbits
= immediate
>> 8;
14425 else if (immediate
== (immediate
& 0x00ff0000))
14427 *immbits
= immediate
>> 16;
14430 else if (immediate
== (immediate
& 0xff000000))
14432 *immbits
= immediate
>> 24;
14435 if ((immediate
& 0xffff) != (immediate
>> 16))
14436 goto bad_immediate
;
14437 immediate
&= 0xffff;
14440 if (immediate
== (immediate
& 0x000000ff))
14442 *immbits
= immediate
;
14445 else if (immediate
== (immediate
& 0x0000ff00))
14447 *immbits
= immediate
>> 8;
14452 first_error (_("immediate value out of range"));
14457 do_neon_logic (void)
14459 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
14461 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14462 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14463 /* U bit and size field were set as part of the bitmask. */
14464 NEON_ENCODE (INTEGER
, inst
);
14465 neon_three_same (neon_quad (rs
), 0, -1);
14469 const int three_ops_form
= (inst
.operands
[2].present
14470 && !inst
.operands
[2].isreg
);
14471 const int immoperand
= (three_ops_form
? 2 : 1);
14472 enum neon_shape rs
= (three_ops_form
14473 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
14474 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
14475 struct neon_type_el et
= neon_check_type (2, rs
,
14476 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
14477 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
14481 if (et
.type
== NT_invtype
)
14484 if (three_ops_form
)
14485 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
14486 _("first and second operands shall be the same register"));
14488 NEON_ENCODE (IMMED
, inst
);
14490 immbits
= inst
.operands
[immoperand
].imm
;
14493 /* .i64 is a pseudo-op, so the immediate must be a repeating
14495 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
14496 inst
.operands
[immoperand
].reg
: 0))
14498 /* Set immbits to an invalid constant. */
14499 immbits
= 0xdeadbeef;
14506 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14510 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14514 /* Pseudo-instruction for VBIC. */
14515 neon_invert_size (&immbits
, 0, et
.size
);
14516 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14520 /* Pseudo-instruction for VORR. */
14521 neon_invert_size (&immbits
, 0, et
.size
);
14522 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14532 inst
.instruction
|= neon_quad (rs
) << 6;
14533 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14534 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14535 inst
.instruction
|= cmode
<< 8;
14536 neon_write_immbits (immbits
);
14538 neon_dp_fixup (&inst
);
/* NOTE(review): mangled; "static void" and braces missing.  VBIF/VBIT/VBSL:
   typeless (N_IGNORE_TYPE) three-same encoding; size field untouched (-1).
   Code unchanged.  */
14543 do_neon_bitfield (void)
14545 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14546 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14547 neon_three_same (neon_quad (rs
), 0, -1);
/* NOTE(review): mangled; the third parameter (destbits, used below), the
   types|N_KEY argument line and the else keyword are missing.  Common helper
   for dyadic ops: float types use the FLOAT encoding with no size field,
   everything else uses INTEGER with the U bit driven by UBIT_MEANING.
   Code unchanged.  */
14551 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
14554 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14555 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
14557 if (et
.type
== NT_float
)
14559 NEON_ENCODE (FLOAT
, inst
);
14560 neon_three_same (neon_quad (rs
), 0, -1);
14564 NEON_ENCODE (INTEGER
, inst
);
/* U bit set when the checked type matches UBIT_MEANING (e.g. NT_unsigned).  */
14565 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
/* NOTE(review): mangled; "static void"/braces missing.  Dyadic op on signed,
   unsigned or float 32-bit-and-below elements (N_SUF_32).  Code unchanged.  */
14570 do_neon_dyadic_if_su (void)
14572 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
/* NOTE(review): mangled; "static void"/braces missing.  Code unchanged.  */
14576 do_neon_dyadic_if_su_d (void)
14578 /* This version only allow D registers, but that constraint is enforced during
14579 operand parsing so we don't need to do anything extra here. */
14580 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
/* NOTE(review): mangled; "static void"/braces missing.  Code unchanged.  */
14584 do_neon_dyadic_if_i_d (void)
14586 /* The "untyped" case can't happen.  Do this to stop the "U" bit being
14587 affected if we specify unsigned args. */
14588 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* Check-flag bits for vfp_or_neon_is_neon.  NOTE(review): the first
   enumerator (NEON_CHECK_CC = 1, judging by its use below and the power-of-two
   sequence) was lost in extraction — confirm upstream.  Code unchanged.  */
14591 enum vfp_or_neon_is_neon_bits
14594 NEON_CHECK_ARCH
= 2,
14595 NEON_CHECK_ARCH8
= 4
14598 /* Call this function if an instruction which may have belonged to the VFP or
14599 Neon instruction sets, but turned out to be a Neon instruction (due to the
14600 operand types involved, etc.).  We have to check and/or fix-up a couple of
14603 - Make sure the user hasn't attempted to make a Neon instruction
14605 - Alter the value in the condition code field if necessary.
14606 - Make sure that the arch supports Neon instructions.
14608 Which of these operations take place depends on bits from enum
14609 vfp_or_neon_is_neon_bits.
14611 WARNING: This function has side effects!  If NEON_CHECK_CC is used and the
14612 current instruction's condition is COND_ALWAYS, the condition field is
14613 changed to inst.uncond_value.  This is necessary because instructions shared
14614 between VFP and Neon may be conditional for the VFP variants only, and the
14615 unconditional Neon version must have, e.g., 0xF in the condition field. */
/* NOTE(review): mangled; return type, braces, "return FAIL;"/"return SUCCESS;"
   statements and else arms are missing.  Code unchanged, comments only.  */
14618 vfp_or_neon_is_neon (unsigned check
)
14620 /* Conditions are always legal in Thumb mode (IT blocks). */
14621 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
/* In ARM mode a Neon insn must be unconditional.  */
14623 if (inst
.cond
!= COND_ALWAYS
)
14625 first_error (_(BAD_COND
));
/* Force the unconditional (0xF) condition field where one is recorded.  */
14628 if (inst
.uncond_value
!= -1)
14629 inst
.instruction
|= inst
.uncond_value
<< 28;
/* Architecture checks: base Neon, then ARMv8 Neon.  */
14632 if ((check
& NEON_CHECK_ARCH
)
14633 && !mark_feature_used (&fpu_neon_ext_v1
))
14635 first_error (_(BAD_FPU
));
14639 if ((check
& NEON_CHECK_ARCH8
)
14640 && !mark_feature_used (&fpu_neon_ext_armv8
))
14642 first_error (_(BAD_FPU
));
/* NOTE(review): mangled; "static void", braces and the two "return;"
   statements after the guards are missing.  VADD/VSUB: try the VFP form
   first; otherwise validate Neon usage and encode as an integer/float dyadic
   op (including I64).  Code unchanged.  */
14650 do_neon_addsub_if_i (void)
14652 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
14655 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14658 /* The "untyped" case can't happen.  Do this to stop the "U" bit being
14659 affected if we specify unsigned args. */
14660 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
14663 /* Swaps operands 1 and 2.  If operand 1 (optional arg) was omitted, we want the
14665 V<op> A,B (A is operand 0, B is operand 2)
14670 so handle that case specially. */
/* NOTE(review): mangled; "static void", braces and the else keyword are
   missing.  Uses alloca for the swap scratch buffer — later upstream replaced
   this with a plain struct copy; flagging, not changing, here.  Unchanged.  */
14673 neon_exchange_operands (void)
14675 void *scratch
= alloca (sizeof (inst
.operands
[0]));
14676 if (inst
.operands
[1].present
)
14678 /* Swap operands[1] and operands[2]. */
14679 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
14680 inst
.operands
[1] = inst
.operands
[2];
14681 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
/* Optional operand 1 omitted: shift 2 -> 1 and duplicate 0 -> 2.  */
14685 inst
.operands
[1] = inst
.operands
[2];
14686 inst
.operands
[2] = inst
.operands
[0];
14691 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
14693 if (inst
.operands
[2].isreg
)
14696 neon_exchange_operands ();
14697 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
14701 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14702 struct neon_type_el et
= neon_check_type (2, rs
,
14703 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
14705 NEON_ENCODE (IMMED
, inst
);
14706 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14707 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14708 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14709 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14710 inst
.instruction
|= neon_quad (rs
) << 6;
14711 inst
.instruction
|= (et
.type
== NT_float
) << 10;
14712 inst
.instruction
|= neon_logbits (et
.size
) << 18;
14714 neon_dp_fixup (&inst
);
14721 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
/* NOTE(review): mangled; "static void"/braces missing.  Inverted compare
   (e.g. VCLT -> swapped-operand VCGT): same types as do_neon_cmp but with the
   invert flag TRUE.  Code unchanged.  */
14725 do_neon_cmp_inv (void)
14727 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
14733 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
14736 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14737 scalars, which are encoded in 5 bits, M : Rm.
14738 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14739 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
/* NOTE(review): mangled; return type, the switch (elsize) with its case 16/
   case 32 labels, the error "goto"/break structure and a final return are
   missing.  Validates and packs a scalar operand for multiply encodings.
   Code unchanged.  */
14743 neon_scalar_for_mul (unsigned scalar
, unsigned elsize
)
14745 unsigned regno
= NEON_SCALAR_REG (scalar
);
14746 unsigned elno
= NEON_SCALAR_INDEX (scalar
);
/* 16-bit scalar: register in Rm[2:0], index in M:Rm[3].  */
14751 if (regno
> 7 || elno
> 3)
14753 return regno
| (elno
<< 3);
/* 32-bit scalar: register in Rm[3:0], index in M.  */
14756 if (regno
> 15 || elno
> 1)
14758 return regno
| (elno
<< 4);
14762 first_error (_("scalar out of range for multiply instruction"));
14768 /* Encode multiply / multiply-accumulate scalar instructions. */
14771 neon_mul_mac (struct neon_type_el et
, int ubit
)
14775 /* Give a more helpful error message if we have an invalid type. */
14776 if (et
.type
== NT_invtype
)
14779 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
14780 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14781 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14782 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14783 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14784 inst
.instruction
|= LOW4 (scalar
);
14785 inst
.instruction
|= HI1 (scalar
) << 5;
14786 inst
.instruction
|= (et
.type
== NT_float
) << 8;
14787 inst
.instruction
|= neon_logbits (et
.size
) << 20;
14788 inst
.instruction
|= (ubit
!= 0) << 24;
14790 neon_dp_fixup (&inst
);
14794 do_neon_mac_maybe_scalar (void)
14796 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
14799 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14802 if (inst
.operands
[2].isscalar
)
14804 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
14805 struct neon_type_el et
= neon_check_type (3, rs
,
14806 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
14807 NEON_ENCODE (SCALAR
, inst
);
14808 neon_mul_mac (et
, neon_quad (rs
));
14812 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14813 affected if we specify unsigned args. */
14814 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* NOTE(review): mangled; "static void", braces and the "return;" after each
   guard are missing.  VFMA/VFMS: try the VFP form first, then validate and
   encode the Neon form.  Code unchanged.  */
14819 do_neon_fmac (void)
14821 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
14824 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14827 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
14833 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14834 struct neon_type_el et
= neon_check_type (3, rs
,
14835 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14836 neon_three_same (neon_quad (rs
), 0, et
.size
);
14839 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14840 same types as the MAC equivalents. The polynomial type for this instruction
14841 is encoded the same as the integer type. */
14846 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
14849 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14852 if (inst
.operands
[2].isscalar
)
14853 do_neon_mac_maybe_scalar ();
14855 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
14859 do_neon_qdmulh (void)
14861 if (inst
.operands
[2].isscalar
)
14863 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
14864 struct neon_type_el et
= neon_check_type (3, rs
,
14865 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
14866 NEON_ENCODE (SCALAR
, inst
);
14867 neon_mul_mac (et
, neon_quad (rs
));
14871 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14872 struct neon_type_el et
= neon_check_type (3, rs
,
14873 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
14874 NEON_ENCODE (INTEGER
, inst
);
14875 /* The U bit (rounding) comes from bit mask. */
14876 neon_three_same (neon_quad (rs
), 0, et
.size
);
/* NOTE(review): mangled; "static void"/braces missing.  VACGE/VACGT: F32-only
   three-same encoding with the U bit forced to 1.  Code unchanged.  */
14881 do_neon_fcmp_absolute (void)
14883 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14884 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
14885 /* Size field comes from bit mask. */
14886 neon_three_same (neon_quad (rs
), 1, -1);
/* NOTE(review): mangled; "static void"/braces missing.  VACLE/VACLT: swap
   operands and reuse the VACGE/VACGT encoder.  Code unchanged.  */
14890 do_neon_fcmp_absolute_inv (void)
14892 neon_exchange_operands ();
14893 do_neon_fcmp_absolute ();
/* NOTE(review): mangled; "static void"/braces missing.  VRECPS/VRSQRTS:
   F32-only three-same encoding, U bit clear, size from the bitmask (-1).
   Code unchanged.  */
14897 do_neon_step (void)
14899 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14900 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
14901 neon_three_same (neon_quad (rs
), 0, -1);
14905 do_neon_abs_neg (void)
14907 enum neon_shape rs
;
14908 struct neon_type_el et
;
14910 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
14913 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14916 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14917 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
14919 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14920 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14921 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14922 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14923 inst
.instruction
|= neon_quad (rs
) << 6;
14924 inst
.instruction
|= (et
.type
== NT_float
) << 10;
14925 inst
.instruction
|= neon_logbits (et
.size
) << 18;
14927 neon_dp_fixup (&inst
);
14933 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14934 struct neon_type_el et
= neon_check_type (2, rs
,
14935 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
14936 int imm
= inst
.operands
[2].imm
;
14937 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14938 _("immediate out of range for insert"));
14939 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14945 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14946 struct neon_type_el et
= neon_check_type (2, rs
,
14947 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
14948 int imm
= inst
.operands
[2].imm
;
14949 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
14950 _("immediate out of range for insert"));
14951 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
14955 do_neon_qshlu_imm (void)
14957 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14958 struct neon_type_el et
= neon_check_type (2, rs
,
14959 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
14960 int imm
= inst
.operands
[2].imm
;
14961 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14962 _("immediate out of range for shift"));
14963 /* Only encodes the 'U present' variant of the instruction.
14964 In this case, signed types have OP (bit 8) set to 0.
14965 Unsigned types have OP set to 1. */
14966 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
14967 /* The rest of the bits are the same as other immediate shifts. */
14968 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14972 do_neon_qmovn (void)
14974 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
14975 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
14976 /* Saturating move where operands can be signed or unsigned, and the
14977 destination has the same signedness. */
14978 NEON_ENCODE (INTEGER
, inst
);
14979 if (et
.type
== NT_unsigned
)
14980 inst
.instruction
|= 0xc0;
14982 inst
.instruction
|= 0x80;
14983 neon_two_same (0, 1, et
.size
/ 2);
14987 do_neon_qmovun (void)
14989 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
14990 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
14991 /* Saturating move with unsigned results. Operands must be signed. */
14992 NEON_ENCODE (INTEGER
, inst
);
14993 neon_two_same (0, 1, et
.size
/ 2);
14997 do_neon_rshift_sat_narrow (void)
14999 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15000 or unsigned. If operands are unsigned, results must also be unsigned. */
15001 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15002 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15003 int imm
= inst
.operands
[2].imm
;
15004 /* This gets the bounds check, size encoding and immediate bits calculation
15008 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15009 VQMOVN.I<size> <Dd>, <Qm>. */
15012 inst
.operands
[2].present
= 0;
15013 inst
.instruction
= N_MNEM_vqmovn
;
15018 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15019 _("immediate out of range"));
15020 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
15024 do_neon_rshift_sat_narrow_u (void)
15026 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15027 or unsigned. If operands are unsigned, results must also be unsigned. */
15028 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15029 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15030 int imm
= inst
.operands
[2].imm
;
15031 /* This gets the bounds check, size encoding and immediate bits calculation
15035 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15036 VQMOVUN.I<size> <Dd>, <Qm>. */
15039 inst
.operands
[2].present
= 0;
15040 inst
.instruction
= N_MNEM_vqmovun
;
15045 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15046 _("immediate out of range"));
15047 /* FIXME: The manual is kind of unclear about what value U should have in
15048 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15050 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
15054 do_neon_movn (void)
15056 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15057 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15058 NEON_ENCODE (INTEGER
, inst
);
15059 neon_two_same (0, 1, et
.size
/ 2);
15063 do_neon_rshift_narrow (void)
15065 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15066 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15067 int imm
= inst
.operands
[2].imm
;
15068 /* This gets the bounds check, size encoding and immediate bits calculation
15072 /* If immediate is zero then we are a pseudo-instruction for
15073 VMOVN.I<size> <Dd>, <Qm> */
15076 inst
.operands
[2].present
= 0;
15077 inst
.instruction
= N_MNEM_vmovn
;
15082 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15083 _("immediate out of range for narrowing operation"));
15084 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
15088 do_neon_shll (void)
15090 /* FIXME: Type checking when lengthening. */
15091 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
15092 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
15093 unsigned imm
= inst
.operands
[2].imm
;
15095 if (imm
== et
.size
)
15097 /* Maximum shift variant. */
15098 NEON_ENCODE (INTEGER
, inst
);
15099 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15100 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15101 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15102 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15103 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15105 neon_dp_fixup (&inst
);
15109 /* A more-specific type check for non-max versions. */
15110 et
= neon_check_type (2, NS_QDI
,
15111 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15112 NEON_ENCODE (IMMED
, inst
);
15113 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

#define CVT_FLAVOUR_VAR							      \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL)      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL)      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL)	      \
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL)	      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL)	      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)    \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)    \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL)	      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL)	      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL)	      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL)	      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL)	      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL)	      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL)	      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15158 static enum neon_cvt_flavour
15159 get_neon_cvt_flavour (enum neon_shape rs
)
15161 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
15162 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
15163 if (et.type != NT_invtype) \
15165 inst.error = NULL; \
15166 return (neon_cvt_flavour_##C); \
15169 struct neon_type_el et
;
15170 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
15171 || rs
== NS_FF
) ? N_VFP
: 0;
15172 /* The instruction versions which take an immediate take one register
15173 argument, which is extended to the width of the full register. Thus the
15174 "source" and "destination" registers must have the same width. Hack that
15175 here by making the size equal to the key (wider, in this case) operand. */
15176 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
15180 return neon_cvt_flavour_invalid
;
15195 /* Neon-syntax VFP conversions. */
15198 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
15200 const char *opname
= 0;
15202 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
)
15204 /* Conversions with immediate bitshift. */
15205 const char *enc
[] =
15207 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15213 if (flavour
< (int) ARRAY_SIZE (enc
))
15215 opname
= enc
[flavour
];
15216 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15217 _("operands 0 and 1 must be the same register"));
15218 inst
.operands
[1] = inst
.operands
[2];
15219 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
15224 /* Conversions without bitshift. */
15225 const char *enc
[] =
15227 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15233 if (flavour
< (int) ARRAY_SIZE (enc
))
15234 opname
= enc
[flavour
];
15238 do_vfp_nsyn_opcode (opname
);
15242 do_vfp_nsyn_cvtz (void)
15244 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_NULL
);
15245 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15246 const char *enc
[] =
15248 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15254 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
15255 do_vfp_nsyn_opcode (enc
[flavour
]);
15259 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
15260 enum neon_cvt_mode mode
)
15265 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15266 D register operands. */
15267 if (flavour
== neon_cvt_flavour_s32_f64
15268 || flavour
== neon_cvt_flavour_u32_f64
)
15269 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15272 set_it_insn_type (OUTSIDE_IT_INSN
);
15276 case neon_cvt_flavour_s32_f64
:
15280 case neon_cvt_flavour_s32_f32
:
15284 case neon_cvt_flavour_u32_f64
:
15288 case neon_cvt_flavour_u32_f32
:
15293 first_error (_("invalid instruction shape"));
15299 case neon_cvt_mode_a
: rm
= 0; break;
15300 case neon_cvt_mode_n
: rm
= 1; break;
15301 case neon_cvt_mode_p
: rm
= 2; break;
15302 case neon_cvt_mode_m
: rm
= 3; break;
15303 default: first_error (_("invalid rounding mode")); return;
15306 NEON_ENCODE (FPV8
, inst
);
15307 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
15308 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
15309 inst
.instruction
|= sz
<< 8;
15310 inst
.instruction
|= op
<< 7;
15311 inst
.instruction
|= rm
<< 16;
15312 inst
.instruction
|= 0xf0000000;
15313 inst
.is_neon
= TRUE
;
15317 do_neon_cvt_1 (enum neon_cvt_mode mode
)
15319 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
15320 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
, NS_NULL
);
15321 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15323 /* PR11109: Handle round-to-zero for VCVT conversions. */
15324 if (mode
== neon_cvt_mode_z
15325 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
15326 && (flavour
== neon_cvt_flavour_s32_f32
15327 || flavour
== neon_cvt_flavour_u32_f32
15328 || flavour
== neon_cvt_flavour_s32_f64
15329 || flavour
== neon_cvt_flavour_u32_f64
)
15330 && (rs
== NS_FD
|| rs
== NS_FF
))
15332 do_vfp_nsyn_cvtz ();
15336 /* VFP rather than Neon conversions. */
15337 if (flavour
>= neon_cvt_flavour_first_fp
)
15339 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15340 do_vfp_nsyn_cvt (rs
, flavour
);
15342 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15353 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
15355 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15358 /* Fixed-point conversion with #0 immediate is encoded as an
15359 integer conversion. */
15360 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
15362 immbits
= 32 - inst
.operands
[2].imm
;
15363 NEON_ENCODE (IMMED
, inst
);
15364 if (flavour
!= neon_cvt_flavour_invalid
)
15365 inst
.instruction
|= enctab
[flavour
];
15366 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15367 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15368 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15369 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15370 inst
.instruction
|= neon_quad (rs
) << 6;
15371 inst
.instruction
|= 1 << 21;
15372 inst
.instruction
|= immbits
<< 16;
15374 neon_dp_fixup (&inst
);
15380 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
15382 NEON_ENCODE (FLOAT
, inst
);
15383 set_it_insn_type (OUTSIDE_IT_INSN
);
15385 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
15388 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15389 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15390 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15391 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15392 inst
.instruction
|= neon_quad (rs
) << 6;
15393 inst
.instruction
|= (flavour
== neon_cvt_flavour_u32_f32
) << 7;
15394 inst
.instruction
|= mode
<< 8;
15396 inst
.instruction
|= 0xfc000000;
15398 inst
.instruction
|= 0xf0000000;
15404 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
15406 NEON_ENCODE (INTEGER
, inst
);
15408 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15411 if (flavour
!= neon_cvt_flavour_invalid
)
15412 inst
.instruction
|= enctab
[flavour
];
15414 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15415 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15416 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15417 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15418 inst
.instruction
|= neon_quad (rs
) << 6;
15419 inst
.instruction
|= 2 << 18;
15421 neon_dp_fixup (&inst
);
15426 /* Half-precision conversions for Advanced SIMD -- neon. */
15431 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
15433 as_bad (_("operand size must match register width"));
15438 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
15440 as_bad (_("operand size must match register width"));
15445 inst
.instruction
= 0x3b60600;
15447 inst
.instruction
= 0x3b60700;
15449 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15450 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15451 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15452 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15453 neon_dp_fixup (&inst
);
15457 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
15458 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15459 do_vfp_nsyn_cvt (rs
, flavour
);
15461 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15466 do_neon_cvtr (void)
15468 do_neon_cvt_1 (neon_cvt_mode_x
);
15474 do_neon_cvt_1 (neon_cvt_mode_z
);
15478 do_neon_cvta (void)
15480 do_neon_cvt_1 (neon_cvt_mode_a
);
15484 do_neon_cvtn (void)
15486 do_neon_cvt_1 (neon_cvt_mode_n
);
15490 do_neon_cvtp (void)
15492 do_neon_cvt_1 (neon_cvt_mode_p
);
15496 do_neon_cvtm (void)
15498 do_neon_cvt_1 (neon_cvt_mode_m
);
15502 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
15505 mark_feature_used (&fpu_vfp_ext_armv8
);
15507 encode_arm_vfp_reg (inst
.operands
[0].reg
,
15508 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
15509 encode_arm_vfp_reg (inst
.operands
[1].reg
,
15510 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
15511 inst
.instruction
|= to
? 0x10000 : 0;
15512 inst
.instruction
|= t
? 0x80 : 0;
15513 inst
.instruction
|= is_double
? 0x100 : 0;
15514 do_vfp_cond_or_thumb ();
15518 do_neon_cvttb_1 (bfd_boolean t
)
15520 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_DF
, NS_NULL
);
15524 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
15527 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
15529 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
15532 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
15534 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
15536 /* The VCVTB and VCVTT instructions with D-register operands
15537 don't work for SP only targets. */
15538 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15542 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
15544 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
15546 /* The VCVTB and VCVTT instructions with D-register operands
15547 don't work for SP only targets. */
15548 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15552 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
15559 do_neon_cvtb (void)
15561 do_neon_cvttb_1 (FALSE
);
15566 do_neon_cvtt (void)
15568 do_neon_cvttb_1 (TRUE
);
15572 neon_move_immediate (void)
15574 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
15575 struct neon_type_el et
= neon_check_type (2, rs
,
15576 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15577 unsigned immlo
, immhi
= 0, immbits
;
15578 int op
, cmode
, float_p
;
15580 constraint (et
.type
== NT_invtype
,
15581 _("operand size must be specified for immediate VMOV"));
15583 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
15584 op
= (inst
.instruction
& (1 << 5)) != 0;
15586 immlo
= inst
.operands
[1].imm
;
15587 if (inst
.operands
[1].regisimm
)
15588 immhi
= inst
.operands
[1].reg
;
15590 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
15591 _("immediate has bits set outside the operand size"));
15593 float_p
= inst
.operands
[1].immisfloat
;
15595 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
15596 et
.size
, et
.type
)) == FAIL
)
15598 /* Invert relevant bits only. */
15599 neon_invert_size (&immlo
, &immhi
, et
.size
);
15600 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
15601 with one or the other; those cases are caught by
15602 neon_cmode_for_move_imm. */
15604 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
15605 &op
, et
.size
, et
.type
)) == FAIL
)
15607 first_error (_("immediate out of range"));
15612 inst
.instruction
&= ~(1 << 5);
15613 inst
.instruction
|= op
<< 5;
15615 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15616 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15617 inst
.instruction
|= neon_quad (rs
) << 6;
15618 inst
.instruction
|= cmode
<< 8;
15620 neon_write_immbits (immbits
);
15626 if (inst
.operands
[1].isreg
)
15628 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15630 NEON_ENCODE (INTEGER
, inst
);
15631 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15632 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15633 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15634 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15635 inst
.instruction
|= neon_quad (rs
) << 6;
15639 NEON_ENCODE (IMMED
, inst
);
15640 neon_move_immediate ();
15643 neon_dp_fixup (&inst
);
15646 /* Encode instructions of form:
15648 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15649 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15652 neon_mixed_length (struct neon_type_el et
, unsigned size
)
15654 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15655 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15656 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15657 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15658 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15659 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15660 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
15661 inst
.instruction
|= neon_logbits (size
) << 20;
15663 neon_dp_fixup (&inst
);
15667 do_neon_dyadic_long (void)
15669 /* FIXME: Type checking for lengthening op. */
15670 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15671 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
15672 neon_mixed_length (et
, et
.size
);
15676 do_neon_abal (void)
15678 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15679 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
15680 neon_mixed_length (et
, et
.size
);
15684 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
15686 if (inst
.operands
[2].isscalar
)
15688 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
15689 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
15690 NEON_ENCODE (SCALAR
, inst
);
15691 neon_mul_mac (et
, et
.type
== NT_unsigned
);
15695 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15696 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
15697 NEON_ENCODE (INTEGER
, inst
);
15698 neon_mixed_length (et
, et
.size
);
15703 do_neon_mac_maybe_scalar_long (void)
15705 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
15709 do_neon_dyadic_wide (void)
15711 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
15712 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15713 neon_mixed_length (et
, et
.size
);
15717 do_neon_dyadic_narrow (void)
15719 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15720 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
15721 /* Operand sign is unimportant, and the U bit is part of the opcode,
15722 so force the operand type to integer. */
15723 et
.type
= NT_integer
;
15724 neon_mixed_length (et
, et
.size
/ 2);
15728 do_neon_mul_sat_scalar_long (void)
15730 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
15734 do_neon_vmull (void)
15736 if (inst
.operands
[2].isscalar
)
15737 do_neon_mac_maybe_scalar_long ();
15740 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15741 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
15743 if (et
.type
== NT_poly
)
15744 NEON_ENCODE (POLY
, inst
);
15746 NEON_ENCODE (INTEGER
, inst
);
15748 /* For polynomial encoding the U bit must be zero, and the size must
15749 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
15750 obviously, as 0b10). */
15753 /* Check we're on the correct architecture. */
15754 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
15756 _("Instruction form not available on this architecture.");
15761 neon_mixed_length (et
, et
.size
);
15768 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
15769 struct neon_type_el et
= neon_check_type (3, rs
,
15770 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15771 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
15773 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
15774 _("shift out of range"));
15775 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15776 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15777 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15778 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15779 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15780 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15781 inst
.instruction
|= neon_quad (rs
) << 6;
15782 inst
.instruction
|= imm
<< 8;
15784 neon_dp_fixup (&inst
);
15790 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15791 struct neon_type_el et
= neon_check_type (2, rs
,
15792 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15793 unsigned op
= (inst
.instruction
>> 7) & 3;
15794 /* N (width of reversed regions) is encoded as part of the bitmask. We
15795 extract it here to check the elements to be reversed are smaller.
15796 Otherwise we'd get a reserved instruction. */
15797 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
15798 gas_assert (elsize
!= 0);
15799 constraint (et
.size
>= elsize
,
15800 _("elements must be smaller than reversal region"));
15801 neon_two_same (neon_quad (rs
), 1, et
.size
);
15807 if (inst
.operands
[1].isscalar
)
15809 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
15810 struct neon_type_el et
= neon_check_type (2, rs
,
15811 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15812 unsigned sizebits
= et
.size
>> 3;
15813 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
15814 int logsize
= neon_logbits (et
.size
);
15815 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
15817 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
15820 NEON_ENCODE (SCALAR
, inst
);
15821 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15822 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15823 inst
.instruction
|= LOW4 (dm
);
15824 inst
.instruction
|= HI1 (dm
) << 5;
15825 inst
.instruction
|= neon_quad (rs
) << 6;
15826 inst
.instruction
|= x
<< 17;
15827 inst
.instruction
|= sizebits
<< 16;
15829 neon_dp_fixup (&inst
);
15833 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
15834 struct neon_type_el et
= neon_check_type (2, rs
,
15835 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
15836 /* Duplicate ARM register to lanes of vector. */
15837 NEON_ENCODE (ARMREG
, inst
);
15840 case 8: inst
.instruction
|= 0x400000; break;
15841 case 16: inst
.instruction
|= 0x000020; break;
15842 case 32: inst
.instruction
|= 0x000000; break;
15845 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
15846 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
15847 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
15848 inst
.instruction
|= neon_quad (rs
) << 21;
15849 /* The encoding for this instruction is identical for the ARM and Thumb
15850 variants, except for the condition field. */
15851 do_vfp_cond_or_thumb ();
15855 /* VMOV has particularly many variations. It can be one of:
15856 0. VMOV<c><q> <Qd>, <Qm>
15857 1. VMOV<c><q> <Dd>, <Dm>
15858 (Register operations, which are VORR with Rm = Rn.)
15859 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15860 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15862 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15863 (ARM register to scalar.)
15864 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15865 (Two ARM registers to vector.)
15866 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15867 (Scalar to ARM register.)
15868 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15869 (Vector to two ARM registers.)
15870 8. VMOV.F32 <Sd>, <Sm>
15871 9. VMOV.F64 <Dd>, <Dm>
15872 (VFP register moves.)
15873 10. VMOV.F32 <Sd>, #imm
15874 11. VMOV.F64 <Dd>, #imm
15875 (VFP float immediate load.)
15876 12. VMOV <Rd>, <Sm>
15877 (VFP single to ARM reg.)
15878 13. VMOV <Sd>, <Rm>
15879 (ARM reg to VFP single.)
15880 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15881 (Two ARM regs to two VFP singles.)
15882 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15883 (Two VFP singles to two ARM regs.)
15885 These cases can be disambiguated using neon_select_shape, except cases 1/9
15886 and 3/11 which depend on the operand type too.
15888 All the encoded bits are hardcoded by this function.
15890 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15891 Cases 5, 7 may be used with VFPv2 and above.
15893 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15894 can specify a type where it doesn't make sense to, and is ignored). */
15899 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
15900 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
, NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
15902 struct neon_type_el et
;
15903 const char *ldconst
= 0;
15907 case NS_DD
: /* case 1/9. */
15908 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
15909 /* It is not an error here if no type is given. */
15911 if (et
.type
== NT_float
&& et
.size
== 64)
15913 do_vfp_nsyn_opcode ("fcpyd");
15916 /* fall through. */
15918 case NS_QQ
: /* case 0/1. */
15920 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15922 /* The architecture manual I have doesn't explicitly state which
15923 value the U bit should have for register->register moves, but
15924 the equivalent VORR instruction has U = 0, so do that. */
15925 inst
.instruction
= 0x0200110;
15926 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15927 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15928 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15929 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15930 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15931 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15932 inst
.instruction
|= neon_quad (rs
) << 6;
15934 neon_dp_fixup (&inst
);
15938 case NS_DI
: /* case 3/11. */
15939 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
15941 if (et
.type
== NT_float
&& et
.size
== 64)
15943 /* case 11 (fconstd). */
15944 ldconst
= "fconstd";
15945 goto encode_fconstd
;
15947 /* fall through. */
15949 case NS_QI
: /* case 2/3. */
15950 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15952 inst
.instruction
= 0x0800010;
15953 neon_move_immediate ();
15954 neon_dp_fixup (&inst
);
15957 case NS_SR
: /* case 4. */
15959 unsigned bcdebits
= 0;
15961 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
15962 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
15964 /* .<size> is optional here, defaulting to .32. */
15965 if (inst
.vectype
.elems
== 0
15966 && inst
.operands
[0].vectype
.type
== NT_invtype
15967 && inst
.operands
[1].vectype
.type
== NT_invtype
)
15969 inst
.vectype
.el
[0].type
= NT_untyped
;
15970 inst
.vectype
.el
[0].size
= 32;
15971 inst
.vectype
.elems
= 1;
15974 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
15975 logsize
= neon_logbits (et
.size
);
15977 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
15979 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
15980 && et
.size
!= 32, _(BAD_FPU
));
15981 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
15982 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
15986 case 8: bcdebits
= 0x8; break;
15987 case 16: bcdebits
= 0x1; break;
15988 case 32: bcdebits
= 0x0; break;
15992 bcdebits
|= x
<< logsize
;
15994 inst
.instruction
= 0xe000b10;
15995 do_vfp_cond_or_thumb ();
15996 inst
.instruction
|= LOW4 (dn
) << 16;
15997 inst
.instruction
|= HI1 (dn
) << 7;
15998 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
15999 inst
.instruction
|= (bcdebits
& 3) << 5;
16000 inst
.instruction
|= (bcdebits
>> 2) << 21;
16004 case NS_DRR
: /* case 5 (fmdrr). */
16005 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16008 inst
.instruction
= 0xc400b10;
16009 do_vfp_cond_or_thumb ();
16010 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
16011 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
16012 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16013 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
16016 case NS_RS
: /* case 6. */
16019 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16020 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
16021 unsigned abcdebits
= 0;
16023 /* .<dt> is optional here, defaulting to .32. */
16024 if (inst
.vectype
.elems
== 0
16025 && inst
.operands
[0].vectype
.type
== NT_invtype
16026 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16028 inst
.vectype
.el
[0].type
= NT_untyped
;
16029 inst
.vectype
.el
[0].size
= 32;
16030 inst
.vectype
.elems
= 1;
16033 et
= neon_check_type (2, NS_NULL
,
16034 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
16035 logsize
= neon_logbits (et
.size
);
16037 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16039 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16040 && et
.size
!= 32, _(BAD_FPU
));
16041 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16042 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16046 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
16047 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
16048 case 32: abcdebits
= 0x00; break;
16052 abcdebits
|= x
<< logsize
;
16053 inst
.instruction
= 0xe100b10;
16054 do_vfp_cond_or_thumb ();
16055 inst
.instruction
|= LOW4 (dn
) << 16;
16056 inst
.instruction
|= HI1 (dn
) << 7;
16057 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16058 inst
.instruction
|= (abcdebits
& 3) << 5;
16059 inst
.instruction
|= (abcdebits
>> 2) << 21;
16063 case NS_RRD
: /* case 7 (fmrrd). */
16064 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16067 inst
.instruction
= 0xc500b10;
16068 do_vfp_cond_or_thumb ();
16069 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16070 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16071 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16072 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16075 case NS_FF
: /* case 8 (fcpys). */
16076 do_vfp_nsyn_opcode ("fcpys");
16079 case NS_FI
: /* case 10 (fconsts). */
16080 ldconst
= "fconsts";
16082 if (is_quarter_float (inst
.operands
[1].imm
))
16084 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
16085 do_vfp_nsyn_opcode (ldconst
);
16088 first_error (_("immediate out of range"));
16091 case NS_RF
: /* case 12 (fmrs). */
16092 do_vfp_nsyn_opcode ("fmrs");
16095 case NS_FR
: /* case 13 (fmsr). */
16096 do_vfp_nsyn_opcode ("fmsr");
16099 /* The encoders for the fmrrs and fmsrr instructions expect three operands
16100 (one of which is a list), but we have parsed four. Do some fiddling to
16101 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
16103 case NS_RRFF
: /* case 14 (fmrrs). */
16104 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
16105 _("VFP registers must be adjacent"));
16106 inst
.operands
[2].imm
= 2;
16107 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16108 do_vfp_nsyn_opcode ("fmrrs");
16111 case NS_FFRR
: /* case 15 (fmsrr). */
16112 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
16113 _("VFP registers must be adjacent"));
16114 inst
.operands
[1] = inst
.operands
[2];
16115 inst
.operands
[2] = inst
.operands
[3];
16116 inst
.operands
[0].imm
= 2;
16117 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16118 do_vfp_nsyn_opcode ("fmsrr");
16122 /* neon_select_shape has determined that the instruction
16123 shape is wrong and has already set the error message. */
16132 do_neon_rshift_round_imm (void)
16134 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16135 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
16136 int imm
= inst
.operands
[2].imm
;
16138 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16141 inst
.operands
[2].present
= 0;
16146 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16147 _("immediate out of range for shift"));
16148 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
16153 do_neon_movl (void)
16155 struct neon_type_el et
= neon_check_type (2, NS_QD
,
16156 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16157 unsigned sizebits
= et
.size
>> 3;
16158 inst
.instruction
|= sizebits
<< 19;
16159 neon_two_same (0, et
.type
== NT_unsigned
, -1);
16165 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16166 struct neon_type_el et
= neon_check_type (2, rs
,
16167 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16168 NEON_ENCODE (INTEGER
, inst
);
16169 neon_two_same (neon_quad (rs
), 1, et
.size
);
16173 do_neon_zip_uzp (void)
16175 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16176 struct neon_type_el et
= neon_check_type (2, rs
,
16177 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16178 if (rs
== NS_DD
&& et
.size
== 32)
16180 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
16181 inst
.instruction
= N_MNEM_vtrn
;
16185 neon_two_same (neon_quad (rs
), 1, et
.size
);
16189 do_neon_sat_abs_neg (void)
16191 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16192 struct neon_type_el et
= neon_check_type (2, rs
,
16193 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16194 neon_two_same (neon_quad (rs
), 1, et
.size
);
16198 do_neon_pair_long (void)
16200 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16201 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
16202 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16203 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
16204 neon_two_same (neon_quad (rs
), 1, et
.size
);
16208 do_neon_recip_est (void)
16210 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16211 struct neon_type_el et
= neon_check_type (2, rs
,
16212 N_EQK
| N_FLT
, N_F32
| N_U32
| N_KEY
);
16213 inst
.instruction
|= (et
.type
== NT_float
) << 8;
16214 neon_two_same (neon_quad (rs
), 1, et
.size
);
16220 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16221 struct neon_type_el et
= neon_check_type (2, rs
,
16222 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16223 neon_two_same (neon_quad (rs
), 1, et
.size
);
16229 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16230 struct neon_type_el et
= neon_check_type (2, rs
,
16231 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
16232 neon_two_same (neon_quad (rs
), 1, et
.size
);
16238 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16239 struct neon_type_el et
= neon_check_type (2, rs
,
16240 N_EQK
| N_INT
, N_8
| N_KEY
);
16241 neon_two_same (neon_quad (rs
), 1, et
.size
);
16247 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16248 neon_two_same (neon_quad (rs
), 1, -1);
16252 do_neon_tbl_tbx (void)
16254 unsigned listlenbits
;
16255 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
16257 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
16259 first_error (_("bad list length for table lookup"));
16263 listlenbits
= inst
.operands
[1].imm
- 1;
16264 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16265 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16266 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16267 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16268 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16269 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16270 inst
.instruction
|= listlenbits
<< 8;
16272 neon_dp_fixup (&inst
);
16276 do_neon_ldm_stm (void)
16278 /* P, U and L bits are part of bitmask. */
16279 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
16280 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
16282 if (inst
.operands
[1].issingle
)
16284 do_vfp_nsyn_ldm_stm (is_dbmode
);
16288 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
16289 _("writeback (!) must be used for VLDMDB and VSTMDB"));
16291 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
16292 _("register list must contain at least 1 and at most 16 "
16295 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
16296 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
16297 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16298 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
16300 inst
.instruction
|= offsetbits
;
16302 do_vfp_cond_or_thumb ();
16306 do_neon_ldr_str (void)
16308 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
16310 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16311 And is UNPREDICTABLE in thumb mode. */
16313 && inst
.operands
[1].reg
== REG_PC
16314 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
16317 inst
.error
= _("Use of PC here is UNPREDICTABLE");
16318 else if (warn_on_deprecated
)
16319 as_tsktsk (_("Use of PC here is deprecated"));
16322 if (inst
.operands
[0].issingle
)
16325 do_vfp_nsyn_opcode ("flds");
16327 do_vfp_nsyn_opcode ("fsts");
16332 do_vfp_nsyn_opcode ("fldd");
16334 do_vfp_nsyn_opcode ("fstd");
16338 /* "interleave" version also handles non-interleaving register VLD1/VST1
16342 do_neon_ld_st_interleave (void)
16344 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
16345 N_8
| N_16
| N_32
| N_64
);
16346 unsigned alignbits
= 0;
16348 /* The bits in this table go:
16349 0: register stride of one (0) or two (1)
16350 1,2: register list length, minus one (1, 2, 3, 4).
16351 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16352 We use -1 for invalid entries. */
16353 const int typetable
[] =
16355 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
16356 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
16357 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
16358 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
16362 if (et
.type
== NT_invtype
)
16365 if (inst
.operands
[1].immisalign
)
16366 switch (inst
.operands
[1].imm
>> 8)
16368 case 64: alignbits
= 1; break;
16370 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
16371 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16372 goto bad_alignment
;
16376 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16377 goto bad_alignment
;
16382 first_error (_("bad alignment"));
16386 inst
.instruction
|= alignbits
<< 4;
16387 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16389 /* Bits [4:6] of the immediate in a list specifier encode register stride
16390 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16391 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16392 up the right value for "type" in a table based on this value and the given
16393 list style, then stick it back. */
16394 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
16395 | (((inst
.instruction
>> 8) & 3) << 3);
16397 typebits
= typetable
[idx
];
16399 constraint (typebits
== -1, _("bad list type for instruction"));
16400 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
16401 _("bad element type for instruction"));
16403 inst
.instruction
&= ~0xf00;
16404 inst
.instruction
|= typebits
<< 8;
16407 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16408 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16409 otherwise. The variable arguments are a list of pairs of legal (size, align)
16410 values, terminated with -1. */
16413 neon_alignment_bit (int size
, int align
, int *do_align
, ...)
16416 int result
= FAIL
, thissize
, thisalign
;
16418 if (!inst
.operands
[1].immisalign
)
16424 va_start (ap
, do_align
);
16428 thissize
= va_arg (ap
, int);
16429 if (thissize
== -1)
16431 thisalign
= va_arg (ap
, int);
16433 if (size
== thissize
&& align
== thisalign
)
16436 while (result
!= SUCCESS
);
16440 if (result
== SUCCESS
)
16443 first_error (_("unsupported alignment for instruction"));
16449 do_neon_ld_st_lane (void)
16451 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16452 int align_good
, do_align
= 0;
16453 int logsize
= neon_logbits (et
.size
);
16454 int align
= inst
.operands
[1].imm
>> 8;
16455 int n
= (inst
.instruction
>> 8) & 3;
16456 int max_el
= 64 / et
.size
;
16458 if (et
.type
== NT_invtype
)
16461 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
16462 _("bad list length"));
16463 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
16464 _("scalar index out of range"));
16465 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
16467 _("stride of 2 unavailable when element size is 8"));
16471 case 0: /* VLD1 / VST1. */
16472 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 16, 16,
16474 if (align_good
== FAIL
)
16478 unsigned alignbits
= 0;
16481 case 16: alignbits
= 0x1; break;
16482 case 32: alignbits
= 0x3; break;
16485 inst
.instruction
|= alignbits
<< 4;
16489 case 1: /* VLD2 / VST2. */
16490 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 16, 16, 32,
16492 if (align_good
== FAIL
)
16495 inst
.instruction
|= 1 << 4;
16498 case 2: /* VLD3 / VST3. */
16499 constraint (inst
.operands
[1].immisalign
,
16500 _("can't use alignment with this instruction"));
16503 case 3: /* VLD4 / VST4. */
16504 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
16505 16, 64, 32, 64, 32, 128, -1);
16506 if (align_good
== FAIL
)
16510 unsigned alignbits
= 0;
16513 case 8: alignbits
= 0x1; break;
16514 case 16: alignbits
= 0x1; break;
16515 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
16518 inst
.instruction
|= alignbits
<< 4;
16525 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
16526 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16527 inst
.instruction
|= 1 << (4 + logsize
);
16529 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
16530 inst
.instruction
|= logsize
<< 10;
16533 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16536 do_neon_ld_dup (void)
16538 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16539 int align_good
, do_align
= 0;
16541 if (et
.type
== NT_invtype
)
16544 switch ((inst
.instruction
>> 8) & 3)
16546 case 0: /* VLD1. */
16547 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
16548 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16549 &do_align
, 16, 16, 32, 32, -1);
16550 if (align_good
== FAIL
)
16552 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
16555 case 2: inst
.instruction
|= 1 << 5; break;
16556 default: first_error (_("bad list length")); return;
16558 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16561 case 1: /* VLD2. */
16562 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16563 &do_align
, 8, 16, 16, 32, 32, 64, -1);
16564 if (align_good
== FAIL
)
16566 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
16567 _("bad list length"));
16568 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16569 inst
.instruction
|= 1 << 5;
16570 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16573 case 2: /* VLD3. */
16574 constraint (inst
.operands
[1].immisalign
,
16575 _("can't use alignment with this instruction"));
16576 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
16577 _("bad list length"));
16578 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16579 inst
.instruction
|= 1 << 5;
16580 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16583 case 3: /* VLD4. */
16585 int align
= inst
.operands
[1].imm
>> 8;
16586 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
16587 16, 64, 32, 64, 32, 128, -1);
16588 if (align_good
== FAIL
)
16590 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
16591 _("bad list length"));
16592 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16593 inst
.instruction
|= 1 << 5;
16594 if (et
.size
== 32 && align
== 128)
16595 inst
.instruction
|= 0x3 << 6;
16597 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16604 inst
.instruction
|= do_align
<< 4;
16607 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
16608 apart from bits [11:4]. */
16611 do_neon_ldx_stx (void)
16613 if (inst
.operands
[1].isreg
)
16614 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
16616 switch (NEON_LANE (inst
.operands
[0].imm
))
16618 case NEON_INTERLEAVE_LANES
:
16619 NEON_ENCODE (INTERLV
, inst
);
16620 do_neon_ld_st_interleave ();
16623 case NEON_ALL_LANES
:
16624 NEON_ENCODE (DUP
, inst
);
16625 if (inst
.instruction
== N_INV
)
16627 first_error ("only loads support such operands");
16634 NEON_ENCODE (LANE
, inst
);
16635 do_neon_ld_st_lane ();
16638 /* L bit comes from bit mask. */
16639 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16640 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16641 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16643 if (inst
.operands
[1].postind
)
16645 int postreg
= inst
.operands
[1].imm
& 0xf;
16646 constraint (!inst
.operands
[1].immisreg
,
16647 _("post-index must be a register"));
16648 constraint (postreg
== 0xd || postreg
== 0xf,
16649 _("bad register for post-index"));
16650 inst
.instruction
|= postreg
;
16654 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
16655 constraint (inst
.reloc
.exp
.X_op
!= O_constant
16656 || inst
.reloc
.exp
.X_add_number
!= 0,
16659 if (inst
.operands
[1].writeback
)
16661 inst
.instruction
|= 0xd;
16664 inst
.instruction
|= 0xf;
16668 inst
.instruction
|= 0xf9000000;
16670 inst
.instruction
|= 0xf4000000;
16675 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
16677 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16678 D register operands. */
16679 if (neon_shape_class
[rs
] == SC_DOUBLE
)
16680 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16683 NEON_ENCODE (FPV8
, inst
);
16686 do_vfp_sp_dyadic ();
16688 do_vfp_dp_rd_rn_rm ();
16691 inst
.instruction
|= 0x100;
16693 inst
.instruction
|= 0xf0000000;
16699 set_it_insn_type (OUTSIDE_IT_INSN
);
16701 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
16702 first_error (_("invalid instruction shape"));
16708 set_it_insn_type (OUTSIDE_IT_INSN
);
16710 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
16713 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16716 neon_dyadic_misc (NT_untyped
, N_F32
, 0);
16720 do_vrint_1 (enum neon_cvt_mode mode
)
16722 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
16723 struct neon_type_el et
;
16728 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16729 D register operands. */
16730 if (neon_shape_class
[rs
] == SC_DOUBLE
)
16731 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16734 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
16735 if (et
.type
!= NT_invtype
)
16737 /* VFP encodings. */
16738 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
16739 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
16740 set_it_insn_type (OUTSIDE_IT_INSN
);
16742 NEON_ENCODE (FPV8
, inst
);
16744 do_vfp_sp_monadic ();
16746 do_vfp_dp_rd_rm ();
16750 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
16751 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
16752 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
16753 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
16754 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
16755 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
16756 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
16760 inst
.instruction
|= (rs
== NS_DD
) << 8;
16761 do_vfp_cond_or_thumb ();
16765 /* Neon encodings (or something broken...). */
16767 et
= neon_check_type (2, rs
, N_EQK
, N_F32
| N_KEY
);
16769 if (et
.type
== NT_invtype
)
16772 set_it_insn_type (OUTSIDE_IT_INSN
);
16773 NEON_ENCODE (FLOAT
, inst
);
16775 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16778 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16779 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16780 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16781 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16782 inst
.instruction
|= neon_quad (rs
) << 6;
16785 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
16786 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
16787 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
16788 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
16789 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
16790 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
16791 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
16796 inst
.instruction
|= 0xfc000000;
16798 inst
.instruction
|= 0xf0000000;
16805 do_vrint_1 (neon_cvt_mode_x
);
16811 do_vrint_1 (neon_cvt_mode_z
);
16817 do_vrint_1 (neon_cvt_mode_r
);
16823 do_vrint_1 (neon_cvt_mode_a
);
16829 do_vrint_1 (neon_cvt_mode_n
);
16835 do_vrint_1 (neon_cvt_mode_p
);
16841 do_vrint_1 (neon_cvt_mode_m
);
16844 /* Crypto v1 instructions. */
16846 do_crypto_2op_1 (unsigned elttype
, int op
)
16848 set_it_insn_type (OUTSIDE_IT_INSN
);
16850 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
16856 NEON_ENCODE (INTEGER
, inst
);
16857 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16858 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16859 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16860 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16862 inst
.instruction
|= op
<< 6;
16865 inst
.instruction
|= 0xfc000000;
16867 inst
.instruction
|= 0xf0000000;
16871 do_crypto_3op_1 (int u
, int op
)
16873 set_it_insn_type (OUTSIDE_IT_INSN
);
16875 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
16876 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
16881 NEON_ENCODE (INTEGER
, inst
);
16882 neon_three_same (1, u
, 8 << op
);
16888 do_crypto_2op_1 (N_8
, 0);
16894 do_crypto_2op_1 (N_8
, 1);
16900 do_crypto_2op_1 (N_8
, 2);
16906 do_crypto_2op_1 (N_8
, 3);
16912 do_crypto_3op_1 (0, 0);
16918 do_crypto_3op_1 (0, 1);
16924 do_crypto_3op_1 (0, 2);
16930 do_crypto_3op_1 (0, 3);
16936 do_crypto_3op_1 (1, 0);
16942 do_crypto_3op_1 (1, 1);
16946 do_sha256su1 (void)
16948 do_crypto_3op_1 (1, 2);
16954 do_crypto_2op_1 (N_32
, -1);
16960 do_crypto_2op_1 (N_32
, 0);
16964 do_sha256su0 (void)
16966 do_crypto_2op_1 (N_32
, 1);
16970 do_crc32_1 (unsigned int poly
, unsigned int sz
)
16972 unsigned int Rd
= inst
.operands
[0].reg
;
16973 unsigned int Rn
= inst
.operands
[1].reg
;
16974 unsigned int Rm
= inst
.operands
[2].reg
;
16976 set_it_insn_type (OUTSIDE_IT_INSN
);
16977 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
16978 inst
.instruction
|= LOW4 (Rn
) << 16;
16979 inst
.instruction
|= LOW4 (Rm
);
16980 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
16981 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
16983 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
16984 as_warn (UNPRED_REG ("r15"));
16985 if (thumb_mode
&& (Rd
== REG_SP
|| Rn
== REG_SP
|| Rm
== REG_SP
))
16986 as_warn (UNPRED_REG ("r13"));
17026 /* Overall per-instruction processing. */
17028 /* We need to be able to fix up arbitrary expressions in some statements.
17029 This is so that we can handle symbols that are an arbitrary distance from
17030 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17031 which returns part of an address in a form which will be valid for
17032 a data instruction. We do this by pushing the expression into a symbol
17033 in the expr_section, and creating a fix for that. */
17036 fix_new_arm (fragS
* frag
,
17050 /* Create an absolute valued symbol, so we have something to
17051 refer to in the object file. Unfortunately for us, gas's
17052 generic expression parsing will already have folded out
17053 any use of .set foo/.type foo %function that may have
17054 been used to set type information of the target location,
17055 that's being specified symbolically. We have to presume
17056 the user knows what they are doing. */
17060 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
17062 symbol
= symbol_find_or_make (name
);
17063 S_SET_SEGMENT (symbol
, absolute_section
);
17064 symbol_set_frag (symbol
, &zero_address_frag
);
17065 S_SET_VALUE (symbol
, exp
->X_add_number
);
17066 exp
->X_op
= O_symbol
;
17067 exp
->X_add_symbol
= symbol
;
17068 exp
->X_add_number
= 0;
17074 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
17075 (enum bfd_reloc_code_real
) reloc
);
17079 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
17080 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
17084 /* Mark whether the fix is to a THUMB instruction, or an ARM
17086 new_fix
->tc_fix_data
= thumb_mode
;
17089 /* Create a frg for an instruction requiring relaxation. */
17091 output_relax_insn (void)
17097 /* The size of the instruction is unknown, so tie the debug info to the
17098 start of the instruction. */
17099 dwarf2_emit_insn (0);
17101 switch (inst
.reloc
.exp
.X_op
)
17104 sym
= inst
.reloc
.exp
.X_add_symbol
;
17105 offset
= inst
.reloc
.exp
.X_add_number
;
17109 offset
= inst
.reloc
.exp
.X_add_number
;
17112 sym
= make_expr_symbol (&inst
.reloc
.exp
);
17116 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
17117 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
17118 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
17121 /* Write a 32-bit thumb instruction to buf. */
17123 put_thumb32_insn (char * buf
, unsigned long insn
)
17125 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
17126 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
17130 output_inst (const char * str
)
17136 as_bad ("%s -- `%s'", inst
.error
, str
);
17141 output_relax_insn ();
17144 if (inst
.size
== 0)
17147 to
= frag_more (inst
.size
);
17148 /* PR 9814: Record the thumb mode into the current frag so that we know
17149 what type of NOP padding to use, if necessary. We override any previous
17150 setting so that if the mode has changed then the NOPS that we use will
17151 match the encoding of the last instruction in the frag. */
17152 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
17154 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
17156 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
17157 put_thumb32_insn (to
, inst
.instruction
);
17159 else if (inst
.size
> INSN_SIZE
)
17161 gas_assert (inst
.size
== (2 * INSN_SIZE
));
17162 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
17163 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
17166 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
17168 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
17169 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
17170 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
17173 dwarf2_emit_insn (inst
.size
);
17177 output_it_inst (int cond
, int mask
, char * to
)
17179 unsigned long instruction
= 0xbf00;
17182 instruction
|= mask
;
17183 instruction
|= cond
<< 4;
17187 to
= frag_more (2);
17189 dwarf2_emit_insn (2);
17193 md_number_to_chars (to
, instruction
, 2);
17198 /* Tag values used in struct asm_opcode's tag field. */
17201 OT_unconditional
, /* Instruction cannot be conditionalized.
17202 The ARM condition field is still 0xE. */
17203 OT_unconditionalF
, /* Instruction cannot be conditionalized
17204 and carries 0xF in its ARM condition field. */
17205 OT_csuffix
, /* Instruction takes a conditional suffix. */
17206 OT_csuffixF
, /* Some forms of the instruction take a conditional
17207 suffix, others place 0xF where the condition field
17209 OT_cinfix3
, /* Instruction takes a conditional infix,
17210 beginning at character index 3. (In
17211 unified mode, it becomes a suffix.) */
17212 OT_cinfix3_deprecated
, /* The same as OT_cinfix3. This is used for
17213 tsts, cmps, cmns, and teqs. */
17214 OT_cinfix3_legacy
, /* Legacy instruction takes a conditional infix at
17215 character index 3, even in unified mode. Used for
17216 legacy instructions where suffix and infix forms
17217 may be ambiguous. */
17218 OT_csuf_or_in3
, /* Instruction takes either a conditional
17219 suffix or an infix at character index 3. */
17220 OT_odd_infix_unc
, /* This is the unconditional variant of an
17221 instruction that takes a conditional infix
17222 at an unusual position. In unified mode,
17223 this variant will accept a suffix. */
17224 OT_odd_infix_0
/* Values greater than or equal to OT_odd_infix_0
17225 are the conditional variants of instructions that
17226 take conditional infixes in unusual positions.
17227 The infix appears at character index
17228 (tag - OT_odd_infix_0). These are not accepted
17229 in unified mode. */
17232 /* Subroutine of md_assemble, responsible for looking up the primary
17233 opcode from the mnemonic the user wrote. STR points to the
17234 beginning of the mnemonic.
17236 This is not simply a hash table lookup, because of conditional
17237 variants. Most instructions have conditional variants, which are
17238 expressed with a _conditional affix_ to the mnemonic. If we were
17239 to encode each conditional variant as a literal string in the opcode
17240 table, it would have approximately 20,000 entries.
17242 Most mnemonics take this affix as a suffix, and in unified syntax,
17243 'most' is upgraded to 'all'. However, in the divided syntax, some
17244 instructions take the affix as an infix, notably the s-variants of
17245 the arithmetic instructions. Of those instructions, all but six
17246 have the infix appear after the third character of the mnemonic.
17248 Accordingly, the algorithm for looking up primary opcodes given
17251 1. Look up the identifier in the opcode table.
17252 If we find a match, go to step U.
17254 2. Look up the last two characters of the identifier in the
17255 conditions table. If we find a match, look up the first N-2
17256 characters of the identifier in the opcode table. If we
17257 find a match, go to step CE.
17259 3. Look up the fourth and fifth characters of the identifier in
17260 the conditions table. If we find a match, extract those
17261 characters from the identifier, and look up the remaining
17262 characters in the opcode table. If we find a match, go
17267 U. Examine the tag field of the opcode structure, in case this is
17268 one of the six instructions with its conditional infix in an
17269 unusual place. If it is, the tag tells us where to find the
17270 infix; look it up in the conditions table and set inst.cond
17271 accordingly. Otherwise, this is an unconditional instruction.
17272 Again set inst.cond accordingly. Return the opcode structure.
17274 CE. Examine the tag field to make sure this is an instruction that
17275 should receive a conditional suffix. If it is not, fail.
17276 Otherwise, set inst.cond from the suffix we already looked up,
17277 and return the opcode structure.
17279 CM. Examine the tag field to make sure this is an instruction that
17280 should receive a conditional infix after the third character.
17281 If it is not, fail. Otherwise, undo the edits to the current
17282 line of input and proceed as for case CE. */
17284 static const struct asm_opcode
*
17285 opcode_lookup (char **str
)
17289 const struct asm_opcode
*opcode
;
17290 const struct asm_cond
*cond
;
17293 /* Scan up to the end of the mnemonic, which must end in white space,
17294 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17295 for (base
= end
= *str
; *end
!= '\0'; end
++)
17296 if (*end
== ' ' || *end
== '.')
17302 /* Handle a possible width suffix and/or Neon type suffix. */
17307 /* The .w and .n suffixes are only valid if the unified syntax is in
17309 if (unified_syntax
&& end
[1] == 'w')
17311 else if (unified_syntax
&& end
[1] == 'n')
17316 inst
.vectype
.elems
= 0;
17318 *str
= end
+ offset
;
17320 if (end
[offset
] == '.')
17322 /* See if we have a Neon type suffix (possible in either unified or
17323 non-unified ARM syntax mode). */
17324 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
17327 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
17333 /* Look for unaffixed or special-case affixed mnemonic. */
17334 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17339 if (opcode
->tag
< OT_odd_infix_0
)
17341 inst
.cond
= COND_ALWAYS
;
17345 if (warn_on_deprecated
&& unified_syntax
)
17346 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17347 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
17348 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17351 inst
.cond
= cond
->value
;
17355 /* Cannot have a conditional suffix on a mnemonic of less than two
17357 if (end
- base
< 3)
17360 /* Look for suffixed mnemonic. */
17362 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17363 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17365 if (opcode
&& cond
)
17368 switch (opcode
->tag
)
17370 case OT_cinfix3_legacy
:
17371 /* Ignore conditional suffixes matched on infix only mnemonics. */
17375 case OT_cinfix3_deprecated
:
17376 case OT_odd_infix_unc
:
17377 if (!unified_syntax
)
17379 /* else fall through */
17383 case OT_csuf_or_in3
:
17384 inst
.cond
= cond
->value
;
17387 case OT_unconditional
:
17388 case OT_unconditionalF
:
17390 inst
.cond
= cond
->value
;
17393 /* Delayed diagnostic. */
17394 inst
.error
= BAD_COND
;
17395 inst
.cond
= COND_ALWAYS
;
17404 /* Cannot have a usual-position infix on a mnemonic of less than
17405 six characters (five would be a suffix). */
17406 if (end
- base
< 6)
17409 /* Look for infixed mnemonic in the usual position. */
17411 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17415 memcpy (save
, affix
, 2);
17416 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
17417 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17419 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
17420 memcpy (affix
, save
, 2);
17423 && (opcode
->tag
== OT_cinfix3
17424 || opcode
->tag
== OT_cinfix3_deprecated
17425 || opcode
->tag
== OT_csuf_or_in3
17426 || opcode
->tag
== OT_cinfix3_legacy
))
17429 if (warn_on_deprecated
&& unified_syntax
17430 && (opcode
->tag
== OT_cinfix3
17431 || opcode
->tag
== OT_cinfix3_deprecated
))
17432 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17434 inst
.cond
= cond
->value
;
17441 /* This function generates an initial IT instruction, leaving its block
17442 virtually open for the new instructions. Eventually,
17443 the mask will be updated by now_it_add_mask () each time
17444 a new instruction needs to be included in the IT block.
17445 Finally, the block is closed with close_automatic_it_block ().
17446 The block closure can be requested either from md_assemble (),
17447 a tencode (), or due to a label hook. */
17450 new_automatic_it_block (int cond
)
17452 now_it
.state
= AUTOMATIC_IT_BLOCK
;
17453 now_it
.mask
= 0x18;
17455 now_it
.block_length
= 1;
17456 mapping_state (MAP_THUMB
);
17457 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
17458 now_it
.warn_deprecated
= FALSE
;
17459 now_it
.insn_cond
= TRUE
;
17462 /* Close an automatic IT block.
17463 See comments in new_automatic_it_block (). */
17466 close_automatic_it_block (void)
17468 now_it
.mask
= 0x10;
17469 now_it
.block_length
= 0;
17472 /* Update the mask of the current automatically-generated IT
17473 instruction. See comments in new_automatic_it_block (). */
17476 now_it_add_mask (int cond
)
17478 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
17479 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
17480 | ((bitvalue) << (nbit)))
17481 const int resulting_bit
= (cond
& 1);
17483 now_it
.mask
&= 0xf;
17484 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17486 (5 - now_it
.block_length
));
17487 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17489 ((5 - now_it
.block_length
) - 1) );
17490 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
17493 #undef SET_BIT_VALUE
17496 /* The IT blocks handling machinery is accessed through the these functions:
17497 it_fsm_pre_encode () from md_assemble ()
17498 set_it_insn_type () optional, from the tencode functions
17499 set_it_insn_type_last () ditto
17500 in_it_block () ditto
17501 it_fsm_post_encode () from md_assemble ()
17502 force_automatic_it_block_close () from label handling functions
17505 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17506 initializing the IT insn type with a generic initial value depending
17507 on the inst.condition.
17508 2) During the tencode function, two things may happen:
17509 a) The tencode function overrides the IT insn type by
17510 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17511 b) The tencode function queries the IT block state by
17512 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17514 Both set_it_insn_type and in_it_block run the internal FSM state
17515 handling function (handle_it_state), because: a) setting the IT insn
17516 type may incur in an invalid state (exiting the function),
17517 and b) querying the state requires the FSM to be updated.
17518 Specifically we want to avoid creating an IT block for conditional
17519 branches, so it_fsm_pre_encode is actually a guess and we can't
17520 determine whether an IT block is required until the tencode () routine
17521 has decided what type of instruction this actually is.
17522 Because of this, if set_it_insn_type and in_it_block have to be used,
17523 set_it_insn_type has to be called first.
17525 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17526 determines the insn IT type depending on the inst.cond code.
17527 When a tencode () routine encodes an instruction that can be
17528 either outside an IT block, or, in the case of being inside, has to be
17529 the last one, set_it_insn_type_last () will determine the proper
17530 IT instruction type based on the inst.cond code. Otherwise,
17531 set_it_insn_type can be called for overriding that logic or
17532 for covering other cases.
17534 Calling handle_it_state () may not transition the IT block state to
17535 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17536 still queried. Instead, if the FSM determines that the state should
17537 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17538 after the tencode () function: that's what it_fsm_post_encode () does.
17540 Since in_it_block () calls the state handling function to get an
17541 updated state, an error may occur (due to invalid insns combination).
17542 In that case, inst.error is set.
17543 Therefore, inst.error has to be checked after the execution of
17544 the tencode () routine.
17546 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17547 any pending state change (if any) that didn't take place in
17548 handle_it_state () as explained above. */
17551 it_fsm_pre_encode (void)
17553 if (inst
.cond
!= COND_ALWAYS
)
17554 inst
.it_insn_type
= INSIDE_IT_INSN
;
17556 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
17558 now_it
.state_handled
= 0;
17561 /* IT state FSM handling function. */
17564 handle_it_state (void)
17566 now_it
.state_handled
= 1;
17567 now_it
.insn_cond
= FALSE
;
17569 switch (now_it
.state
)
17571 case OUTSIDE_IT_BLOCK
:
17572 switch (inst
.it_insn_type
)
17574 case OUTSIDE_IT_INSN
:
17577 case INSIDE_IT_INSN
:
17578 case INSIDE_IT_LAST_INSN
:
17579 if (thumb_mode
== 0)
17582 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
17583 as_tsktsk (_("Warning: conditional outside an IT block"\
17588 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
17589 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
17591 /* Automatically generate the IT instruction. */
17592 new_automatic_it_block (inst
.cond
);
17593 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
17594 close_automatic_it_block ();
17598 inst
.error
= BAD_OUT_IT
;
17604 case IF_INSIDE_IT_LAST_INSN
:
17605 case NEUTRAL_IT_INSN
:
17609 now_it
.state
= MANUAL_IT_BLOCK
;
17610 now_it
.block_length
= 0;
17615 case AUTOMATIC_IT_BLOCK
:
17616 /* Three things may happen now:
17617 a) We should increment current it block size;
17618 b) We should close current it block (closing insn or 4 insns);
17619 c) We should close current it block and start a new one (due
17620 to incompatible conditions or
17621 4 insns-length block reached). */
17623 switch (inst
.it_insn_type
)
17625 case OUTSIDE_IT_INSN
:
17626 /* The closure of the block shall happen immediatelly,
17627 so any in_it_block () call reports the block as closed. */
17628 force_automatic_it_block_close ();
17631 case INSIDE_IT_INSN
:
17632 case INSIDE_IT_LAST_INSN
:
17633 case IF_INSIDE_IT_LAST_INSN
:
17634 now_it
.block_length
++;
17636 if (now_it
.block_length
> 4
17637 || !now_it_compatible (inst
.cond
))
17639 force_automatic_it_block_close ();
17640 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
17641 new_automatic_it_block (inst
.cond
);
17645 now_it
.insn_cond
= TRUE
;
17646 now_it_add_mask (inst
.cond
);
17649 if (now_it
.state
== AUTOMATIC_IT_BLOCK
17650 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
17651 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
17652 close_automatic_it_block ();
17655 case NEUTRAL_IT_INSN
:
17656 now_it
.block_length
++;
17657 now_it
.insn_cond
= TRUE
;
17659 if (now_it
.block_length
> 4)
17660 force_automatic_it_block_close ();
17662 now_it_add_mask (now_it
.cc
& 1);
17666 close_automatic_it_block ();
17667 now_it
.state
= MANUAL_IT_BLOCK
;
17672 case MANUAL_IT_BLOCK
:
17674 /* Check conditional suffixes. */
17675 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
17678 now_it
.mask
&= 0x1f;
17679 is_last
= (now_it
.mask
== 0x10);
17680 now_it
.insn_cond
= TRUE
;
17682 switch (inst
.it_insn_type
)
17684 case OUTSIDE_IT_INSN
:
17685 inst
.error
= BAD_NOT_IT
;
17688 case INSIDE_IT_INSN
:
17689 if (cond
!= inst
.cond
)
17691 inst
.error
= BAD_IT_COND
;
17696 case INSIDE_IT_LAST_INSN
:
17697 case IF_INSIDE_IT_LAST_INSN
:
17698 if (cond
!= inst
.cond
)
17700 inst
.error
= BAD_IT_COND
;
17705 inst
.error
= BAD_BRANCH
;
17710 case NEUTRAL_IT_INSN
:
17711 /* The BKPT instruction is unconditional even in an IT block. */
17715 inst
.error
= BAD_IT_IT
;
17725 struct depr_insn_mask
17727 unsigned long pattern
;
17728 unsigned long mask
;
17729 const char* description
;
17732 /* List of 16-bit instruction patterns deprecated in an IT block in
17734 static const struct depr_insn_mask depr_it_insns
[] = {
17735 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
17736 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
17737 { 0xa000, 0xb800, N_("ADR") },
17738 { 0x4800, 0xf800, N_("Literal loads") },
17739 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
17740 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
17741 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
17742 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
17743 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
17748 it_fsm_post_encode (void)
17752 if (!now_it
.state_handled
)
17753 handle_it_state ();
17755 if (now_it
.insn_cond
17756 && !now_it
.warn_deprecated
17757 && warn_on_deprecated
17758 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
17760 if (inst
.instruction
>= 0x10000)
17762 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
17763 "deprecated in ARMv8"));
17764 now_it
.warn_deprecated
= TRUE
;
17768 const struct depr_insn_mask
*p
= depr_it_insns
;
17770 while (p
->mask
!= 0)
17772 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
17774 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
17775 "of the following class are deprecated in ARMv8: "
17776 "%s"), p
->description
);
17777 now_it
.warn_deprecated
= TRUE
;
17785 if (now_it
.block_length
> 1)
17787 as_tsktsk (_("IT blocks containing more than one conditional "
17788 "instruction are deprecated in ARMv8"));
17789 now_it
.warn_deprecated
= TRUE
;
17793 is_last
= (now_it
.mask
== 0x10);
17796 now_it
.state
= OUTSIDE_IT_BLOCK
;
17802 force_automatic_it_block_close (void)
17804 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
17806 close_automatic_it_block ();
17807 now_it
.state
= OUTSIDE_IT_BLOCK
;
17815 if (!now_it
.state_handled
)
17816 handle_it_state ();
17818 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
17821 /* Whether OPCODE only has T32 encoding. Since this function is only used by
17822 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
17823 here, hence the "known" in the function name. */
17826 known_t32_only_insn (const struct asm_opcode
*opcode
)
17828 /* Original Thumb-1 wide instruction. */
17829 if (opcode
->tencode
== do_t_blx
17830 || opcode
->tencode
== do_t_branch23
17831 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
17832 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
17835 /* Wide-only instruction added to ARMv8-M. */
17836 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m
)
17837 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
17838 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
17839 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
17845 /* Whether wide instruction variant can be used if available for a valid OPCODE
17849 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
17851 if (known_t32_only_insn (opcode
))
17854 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
17855 of variant T3 of B.W is checked in do_t_branch. */
17856 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
17857 && opcode
->tencode
== do_t_branch
)
17860 /* Wide instruction variants of all instructions with narrow *and* wide
17861 variants become available with ARMv6t2. Other opcodes are either
17862 narrow-only or wide-only and are thus available if OPCODE is valid. */
17863 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
))
17866 /* OPCODE with narrow only instruction variant or wide variant not
17872 md_assemble (char *str
)
17875 const struct asm_opcode
* opcode
;
17877 /* Align the previous label if needed. */
17878 if (last_label_seen
!= NULL
)
17880 symbol_set_frag (last_label_seen
, frag_now
);
17881 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
17882 S_SET_SEGMENT (last_label_seen
, now_seg
);
17885 memset (&inst
, '\0', sizeof (inst
));
17886 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
17888 opcode
= opcode_lookup (&p
);
17891 /* It wasn't an instruction, but it might be a register alias of
17892 the form alias .req reg, or a Neon .dn/.qn directive. */
17893 if (! create_register_alias (str
, p
)
17894 && ! create_neon_reg_alias (str
, p
))
17895 as_bad (_("bad instruction `%s'"), str
);
17900 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
17901 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
17903 /* The value which unconditional instructions should have in place of the
17904 condition field. */
17905 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
17909 arm_feature_set variant
;
17911 variant
= cpu_variant
;
17912 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
17913 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
17914 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
17915 /* Check that this instruction is supported for this CPU. */
17916 if (!opcode
->tvariant
17917 || (thumb_mode
== 1
17918 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
17920 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
17923 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
17924 && opcode
->tencode
!= do_t_branch
)
17926 as_bad (_("Thumb does not support conditional execution"));
17930 /* Two things are addressed here:
17931 1) Implicit require narrow instructions on Thumb-1.
17932 This avoids relaxation accidentally introducing Thumb-2
17934 2) Reject wide instructions in non Thumb-2 cores.
17936 Only instructions with narrow and wide variants need to be handled
17937 but selecting all non wide-only instructions is easier. */
17938 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
17939 && !t32_insn_ok (variant
, opcode
))
17941 if (inst
.size_req
== 0)
17943 else if (inst
.size_req
== 4)
17945 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
17946 as_bad (_("selected processor does not support 32bit wide "
17947 "variant of instruction `%s'"), str
);
17949 as_bad (_("selected processor does not support `%s' in "
17950 "Thumb-2 mode"), str
);
17955 inst
.instruction
= opcode
->tvalue
;
17957 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
17959 /* Prepare the it_insn_type for those encodings that don't set
17961 it_fsm_pre_encode ();
17963 opcode
->tencode ();
17965 it_fsm_post_encode ();
17968 if (!(inst
.error
|| inst
.relax
))
17970 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
17971 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
17972 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
17974 as_bad (_("cannot honor width suffix -- `%s'"), str
);
17979 /* Something has gone badly wrong if we try to relax a fixed size
17981 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
17983 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
17984 *opcode
->tvariant
);
17985 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
17986 set those bits when Thumb-2 32-bit instructions are seen. The impact
17987 of relaxable instructions will be considered later after we finish all
17989 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
17990 variant
= arm_arch_none
;
17992 variant
= cpu_variant
;
17993 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
17994 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
17997 check_neon_suffixes
;
18001 mapping_state (MAP_THUMB
);
18004 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
18008 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
18009 is_bx
= (opcode
->aencode
== do_bx
);
18011 /* Check that this instruction is supported for this CPU. */
18012 if (!(is_bx
&& fix_v4bx
)
18013 && !(opcode
->avariant
&&
18014 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
18016 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
18021 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
18025 inst
.instruction
= opcode
->avalue
;
18026 if (opcode
->tag
== OT_unconditionalF
)
18027 inst
.instruction
|= 0xFU
<< 28;
18029 inst
.instruction
|= inst
.cond
<< 28;
18030 inst
.size
= INSN_SIZE
;
18031 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
18033 it_fsm_pre_encode ();
18034 opcode
->aencode ();
18035 it_fsm_post_encode ();
18037 /* Arm mode bx is marked as both v4T and v5 because it's still required
18038 on a hypothetical non-thumb v5 core. */
18040 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
18042 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
18043 *opcode
->avariant
);
18045 check_neon_suffixes
;
18049 mapping_state (MAP_ARM
);
18054 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
18062 check_it_blocks_finished (void)
18067 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
18068 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
18069 == MANUAL_IT_BLOCK
)
18071 as_warn (_("section '%s' finished with an open IT block."),
18075 if (now_it
.state
== MANUAL_IT_BLOCK
)
18076 as_warn (_("file finished with an open IT block."));
18080 /* Various frobbings of labels and their addresses. */
18083 arm_start_line_hook (void)
18085 last_label_seen
= NULL
;
18089 arm_frob_label (symbolS
* sym
)
18091 last_label_seen
= sym
;
18093 ARM_SET_THUMB (sym
, thumb_mode
);
18095 #if defined OBJ_COFF || defined OBJ_ELF
18096 ARM_SET_INTERWORK (sym
, support_interwork
);
18099 force_automatic_it_block_close ();
18101 /* Note - do not allow local symbols (.Lxxx) to be labelled
18102 as Thumb functions. This is because these labels, whilst
18103 they exist inside Thumb code, are not the entry points for
18104 possible ARM->Thumb calls. Also, these labels can be used
18105 as part of a computed goto or switch statement. eg gcc
18106 can generate code that looks like this:
18108 ldr r2, [pc, .Laaa]
18118 The first instruction loads the address of the jump table.
18119 The second instruction converts a table index into a byte offset.
18120 The third instruction gets the jump address out of the table.
18121 The fourth instruction performs the jump.
18123 If the address stored at .Laaa is that of a symbol which has the
18124 Thumb_Func bit set, then the linker will arrange for this address
18125 to have the bottom bit set, which in turn would mean that the
18126 address computation performed by the third instruction would end
18127 up with the bottom bit set. Since the ARM is capable of unaligned
18128 word loads, the instruction would then load the incorrect address
18129 out of the jump table, and chaos would ensue. */
18130 if (label_is_thumb_function_name
18131 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
18132 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
18134 /* When the address of a Thumb function is taken the bottom
18135 bit of that address should be set. This will allow
18136 interworking between Arm and Thumb functions to work
18139 THUMB_SET_FUNC (sym
, 1);
18141 label_is_thumb_function_name
= FALSE
;
18144 dwarf2_emit_label (sym
);
18148 arm_data_in_code (void)
18150 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
18152 *input_line_pointer
= '/';
18153 input_line_pointer
+= 5;
18154 *input_line_pointer
= 0;
18162 arm_canonicalize_symbol_name (char * name
)
18166 if (thumb_mode
&& (len
= strlen (name
)) > 5
18167 && streq (name
+ len
- 5, "/data"))
18168 *(name
+ len
- 5) = 0;
18173 /* Table of all register names defined by default. The user can
18174 define additional names with .req. Note that all register names
18175 should appear in both upper and lowercase variants. Some registers
18176 also have mixed-case names. */
18178 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
18179 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
18180 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
18181 #define REGSET(p,t) \
18182 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
18183 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
18184 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
18185 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
18186 #define REGSETH(p,t) \
18187 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
18188 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
18189 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
18190 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
18191 #define REGSET2(p,t) \
18192 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
18193 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
18194 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
18195 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
18196 #define SPLRBANK(base,bank,t) \
18197 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
18198 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
18199 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
18200 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
18201 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
18202 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18204 static const struct reg_entry reg_names
[] =
18206 /* ARM integer registers. */
18207 REGSET(r
, RN
), REGSET(R
, RN
),
18209 /* ATPCS synonyms. */
18210 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
18211 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
18212 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
18214 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
18215 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
18216 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
18218 /* Well-known aliases. */
18219 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
18220 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
18222 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
18223 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
18225 /* Coprocessor numbers. */
18226 REGSET(p
, CP
), REGSET(P
, CP
),
18228 /* Coprocessor register numbers. The "cr" variants are for backward
18230 REGSET(c
, CN
), REGSET(C
, CN
),
18231 REGSET(cr
, CN
), REGSET(CR
, CN
),
18233 /* ARM banked registers. */
18234 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
18235 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
18236 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
18237 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
18238 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
18239 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
18240 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
18242 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
18243 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
18244 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
18245 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
18246 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
18247 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
18248 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
18249 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
18251 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
18252 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
18253 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
18254 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
18255 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
18256 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
18257 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
18258 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18259 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18261 /* FPA registers. */
18262 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
18263 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
18265 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
18266 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
18268 /* VFP SP registers. */
18269 REGSET(s
,VFS
), REGSET(S
,VFS
),
18270 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
18272 /* VFP DP Registers. */
18273 REGSET(d
,VFD
), REGSET(D
,VFD
),
18274 /* Extra Neon DP registers. */
18275 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
18277 /* Neon QP registers. */
18278 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
18280 /* VFP control registers. */
18281 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
18282 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
18283 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
18284 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
18285 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
18286 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
18288 /* Maverick DSP coprocessor registers. */
18289 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
18290 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
18292 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
18293 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
18294 REGDEF(dspsc
,0,DSPSC
),
18296 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
18297 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
18298 REGDEF(DSPSC
,0,DSPSC
),
18300 /* iWMMXt data registers - p0, c0-15. */
18301 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
18303 /* iWMMXt control registers - p1, c0-3. */
18304 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
18305 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
18306 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
18307 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
18309 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
18310 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
18311 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
18312 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
18313 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
18315 /* XScale accumulator registers. */
18316 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
18322 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18323 within psr_required_here. */
18324 static const struct asm_psr psrs
[] =
18326 /* Backward compatibility notation. Note that "all" is no longer
18327 truly all possible PSR bits. */
18328 {"all", PSR_c
| PSR_f
},
18332 /* Individual flags. */
18338 /* Combinations of flags. */
18339 {"fs", PSR_f
| PSR_s
},
18340 {"fx", PSR_f
| PSR_x
},
18341 {"fc", PSR_f
| PSR_c
},
18342 {"sf", PSR_s
| PSR_f
},
18343 {"sx", PSR_s
| PSR_x
},
18344 {"sc", PSR_s
| PSR_c
},
18345 {"xf", PSR_x
| PSR_f
},
18346 {"xs", PSR_x
| PSR_s
},
18347 {"xc", PSR_x
| PSR_c
},
18348 {"cf", PSR_c
| PSR_f
},
18349 {"cs", PSR_c
| PSR_s
},
18350 {"cx", PSR_c
| PSR_x
},
18351 {"fsx", PSR_f
| PSR_s
| PSR_x
},
18352 {"fsc", PSR_f
| PSR_s
| PSR_c
},
18353 {"fxs", PSR_f
| PSR_x
| PSR_s
},
18354 {"fxc", PSR_f
| PSR_x
| PSR_c
},
18355 {"fcs", PSR_f
| PSR_c
| PSR_s
},
18356 {"fcx", PSR_f
| PSR_c
| PSR_x
},
18357 {"sfx", PSR_s
| PSR_f
| PSR_x
},
18358 {"sfc", PSR_s
| PSR_f
| PSR_c
},
18359 {"sxf", PSR_s
| PSR_x
| PSR_f
},
18360 {"sxc", PSR_s
| PSR_x
| PSR_c
},
18361 {"scf", PSR_s
| PSR_c
| PSR_f
},
18362 {"scx", PSR_s
| PSR_c
| PSR_x
},
18363 {"xfs", PSR_x
| PSR_f
| PSR_s
},
18364 {"xfc", PSR_x
| PSR_f
| PSR_c
},
18365 {"xsf", PSR_x
| PSR_s
| PSR_f
},
18366 {"xsc", PSR_x
| PSR_s
| PSR_c
},
18367 {"xcf", PSR_x
| PSR_c
| PSR_f
},
18368 {"xcs", PSR_x
| PSR_c
| PSR_s
},
18369 {"cfs", PSR_c
| PSR_f
| PSR_s
},
18370 {"cfx", PSR_c
| PSR_f
| PSR_x
},
18371 {"csf", PSR_c
| PSR_s
| PSR_f
},
18372 {"csx", PSR_c
| PSR_s
| PSR_x
},
18373 {"cxf", PSR_c
| PSR_x
| PSR_f
},
18374 {"cxs", PSR_c
| PSR_x
| PSR_s
},
18375 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
18376 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
18377 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
18378 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
18379 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
18380 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
18381 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
18382 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
18383 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
18384 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
18385 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
18386 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
18387 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
18388 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
18389 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
18390 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
18391 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
18392 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
18393 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
18394 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
18395 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
18396 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
18397 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
18398 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
18401 /* Table of V7M psr names. */
18402 static const struct asm_psr v7m_psrs
[] =
18404 {"apsr", 0 }, {"APSR", 0 },
18405 {"iapsr", 1 }, {"IAPSR", 1 },
18406 {"eapsr", 2 }, {"EAPSR", 2 },
18407 {"psr", 3 }, {"PSR", 3 },
18408 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
18409 {"ipsr", 5 }, {"IPSR", 5 },
18410 {"epsr", 6 }, {"EPSR", 6 },
18411 {"iepsr", 7 }, {"IEPSR", 7 },
18412 {"msp", 8 }, {"MSP", 8 },
18413 {"psp", 9 }, {"PSP", 9 },
18414 {"primask", 16}, {"PRIMASK", 16},
18415 {"basepri", 17}, {"BASEPRI", 17},
18416 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
18417 {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility. */
18418 {"faultmask", 19}, {"FAULTMASK", 19},
18419 {"control", 20}, {"CONTROL", 20}
18422 /* Table of all shift-in-operand names. */
18423 static const struct asm_shift_name shift_names
[] =
18425 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
18426 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
18427 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
18428 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
18429 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
18430 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
/* Table of all explicit relocation names.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
#endif
18458 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
18459 static const struct asm_cond conds
[] =
18463 {"cs", 0x2}, {"hs", 0x2},
18464 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
18478 #define UL_BARRIER(L,U,CODE,FEAT) \
18479 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
18480 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
18482 static struct asm_barrier_opt barrier_opt_names
[] =
18484 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
18485 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
18486 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
18487 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
18488 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
18489 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
18490 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
18491 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
18492 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
18493 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
18494 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
18495 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
18496 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
18497 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
18498 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
18499 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Helper for CM below: glues the condition M2 between mnemonic parts M1
   and M3, picking the right OT_odd_infix tag from the infix position.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand one table entry per condition code (plus the bare form) for a
   mnemonic whose condition infix sits at an odd position.  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
18681 static const struct asm_opcode insns
[] =
18683 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18684 #define THUMB_VARIANT & arm_ext_v4t
18685 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18686 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18687 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18688 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18689 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
18690 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
18691 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
18692 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
18693 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18694 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18695 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18696 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18697 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18698 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18699 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18700 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18702 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18703 for setting PSR flag bits. They are obsolete in V6 and do not
18704 have Thumb equivalents. */
18705 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18706 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18707 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
18708 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
18709 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
18710 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
18711 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18712 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18713 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
18715 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
18716 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
18717 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
18718 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
18720 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
18721 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
18722 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
18724 OP_ADDRGLDR
),ldst
, t_ldst
),
18725 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
18727 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18728 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18729 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18730 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18731 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18732 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18734 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
18735 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
18736 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
18737 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
18740 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
18741 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
18742 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
18743 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
18745 /* Thumb-compatibility pseudo ops. */
18746 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18747 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18748 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18749 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18750 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18751 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18752 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18753 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18754 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
18755 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
18756 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
18757 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
18759 /* These may simplify to neg. */
18760 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
18761 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
18763 #undef THUMB_VARIANT
18764 #define THUMB_VARIANT & arm_ext_v6
18766 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
18768 /* V1 instructions with no Thumb analogue prior to V6T2. */
18769 #undef THUMB_VARIANT
18770 #define THUMB_VARIANT & arm_ext_v6t2
18772 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18773 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18774 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
18776 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
18777 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
18778 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
18779 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
18781 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18782 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18784 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18785 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18787 /* V1 instructions with no Thumb analogue at all. */
18788 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
18789 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
18791 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
18792 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
18793 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
18794 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
18795 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
18796 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
18797 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
18798 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
18801 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18802 #undef THUMB_VARIANT
18803 #define THUMB_VARIANT & arm_ext_v4t
18805 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
18806 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
18808 #undef THUMB_VARIANT
18809 #define THUMB_VARIANT & arm_ext_v6t2
18811 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
18812 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
18814 /* Generic coprocessor instructions. */
18815 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
18816 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18817 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18818 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18819 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18820 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18821 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18824 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18826 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
18827 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
18830 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18831 #undef THUMB_VARIANT
18832 #define THUMB_VARIANT & arm_ext_msr
18834 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
18835 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
18838 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18839 #undef THUMB_VARIANT
18840 #define THUMB_VARIANT & arm_ext_v6t2
18842 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18843 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18844 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18845 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18846 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18847 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18848 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18849 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18852 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18853 #undef THUMB_VARIANT
18854 #define THUMB_VARIANT & arm_ext_v4t
18856 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18857 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18858 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18859 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18860 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18861 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18864 #define ARM_VARIANT & arm_ext_v4t_5
18866 /* ARM Architecture 4T. */
18867 /* Note: bx (and blx) are required on V5, even if the processor does
18868 not support Thumb. */
18869 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
18872 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18873 #undef THUMB_VARIANT
18874 #define THUMB_VARIANT & arm_ext_v5t
18876 /* Note: blx has 2 variants; the .value coded here is for
18877 BLX(2). Only this variant has conditional execution. */
18878 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
18879 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
18881 #undef THUMB_VARIANT
18882 #define THUMB_VARIANT & arm_ext_v6t2
18884 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
18885 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18886 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18887 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18888 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18889 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
18890 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18891 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18894 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18895 #undef THUMB_VARIANT
18896 #define THUMB_VARIANT & arm_ext_v5exp
18898 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18899 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18900 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18901 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18903 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18904 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18906 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18907 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18908 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18909 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18911 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18912 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18913 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18914 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18916 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18917 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18919 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18920 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18921 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18922 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18925 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18926 #undef THUMB_VARIANT
18927 #define THUMB_VARIANT & arm_ext_v6t2
18929 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
18930 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
18932 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
18933 ADDRGLDRS
), ldrd
, t_ldstd
),
18935 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
18936 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
18939 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
18941 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
18944 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
18945 #undef THUMB_VARIANT
18946 #define THUMB_VARIANT & arm_ext_v6
18948 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
18949 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
18950 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
18951 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
18952 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
18953 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18954 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18955 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18956 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18957 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
18959 #undef THUMB_VARIANT
18960 #define THUMB_VARIANT & arm_ext_v6t2_v8m
18962 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
18963 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
18965 #undef THUMB_VARIANT
18966 #define THUMB_VARIANT & arm_ext_v6t2
18968 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
18969 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
18971 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
18972 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
18974 /* ARM V6 not included in V7M. */
18975 #undef THUMB_VARIANT
18976 #define THUMB_VARIANT & arm_ext_v6_notm
18977 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
18978 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
18979 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
18980 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
18981 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
18982 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
18983 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
18984 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
18985 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
18986 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
18987 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
18988 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
18989 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
18990 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
18991 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
18992 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
18993 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
18994 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
18995 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
18997 /* ARM V6 not included in V7M (eg. integer SIMD). */
18998 #undef THUMB_VARIANT
18999 #define THUMB_VARIANT & arm_ext_v6_dsp
19000 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
19001 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
19002 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19003 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19004 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19005 /* Old name for QASX. */
19006 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19007 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19008 /* Old name for QSAX. */
19009 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19010 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19011 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19012 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19013 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19014 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19015 /* Old name for SASX. */
19016 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19017 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19018 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19019 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19020 /* Old name for SHASX. */
19021 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19022 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19023 /* Old name for SHSAX. */
19024 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19025 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19026 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19027 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19028 /* Old name for SSAX. */
19029 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19030 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19031 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19032 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19033 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19034 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19035 /* Old name for UASX. */
19036 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19037 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19038 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19039 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19040 /* Old name for UHASX. */
19041 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19042 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19043 /* Old name for UHSAX. */
19044 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19045 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19046 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19047 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19048 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19049 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19050 /* Old name for UQASX. */
19051 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19052 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19053 /* Old name for UQSAX. */
19054 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19055 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19056 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19057 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19058 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19059 /* Old name for USAX. */
19060 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19061 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19062 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19063 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19064 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19065 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19066 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19067 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19068 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19069 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19070 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19071 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19072 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19073 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19074 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19075 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19076 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19077 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19078 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19079 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19080 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19081 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19082 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19083 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19084 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19085 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19086 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19087 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19088 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19089 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
19090 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
19091 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19092 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19093 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
19096 #define ARM_VARIANT & arm_ext_v6k
19097 #undef THUMB_VARIANT
19098 #define THUMB_VARIANT & arm_ext_v6k
19100 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
19101 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
19102 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
19103 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
19105 #undef THUMB_VARIANT
19106 #define THUMB_VARIANT & arm_ext_v6_notm
19107 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
19109 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
19110 RRnpcb
), strexd
, t_strexd
),
19112 #undef THUMB_VARIANT
19113 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19114 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
19116 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
19118 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19120 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19122 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
19125 #define ARM_VARIANT & arm_ext_sec
19126 #undef THUMB_VARIANT
19127 #define THUMB_VARIANT & arm_ext_sec
19129 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
19132 #define ARM_VARIANT & arm_ext_virt
19133 #undef THUMB_VARIANT
19134 #define THUMB_VARIANT & arm_ext_virt
19136 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
19137 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
19140 #define ARM_VARIANT & arm_ext_pan
19141 #undef THUMB_VARIANT
19142 #define THUMB_VARIANT & arm_ext_pan
19144 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
19147 #define ARM_VARIANT & arm_ext_v6t2
19148 #undef THUMB_VARIANT
19149 #define THUMB_VARIANT & arm_ext_v6t2
19151 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
19152 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
19153 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19154 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19156 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
19157 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
19159 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19160 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19161 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19162 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19164 #undef THUMB_VARIANT
19165 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19166 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19167 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19169 /* Thumb-only instructions. */
19171 #define ARM_VARIANT NULL
19172 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
19173 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
19175 /* ARM does not really have an IT instruction, so always allow it.
19176 The opcode is copied from Thumb in order to allow warnings in
19177 -mimplicit-it=[never | arm] modes. */
19179 #define ARM_VARIANT & arm_ext_v1
19180 #undef THUMB_VARIANT
19181 #define THUMB_VARIANT & arm_ext_v6t2
19183 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
19184 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
19185 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
19186 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
19187 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
19188 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
19189 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
19190 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
19191 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
19192 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
19193 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
19194 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
19195 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
19196 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
19197 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
19198 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19199 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19200 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19202 /* Thumb2 only instructions. */
19204 #define ARM_VARIANT NULL
19206 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19207 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19208 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19209 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19210 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
19211 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
19213 /* Hardware division instructions. */
19215 #define ARM_VARIANT & arm_ext_adiv
19216 #undef THUMB_VARIANT
19217 #define THUMB_VARIANT & arm_ext_div
19219 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19220 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19222 /* ARM V6M/V7 instructions. */
19224 #define ARM_VARIANT & arm_ext_barrier
19225 #undef THUMB_VARIANT
19226 #define THUMB_VARIANT & arm_ext_barrier
19228 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
19229 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
19230 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
19232 /* ARM V7 instructions. */
19234 #define ARM_VARIANT & arm_ext_v7
19235 #undef THUMB_VARIANT
19236 #define THUMB_VARIANT & arm_ext_v7
19238 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
19239 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
19242 #define ARM_VARIANT & arm_ext_mp
19243 #undef THUMB_VARIANT
19244 #define THUMB_VARIANT & arm_ext_mp
19246 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
19248 /* AArchv8 instructions. */
19250 #define ARM_VARIANT & arm_ext_v8
19252 /* Instructions shared between armv8-a and armv8-m. */
19253 #undef THUMB_VARIANT
19254 #define THUMB_VARIANT & arm_ext_atomics
19256 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19257 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19258 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19259 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19260 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19261 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19262 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19263 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
19264 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19265 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19267 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19269 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19271 #undef THUMB_VARIANT
19272 #define THUMB_VARIANT & arm_ext_v8
19274 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
19275 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
19276 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
19278 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
19280 /* ARMv8 T32 only. */
19282 #define ARM_VARIANT NULL
19283 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
19284 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
19285 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
19287 /* FP for ARMv8. */
19289 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19290 #undef THUMB_VARIANT
19291 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19293 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19294 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19295 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19296 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19297 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19298 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19299 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
19300 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
19301 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
19302 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
19303 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
19304 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
19305 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
19306 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
19307 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
19308 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
19309 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
19311 /* Crypto v1 extensions. */
19313 #define ARM_VARIANT & fpu_crypto_ext_armv8
19314 #undef THUMB_VARIANT
19315 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19317 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
19318 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
19319 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
19320 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
19321 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
19322 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
19323 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
19324 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
19325 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
19326 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
19327 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
19328 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
19329 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
19330 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
19333 #define ARM_VARIANT & crc_ext_armv8
19334 #undef THUMB_VARIANT
19335 #define THUMB_VARIANT & crc_ext_armv8
19336 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
19337 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
19338 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
19339 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
19340 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
19341 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
19344 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19345 #undef THUMB_VARIANT
19346 #define THUMB_VARIANT NULL
19348 cCE("wfs", e200110
, 1, (RR
), rd
),
19349 cCE("rfs", e300110
, 1, (RR
), rd
),
19350 cCE("wfc", e400110
, 1, (RR
), rd
),
19351 cCE("rfc", e500110
, 1, (RR
), rd
),
19353 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19354 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19355 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19356 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19358 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19359 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19360 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19361 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19363 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
19364 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
19365 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
19366 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
19367 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
19368 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
19369 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
19370 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
19371 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
19372 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
19373 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
19374 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
19376 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
19377 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
19378 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
19379 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
19380 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
19381 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
19382 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
19383 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
19384 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
19385 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
19386 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
19387 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
19389 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
19390 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
19391 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
19392 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
19393 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
19394 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
19395 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
19396 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
19397 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
19398 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
19399 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
19400 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
19402 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
19403 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
19404 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
19405 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
19406 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
19407 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
19408 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
19409 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
19410 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
19411 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
19412 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
19413 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
19415 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
19416 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
19417 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
19418 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
19419 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
19420 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
19421 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
19422 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
19423 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
19424 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
19425 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
19426 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
19428 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
19429 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
19430 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
19431 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
19432 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
19433 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
19434 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
19435 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
19436 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
19437 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
19438 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
19439 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
19441 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
19442 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
19443 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
19444 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
19445 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
19446 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
19447 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
19448 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
19449 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
19450 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
19451 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
19452 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
19454 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
19455 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
19456 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
19457 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
19458 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
19459 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
19460 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
19461 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
19462 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
19463 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
19464 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
19465 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
19467 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
19468 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
19469 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
19470 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
19471 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
19472 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
19473 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
19474 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
19475 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
19476 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
19477 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
19478 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
19480 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
19481 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
19482 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
19483 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
19484 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
19485 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
19486 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
19487 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
19488 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
19489 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
19490 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
19491 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
19493 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
19494 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
19495 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
19496 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
19497 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
19498 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
19499 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
19500 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
19501 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
19502 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
19503 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
19504 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
19506 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
19507 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
19508 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
19509 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
19510 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
19511 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
19512 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
19513 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
19514 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
19515 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
19516 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
19517 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
19519 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
19520 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
19521 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
19522 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
19523 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
19524 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
19525 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
19526 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
19527 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
19528 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
19529 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
19530 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
19532 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
19533 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
19534 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
19535 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
19536 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
19537 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
19538 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
19539 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
19540 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
19541 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
19542 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
19543 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
19545 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
19546 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
19547 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
19548 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
19549 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
19550 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
19551 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
19552 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
19553 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
19554 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
19555 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
19556 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
19558 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
19559 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
19560 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
19561 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
19562 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
19563 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
19564 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
19565 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
19566 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
19567 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
19568 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
19569 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
19571 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19572 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19573 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19574 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19575 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19576 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19577 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19578 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19579 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19580 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19581 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19582 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19584 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19585 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19586 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19587 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19588 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19589 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19590 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19591 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19592 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19593 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19594 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19595 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19597 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19598 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19599 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19600 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19601 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19602 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19603 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19604 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19605 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19606 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19607 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19608 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19610 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19611 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19612 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19613 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19614 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19615 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19616 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19617 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19618 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19619 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19620 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19621 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19623 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19624 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19625 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19626 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19627 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19628 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19629 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19630 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19631 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19632 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19633 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19634 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19636 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19637 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19638 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19639 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19640 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19641 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19642 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19643 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19644 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19645 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19646 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19647 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19649 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19650 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19651 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19652 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19653 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19654 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19655 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19656 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19657 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19658 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19659 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19660 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19662 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19663 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19664 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19665 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19666 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19667 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19668 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19669 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19670 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19671 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19672 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19673 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19675 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19676 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19677 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19678 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19679 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19680 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19681 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19682 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19683 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19684 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19685 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19686 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19688 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19689 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19690 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19691 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19692 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19693 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19694 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19695 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19696 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19697 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19698 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19699 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19701 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19702 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19703 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19704 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19705 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19706 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19707 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19708 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19709 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19710 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19711 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19712 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19714 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19715 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19716 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19717 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19718 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19719 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19720 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19721 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19722 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19723 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19724 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19725 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19727 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19728 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19729 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19730 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19731 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19732 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19733 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19734 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19735 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19736 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19737 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19738 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19740 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19741 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19742 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19743 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19745 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
19746 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
19747 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
19748 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
19749 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
19750 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
19751 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
19752 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
19753 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
19754 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
19755 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
19756 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
19758 /* The implementation of the FIX instruction is broken on some
19759 assemblers, in that it accepts a precision specifier as well as a
19760 rounding specifier, despite the fact that this is meaningless.
19761 To be more compatible, we accept it as well, though of course it
19762 does not set any bits. */
19763 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
19764 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
19765 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
19766 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
19767 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
19768 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
19769 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
19770 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
19771 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
19772 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
19773 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
19774 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
19775 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
19777 /* Instructions that were new with the real FPA, call them V2. */
19779 #define ARM_VARIANT & fpu_fpa_ext_v2
19781 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19782 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19783 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19784 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19785 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19786 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19789 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
19791 /* Moves and type conversions. */
19792 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19793 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
19794 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
19795 cCE("fmstat", ef1fa10
, 0, (), noargs
),
19796 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
19797 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
19798 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19799 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19800 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19801 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19802 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19803 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19804 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
19805 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
19807 /* Memory operations. */
19808 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
19809 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
19810 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19811 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19812 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19813 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19814 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19815 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19816 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19817 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19818 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19819 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19820 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19821 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19822 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19823 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19824 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19825 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19827 /* Monadic operations. */
19828 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19829 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19830 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19832 /* Dyadic operations. */
19833 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19834 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19835 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19836 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19837 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19838 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19839 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19840 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19841 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19844 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19845 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
19846 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19847 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
19849 /* Double precision load/store are still present on single precision
19850 implementations. */
19851 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
19852 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
19853 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19854 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19855 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19856 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19857 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19858 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19859 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19860 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19863 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19865 /* Moves and type conversions. */
19866 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19867 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
19868 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19869 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
19870 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
19871 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
19872 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
19873 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
19874 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
19875 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19876 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19877 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19878 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19880 /* Monadic operations. */
19881 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19882 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19883 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19885 /* Dyadic operations. */
19886 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19887 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19888 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19889 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19890 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19891 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19892 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19893 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19894 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19897 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19898 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
19899 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19900 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
19903 #define ARM_VARIANT & fpu_vfp_ext_v2
19905 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
19906 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
19907 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
19908 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
19910 /* Instructions which may belong to either the Neon or VFP instruction sets.
19911 Individual encoder functions perform additional architecture checks. */
19913 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19914 #undef THUMB_VARIANT
19915 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19917 /* These mnemonics are unique to VFP. */
19918 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
19919 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
19920 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
19921 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
19922 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
19923 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
19924 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
19925 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
19926 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
19927 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
19929 /* Mnemonics shared by Neon and VFP. */
19930 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
19931 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
19932 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
19934 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
19935 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
19937 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
19938 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
19940 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19941 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19942 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19943 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19944 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19945 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19946 NCE(vldr
, d100b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
19947 NCE(vstr
, d000b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
19949 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
19950 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
19951 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
19952 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
19955 /* NOTE: All VMOV encoding is special-cased! */
19956 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
19957 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
19959 #undef THUMB_VARIANT
19960 #define THUMB_VARIANT & fpu_neon_ext_v1
19962 #define ARM_VARIANT & fpu_neon_ext_v1
19964 /* Data processing with three registers of the same length. */
19965 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
19966 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
19967 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
19968 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
19969 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
19970 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
19971 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
19972 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
19973 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
19974 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
19975 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
19976 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
19977 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
19978 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
19979 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
19980 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
19981 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
19982 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
19983 /* If not immediate, fall back to neon_dyadic_i64_su.
19984 shl_imm should accept I8 I16 I32 I64,
19985 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
19986 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
19987 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
19988 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
19989 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
19990 /* Logic ops, types optional & ignored. */
19991 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
19992 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
19993 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
19994 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
19995 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
19996 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
19997 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
19998 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
19999 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
20000 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
20001 /* Bitfield ops, untyped. */
20002 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20003 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20004 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20005 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20006 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20007 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20008 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
20009 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20010 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20011 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20012 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20013 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20014 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20015 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20016 back to neon_dyadic_if_su. */
20017 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20018 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20019 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20020 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20021 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20022 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20023 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20024 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20025 /* Comparison. Type I8 I16 I32 F32. */
20026 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
20027 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
20028 /* As above, D registers only. */
20029 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20030 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20031 /* Int and float variants, signedness unimportant. */
20032 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20033 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20034 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
20035 /* Add/sub take types I8 I16 I32 I64 F32. */
20036 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20037 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20038 /* vtst takes sizes 8, 16, 32. */
20039 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
20040 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
20041 /* VMUL takes I8 I16 I32 F32 P8. */
20042 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
20043 /* VQD{R}MULH takes S16 S32. */
20044 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20045 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20046 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20047 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20048 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20049 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20050 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20051 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20052 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20053 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20054 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20055 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20056 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20057 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20058 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20059 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20060 /* ARM v8.1 extension. */
20061 nUF(vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20062 nUF(vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20063 nUF(vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20064 nUF(vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20066 /* Two address, int/float. Types S8 S16 S32 F32. */
20067 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20068 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20070 /* Data processing with two registers and a shift amount. */
20071 /* Right shifts, and variants with rounding.
20072 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20073 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20074 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20075 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20076 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20077 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20078 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20079 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20080 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20081 /* Shift and insert. Sizes accepted 8 16 32 64. */
20082 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
20083 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
20084 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
20085 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
20086 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20087 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
20088 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
20089 /* Right shift immediate, saturating & narrowing, with rounding variants.
20090 Types accepted S16 S32 S64 U16 U32 U64. */
20091 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20092 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20093 /* As above, unsigned. Types accepted S16 S32 S64. */
20094 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20095 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20096 /* Right shift narrowing. Types accepted I16 I32 I64. */
20097 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20098 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20099 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20100 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
20101 /* CVT with optional immediate for fixed-point variant. */
20102 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
20104 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
20105 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
20107 /* Data processing, three registers of different lengths. */
20108 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20109 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
20110 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20111 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20112 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20113 /* If not scalar, fall back to neon_dyadic_long.
20114 Vector types as above, scalar types S16 S32 U16 U32. */
20115 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20116 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20117 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20118 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20119 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20120 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20121 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20122 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20123 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20124 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20125 /* Saturating doubling multiplies. Types S16 S32. */
20126 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20127 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20128 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20129 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20130 S16 S32 U16 U32. */
20131 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
20133 /* Extract. Size 8. */
20134 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
20135 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
20137 /* Two registers, miscellaneous. */
20138 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20139 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
20140 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
20141 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
20142 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
20143 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
20144 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
20145 /* Vector replicate. Sizes 8 16 32. */
20146 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
20147 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
20148 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20149 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
20150 /* VMOVN. Types I16 I32 I64. */
20151 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
20152 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20153 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
20154 /* VQMOVUN. Types S16 S32 S64. */
20155 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
20156 /* VZIP / VUZP. Sizes 8 16 32. */
20157 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20158 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20159 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20160 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20161 /* VQABS / VQNEG. Types S8 S16 S32. */
20162 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20163 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20164 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20165 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20166 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20167 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20168 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
20169 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20170 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
20171 /* Reciprocal estimates. Types U32 F32. */
20172 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20173 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
20174 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20175 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
20176 /* VCLS. Types S8 S16 S32. */
20177 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
20178 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
20179 /* VCLZ. Types I8 I16 I32. */
20180 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
20181 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
20182 /* VCNT. Size 8. */
20183 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
20184 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
20185 /* Two address, untyped. */
20186 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
20187 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
20188 /* VTRN. Sizes 8 16 32. */
20189 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
20190 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
20192 /* Table lookup. Size 8. */
20193 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20194 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20196 #undef THUMB_VARIANT
20197 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20199 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20201 /* Neon element/structure load/store. */
20202 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20203 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20204 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20205 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20206 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20207 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20208 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20209 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20211 #undef THUMB_VARIANT
20212 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20214 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20215 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
20216 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20217 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20218 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20219 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20220 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20221 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20222 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20223 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20225 #undef THUMB_VARIANT
20226 #define THUMB_VARIANT & fpu_vfp_ext_v3
20228 #define ARM_VARIANT & fpu_vfp_ext_v3
20230 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
20231 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20232 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20233 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20234 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20235 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20236 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20237 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20238 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20241 #define ARM_VARIANT & fpu_vfp_ext_fma
20242 #undef THUMB_VARIANT
20243 #define THUMB_VARIANT & fpu_vfp_ext_fma
20244 /* Mnemonics shared by Neon and VFP. These are included in the
20245 VFP FMA variant; NEON and VFP FMA always includes the NEON
20246 FMA instructions. */
20247 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20248 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20249 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20250 the v form should always be used. */
20251 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20252 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20253 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20254 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20255 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20256 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20258 #undef THUMB_VARIANT
20260 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20262 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20263 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20264 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20265 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20266 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20267 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20268 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
20269 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
20272 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20274 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
20275 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
20276 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
20277 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
20278 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
20279 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
20280 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
20281 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
20282 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
20283 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20284 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20285 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20286 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20287 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20288 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20289 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20290 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20291 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20292 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
20293 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
20294 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20295 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20296 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20297 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20298 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20299 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20300 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
20301 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
20302 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
20303 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
20304 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
20305 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
20306 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
20307 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
20308 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20309 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20310 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20311 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20312 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20313 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20314 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20315 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20316 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20317 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20318 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20319 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20320 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
20321 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20322 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20323 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20324 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20325 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20326 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20327 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20328 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20329 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20330 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20331 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20332 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20333 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20334 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20335 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20336 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20337 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20338 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20339 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20340 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20341 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20342 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20343 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20344 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20345 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20346 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20347 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20348 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20349 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20350 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20351 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20352 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20353 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20354 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20355 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20356 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20357 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20358 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20359 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20360 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20361 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20362 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
20363 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20364 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20365 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20366 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20367 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20368 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20369 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20370 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20371 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20372 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20373 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20374 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20375 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20376 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20377 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20378 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20379 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20380 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20381 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20382 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20383 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20384 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
20385 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20386 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20387 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20388 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20389 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20390 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20391 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20392 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20393 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20394 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20395 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20396 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20397 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20398 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20399 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20400 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20401 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20402 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20403 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20404 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20405 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20406 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20407 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20408 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20409 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20410 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20411 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20412 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20413 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20414 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20415 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20416 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20417 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20418 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20419 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20420 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20421 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20422 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20423 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20424 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20425 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20426 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20427 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20428 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20429 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20430 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20431 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20432 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20433 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20434 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20435 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
20438 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20440 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
20441 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
20442 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
20443 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20444 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20445 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20446 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20447 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20448 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20449 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20450 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20451 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20452 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20453 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20454 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20455 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20456 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20457 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20458 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20459 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20460 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
20461 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20462 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20463 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20464 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20465 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20466 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20467 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20468 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20469 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20470 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20471 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20472 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20473 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20474 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20475 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20476 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20477 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20478 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20479 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20480 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20481 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20482 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20483 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20484 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20485 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20486 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20487 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20488 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20489 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20490 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20491 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20492 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20493 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20494 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20495 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20496 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20499 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20501 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20502 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20503 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20504 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20505 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20506 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20507 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20508 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20509 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
20510 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
20511 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
20512 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
20513 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
20514 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
20515 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
20516 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
20517 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
20518 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
20519 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
20520 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
20521 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
20522 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
20523 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
20524 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
20525 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
20526 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
20527 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
20528 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
20529 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
20530 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
20531 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
20532 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
20533 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
20534 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
20535 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
20536 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
20537 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
20538 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
20539 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
20540 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
20541 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
20542 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
20543 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
20544 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
20545 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
20546 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
20547 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
20548 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
20549 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
20550 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
20551 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
20552 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
20553 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
20554 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
20555 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20556 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20557 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20558 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20559 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20560 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20561 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
20562 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
20563 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
20564 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
20565 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20566 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20567 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20568 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20569 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20570 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20571 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20572 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20573 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
20574 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
20575 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
20576 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
20579 #define ARM_VARIANT NULL
20580 #undef THUMB_VARIANT
20581 #define THUMB_VARIANT & arm_ext_v8m
20582 TUE("tt", 0, e840f000
, 2, (RRnpc
, RRnpc
), 0, tt
),
20583 TUE("ttt", 0, e840f040
, 2, (RRnpc
, RRnpc
), 0, tt
),
20586 #undef THUMB_VARIANT
20612 /* MD interface: bits in the object file. */
20614 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20615 for use in the a.out file, and stores them in the array pointed to by buf.
20616 This knows about the endian-ness of the target machine and does
20617 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
20618 2 (short) and 4 (long) Floating numbers are put out as a series of
20619 LITTLENUMS (shorts, here at least). */
20622 md_number_to_chars (char * buf
, valueT val
, int n
)
20624 if (target_big_endian
)
20625 number_to_chars_bigendian (buf
, val
, n
);
20627 number_to_chars_littleendian (buf
, val
, n
);
20631 md_chars_to_number (char * buf
, int n
)
20634 unsigned char * where
= (unsigned char *) buf
;
20636 if (target_big_endian
)
20641 result
|= (*where
++ & 255);
20649 result
|= (where
[n
] & 255);
20656 /* MD interface: Sections. */
20658 /* Calculate the maximum variable size (i.e., excluding fr_fix)
20659 that an rs_machine_dependent frag may reach. */
20662 arm_frag_max_var (fragS
*fragp
)
20664 /* We only use rs_machine_dependent for variable-size Thumb instructions,
20665 which are either THUMB_SIZE (2) or INSN_SIZE (4).
20667 Note that we generate relaxable instructions even for cases that don't
20668 really need it, like an immediate that's a trivial constant. So we're
20669 overestimating the instruction size for some of those cases. Rather
20670 than putting more intelligence here, it would probably be better to
20671 avoid generating a relaxation frag in the first place when it can be
20672 determined up front that a short instruction will suffice. */
20674 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
20678 /* Estimate the size of a frag before relaxing. Assume everything fits in
20682 md_estimate_size_before_relax (fragS
* fragp
,
20683 segT segtype ATTRIBUTE_UNUSED
)
20689 /* Convert a machine dependent frag. */
20692 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
20694 unsigned long insn
;
20695 unsigned long old_op
;
20703 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
20705 old_op
= bfd_get_16(abfd
, buf
);
20706 if (fragp
->fr_symbol
)
20708 exp
.X_op
= O_symbol
;
20709 exp
.X_add_symbol
= fragp
->fr_symbol
;
20713 exp
.X_op
= O_constant
;
20715 exp
.X_add_number
= fragp
->fr_offset
;
20716 opcode
= fragp
->fr_subtype
;
20719 case T_MNEM_ldr_pc
:
20720 case T_MNEM_ldr_pc2
:
20721 case T_MNEM_ldr_sp
:
20722 case T_MNEM_str_sp
:
20729 if (fragp
->fr_var
== 4)
20731 insn
= THUMB_OP32 (opcode
);
20732 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
20734 insn
|= (old_op
& 0x700) << 4;
20738 insn
|= (old_op
& 7) << 12;
20739 insn
|= (old_op
& 0x38) << 13;
20741 insn
|= 0x00000c00;
20742 put_thumb32_insn (buf
, insn
);
20743 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
20747 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
20749 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
20752 if (fragp
->fr_var
== 4)
20754 insn
= THUMB_OP32 (opcode
);
20755 insn
|= (old_op
& 0xf0) << 4;
20756 put_thumb32_insn (buf
, insn
);
20757 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
20761 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
20762 exp
.X_add_number
-= 4;
20770 if (fragp
->fr_var
== 4)
20772 int r0off
= (opcode
== T_MNEM_mov
20773 || opcode
== T_MNEM_movs
) ? 0 : 8;
20774 insn
= THUMB_OP32 (opcode
);
20775 insn
= (insn
& 0xe1ffffff) | 0x10000000;
20776 insn
|= (old_op
& 0x700) << r0off
;
20777 put_thumb32_insn (buf
, insn
);
20778 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
20782 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
20787 if (fragp
->fr_var
== 4)
20789 insn
= THUMB_OP32(opcode
);
20790 put_thumb32_insn (buf
, insn
);
20791 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
20794 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
20798 if (fragp
->fr_var
== 4)
20800 insn
= THUMB_OP32(opcode
);
20801 insn
|= (old_op
& 0xf00) << 14;
20802 put_thumb32_insn (buf
, insn
);
20803 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
20806 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
20809 case T_MNEM_add_sp
:
20810 case T_MNEM_add_pc
:
20811 case T_MNEM_inc_sp
:
20812 case T_MNEM_dec_sp
:
20813 if (fragp
->fr_var
== 4)
20815 /* ??? Choose between add and addw. */
20816 insn
= THUMB_OP32 (opcode
);
20817 insn
|= (old_op
& 0xf0) << 4;
20818 put_thumb32_insn (buf
, insn
);
20819 if (opcode
== T_MNEM_add_pc
)
20820 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
20822 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
20825 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
20833 if (fragp
->fr_var
== 4)
20835 insn
= THUMB_OP32 (opcode
);
20836 insn
|= (old_op
& 0xf0) << 4;
20837 insn
|= (old_op
& 0xf) << 16;
20838 put_thumb32_insn (buf
, insn
);
20839 if (insn
& (1 << 20))
20840 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
20842 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
20845 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
20851 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
20852 (enum bfd_reloc_code_real
) reloc_type
);
20853 fixp
->fx_file
= fragp
->fr_file
;
20854 fixp
->fx_line
= fragp
->fr_line
;
20855 fragp
->fr_fix
+= fragp
->fr_var
;
20857 /* Set whether we use thumb-2 ISA based on final relaxation results. */
20858 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
20859 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
20860 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
20863 /* Return the size of a relaxable immediate operand instruction.
20864 SHIFT and SIZE specify the form of the allowable immediate. */
20866 relax_immediate (fragS
*fragp
, int size
, int shift
)
20872 /* ??? Should be able to do better than this. */
20873 if (fragp
->fr_symbol
)
20876 low
= (1 << shift
) - 1;
20877 mask
= (1 << (shift
+ size
)) - (1 << shift
);
20878 offset
= fragp
->fr_offset
;
20879 /* Force misaligned offsets to 32-bit variant. */
20882 if (offset
& ~mask
)
20887 /* Get the address of a symbol during relaxation. */
20889 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
20895 sym
= fragp
->fr_symbol
;
20896 sym_frag
= symbol_get_frag (sym
);
20897 know (S_GET_SEGMENT (sym
) != absolute_section
20898 || sym_frag
== &zero_address_frag
);
20899 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
20901 /* If frag has yet to be reached on this pass, assume it will
20902 move by STRETCH just as we did. If this is not so, it will
20903 be because some frag between grows, and that will force
20907 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
20911 /* Adjust stretch for any alignment frag. Note that if have
20912 been expanding the earlier code, the symbol may be
20913 defined in what appears to be an earlier frag. FIXME:
20914 This doesn't handle the fr_subtype field, which specifies
20915 a maximum number of bytes to skip when doing an
20917 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
20919 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
20922 stretch
= - ((- stretch
)
20923 & ~ ((1 << (int) f
->fr_offset
) - 1));
20925 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
20937 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
20940 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
20945 /* Assume worst case for symbols not known to be in the same section. */
20946 if (fragp
->fr_symbol
== NULL
20947 || !S_IS_DEFINED (fragp
->fr_symbol
)
20948 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
20949 || S_IS_WEAK (fragp
->fr_symbol
))
20952 val
= relaxed_symbol_addr (fragp
, stretch
);
20953 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
20954 addr
= (addr
+ 4) & ~3;
20955 /* Force misaligned targets to 32-bit variant. */
20959 if (val
< 0 || val
> 1020)
20964 /* Return the size of a relaxable add/sub immediate instruction. */
20966 relax_addsub (fragS
*fragp
, asection
*sec
)
20971 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
20972 op
= bfd_get_16(sec
->owner
, buf
);
20973 if ((op
& 0xf) == ((op
>> 4) & 0xf))
20974 return relax_immediate (fragp
, 8, 0);
20976 return relax_immediate (fragp
, 3, 0);
20979 /* Return TRUE iff the definition of symbol S could be pre-empted
20980 (overridden) at link or load time. */
20982 symbol_preemptible (symbolS
*s
)
20984 /* Weak symbols can always be pre-empted. */
20988 /* Non-global symbols cannot be pre-empted. */
20989 if (! S_IS_EXTERNAL (s
))
20993 /* In ELF, a global symbol can be marked protected, or private. In that
20994 case it can't be pre-empted (other definitions in the same link unit
20995 would violate the ODR). */
20996 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
21000 /* Other global symbols might be pre-empted. */
21004 /* Return the size of a relaxable branch instruction. BITS is the
21005 size of the offset field in the narrow instruction. */
21008 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
21014 /* Assume worst case for symbols not known to be in the same section. */
21015 if (!S_IS_DEFINED (fragp
->fr_symbol
)
21016 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21017 || S_IS_WEAK (fragp
->fr_symbol
))
21021 /* A branch to a function in ARM state will require interworking. */
21022 if (S_IS_DEFINED (fragp
->fr_symbol
)
21023 && ARM_IS_FUNC (fragp
->fr_symbol
))
21027 if (symbol_preemptible (fragp
->fr_symbol
))
21030 val
= relaxed_symbol_addr (fragp
, stretch
);
21031 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
21034 /* Offset is a signed value *2 */
21036 if (val
>= limit
|| val
< -limit
)
21042 /* Relax a machine dependent frag. This returns the amount by which
21043 the current size of the frag should change. */
21046 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
21051 oldsize
= fragp
->fr_var
;
21052 switch (fragp
->fr_subtype
)
21054 case T_MNEM_ldr_pc2
:
21055 newsize
= relax_adr (fragp
, sec
, stretch
);
21057 case T_MNEM_ldr_pc
:
21058 case T_MNEM_ldr_sp
:
21059 case T_MNEM_str_sp
:
21060 newsize
= relax_immediate (fragp
, 8, 2);
21064 newsize
= relax_immediate (fragp
, 5, 2);
21068 newsize
= relax_immediate (fragp
, 5, 1);
21072 newsize
= relax_immediate (fragp
, 5, 0);
21075 newsize
= relax_adr (fragp
, sec
, stretch
);
21081 newsize
= relax_immediate (fragp
, 8, 0);
21084 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
21087 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
21089 case T_MNEM_add_sp
:
21090 case T_MNEM_add_pc
:
21091 newsize
= relax_immediate (fragp
, 8, 2);
21093 case T_MNEM_inc_sp
:
21094 case T_MNEM_dec_sp
:
21095 newsize
= relax_immediate (fragp
, 7, 2);
21101 newsize
= relax_addsub (fragp
, sec
);
21107 fragp
->fr_var
= newsize
;
21108 /* Freeze wide instructions that are at or before the same location as
21109 in the previous pass. This avoids infinite loops.
21110 Don't freeze them unconditionally because targets may be artificially
21111 misaligned by the expansion of preceding frags. */
21112 if (stretch
<= 0 && newsize
> 2)
21114 md_convert_frag (sec
->owner
, sec
, fragp
);
21118 return newsize
- oldsize
;
21121 /* Round up a section size to the appropriate boundary. */
21124 md_section_align (segT segment ATTRIBUTE_UNUSED
,
21127 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21128 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
21130 /* For a.out, force the section size to be aligned. If we don't do
21131 this, BFD will align it for us, but it will not write out the
21132 final bytes of the section. This may be a bug in BFD, but it is
21133 easier to fix it here since that is how the other a.out targets
21137 align
= bfd_get_section_alignment (stdoutput
, segment
);
21138 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
21145 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
21146 of an rs_align_code fragment. */
21149 arm_handle_align (fragS
* fragP
)
21151 static char const arm_noop
[2][2][4] =
21154 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
21155 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
21158 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
21159 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
21162 static char const thumb_noop
[2][2][2] =
21165 {0xc0, 0x46}, /* LE */
21166 {0x46, 0xc0}, /* BE */
21169 {0x00, 0xbf}, /* LE */
21170 {0xbf, 0x00} /* BE */
21173 static char const wide_thumb_noop
[2][4] =
21174 { /* Wide Thumb-2 */
21175 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
21176 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
21179 unsigned bytes
, fix
, noop_size
;
21182 const char *narrow_noop
= NULL
;
21187 if (fragP
->fr_type
!= rs_align_code
)
21190 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
21191 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
21194 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21195 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
21197 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
21199 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
21201 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21202 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
21204 narrow_noop
= thumb_noop
[1][target_big_endian
];
21205 noop
= wide_thumb_noop
[target_big_endian
];
21208 noop
= thumb_noop
[0][target_big_endian
];
21216 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21217 ? selected_cpu
: arm_arch_none
,
21219 [target_big_endian
];
21226 fragP
->fr_var
= noop_size
;
21228 if (bytes
& (noop_size
- 1))
21230 fix
= bytes
& (noop_size
- 1);
21232 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
21234 memset (p
, 0, fix
);
21241 if (bytes
& noop_size
)
21243 /* Insert a narrow noop. */
21244 memcpy (p
, narrow_noop
, noop_size
);
21246 bytes
-= noop_size
;
21250 /* Use wide noops for the remainder */
21254 while (bytes
>= noop_size
)
21256 memcpy (p
, noop
, noop_size
);
21258 bytes
-= noop_size
;
21262 fragP
->fr_fix
+= fix
;
21265 /* Called from md_do_align. Used to create an alignment
21266 frag in a code section. */
21269 arm_frag_align_code (int n
, int max
)
21273 /* We assume that there will never be a requirement
21274 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21275 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21280 _("alignments greater than %d bytes not supported in .text sections."),
21281 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
21282 as_fatal ("%s", err_msg
);
21285 p
= frag_var (rs_align_code
,
21286 MAX_MEM_FOR_RS_ALIGN_CODE
,
21288 (relax_substateT
) max
,
21295 /* Perform target specific initialisation of a frag.
21296 Note - despite the name this initialisation is not done when the frag
21297 is created, but only when its type is assigned. A frag can be created
21298 and used a long time before its type is set, so beware of assuming that
21299 this initialisationis performed first. */
21303 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
21305 /* Record whether this frag is in an ARM or a THUMB area. */
21306 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21309 #else /* OBJ_ELF is defined. */
21311 arm_init_frag (fragS
* fragP
, int max_chars
)
21313 int frag_thumb_mode
;
21315 /* If the current ARM vs THUMB mode has not already
21316 been recorded into this frag then do so now. */
21317 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
21318 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21320 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
21322 /* Record a mapping symbol for alignment frags. We will delete this
21323 later if the alignment ends up empty. */
21324 switch (fragP
->fr_type
)
21327 case rs_align_test
:
21329 mapping_state_2 (MAP_DATA
, max_chars
);
21331 case rs_align_code
:
21332 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
21339 /* When we change sections we need to issue a new mapping symbol. */
21342 arm_elf_change_section (void)
21344 /* Link an unlinked unwind index table section to the .text section. */
21345 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
21346 && elf_linked_to_section (now_seg
) == NULL
)
21347 elf_linked_to_section (now_seg
) = text_section
;
21351 arm_elf_section_type (const char * str
, size_t len
)
21353 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
21354 return SHT_ARM_EXIDX
;
21359 /* Code to deal with unwinding tables. */
21361 static void add_unwind_adjustsp (offsetT
);
21363 /* Generate any deferred unwind frame offset. */
21366 flush_pending_unwind (void)
21370 offset
= unwind
.pending_offset
;
21371 unwind
.pending_offset
= 0;
21373 add_unwind_adjustsp (offset
);
21376 /* Add an opcode to this list for this function. Two-byte opcodes should
21377 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21381 add_unwind_opcode (valueT op
, int length
)
21383 /* Add any deferred stack adjustment. */
21384 if (unwind
.pending_offset
)
21385 flush_pending_unwind ();
21387 unwind
.sp_restored
= 0;
21389 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
21391 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
21392 if (unwind
.opcodes
)
21393 unwind
.opcodes
= (unsigned char *) xrealloc (unwind
.opcodes
,
21394 unwind
.opcode_alloc
);
21396 unwind
.opcodes
= (unsigned char *) xmalloc (unwind
.opcode_alloc
);
21401 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
21403 unwind
.opcode_count
++;
21407 /* Add unwind opcodes to adjust the stack pointer. */
21410 add_unwind_adjustsp (offsetT offset
)
21414 if (offset
> 0x200)
21416 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
21421 /* Long form: 0xb2, uleb128. */
21422 /* This might not fit in a word so add the individual bytes,
21423 remembering the list is built in reverse order. */
21424 o
= (valueT
) ((offset
- 0x204) >> 2);
21426 add_unwind_opcode (0, 1);
21428 /* Calculate the uleb128 encoding of the offset. */
21432 bytes
[n
] = o
& 0x7f;
21438 /* Add the insn. */
21440 add_unwind_opcode (bytes
[n
- 1], 1);
21441 add_unwind_opcode (0xb2, 1);
21443 else if (offset
> 0x100)
21445 /* Two short opcodes. */
21446 add_unwind_opcode (0x3f, 1);
21447 op
= (offset
- 0x104) >> 2;
21448 add_unwind_opcode (op
, 1);
21450 else if (offset
> 0)
21452 /* Short opcode. */
21453 op
= (offset
- 4) >> 2;
21454 add_unwind_opcode (op
, 1);
21456 else if (offset
< 0)
21459 while (offset
> 0x100)
21461 add_unwind_opcode (0x7f, 1);
21464 op
= ((offset
- 4) >> 2) | 0x40;
21465 add_unwind_opcode (op
, 1);
21469 /* Finish the list of unwind opcodes for this function. */
21471 finish_unwind_opcodes (void)
21475 if (unwind
.fp_used
)
21477 /* Adjust sp as necessary. */
21478 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
21479 flush_pending_unwind ();
21481 /* After restoring sp from the frame pointer. */
21482 op
= 0x90 | unwind
.fp_reg
;
21483 add_unwind_opcode (op
, 1);
21486 flush_pending_unwind ();
21490 /* Start an exception table entry. If idx is nonzero this is an index table
21494 start_unwind_section (const segT text_seg
, int idx
)
21496 const char * text_name
;
21497 const char * prefix
;
21498 const char * prefix_once
;
21499 const char * group_name
;
21503 size_t sec_name_len
;
21510 prefix
= ELF_STRING_ARM_unwind
;
21511 prefix_once
= ELF_STRING_ARM_unwind_once
;
21512 type
= SHT_ARM_EXIDX
;
21516 prefix
= ELF_STRING_ARM_unwind_info
;
21517 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
21518 type
= SHT_PROGBITS
;
21521 text_name
= segment_name (text_seg
);
21522 if (streq (text_name
, ".text"))
21525 if (strncmp (text_name
, ".gnu.linkonce.t.",
21526 strlen (".gnu.linkonce.t.")) == 0)
21528 prefix
= prefix_once
;
21529 text_name
+= strlen (".gnu.linkonce.t.");
21532 prefix_len
= strlen (prefix
);
21533 text_len
= strlen (text_name
);
21534 sec_name_len
= prefix_len
+ text_len
;
21535 sec_name
= (char *) xmalloc (sec_name_len
+ 1);
21536 memcpy (sec_name
, prefix
, prefix_len
);
21537 memcpy (sec_name
+ prefix_len
, text_name
, text_len
);
21538 sec_name
[prefix_len
+ text_len
] = '\0';
21544 /* Handle COMDAT group. */
21545 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
21547 group_name
= elf_group_name (text_seg
);
21548 if (group_name
== NULL
)
21550 as_bad (_("Group section `%s' has no group signature"),
21551 segment_name (text_seg
));
21552 ignore_rest_of_line ();
21555 flags
|= SHF_GROUP
;
21559 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
21561 /* Set the section link for index tables. */
21563 elf_linked_to_section (now_seg
) = text_seg
;
21567 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
21568 personality routine data. Returns zero, or the index table value for
21569 an inline entry. */
21572 create_unwind_entry (int have_data
)
21577 /* The current word of data. */
21579 /* The number of bytes left in this word. */
21582 finish_unwind_opcodes ();
21584 /* Remember the current text section. */
21585 unwind
.saved_seg
= now_seg
;
21586 unwind
.saved_subseg
= now_subseg
;
21588 start_unwind_section (now_seg
, 0);
21590 if (unwind
.personality_routine
== NULL
)
21592 if (unwind
.personality_index
== -2)
21595 as_bad (_("handlerdata in cantunwind frame"));
21596 return 1; /* EXIDX_CANTUNWIND. */
21599 /* Use a default personality routine if none is specified. */
21600 if (unwind
.personality_index
== -1)
21602 if (unwind
.opcode_count
> 3)
21603 unwind
.personality_index
= 1;
21605 unwind
.personality_index
= 0;
21608 /* Space for the personality routine entry. */
21609 if (unwind
.personality_index
== 0)
21611 if (unwind
.opcode_count
> 3)
21612 as_bad (_("too many unwind opcodes for personality routine 0"));
21616 /* All the data is inline in the index table. */
21619 while (unwind
.opcode_count
> 0)
21621 unwind
.opcode_count
--;
21622 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
21626 /* Pad with "finish" opcodes. */
21628 data
= (data
<< 8) | 0xb0;
21635 /* We get two opcodes "free" in the first word. */
21636 size
= unwind
.opcode_count
- 2;
21640 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
21641 if (unwind
.personality_index
!= -1)
21643 as_bad (_("attempt to recreate an unwind entry"));
21647 /* An extra byte is required for the opcode count. */
21648 size
= unwind
.opcode_count
+ 1;
21651 size
= (size
+ 3) >> 2;
21653 as_bad (_("too many unwind opcodes"));
21655 frag_align (2, 0, 0);
21656 record_alignment (now_seg
, 2);
21657 unwind
.table_entry
= expr_build_dot ();
21659 /* Allocate the table entry. */
21660 ptr
= frag_more ((size
<< 2) + 4);
21661 /* PR 13449: Zero the table entries in case some of them are not used. */
21662 memset (ptr
, 0, (size
<< 2) + 4);
21663 where
= frag_now_fix () - ((size
<< 2) + 4);
21665 switch (unwind
.personality_index
)
21668 /* ??? Should this be a PLT generating relocation? */
21669 /* Custom personality routine. */
21670 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
21671 BFD_RELOC_ARM_PREL31
);
21676 /* Set the first byte to the number of additional words. */
21677 data
= size
> 0 ? size
- 1 : 0;
21681 /* ABI defined personality routines. */
21683 /* Three opcodes bytes are packed into the first word. */
21690 /* The size and first two opcode bytes go in the first word. */
21691 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
21696 /* Should never happen. */
21700 /* Pack the opcodes into words (MSB first), reversing the list at the same
21702 while (unwind
.opcode_count
> 0)
21706 md_number_to_chars (ptr
, data
, 4);
21711 unwind
.opcode_count
--;
21713 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
21716 /* Finish off the last word. */
21719 /* Pad with "finish" opcodes. */
21721 data
= (data
<< 8) | 0xb0;
21723 md_number_to_chars (ptr
, data
, 4);
21728 /* Add an empty descriptor if there is no user-specified data. */
21729 ptr
= frag_more (4);
21730 md_number_to_chars (ptr
, 0, 4);
21737 /* Initialize the DWARF-2 unwind information for this procedure. */
21740 tc_arm_frame_initial_instructions (void)
21742 cfi_add_CFA_def_cfa (REG_SP
, 0);
21744 #endif /* OBJ_ELF */
21746 /* Convert REGNAME to a DWARF-2 register number. */
21749 tc_arm_regname_to_dw2regnum (char *regname
)
21751 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
21755 /* PR 16694: Allow VFP registers as well. */
21756 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
21760 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
21769 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
21773 exp
.X_op
= O_secrel
;
21774 exp
.X_add_symbol
= symbol
;
21775 exp
.X_add_number
= 0;
21776 emit_expr (&exp
, size
);
21780 /* MD interface: Symbol and relocation handling. */
21782 /* Return the address within the segment that a PC-relative fixup is
21783 relative to. For ARM, PC-relative fixups applied to instructions
21784 are generally relative to the location of the fixup plus 8 bytes.
21785 Thumb branches are offset by 4, and Thumb loads relative to PC
21786 require special handling. */
21789 md_pcrel_from_section (fixS
* fixP
, segT seg
)
21791 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21793 /* If this is pc-relative and we are going to emit a relocation
21794 then we just want to put out any pipeline compensation that the linker
21795 will need. Otherwise we want to use the calculated base.
21796 For WinCE we skip the bias for externals as well, since this
21797 is how the MS ARM-CE assembler behaves and we want to be compatible. */
21799 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
21800 || (arm_force_relocation (fixP
)
21802 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
21808 switch (fixP
->fx_r_type
)
21810 /* PC relative addressing on the Thumb is slightly odd as the
21811 bottom two bits of the PC are forced to zero for the
21812 calculation. This happens *after* application of the
21813 pipeline offset. However, Thumb adrl already adjusts for
21814 this, so we need not do it again. */
21815 case BFD_RELOC_ARM_THUMB_ADD
:
21818 case BFD_RELOC_ARM_THUMB_OFFSET
:
21819 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
21820 case BFD_RELOC_ARM_T32_ADD_PC12
:
21821 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
21822 return (base
+ 4) & ~3;
21824 /* Thumb branches are simply offset by +4. */
21825 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
21826 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
21827 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
21828 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
21829 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
21832 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
21834 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21835 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21836 && ARM_IS_FUNC (fixP
->fx_addsy
)
21837 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21838 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21841 /* BLX is like branches above, but forces the low two bits of PC to
21843 case BFD_RELOC_THUMB_PCREL_BLX
:
21845 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21846 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21847 && THUMB_IS_FUNC (fixP
->fx_addsy
)
21848 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21849 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21850 return (base
+ 4) & ~3;
21852 /* ARM mode branches are offset by +8. However, the Windows CE
21853 loader expects the relocation not to take this into account. */
21854 case BFD_RELOC_ARM_PCREL_BLX
:
21856 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21857 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21858 && ARM_IS_FUNC (fixP
->fx_addsy
)
21859 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21860 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21863 case BFD_RELOC_ARM_PCREL_CALL
:
21865 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21866 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21867 && THUMB_IS_FUNC (fixP
->fx_addsy
)
21868 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21869 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21872 case BFD_RELOC_ARM_PCREL_BRANCH
:
21873 case BFD_RELOC_ARM_PCREL_JUMP
:
21874 case BFD_RELOC_ARM_PLT32
:
21876 /* When handling fixups immediately, because we have already
21877 discovered the value of a symbol, or the address of the frag involved
21878 we must account for the offset by +8, as the OS loader will never see the reloc.
21879 see fixup_segment() in write.c
21880 The S_IS_EXTERNAL test handles the case of global symbols.
21881 Those need the calculated base, not just the pipe compensation the linker will need. */
21883 && fixP
->fx_addsy
!= NULL
21884 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21885 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
21893 /* ARM mode loads relative to PC are also offset by +8. Unlike
21894 branches, the Windows CE loader *does* expect the relocation
21895 to take this into account. */
21896 case BFD_RELOC_ARM_OFFSET_IMM
:
21897 case BFD_RELOC_ARM_OFFSET_IMM8
:
21898 case BFD_RELOC_ARM_HWLITERAL
:
21899 case BFD_RELOC_ARM_LITERAL
:
21900 case BFD_RELOC_ARM_CP_OFF_IMM
:
21904 /* Other PC-relative relocations are un-offset. */
21910 static bfd_boolean flag_warn_syms
= TRUE
;
21913 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
21915 /* PR 18347 - Warn if the user attempts to create a symbol with the same
21916 name as an ARM instruction. Whilst strictly speaking it is allowed, it
21917 does mean that the resulting code might be very confusing to the reader.
21918 Also this warning can be triggered if the user omits an operand before
21919 an immediate address, eg:
21923 GAS treats this as an assignment of the value of the symbol foo to a
21924 symbol LDR, and so (without this code) it will not issue any kind of
21925 warning or error message.
21927 Note - ARM instructions are case-insensitive but the strings in the hash
21928 table are all stored in lower case, so we must first ensure that name is
21930 if (flag_warn_syms
&& arm_ops_hsh
)
21932 char * nbuf
= strdup (name
);
21935 for (p
= nbuf
; *p
; p
++)
21937 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
21939 static struct hash_control
* already_warned
= NULL
;
21941 if (already_warned
== NULL
)
21942 already_warned
= hash_new ();
21943 /* Only warn about the symbol once. To keep the code
21944 simple we let hash_insert do the lookup for us. */
21945 if (hash_insert (already_warned
, name
, NULL
) == NULL
)
21946 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
21955 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
21956 Otherwise we have no need to default values of symbols. */
21959 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
21962 if (name
[0] == '_' && name
[1] == 'G'
21963 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
21967 if (symbol_find (name
))
21968 as_bad (_("GOT already in the symbol table"));
21970 GOT_symbol
= symbol_new (name
, undefined_section
,
21971 (valueT
) 0, & zero_address_frag
);
21981 /* Subroutine of md_apply_fix. Check to see if an immediate can be
21982 computed as two separate immediate values, added together. We
21983 already know that this value cannot be computed by just one ARM
21986 static unsigned int
21987 validate_immediate_twopart (unsigned int val
,
21988 unsigned int * highpart
)
21993 for (i
= 0; i
< 32; i
+= 2)
21994 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
22000 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
22002 else if (a
& 0xff0000)
22004 if (a
& 0xff000000)
22006 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
22010 gas_assert (a
& 0xff000000);
22011 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
22014 return (a
& 0xff) | (i
<< 7);
22021 validate_offset_imm (unsigned int val
, int hwse
)
22023 if ((hwse
&& val
> 255) || val
> 4095)
22028 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22029 negative immediate constant by altering the instruction. A bit of
22034 by inverting the second operand, and
22037 by negating the second operand. */
22040 negate_data_op (unsigned long * instruction
,
22041 unsigned long value
)
22044 unsigned long negated
, inverted
;
22046 negated
= encode_arm_immediate (-value
);
22047 inverted
= encode_arm_immediate (~value
);
22049 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
22052 /* First negates. */
22053 case OPCODE_SUB
: /* ADD <-> SUB */
22054 new_inst
= OPCODE_ADD
;
22059 new_inst
= OPCODE_SUB
;
22063 case OPCODE_CMP
: /* CMP <-> CMN */
22064 new_inst
= OPCODE_CMN
;
22069 new_inst
= OPCODE_CMP
;
22073 /* Now Inverted ops. */
22074 case OPCODE_MOV
: /* MOV <-> MVN */
22075 new_inst
= OPCODE_MVN
;
22080 new_inst
= OPCODE_MOV
;
22084 case OPCODE_AND
: /* AND <-> BIC */
22085 new_inst
= OPCODE_BIC
;
22090 new_inst
= OPCODE_AND
;
22094 case OPCODE_ADC
: /* ADC <-> SBC */
22095 new_inst
= OPCODE_SBC
;
22100 new_inst
= OPCODE_ADC
;
22104 /* We cannot do anything. */
22109 if (value
== (unsigned) FAIL
)
22112 *instruction
&= OPCODE_MASK
;
22113 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
22117 /* Like negate_data_op, but for Thumb-2. */
22119 static unsigned int
22120 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
22124 unsigned int negated
, inverted
;
22126 negated
= encode_thumb32_immediate (-value
);
22127 inverted
= encode_thumb32_immediate (~value
);
22129 rd
= (*instruction
>> 8) & 0xf;
22130 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
22133 /* ADD <-> SUB. Includes CMP <-> CMN. */
22134 case T2_OPCODE_SUB
:
22135 new_inst
= T2_OPCODE_ADD
;
22139 case T2_OPCODE_ADD
:
22140 new_inst
= T2_OPCODE_SUB
;
22144 /* ORR <-> ORN. Includes MOV <-> MVN. */
22145 case T2_OPCODE_ORR
:
22146 new_inst
= T2_OPCODE_ORN
;
22150 case T2_OPCODE_ORN
:
22151 new_inst
= T2_OPCODE_ORR
;
22155 /* AND <-> BIC. TST has no inverted equivalent. */
22156 case T2_OPCODE_AND
:
22157 new_inst
= T2_OPCODE_BIC
;
22164 case T2_OPCODE_BIC
:
22165 new_inst
= T2_OPCODE_AND
;
22170 case T2_OPCODE_ADC
:
22171 new_inst
= T2_OPCODE_SBC
;
22175 case T2_OPCODE_SBC
:
22176 new_inst
= T2_OPCODE_ADC
;
22180 /* We cannot do anything. */
22185 if (value
== (unsigned int)FAIL
)
22188 *instruction
&= T2_OPCODE_MASK
;
22189 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
22193 /* Read a 32-bit thumb instruction from buf. */
22194 static unsigned long
22195 get_thumb32_insn (char * buf
)
22197 unsigned long insn
;
22198 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
22199 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22205 /* We usually want to set the low bit on the address of thumb function
22206 symbols. In particular .word foo - . should have the low bit set.
22207 Generic code tries to fold the difference of two symbols to
22208 a constant. Prevent this and force a relocation when the first symbols
22209 is a thumb function. */
22212 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
22214 if (op
== O_subtract
22215 && l
->X_op
== O_symbol
22216 && r
->X_op
== O_symbol
22217 && THUMB_IS_FUNC (l
->X_add_symbol
))
22219 l
->X_op
= O_subtract
;
22220 l
->X_op_symbol
= r
->X_add_symbol
;
22221 l
->X_add_number
-= r
->X_add_number
;
22225 /* Process as normal. */
22229 /* Encode Thumb2 unconditional branches and calls. The encoding
22230 for the 2 are identical for the immediate values. */
22233 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
22235 #define T2I1I2MASK ((1 << 13) | (1 << 11))
22238 addressT S
, I1
, I2
, lo
, hi
;
22240 S
= (value
>> 24) & 0x01;
22241 I1
= (value
>> 23) & 0x01;
22242 I2
= (value
>> 22) & 0x01;
22243 hi
= (value
>> 12) & 0x3ff;
22244 lo
= (value
>> 1) & 0x7ff;
22245 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22246 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22247 newval
|= (S
<< 10) | hi
;
22248 newval2
&= ~T2I1I2MASK
;
22249 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
22250 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22251 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
22255 md_apply_fix (fixS
* fixP
,
22259 offsetT value
= * valP
;
22261 unsigned int newimm
;
22262 unsigned long temp
;
22264 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
22266 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
22268 /* Note whether this will delete the relocation. */
22270 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
22273 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22274 consistency with the behaviour on 32-bit hosts. Remember value
22276 value
&= 0xffffffff;
22277 value
^= 0x80000000;
22278 value
-= 0x80000000;
22281 fixP
->fx_addnumber
= value
;
22283 /* Same treatment for fixP->fx_offset. */
22284 fixP
->fx_offset
&= 0xffffffff;
22285 fixP
->fx_offset
^= 0x80000000;
22286 fixP
->fx_offset
-= 0x80000000;
22288 switch (fixP
->fx_r_type
)
22290 case BFD_RELOC_NONE
:
22291 /* This will need to go in the object file. */
22295 case BFD_RELOC_ARM_IMMEDIATE
:
22296 /* We claim that this fixup has been processed here,
22297 even if in fact we generate an error because we do
22298 not have a reloc for it, so tc_gen_reloc will reject it. */
22301 if (fixP
->fx_addsy
)
22303 const char *msg
= 0;
22305 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22306 msg
= _("undefined symbol %s used as an immediate value");
22307 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22308 msg
= _("symbol %s is in a different section");
22309 else if (S_IS_WEAK (fixP
->fx_addsy
))
22310 msg
= _("symbol %s is weak and may be overridden later");
22314 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22315 msg
, S_GET_NAME (fixP
->fx_addsy
));
22320 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22322 /* If the offset is negative, we should use encoding A2 for ADR. */
22323 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
22324 newimm
= negate_data_op (&temp
, value
);
22327 newimm
= encode_arm_immediate (value
);
22329 /* If the instruction will fail, see if we can fix things up by
22330 changing the opcode. */
22331 if (newimm
== (unsigned int) FAIL
)
22332 newimm
= negate_data_op (&temp
, value
);
22335 if (newimm
== (unsigned int) FAIL
)
22337 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22338 _("invalid constant (%lx) after fixup"),
22339 (unsigned long) value
);
22343 newimm
|= (temp
& 0xfffff000);
22344 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22347 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
22349 unsigned int highpart
= 0;
22350 unsigned int newinsn
= 0xe1a00000; /* nop. */
22352 if (fixP
->fx_addsy
)
22354 const char *msg
= 0;
22356 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22357 msg
= _("undefined symbol %s used as an immediate value");
22358 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22359 msg
= _("symbol %s is in a different section");
22360 else if (S_IS_WEAK (fixP
->fx_addsy
))
22361 msg
= _("symbol %s is weak and may be overridden later");
22365 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22366 msg
, S_GET_NAME (fixP
->fx_addsy
));
22371 newimm
= encode_arm_immediate (value
);
22372 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22374 /* If the instruction will fail, see if we can fix things up by
22375 changing the opcode. */
22376 if (newimm
== (unsigned int) FAIL
22377 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
22379 /* No ? OK - try using two ADD instructions to generate
22381 newimm
= validate_immediate_twopart (value
, & highpart
);
22383 /* Yes - then make sure that the second instruction is
22385 if (newimm
!= (unsigned int) FAIL
)
22387 /* Still No ? Try using a negated value. */
22388 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
22389 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
22390 /* Otherwise - give up. */
22393 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22394 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22399 /* Replace the first operand in the 2nd instruction (which
22400 is the PC) with the destination register. We have
22401 already added in the PC in the first instruction and we
22402 do not want to do it again. */
22403 newinsn
&= ~ 0xf0000;
22404 newinsn
|= ((newinsn
& 0x0f000) << 4);
22407 newimm
|= (temp
& 0xfffff000);
22408 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22410 highpart
|= (newinsn
& 0xfffff000);
22411 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
22415 case BFD_RELOC_ARM_OFFSET_IMM
:
22416 if (!fixP
->fx_done
&& seg
->use_rela_p
)
22419 case BFD_RELOC_ARM_LITERAL
:
22425 if (validate_offset_imm (value
, 0) == FAIL
)
22427 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
22428 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22429 _("invalid literal constant: pool needs to be closer"));
22431 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22432 _("bad immediate value for offset (%ld)"),
22437 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22439 newval
&= 0xfffff000;
22442 newval
&= 0xff7ff000;
22443 newval
|= value
| (sign
? INDEX_UP
: 0);
22445 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22448 case BFD_RELOC_ARM_OFFSET_IMM8
:
22449 case BFD_RELOC_ARM_HWLITERAL
:
22455 if (validate_offset_imm (value
, 1) == FAIL
)
22457 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
22458 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22459 _("invalid literal constant: pool needs to be closer"));
22461 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22462 _("bad immediate value for 8-bit offset (%ld)"),
22467 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22469 newval
&= 0xfffff0f0;
22472 newval
&= 0xff7ff0f0;
22473 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
22475 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22478 case BFD_RELOC_ARM_T32_OFFSET_U8
:
22479 if (value
< 0 || value
> 1020 || value
% 4 != 0)
22480 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22481 _("bad immediate value for offset (%ld)"), (long) value
);
22484 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
22486 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
22489 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
22490 /* This is a complicated relocation used for all varieties of Thumb32
22491 load/store instruction with immediate offset:
22493 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22494 *4, optional writeback(W)
22495 (doubleword load/store)
22497 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22498 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22499 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22500 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22501 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22503 Uppercase letters indicate bits that are already encoded at
22504 this point. Lowercase letters are our problem. For the
22505 second block of instructions, the secondary opcode nybble
22506 (bits 8..11) is present, and bit 23 is zero, even if this is
22507 a PC-relative operation. */
22508 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22510 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
22512 if ((newval
& 0xf0000000) == 0xe0000000)
22514 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22516 newval
|= (1 << 23);
22519 if (value
% 4 != 0)
22521 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22522 _("offset not a multiple of 4"));
22528 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22529 _("offset out of range"));
22534 else if ((newval
& 0x000f0000) == 0x000f0000)
22536 /* PC-relative, 12-bit offset. */
22538 newval
|= (1 << 23);
22543 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22544 _("offset out of range"));
22549 else if ((newval
& 0x00000100) == 0x00000100)
22551 /* Writeback: 8-bit, +/- offset. */
22553 newval
|= (1 << 9);
22558 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22559 _("offset out of range"));
22564 else if ((newval
& 0x00000f00) == 0x00000e00)
22566 /* T-instruction: positive 8-bit offset. */
22567 if (value
< 0 || value
> 0xff)
22569 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22570 _("offset out of range"));
22578 /* Positive 12-bit or negative 8-bit offset. */
22582 newval
|= (1 << 23);
22592 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22593 _("offset out of range"));
22600 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
22601 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
22604 case BFD_RELOC_ARM_SHIFT_IMM
:
22605 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22606 if (((unsigned long) value
) > 32
22608 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
22610 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22611 _("shift expression is too large"));
22616 /* Shifts of zero must be done as lsl. */
22618 else if (value
== 32)
22620 newval
&= 0xfffff07f;
22621 newval
|= (value
& 0x1f) << 7;
22622 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22625 case BFD_RELOC_ARM_T32_IMMEDIATE
:
22626 case BFD_RELOC_ARM_T32_ADD_IMM
:
22627 case BFD_RELOC_ARM_T32_IMM12
:
22628 case BFD_RELOC_ARM_T32_ADD_PC12
:
22629 /* We claim that this fixup has been processed here,
22630 even if in fact we generate an error because we do
22631 not have a reloc for it, so tc_gen_reloc will reject it. */
22635 && ! S_IS_DEFINED (fixP
->fx_addsy
))
22637 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22638 _("undefined symbol %s used as an immediate value"),
22639 S_GET_NAME (fixP
->fx_addsy
));
22643 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22645 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
22648 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
22649 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
22651 newimm
= encode_thumb32_immediate (value
);
22652 if (newimm
== (unsigned int) FAIL
)
22653 newimm
= thumb32_negate_data_op (&newval
, value
);
22655 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
22656 && newimm
== (unsigned int) FAIL
)
22658 /* Turn add/sum into addw/subw. */
22659 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
22660 newval
= (newval
& 0xfeffffff) | 0x02000000;
22661 /* No flat 12-bit imm encoding for addsw/subsw. */
22662 if ((newval
& 0x00100000) == 0)
22664 /* 12 bit immediate for addw/subw. */
22668 newval
^= 0x00a00000;
22671 newimm
= (unsigned int) FAIL
;
22677 if (newimm
== (unsigned int)FAIL
)
22679 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22680 _("invalid constant (%lx) after fixup"),
22681 (unsigned long) value
);
22685 newval
|= (newimm
& 0x800) << 15;
22686 newval
|= (newimm
& 0x700) << 4;
22687 newval
|= (newimm
& 0x0ff);
22689 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
22690 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
22693 case BFD_RELOC_ARM_SMC
:
22694 if (((unsigned long) value
) > 0xffff)
22695 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22696 _("invalid smc expression"));
22697 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22698 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
22699 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22702 case BFD_RELOC_ARM_HVC
:
22703 if (((unsigned long) value
) > 0xffff)
22704 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22705 _("invalid hvc expression"));
22706 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22707 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
22708 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22711 case BFD_RELOC_ARM_SWI
:
22712 if (fixP
->tc_fix_data
!= 0)
22714 if (((unsigned long) value
) > 0xff)
22715 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22716 _("invalid swi expression"));
22717 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22719 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22723 if (((unsigned long) value
) > 0x00ffffff)
22724 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22725 _("invalid swi expression"));
22726 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22728 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22732 case BFD_RELOC_ARM_MULTI
:
22733 if (((unsigned long) value
) > 0xffff)
22734 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22735 _("invalid expression in load/store multiple"));
22736 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
22737 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22741 case BFD_RELOC_ARM_PCREL_CALL
:
22743 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
22745 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22746 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22747 && THUMB_IS_FUNC (fixP
->fx_addsy
))
22748 /* Flip the bl to blx. This is a simple flip
22749 bit here because we generate PCREL_CALL for
22750 unconditional bls. */
22752 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22753 newval
= newval
| 0x10000000;
22754 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22760 goto arm_branch_common
;
22762 case BFD_RELOC_ARM_PCREL_JUMP
:
22763 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
22765 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22766 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22767 && THUMB_IS_FUNC (fixP
->fx_addsy
))
22769 /* This would map to a bl<cond>, b<cond>,
22770 b<always> to a Thumb function. We
22771 need to force a relocation for this particular
22773 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22777 case BFD_RELOC_ARM_PLT32
:
22779 case BFD_RELOC_ARM_PCREL_BRANCH
:
22781 goto arm_branch_common
;
22783 case BFD_RELOC_ARM_PCREL_BLX
:
22786 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
22788 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22789 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22790 && ARM_IS_FUNC (fixP
->fx_addsy
))
22792 /* Flip the blx to a bl and warn. */
22793 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
22794 newval
= 0xeb000000;
22795 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
22796 _("blx to '%s' an ARM ISA state function changed to bl"),
22798 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22804 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
22805 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
22809 /* We are going to store value (shifted right by two) in the
22810 instruction, in a 24 bit, signed field. Bits 26 through 32 either
22811 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
22812 also be be clear. */
22814 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22815 _("misaligned branch destination"));
22816 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
22817 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
22818 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22820 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22822 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22823 newval
|= (value
>> 2) & 0x00ffffff;
22824 /* Set the H bit on BLX instructions. */
22828 newval
|= 0x01000000;
22830 newval
&= ~0x01000000;
22832 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22836 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
22837 /* CBZ can only branch forward. */
22839 /* Attempts to use CBZ to branch to the next instruction
22840 (which, strictly speaking, are prohibited) will be turned into
22843 FIXME: It may be better to remove the instruction completely and
22844 perform relaxation. */
22847 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22848 newval
= 0xbf00; /* NOP encoding T1 */
22849 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22854 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22856 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22858 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22859 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
22860 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22865 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
22866 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
22867 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22869 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22871 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22872 newval
|= (value
& 0x1ff) >> 1;
22873 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22877 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
22878 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
22879 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22881 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22883 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22884 newval
|= (value
& 0xfff) >> 1;
22885 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22889 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
22891 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22892 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22893 && ARM_IS_FUNC (fixP
->fx_addsy
)
22894 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22896 /* Force a relocation for a branch 20 bits wide. */
22899 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
22900 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22901 _("conditional branch out of range"));
22903 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22906 addressT S
, J1
, J2
, lo
, hi
;
22908 S
= (value
& 0x00100000) >> 20;
22909 J2
= (value
& 0x00080000) >> 19;
22910 J1
= (value
& 0x00040000) >> 18;
22911 hi
= (value
& 0x0003f000) >> 12;
22912 lo
= (value
& 0x00000ffe) >> 1;
22914 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22915 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22916 newval
|= (S
<< 10) | hi
;
22917 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
22918 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22919 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
22923 case BFD_RELOC_THUMB_PCREL_BLX
:
22924 /* If there is a blx from a thumb state function to
22925 another thumb function flip this to a bl and warn
22929 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22930 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22931 && THUMB_IS_FUNC (fixP
->fx_addsy
))
22933 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
22934 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
22935 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
22937 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22938 newval
= newval
| 0x1000;
22939 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
22940 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
22945 goto thumb_bl_common
;
22947 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
22948 /* A bl from Thumb state ISA to an internal ARM state function
22949 is converted to a blx. */
22951 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22952 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22953 && ARM_IS_FUNC (fixP
->fx_addsy
)
22954 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22956 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22957 newval
= newval
& ~0x1000;
22958 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
22959 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
22965 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
22966 /* For a BLX instruction, make sure that the relocation is rounded up
22967 to a word boundary. This follows the semantics of the instruction
22968 which specifies that bit 1 of the target address will come from bit
22969 1 of the base address. */
22970 value
= (value
+ 3) & ~ 3;
22973 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
22974 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
22975 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
22978 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
22980 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
22981 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22982 else if ((value
& ~0x1ffffff)
22983 && ((value
& ~0x1ffffff) != ~0x1ffffff))
22984 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22985 _("Thumb2 branch out of range"));
22988 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22989 encode_thumb2_b_bl_offset (buf
, value
);
22993 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
22994 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
22995 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22997 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22998 encode_thumb2_b_bl_offset (buf
, value
);
23003 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23008 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23009 md_number_to_chars (buf
, value
, 2);
23013 case BFD_RELOC_ARM_TLS_CALL
:
23014 case BFD_RELOC_ARM_THM_TLS_CALL
:
23015 case BFD_RELOC_ARM_TLS_DESCSEQ
:
23016 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
23017 case BFD_RELOC_ARM_TLS_GOTDESC
:
23018 case BFD_RELOC_ARM_TLS_GD32
:
23019 case BFD_RELOC_ARM_TLS_LE32
:
23020 case BFD_RELOC_ARM_TLS_IE32
:
23021 case BFD_RELOC_ARM_TLS_LDM32
:
23022 case BFD_RELOC_ARM_TLS_LDO32
:
23023 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
23026 case BFD_RELOC_ARM_GOT32
:
23027 case BFD_RELOC_ARM_GOTOFF
:
23030 case BFD_RELOC_ARM_GOT_PREL
:
23031 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23032 md_number_to_chars (buf
, value
, 4);
23035 case BFD_RELOC_ARM_TARGET2
:
23036 /* TARGET2 is not partial-inplace, so we need to write the
23037 addend here for REL targets, because it won't be written out
23038 during reloc processing later. */
23039 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23040 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
23044 case BFD_RELOC_RVA
:
23046 case BFD_RELOC_ARM_TARGET1
:
23047 case BFD_RELOC_ARM_ROSEGREL32
:
23048 case BFD_RELOC_ARM_SBREL32
:
23049 case BFD_RELOC_32_PCREL
:
23051 case BFD_RELOC_32_SECREL
:
23053 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23055 /* For WinCE we only do this for pcrel fixups. */
23056 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
23058 md_number_to_chars (buf
, value
, 4);
23062 case BFD_RELOC_ARM_PREL31
:
23063 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23065 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
23066 if ((value
^ (value
>> 1)) & 0x40000000)
23068 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23069 _("rel31 relocation overflow"));
23071 newval
|= value
& 0x7fffffff;
23072 md_number_to_chars (buf
, newval
, 4);
23077 case BFD_RELOC_ARM_CP_OFF_IMM
:
23078 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
23079 if (value
< -1023 || value
> 1023 || (value
& 3))
23080 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23081 _("co-processor offset out of range"));
23086 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23087 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23088 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23090 newval
= get_thumb32_insn (buf
);
23092 newval
&= 0xffffff00;
23095 newval
&= 0xff7fff00;
23096 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
23098 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23099 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23100 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23102 put_thumb32_insn (buf
, newval
);
23105 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
23106 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
23107 if (value
< -255 || value
> 255)
23108 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23109 _("co-processor offset out of range"));
23111 goto cp_off_common
;
23113 case BFD_RELOC_ARM_THUMB_OFFSET
:
23114 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23115 /* Exactly what ranges, and where the offset is inserted depends
23116 on the type of instruction, we can establish this from the
23118 switch (newval
>> 12)
23120 case 4: /* PC load. */
23121 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23122 forced to zero for these loads; md_pcrel_from has already
23123 compensated for this. */
23125 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23126 _("invalid offset, target not word aligned (0x%08lX)"),
23127 (((unsigned long) fixP
->fx_frag
->fr_address
23128 + (unsigned long) fixP
->fx_where
) & ~3)
23129 + (unsigned long) value
);
23131 if (value
& ~0x3fc)
23132 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23133 _("invalid offset, value too big (0x%08lX)"),
23136 newval
|= value
>> 2;
23139 case 9: /* SP load/store. */
23140 if (value
& ~0x3fc)
23141 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23142 _("invalid offset, value too big (0x%08lX)"),
23144 newval
|= value
>> 2;
23147 case 6: /* Word load/store. */
23149 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23150 _("invalid offset, value too big (0x%08lX)"),
23152 newval
|= value
<< 4; /* 6 - 2. */
23155 case 7: /* Byte load/store. */
23157 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23158 _("invalid offset, value too big (0x%08lX)"),
23160 newval
|= value
<< 6;
23163 case 8: /* Halfword load/store. */
23165 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23166 _("invalid offset, value too big (0x%08lX)"),
23168 newval
|= value
<< 5; /* 6 - 1. */
23172 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23173 "Unable to process relocation for thumb opcode: %lx",
23174 (unsigned long) newval
);
23177 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23180 case BFD_RELOC_ARM_THUMB_ADD
:
23181 /* This is a complicated relocation, since we use it for all of
23182 the following immediate relocations:
23186 9bit ADD/SUB SP word-aligned
23187 10bit ADD PC/SP word-aligned
23189 The type of instruction being processed is encoded in the
23196 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23198 int rd
= (newval
>> 4) & 0xf;
23199 int rs
= newval
& 0xf;
23200 int subtract
= !!(newval
& 0x8000);
23202 /* Check for HI regs, only very restricted cases allowed:
23203 Adjusting SP, and using PC or SP to get an address. */
23204 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
23205 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
23206 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23207 _("invalid Hi register with immediate"));
23209 /* If value is negative, choose the opposite instruction. */
23213 subtract
= !subtract
;
23215 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23216 _("immediate value out of range"));
23221 if (value
& ~0x1fc)
23222 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23223 _("invalid immediate for stack address calculation"));
23224 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
23225 newval
|= value
>> 2;
23227 else if (rs
== REG_PC
|| rs
== REG_SP
)
23229 /* PR gas/18541. If the addition is for a defined symbol
23230 within range of an ADR instruction then accept it. */
23233 && fixP
->fx_addsy
!= NULL
)
23237 if (! S_IS_DEFINED (fixP
->fx_addsy
)
23238 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
23239 || S_IS_WEAK (fixP
->fx_addsy
))
23241 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23242 _("address calculation needs a strongly defined nearby symbol"));
23246 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23248 /* Round up to the next 4-byte boundary. */
23253 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
23257 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23258 _("symbol too far away"));
23268 if (subtract
|| value
& ~0x3fc)
23269 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23270 _("invalid immediate for address calculation (value = 0x%08lX)"),
23271 (unsigned long) (subtract
? - value
: value
));
23272 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
23274 newval
|= value
>> 2;
23279 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23280 _("immediate value out of range"));
23281 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
23282 newval
|= (rd
<< 8) | value
;
23287 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23288 _("immediate value out of range"));
23289 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
23290 newval
|= rd
| (rs
<< 3) | (value
<< 6);
23293 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23296 case BFD_RELOC_ARM_THUMB_IMM
:
23297 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23298 if (value
< 0 || value
> 255)
23299 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23300 _("invalid immediate: %ld is out of range"),
23303 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23306 case BFD_RELOC_ARM_THUMB_SHIFT
:
23307 /* 5bit shift value (0..32). LSL cannot take 32. */
23308 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
23309 temp
= newval
& 0xf800;
23310 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
23311 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23312 _("invalid shift value: %ld"), (long) value
);
23313 /* Shifts of zero must be encoded as LSL. */
23315 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
23316 /* Shifts of 32 are encoded as zero. */
23317 else if (value
== 32)
23319 newval
|= value
<< 6;
23320 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23323 case BFD_RELOC_VTABLE_INHERIT
:
23324 case BFD_RELOC_VTABLE_ENTRY
:
23328 case BFD_RELOC_ARM_MOVW
:
23329 case BFD_RELOC_ARM_MOVT
:
23330 case BFD_RELOC_ARM_THUMB_MOVW
:
23331 case BFD_RELOC_ARM_THUMB_MOVT
:
23332 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23334 /* REL format relocations are limited to a 16-bit addend. */
23335 if (!fixP
->fx_done
)
23337 if (value
< -0x8000 || value
> 0x7fff)
23338 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23339 _("offset out of range"));
23341 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
23342 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23347 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
23348 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23350 newval
= get_thumb32_insn (buf
);
23351 newval
&= 0xfbf08f00;
23352 newval
|= (value
& 0xf000) << 4;
23353 newval
|= (value
& 0x0800) << 15;
23354 newval
|= (value
& 0x0700) << 4;
23355 newval
|= (value
& 0x00ff);
23356 put_thumb32_insn (buf
, newval
);
23360 newval
= md_chars_to_number (buf
, 4);
23361 newval
&= 0xfff0f000;
23362 newval
|= value
& 0x0fff;
23363 newval
|= (value
& 0xf000) << 4;
23364 md_number_to_chars (buf
, newval
, 4);
23369 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
23370 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
23371 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
23372 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
23373 gas_assert (!fixP
->fx_done
);
23376 bfd_boolean is_mov
;
23377 bfd_vma encoded_addend
= value
;
23379 /* Check that addend can be encoded in instruction. */
23380 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
23381 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23382 _("the offset 0x%08lX is not representable"),
23383 (unsigned long) encoded_addend
);
23385 /* Extract the instruction. */
23386 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
23387 is_mov
= (insn
& 0xf800) == 0x2000;
23392 if (!seg
->use_rela_p
)
23393 insn
|= encoded_addend
;
23399 /* Extract the instruction. */
23400 /* Encoding is the following
23405 /* The following conditions must be true :
23410 rd
= (insn
>> 4) & 0xf;
23412 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
23413 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23414 _("Unable to process relocation for thumb opcode: %lx"),
23415 (unsigned long) insn
);
23417 /* Encode as ADD immediate8 thumb 1 code. */
23418 insn
= 0x3000 | (rd
<< 8);
23420 /* Place the encoded addend into the first 8 bits of the
23422 if (!seg
->use_rela_p
)
23423 insn
|= encoded_addend
;
23426 /* Update the instruction. */
23427 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
23431 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
23432 case BFD_RELOC_ARM_ALU_PC_G0
:
23433 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
23434 case BFD_RELOC_ARM_ALU_PC_G1
:
23435 case BFD_RELOC_ARM_ALU_PC_G2
:
23436 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
23437 case BFD_RELOC_ARM_ALU_SB_G0
:
23438 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
23439 case BFD_RELOC_ARM_ALU_SB_G1
:
23440 case BFD_RELOC_ARM_ALU_SB_G2
:
23441 gas_assert (!fixP
->fx_done
);
23442 if (!seg
->use_rela_p
)
23445 bfd_vma encoded_addend
;
23446 bfd_vma addend_abs
= abs (value
);
23448 /* Check that the absolute value of the addend can be
23449 expressed as an 8-bit constant plus a rotation. */
23450 encoded_addend
= encode_arm_immediate (addend_abs
);
23451 if (encoded_addend
== (unsigned int) FAIL
)
23452 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23453 _("the offset 0x%08lX is not representable"),
23454 (unsigned long) addend_abs
);
23456 /* Extract the instruction. */
23457 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23459 /* If the addend is positive, use an ADD instruction.
23460 Otherwise use a SUB. Take care not to destroy the S bit. */
23461 insn
&= 0xff1fffff;
23467 /* Place the encoded addend into the first 12 bits of the
23469 insn
&= 0xfffff000;
23470 insn
|= encoded_addend
;
23472 /* Update the instruction. */
23473 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23477 case BFD_RELOC_ARM_LDR_PC_G0
:
23478 case BFD_RELOC_ARM_LDR_PC_G1
:
23479 case BFD_RELOC_ARM_LDR_PC_G2
:
23480 case BFD_RELOC_ARM_LDR_SB_G0
:
23481 case BFD_RELOC_ARM_LDR_SB_G1
:
23482 case BFD_RELOC_ARM_LDR_SB_G2
:
23483 gas_assert (!fixP
->fx_done
);
23484 if (!seg
->use_rela_p
)
23487 bfd_vma addend_abs
= abs (value
);
23489 /* Check that the absolute value of the addend can be
23490 encoded in 12 bits. */
23491 if (addend_abs
>= 0x1000)
23492 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23493 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23494 (unsigned long) addend_abs
);
23496 /* Extract the instruction. */
23497 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23499 /* If the addend is negative, clear bit 23 of the instruction.
23500 Otherwise set it. */
23502 insn
&= ~(1 << 23);
23506 /* Place the absolute value of the addend into the first 12 bits
23507 of the instruction. */
23508 insn
&= 0xfffff000;
23509 insn
|= addend_abs
;
23511 /* Update the instruction. */
23512 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23516 case BFD_RELOC_ARM_LDRS_PC_G0
:
23517 case BFD_RELOC_ARM_LDRS_PC_G1
:
23518 case BFD_RELOC_ARM_LDRS_PC_G2
:
23519 case BFD_RELOC_ARM_LDRS_SB_G0
:
23520 case BFD_RELOC_ARM_LDRS_SB_G1
:
23521 case BFD_RELOC_ARM_LDRS_SB_G2
:
23522 gas_assert (!fixP
->fx_done
);
23523 if (!seg
->use_rela_p
)
23526 bfd_vma addend_abs
= abs (value
);
23528 /* Check that the absolute value of the addend can be
23529 encoded in 8 bits. */
23530 if (addend_abs
>= 0x100)
23531 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23532 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23533 (unsigned long) addend_abs
);
23535 /* Extract the instruction. */
23536 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23538 /* If the addend is negative, clear bit 23 of the instruction.
23539 Otherwise set it. */
23541 insn
&= ~(1 << 23);
23545 /* Place the first four bits of the absolute value of the addend
23546 into the first 4 bits of the instruction, and the remaining
23547 four into bits 8 .. 11. */
23548 insn
&= 0xfffff0f0;
23549 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
23551 /* Update the instruction. */
23552 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23556 case BFD_RELOC_ARM_LDC_PC_G0
:
23557 case BFD_RELOC_ARM_LDC_PC_G1
:
23558 case BFD_RELOC_ARM_LDC_PC_G2
:
23559 case BFD_RELOC_ARM_LDC_SB_G0
:
23560 case BFD_RELOC_ARM_LDC_SB_G1
:
23561 case BFD_RELOC_ARM_LDC_SB_G2
:
23562 gas_assert (!fixP
->fx_done
);
23563 if (!seg
->use_rela_p
)
23566 bfd_vma addend_abs
= abs (value
);
23568 /* Check that the absolute value of the addend is a multiple of
23569 four and, when divided by four, fits in 8 bits. */
23570 if (addend_abs
& 0x3)
23571 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23572 _("bad offset 0x%08lX (must be word-aligned)"),
23573 (unsigned long) addend_abs
);
23575 if ((addend_abs
>> 2) > 0xff)
23576 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23577 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23578 (unsigned long) addend_abs
);
23580 /* Extract the instruction. */
23581 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23583 /* If the addend is negative, clear bit 23 of the instruction.
23584 Otherwise set it. */
23586 insn
&= ~(1 << 23);
23590 /* Place the addend (divided by four) into the first eight
23591 bits of the instruction. */
23592 insn
&= 0xfffffff0;
23593 insn
|= addend_abs
>> 2;
23595 /* Update the instruction. */
23596 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23600 case BFD_RELOC_ARM_V4BX
:
23601 /* This will need to go in the object file. */
23605 case BFD_RELOC_UNUSED
:
23607 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23608 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
23612 /* Translate internal representation of relocation info to BFD target
23616 tc_gen_reloc (asection
*section
, fixS
*fixp
)
23619 bfd_reloc_code_real_type code
;
23621 reloc
= (arelent
*) xmalloc (sizeof (arelent
));
23623 reloc
->sym_ptr_ptr
= (asymbol
**) xmalloc (sizeof (asymbol
*));
23624 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
23625 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
23627 if (fixp
->fx_pcrel
)
23629 if (section
->use_rela_p
)
23630 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
23632 fixp
->fx_offset
= reloc
->address
;
23634 reloc
->addend
= fixp
->fx_offset
;
23636 switch (fixp
->fx_r_type
)
23639 if (fixp
->fx_pcrel
)
23641 code
= BFD_RELOC_8_PCREL
;
23646 if (fixp
->fx_pcrel
)
23648 code
= BFD_RELOC_16_PCREL
;
23653 if (fixp
->fx_pcrel
)
23655 code
= BFD_RELOC_32_PCREL
;
23659 case BFD_RELOC_ARM_MOVW
:
23660 if (fixp
->fx_pcrel
)
23662 code
= BFD_RELOC_ARM_MOVW_PCREL
;
23666 case BFD_RELOC_ARM_MOVT
:
23667 if (fixp
->fx_pcrel
)
23669 code
= BFD_RELOC_ARM_MOVT_PCREL
;
23673 case BFD_RELOC_ARM_THUMB_MOVW
:
23674 if (fixp
->fx_pcrel
)
23676 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
23680 case BFD_RELOC_ARM_THUMB_MOVT
:
23681 if (fixp
->fx_pcrel
)
23683 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
23687 case BFD_RELOC_NONE
:
23688 case BFD_RELOC_ARM_PCREL_BRANCH
:
23689 case BFD_RELOC_ARM_PCREL_BLX
:
23690 case BFD_RELOC_RVA
:
23691 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
23692 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
23693 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
23694 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23695 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23696 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23697 case BFD_RELOC_VTABLE_ENTRY
:
23698 case BFD_RELOC_VTABLE_INHERIT
:
23700 case BFD_RELOC_32_SECREL
:
23702 code
= fixp
->fx_r_type
;
23705 case BFD_RELOC_THUMB_PCREL_BLX
:
23707 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
23708 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23711 code
= BFD_RELOC_THUMB_PCREL_BLX
;
23714 case BFD_RELOC_ARM_LITERAL
:
23715 case BFD_RELOC_ARM_HWLITERAL
:
23716 /* If this is called then the a literal has
23717 been referenced across a section boundary. */
23718 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23719 _("literal referenced across section boundary"));
23723 case BFD_RELOC_ARM_TLS_CALL
:
23724 case BFD_RELOC_ARM_THM_TLS_CALL
:
23725 case BFD_RELOC_ARM_TLS_DESCSEQ
:
23726 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
23727 case BFD_RELOC_ARM_GOT32
:
23728 case BFD_RELOC_ARM_GOTOFF
:
23729 case BFD_RELOC_ARM_GOT_PREL
:
23730 case BFD_RELOC_ARM_PLT32
:
23731 case BFD_RELOC_ARM_TARGET1
:
23732 case BFD_RELOC_ARM_ROSEGREL32
:
23733 case BFD_RELOC_ARM_SBREL32
:
23734 case BFD_RELOC_ARM_PREL31
:
23735 case BFD_RELOC_ARM_TARGET2
:
23736 case BFD_RELOC_ARM_TLS_LDO32
:
23737 case BFD_RELOC_ARM_PCREL_CALL
:
23738 case BFD_RELOC_ARM_PCREL_JUMP
:
23739 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
23740 case BFD_RELOC_ARM_ALU_PC_G0
:
23741 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
23742 case BFD_RELOC_ARM_ALU_PC_G1
:
23743 case BFD_RELOC_ARM_ALU_PC_G2
:
23744 case BFD_RELOC_ARM_LDR_PC_G0
:
23745 case BFD_RELOC_ARM_LDR_PC_G1
:
23746 case BFD_RELOC_ARM_LDR_PC_G2
:
23747 case BFD_RELOC_ARM_LDRS_PC_G0
:
23748 case BFD_RELOC_ARM_LDRS_PC_G1
:
23749 case BFD_RELOC_ARM_LDRS_PC_G2
:
23750 case BFD_RELOC_ARM_LDC_PC_G0
:
23751 case BFD_RELOC_ARM_LDC_PC_G1
:
23752 case BFD_RELOC_ARM_LDC_PC_G2
:
23753 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
23754 case BFD_RELOC_ARM_ALU_SB_G0
:
23755 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
23756 case BFD_RELOC_ARM_ALU_SB_G1
:
23757 case BFD_RELOC_ARM_ALU_SB_G2
:
23758 case BFD_RELOC_ARM_LDR_SB_G0
:
23759 case BFD_RELOC_ARM_LDR_SB_G1
:
23760 case BFD_RELOC_ARM_LDR_SB_G2
:
23761 case BFD_RELOC_ARM_LDRS_SB_G0
:
23762 case BFD_RELOC_ARM_LDRS_SB_G1
:
23763 case BFD_RELOC_ARM_LDRS_SB_G2
:
23764 case BFD_RELOC_ARM_LDC_SB_G0
:
23765 case BFD_RELOC_ARM_LDC_SB_G1
:
23766 case BFD_RELOC_ARM_LDC_SB_G2
:
23767 case BFD_RELOC_ARM_V4BX
:
23768 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
23769 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
23770 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
23771 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
23772 code
= fixp
->fx_r_type
;
23775 case BFD_RELOC_ARM_TLS_GOTDESC
:
23776 case BFD_RELOC_ARM_TLS_GD32
:
23777 case BFD_RELOC_ARM_TLS_LE32
:
23778 case BFD_RELOC_ARM_TLS_IE32
:
23779 case BFD_RELOC_ARM_TLS_LDM32
:
23780 /* BFD will include the symbol's address in the addend.
23781 But we don't want that, so subtract it out again here. */
23782 if (!S_IS_COMMON (fixp
->fx_addsy
))
23783 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
23784 code
= fixp
->fx_r_type
;
23788 case BFD_RELOC_ARM_IMMEDIATE
:
23789 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23790 _("internal relocation (type: IMMEDIATE) not fixed up"));
23793 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
23794 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23795 _("ADRL used for a symbol not defined in the same file"));
23798 case BFD_RELOC_ARM_OFFSET_IMM
:
23799 if (section
->use_rela_p
)
23801 code
= fixp
->fx_r_type
;
23805 if (fixp
->fx_addsy
!= NULL
23806 && !S_IS_DEFINED (fixp
->fx_addsy
)
23807 && S_IS_LOCAL (fixp
->fx_addsy
))
23809 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23810 _("undefined local label `%s'"),
23811 S_GET_NAME (fixp
->fx_addsy
));
23815 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23816 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
23823 switch (fixp
->fx_r_type
)
23825 case BFD_RELOC_NONE
: type
= "NONE"; break;
23826 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
23827 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
23828 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
23829 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
23830 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
23831 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
23832 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
23833 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
23834 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
23835 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
23836 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
23837 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
23838 default: type
= _("<unknown>"); break;
23840 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23841 _("cannot represent %s relocation in this object file format"),
23848 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
23850 && fixp
->fx_addsy
== GOT_symbol
)
23852 code
= BFD_RELOC_ARM_GOTPC
;
23853 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
23857 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
23859 if (reloc
->howto
== NULL
)
23861 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23862 _("cannot represent %s relocation in this object file format"),
23863 bfd_get_reloc_code_name (code
));
23867 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
23868 vtable entry to be used in the relocation's section offset. */
23869 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
23870 reloc
->address
= fixp
->fx_offset
;
23875 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
23878 cons_fix_new_arm (fragS
* frag
,
23882 bfd_reloc_code_real_type reloc
)
23887 FIXME: @@ Should look at CPU word size. */
23891 reloc
= BFD_RELOC_8
;
23894 reloc
= BFD_RELOC_16
;
23898 reloc
= BFD_RELOC_32
;
23901 reloc
= BFD_RELOC_64
;
23906 if (exp
->X_op
== O_secrel
)
23908 exp
->X_op
= O_symbol
;
23909 reloc
= BFD_RELOC_32_SECREL
;
23913 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
23916 #if defined (OBJ_COFF)
23918 arm_validate_fix (fixS
* fixP
)
23920 /* If the destination of the branch is a defined symbol which does not have
23921 the THUMB_FUNC attribute, then we must be calling a function which has
23922 the (interfacearm) attribute. We look for the Thumb entry point to that
23923 function and change the branch to refer to that function instead. */
23924 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
23925 && fixP
->fx_addsy
!= NULL
23926 && S_IS_DEFINED (fixP
->fx_addsy
)
23927 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
23929 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
23936 arm_force_relocation (struct fix
* fixp
)
23938 #if defined (OBJ_COFF) && defined (TE_PE)
23939 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
23943 /* In case we have a call or a branch to a function in ARM ISA mode from
23944 a thumb function or vice-versa force the relocation. These relocations
23945 are cleared off for some cores that might have blx and simple transformations
23949 switch (fixp
->fx_r_type
)
23951 case BFD_RELOC_ARM_PCREL_JUMP
:
23952 case BFD_RELOC_ARM_PCREL_CALL
:
23953 case BFD_RELOC_THUMB_PCREL_BLX
:
23954 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
23958 case BFD_RELOC_ARM_PCREL_BLX
:
23959 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23960 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23961 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23962 if (ARM_IS_FUNC (fixp
->fx_addsy
))
23971 /* Resolve these relocations even if the symbol is extern or weak.
23972 Technically this is probably wrong due to symbol preemption.
23973 In practice these relocations do not have enough range to be useful
23974 at dynamic link time, and some code (e.g. in the Linux kernel)
23975 expects these references to be resolved. */
23976 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
23977 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
23978 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
23979 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
23980 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23981 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
23982 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
23983 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
23984 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
23985 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
23986 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
23987 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
23988 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
23989 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
23992 /* Always leave these relocations for the linker. */
23993 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
23994 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
23995 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
23998 /* Always generate relocations against function symbols. */
23999 if (fixp
->fx_r_type
== BFD_RELOC_32
24001 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
24004 return generic_force_reloc (fixp
);
24007 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24008 /* Relocations against function names must be left unadjusted,
24009 so that the linker can use this information to generate interworking
24010 stubs. The MIPS version of this function
24011 also prevents relocations that are mips-16 specific, but I do not
24012 know why it does this.
24015 There is one other problem that ought to be addressed here, but
24016 which currently is not: Taking the address of a label (rather
24017 than a function) and then later jumping to that address. Such
24018 addresses also ought to have their bottom bit set (assuming that
24019 they reside in Thumb code), but at the moment they will not. */
24022 arm_fix_adjustable (fixS
* fixP
)
24024 if (fixP
->fx_addsy
== NULL
)
24027 /* Preserve relocations against symbols with function type. */
24028 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
24031 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
24032 && fixP
->fx_subsy
== NULL
)
24035 /* We need the symbol name for the VTABLE entries. */
24036 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
24037 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
24040 /* Don't allow symbols to be discarded on GOT related relocs. */
24041 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
24042 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
24043 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
24044 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
24045 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
24046 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
24047 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
24048 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
24049 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GOTDESC
24050 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_CALL
24051 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_CALL
24052 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_DESCSEQ
24053 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_DESCSEQ
24054 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
24057 /* Similarly for group relocations. */
24058 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24059 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24060 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24063 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
24064 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
24065 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
24066 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
24067 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
24068 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
24069 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
24070 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
24071 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
24074 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24075 offsets, so keep these symbols. */
24076 if (fixP
->fx_r_type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24077 && fixP
->fx_r_type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
24082 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24087 elf32_arm_target_format (void)
24090 return (target_big_endian
24091 ? "elf32-bigarm-symbian"
24092 : "elf32-littlearm-symbian");
24093 #elif defined (TE_VXWORKS)
24094 return (target_big_endian
24095 ? "elf32-bigarm-vxworks"
24096 : "elf32-littlearm-vxworks");
24097 #elif defined (TE_NACL)
24098 return (target_big_endian
24099 ? "elf32-bigarm-nacl"
24100 : "elf32-littlearm-nacl");
24102 if (target_big_endian
)
24103 return "elf32-bigarm";
24105 return "elf32-littlearm";
24110 armelf_frob_symbol (symbolS
* symp
,
24113 elf_frob_symbol (symp
, puntp
);
24117 /* MD interface: Finalization. */
24122 literal_pool
* pool
;
24124 /* Ensure that all the IT blocks are properly closed. */
24125 check_it_blocks_finished ();
24127 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
24129 /* Put it at the end of the relevant section. */
24130 subseg_set (pool
->section
, pool
->sub_section
);
24132 arm_elf_change_section ();
24139 /* Remove any excess mapping symbols generated for alignment frags in
24140 SEC. We may have created a mapping symbol before a zero byte
24141 alignment; remove it if there's a mapping symbol after the
24144 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
24145 void *dummy ATTRIBUTE_UNUSED
)
24147 segment_info_type
*seginfo
= seg_info (sec
);
24150 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
24153 for (fragp
= seginfo
->frchainP
->frch_root
;
24155 fragp
= fragp
->fr_next
)
24157 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
24158 fragS
*next
= fragp
->fr_next
;
24160 /* Variable-sized frags have been converted to fixed size by
24161 this point. But if this was variable-sized to start with,
24162 there will be a fixed-size frag after it. So don't handle
24164 if (sym
== NULL
|| next
== NULL
)
24167 if (S_GET_VALUE (sym
) < next
->fr_address
)
24168 /* Not at the end of this frag. */
24170 know (S_GET_VALUE (sym
) == next
->fr_address
);
24174 if (next
->tc_frag_data
.first_map
!= NULL
)
24176 /* Next frag starts with a mapping symbol. Discard this
24178 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24182 if (next
->fr_next
== NULL
)
24184 /* This mapping symbol is at the end of the section. Discard
24186 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
24187 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24191 /* As long as we have empty frags without any mapping symbols,
24193 /* If the next frag is non-empty and does not start with a
24194 mapping symbol, then this mapping symbol is required. */
24195 if (next
->fr_address
!= next
->fr_next
->fr_address
)
24198 next
= next
->fr_next
;
24200 while (next
!= NULL
);
24205 /* Adjust the symbol table. This marks Thumb symbols as distinct from
24209 arm_adjust_symtab (void)
24214 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
24216 if (ARM_IS_THUMB (sym
))
24218 if (THUMB_IS_FUNC (sym
))
24220 /* Mark the symbol as a Thumb function. */
24221 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
24222 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
24223 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
24225 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
24226 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
24228 as_bad (_("%s: unexpected function type: %d"),
24229 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
24231 else switch (S_GET_STORAGE_CLASS (sym
))
24234 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
24237 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
24240 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
24248 if (ARM_IS_INTERWORK (sym
))
24249 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
24256 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
24258 if (ARM_IS_THUMB (sym
))
24260 elf_symbol_type
* elf_sym
;
24262 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
24263 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
24265 if (! bfd_is_arm_special_symbol_name (elf_sym
->symbol
.name
,
24266 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
24268 /* If it's a .thumb_func, declare it as so,
24269 otherwise tag label as .code 16. */
24270 if (THUMB_IS_FUNC (sym
))
24271 elf_sym
->internal_elf_sym
.st_target_internal
24272 = ST_BRANCH_TO_THUMB
;
24273 else if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
24274 elf_sym
->internal_elf_sym
.st_info
=
24275 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
24280 /* Remove any overlapping mapping symbols generated by alignment frags. */
24281 bfd_map_over_sections (stdoutput
, check_mapping_symbols
, (char *) 0);
24282 /* Now do generic ELF adjustments. */
24283 elf_adjust_symtab ();
24287 /* MD interface: Initialization. */
24290 set_constant_flonums (void)
24294 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
24295 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
24299 /* Auto-select Thumb mode if it's the only available instruction set for the
24300 given architecture. */
24303 autoselect_thumb_from_cpu_variant (void)
24305 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
24306 opcode_select (16);
24315 if ( (arm_ops_hsh
= hash_new ()) == NULL
24316 || (arm_cond_hsh
= hash_new ()) == NULL
24317 || (arm_shift_hsh
= hash_new ()) == NULL
24318 || (arm_psr_hsh
= hash_new ()) == NULL
24319 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
24320 || (arm_reg_hsh
= hash_new ()) == NULL
24321 || (arm_reloc_hsh
= hash_new ()) == NULL
24322 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
24323 as_fatal (_("virtual memory exhausted"));
24325 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
24326 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
24327 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
24328 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
24329 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
24330 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
24331 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
24332 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
24333 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
24334 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
24335 (void *) (v7m_psrs
+ i
));
24336 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
24337 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
24339 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
24341 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
24342 (void *) (barrier_opt_names
+ i
));
24344 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
24346 struct reloc_entry
* entry
= reloc_names
+ i
;
24348 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
24349 /* This makes encode_branch() use the EABI versions of this relocation. */
24350 entry
->reloc
= BFD_RELOC_UNUSED
;
24352 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
24356 set_constant_flonums ();
24358 /* Set the cpu variant based on the command-line options. We prefer
24359 -mcpu= over -march= if both are set (as for GCC); and we prefer
24360 -mfpu= over any other way of setting the floating point unit.
24361 Use of legacy options with new options are faulted. */
24364 if (mcpu_cpu_opt
|| march_cpu_opt
)
24365 as_bad (_("use of old and new-style options to set CPU type"));
24367 mcpu_cpu_opt
= legacy_cpu
;
24369 else if (!mcpu_cpu_opt
)
24370 mcpu_cpu_opt
= march_cpu_opt
;
24375 as_bad (_("use of old and new-style options to set FPU type"));
24377 mfpu_opt
= legacy_fpu
;
24379 else if (!mfpu_opt
)
24381 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
24382 || defined (TE_NetBSD) || defined (TE_VXWORKS))
24383 /* Some environments specify a default FPU. If they don't, infer it
24384 from the processor. */
24386 mfpu_opt
= mcpu_fpu_opt
;
24388 mfpu_opt
= march_fpu_opt
;
24390 mfpu_opt
= &fpu_default
;
24396 if (mcpu_cpu_opt
!= NULL
)
24397 mfpu_opt
= &fpu_default
;
24398 else if (mcpu_fpu_opt
!= NULL
&& ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
24399 mfpu_opt
= &fpu_arch_vfp_v2
;
24401 mfpu_opt
= &fpu_arch_fpa
;
24407 mcpu_cpu_opt
= &cpu_default
;
24408 selected_cpu
= cpu_default
;
24410 else if (no_cpu_selected ())
24411 selected_cpu
= cpu_default
;
24414 selected_cpu
= *mcpu_cpu_opt
;
24416 mcpu_cpu_opt
= &arm_arch_any
;
24419 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
24421 autoselect_thumb_from_cpu_variant ();
24423 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
24425 #if defined OBJ_COFF || defined OBJ_ELF
24427 unsigned int flags
= 0;
24429 #if defined OBJ_ELF
24430 flags
= meabi_flags
;
24432 switch (meabi_flags
)
24434 case EF_ARM_EABI_UNKNOWN
:
24436 /* Set the flags in the private structure. */
24437 if (uses_apcs_26
) flags
|= F_APCS26
;
24438 if (support_interwork
) flags
|= F_INTERWORK
;
24439 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
24440 if (pic_code
) flags
|= F_PIC
;
24441 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
24442 flags
|= F_SOFT_FLOAT
;
24444 switch (mfloat_abi_opt
)
24446 case ARM_FLOAT_ABI_SOFT
:
24447 case ARM_FLOAT_ABI_SOFTFP
:
24448 flags
|= F_SOFT_FLOAT
;
24451 case ARM_FLOAT_ABI_HARD
:
24452 if (flags
& F_SOFT_FLOAT
)
24453 as_bad (_("hard-float conflicts with specified fpu"));
24457 /* Using pure-endian doubles (even if soft-float). */
24458 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
24459 flags
|= F_VFP_FLOAT
;
24461 #if defined OBJ_ELF
24462 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
24463 flags
|= EF_ARM_MAVERICK_FLOAT
;
24466 case EF_ARM_EABI_VER4
:
24467 case EF_ARM_EABI_VER5
:
24468 /* No additional flags to set. */
24475 bfd_set_private_flags (stdoutput
, flags
);
24477 /* We have run out flags in the COFF header to encode the
24478 status of ATPCS support, so instead we create a dummy,
24479 empty, debug section called .arm.atpcs. */
24484 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
24488 bfd_set_section_flags
24489 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
24490 bfd_set_section_size (stdoutput
, sec
, 0);
24491 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
24497 /* Record the CPU type as well. */
24498 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
24499 mach
= bfd_mach_arm_iWMMXt2
;
24500 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
24501 mach
= bfd_mach_arm_iWMMXt
;
24502 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
24503 mach
= bfd_mach_arm_XScale
;
24504 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
24505 mach
= bfd_mach_arm_ep9312
;
24506 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
24507 mach
= bfd_mach_arm_5TE
;
24508 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
24510 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
24511 mach
= bfd_mach_arm_5T
;
24513 mach
= bfd_mach_arm_5
;
24515 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
24517 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
24518 mach
= bfd_mach_arm_4T
;
24520 mach
= bfd_mach_arm_4
;
24522 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
24523 mach
= bfd_mach_arm_3M
;
24524 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
24525 mach
= bfd_mach_arm_3
;
24526 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
24527 mach
= bfd_mach_arm_2a
;
24528 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
24529 mach
= bfd_mach_arm_2
;
24531 mach
= bfd_mach_arm_unknown
;
24533 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
24536 /* Command line processing. */
24539 Invocation line includes a switch not recognized by the base assembler.
24540 See if it's a processor-specific option.
24542 This routine is somewhat complicated by the need for backwards
24543 compatibility (since older releases of gcc can't be changed).
24544 The new options try to make the interface as compatible as
24547 New options (supported) are:
24549 -mcpu=<cpu name> Assemble for selected processor
24550 -march=<architecture name> Assemble for selected architecture
24551 -mfpu=<fpu architecture> Assemble for selected FPU.
24552 -EB/-mbig-endian Big-endian
24553 -EL/-mlittle-endian Little-endian
24554 -k Generate PIC code
24555 -mthumb Start in Thumb mode
24556 -mthumb-interwork Code supports ARM/Thumb interworking
24558 -m[no-]warn-deprecated Warn about deprecated features
24559 -m[no-]warn-syms Warn when symbols match instructions
24561 For now we will also provide support for:
24563 -mapcs-32 32-bit Program counter
24564 -mapcs-26 26-bit Program counter
24565 -macps-float Floats passed in FP registers
24566 -mapcs-reentrant Reentrant code
24568 (sometime these will probably be replaced with -mapcs=<list of options>
24569 and -matpcs=<list of options>)
24571 The remaining options are only supported for back-wards compatibility.
24572 Cpu variants, the arm part is optional:
24573 -m[arm]1 Currently not supported.
24574 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24575 -m[arm]3 Arm 3 processor
24576 -m[arm]6[xx], Arm 6 processors
24577 -m[arm]7[xx][t][[d]m] Arm 7 processors
24578 -m[arm]8[10] Arm 8 processors
24579 -m[arm]9[20][tdmi] Arm 9 processors
24580 -mstrongarm[110[0]] StrongARM processors
24581 -mxscale XScale processors
24582 -m[arm]v[2345[t[e]]] Arm architectures
24583 -mall All (except the ARM1)
24585 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24586 -mfpe-old (No float load/store multiples)
24587 -mvfpxd VFP Single precision
24589 -mno-fpu Disable all floating point instructions
24591 The following CPU names are recognized:
24592 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24593 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
24594 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
24595 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24596 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
24597 arm10t arm10e, arm1020t, arm1020e, arm10200e,
24598 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
24602 const char * md_shortopts
= "m:k";
24604 #ifdef ARM_BI_ENDIAN
24605 #define OPTION_EB (OPTION_MD_BASE + 0)
24606 #define OPTION_EL (OPTION_MD_BASE + 1)
24608 #if TARGET_BYTES_BIG_ENDIAN
24609 #define OPTION_EB (OPTION_MD_BASE + 0)
24611 #define OPTION_EL (OPTION_MD_BASE + 1)
24614 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
24616 struct option md_longopts
[] =
24619 {"EB", no_argument
, NULL
, OPTION_EB
},
24622 {"EL", no_argument
, NULL
, OPTION_EL
},
24624 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
24625 {NULL
, no_argument
, NULL
, 0}
24629 size_t md_longopts_size
= sizeof (md_longopts
);
24631 struct arm_option_table
24633 char *option
; /* Option name to match. */
24634 char *help
; /* Help information. */
24635 int *var
; /* Variable to change. */
24636 int value
; /* What to change it to. */
24637 char *deprecated
; /* If non-null, print this message. */
24640 struct arm_option_table arm_opts
[] =
24642 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
24643 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
24644 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
24645 &support_interwork
, 1, NULL
},
24646 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
24647 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
24648 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
24650 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
24651 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
24652 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
24653 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
24656 /* These are recognized by the assembler, but have no affect on code. */
24657 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
24658 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
24660 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
24661 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
24662 &warn_on_deprecated
, 0, NULL
},
24663 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
24664 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
24665 {NULL
, NULL
, NULL
, 0, NULL
}
24668 struct arm_legacy_option_table
24670 char *option
; /* Option name to match. */
24671 const arm_feature_set
**var
; /* Variable to change. */
24672 const arm_feature_set value
; /* What to change it to. */
24673 char *deprecated
; /* If non-null, print this message. */
24676 const struct arm_legacy_option_table arm_legacy_opts
[] =
24678 /* DON'T add any new processors to this list -- we want the whole list
24679 to go away... Add them to the processors table instead. */
24680 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
24681 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
24682 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
24683 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
24684 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
24685 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
24686 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
24687 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
24688 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
24689 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
24690 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
24691 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
24692 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
24693 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
24694 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
24695 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
24696 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
24697 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
24698 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
24699 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
24700 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
24701 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
24702 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
24703 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
24704 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
24705 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
24706 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
24707 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
24708 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
24709 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
24710 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
24711 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
24712 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
24713 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
24714 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
24715 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
24716 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
24717 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
24718 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
24719 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
24720 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
24721 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
24722 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
24723 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
24724 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
24725 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
24726 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24727 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24728 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24729 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24730 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
24731 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
24732 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
24733 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
24734 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
24735 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
24736 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
24737 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
24738 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
24739 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
24740 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
24741 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
24742 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
24743 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
24744 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
24745 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
24746 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
24747 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
24748 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
24749 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
24750 N_("use -mcpu=strongarm110")},
24751 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
24752 N_("use -mcpu=strongarm1100")},
24753 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
24754 N_("use -mcpu=strongarm1110")},
24755 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
24756 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
24757 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
24759 /* Architecture variants -- don't add any more to this list either. */
24760 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
24761 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
24762 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
24763 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
24764 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
24765 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
24766 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
24767 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
24768 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
24769 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
24770 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
24771 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
24772 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
24773 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
24774 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
24775 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
24776 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
24777 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
24779 /* Floating point variants -- don't add any more to this list either. */
24780 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
24781 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
24782 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
24783 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
24784 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
24786 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
24789 struct arm_cpu_option_table
24793 const arm_feature_set value
;
24794 /* For some CPUs we assume an FPU unless the user explicitly sets
24796 const arm_feature_set default_fpu
;
24797 /* The canonical name of the CPU, or NULL to use NAME converted to upper
24799 const char *canonical_name
;
24802 /* This list should, at a minimum, contain all the cpu names
24803 recognized by GCC. */
24804 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
24805 static const struct arm_cpu_option_table arm_cpus
[] =
24807 ARM_CPU_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
, NULL
),
24808 ARM_CPU_OPT ("arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
),
24809 ARM_CPU_OPT ("arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
),
24810 ARM_CPU_OPT ("arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
24811 ARM_CPU_OPT ("arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
24812 ARM_CPU_OPT ("arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24813 ARM_CPU_OPT ("arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24814 ARM_CPU_OPT ("arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24815 ARM_CPU_OPT ("arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24816 ARM_CPU_OPT ("arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24817 ARM_CPU_OPT ("arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24818 ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
24819 ARM_CPU_OPT ("arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24820 ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
24821 ARM_CPU_OPT ("arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24822 ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
24823 ARM_CPU_OPT ("arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24824 ARM_CPU_OPT ("arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24825 ARM_CPU_OPT ("arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24826 ARM_CPU_OPT ("arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24827 ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24828 ARM_CPU_OPT ("arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24829 ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24830 ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24831 ARM_CPU_OPT ("arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24832 ARM_CPU_OPT ("arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24833 ARM_CPU_OPT ("arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24834 ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24835 ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24836 ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24837 ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24838 ARM_CPU_OPT ("arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24839 ARM_CPU_OPT ("arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24840 ARM_CPU_OPT ("strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24841 ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24842 ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24843 ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24844 ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24845 ARM_CPU_OPT ("arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24846 ARM_CPU_OPT ("arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"),
24847 ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24848 ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24849 ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24850 ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24851 ARM_CPU_OPT ("fa526", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24852 ARM_CPU_OPT ("fa626", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24853 /* For V5 or later processors we default to using VFP; but the user
24854 should really set the FPU type explicitly. */
24855 ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
24856 ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24857 ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
24858 ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
24859 ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
24860 ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
24861 ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"),
24862 ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24863 ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
24864 ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"),
24865 ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24866 ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24867 ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
24868 ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
24869 ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24870 ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"),
24871 ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
24872 ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24873 ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24874 ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
,
24876 ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
24877 ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24878 ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24879 ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24880 ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24881 ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24882 ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"),
24883 ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
),
24884 ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
,
24886 ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
),
24887 ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, "MPCore"),
24888 ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, "MPCore"),
24889 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
),
24890 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
),
24891 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6KZ
, FPU_NONE
, NULL
),
24892 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6KZ
, FPU_ARCH_VFP_V2
, NULL
),
24893 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC
,
24894 FPU_NONE
, "Cortex-A5"),
24895 ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24897 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC
,
24898 ARM_FEATURE_COPROC (FPU_VFP_V3
24899 | FPU_NEON_EXT_V1
),
24901 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC
,
24902 ARM_FEATURE_COPROC (FPU_VFP_V3
24903 | FPU_NEON_EXT_V1
),
24905 ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24907 ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24909 ARM_CPU_OPT ("cortex-a17", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24911 ARM_CPU_OPT ("cortex-a35", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24913 ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24915 ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24917 ARM_CPU_OPT ("cortex-a72", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24919 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, "Cortex-R4"),
24920 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R
, FPU_ARCH_VFP_V3D16
,
24922 ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV
,
24923 FPU_NONE
, "Cortex-R5"),
24924 ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV
,
24925 FPU_ARCH_VFP_V3D16
,
24927 ARM_CPU_OPT ("cortex-m7", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M7"),
24928 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M4"),
24929 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, "Cortex-M3"),
24930 ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M1"),
24931 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0"),
24932 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0+"),
24933 ARM_CPU_OPT ("exynos-m1", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24936 ARM_CPU_OPT ("qdf24xx", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24940 /* ??? XSCALE is really an architecture. */
24941 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
24942 /* ??? iwmmxt is not a processor. */
24943 ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
),
24944 ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP_V2
, NULL
),
24945 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
24947 ARM_CPU_OPT ("ep9312", ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
24948 FPU_ARCH_MAVERICK
, "ARM920T"),
24949 /* Marvell processors. */
24950 ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE_CORE (ARM_AEXT_V7A
| ARM_EXT_MP
24952 ARM_EXT2_V6T2_V8M
),
24953 FPU_ARCH_VFP_V3D16
, NULL
),
24954 ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A
| ARM_EXT_MP
24956 ARM_EXT2_V6T2_V8M
),
24957 FPU_ARCH_NEON_VFP_V4
, NULL
),
24958 /* APM X-Gene family. */
24959 ARM_CPU_OPT ("xgene1", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24961 ARM_CPU_OPT ("xgene2", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24964 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
24968 struct arm_arch_option_table
24972 const arm_feature_set value
;
24973 const arm_feature_set default_fpu
;
24976 /* This list should, at a minimum, contain all the architecture names
24977 recognized by GCC. */
24978 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
24979 static const struct arm_arch_option_table arm_archs
[] =
24981 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
24982 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
24983 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
24984 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
24985 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
24986 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
24987 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
24988 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
24989 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
24990 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
24991 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
24992 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
24993 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
24994 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
24995 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
),
24996 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
),
24997 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
),
24998 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
),
24999 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25000 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
),
25001 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
),
25002 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
25003 kept to preserve existing behaviour. */
25004 ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25005 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25006 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
),
25007 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
),
25008 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
),
25009 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
25010 kept to preserve existing behaviour. */
25011 ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25012 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25013 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
25014 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
25015 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
),
25016 /* The official spelling of the ARMv7 profile variants is the dashed form.
25017 Accept the non-dashed form for compatibility with old toolchains. */
25018 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25019 ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
),
25020 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25021 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25022 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25023 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25024 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25025 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
),
25026 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
25027 ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
),
25028 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
),
25029 ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
),
25030 ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
),
25031 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
25032 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
25033 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP
),
25034 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
25036 #undef ARM_ARCH_OPT
25038 /* ISA extensions in the co-processor and main instruction set space. */
25039 struct arm_option_extension_value_table
25043 const arm_feature_set merge_value
;
25044 const arm_feature_set clear_value
;
25045 const arm_feature_set allowed_archs
;
25048 /* The following table must be in alphabetical order with a NULL last entry.
25050 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, AA }
25051 static const struct arm_option_extension_value_table arm_extensions
[] =
25053 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25054 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25055 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25056 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
25057 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25058 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
25059 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25060 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25061 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25062 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
| ARM_EXT_V7R
)),
25063 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
25064 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ANY
),
25065 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
25066 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ANY
),
25067 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
25068 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ANY
),
25069 ARM_EXT_OPT ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25070 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25071 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
| ARM_EXT_V7R
)),
25072 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
25073 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
25074 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25075 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25076 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25077 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
25078 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
25079 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
25080 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25081 ARM_EXT_OPT ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25082 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25083 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
| ARM_EXT_V7A
)),
25084 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
25086 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
25087 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
25088 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8
,
25089 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
25090 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25091 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
25092 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ANY
),
25093 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
25097 /* ISA floating-point and Advanced SIMD extensions. */
25098 struct arm_option_fpu_value_table
25101 const arm_feature_set value
;
25104 /* This list should, at a minimum, contain all the fpu names
25105 recognized by GCC. */
25106 static const struct arm_option_fpu_value_table arm_fpus
[] =
25108 {"softfpa", FPU_NONE
},
25109 {"fpe", FPU_ARCH_FPE
},
25110 {"fpe2", FPU_ARCH_FPE
},
25111 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
25112 {"fpa", FPU_ARCH_FPA
},
25113 {"fpa10", FPU_ARCH_FPA
},
25114 {"fpa11", FPU_ARCH_FPA
},
25115 {"arm7500fe", FPU_ARCH_FPA
},
25116 {"softvfp", FPU_ARCH_VFP
},
25117 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
25118 {"vfp", FPU_ARCH_VFP_V2
},
25119 {"vfp9", FPU_ARCH_VFP_V2
},
25120 {"vfp3", FPU_ARCH_VFP_V3
}, /* For backwards compatbility. */
25121 {"vfp10", FPU_ARCH_VFP_V2
},
25122 {"vfp10-r0", FPU_ARCH_VFP_V1
},
25123 {"vfpxd", FPU_ARCH_VFP_V1xD
},
25124 {"vfpv2", FPU_ARCH_VFP_V2
},
25125 {"vfpv3", FPU_ARCH_VFP_V3
},
25126 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
25127 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
25128 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
25129 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
25130 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
25131 {"arm1020t", FPU_ARCH_VFP_V1
},
25132 {"arm1020e", FPU_ARCH_VFP_V2
},
25133 {"arm1136jfs", FPU_ARCH_VFP_V2
},
25134 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
25135 {"maverick", FPU_ARCH_MAVERICK
},
25136 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
25137 {"neon-fp16", FPU_ARCH_NEON_FP16
},
25138 {"vfpv4", FPU_ARCH_VFP_V4
},
25139 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
25140 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
25141 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
25142 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
25143 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
25144 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
25145 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
25146 {"crypto-neon-fp-armv8",
25147 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
25148 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
25149 {"crypto-neon-fp-armv8.1",
25150 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
25151 {NULL
, ARM_ARCH_NONE
}
25154 struct arm_option_value_table
25160 static const struct arm_option_value_table arm_float_abis
[] =
25162 {"hard", ARM_FLOAT_ABI_HARD
},
25163 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
25164 {"soft", ARM_FLOAT_ABI_SOFT
},
25169 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
25170 static const struct arm_option_value_table arm_eabis
[] =
25172 {"gnu", EF_ARM_EABI_UNKNOWN
},
25173 {"4", EF_ARM_EABI_VER4
},
25174 {"5", EF_ARM_EABI_VER5
},
/* Table entry for a long (multi-character) command-line option that
   takes a sub-option argument, e.g. -mcpu=<name>.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
25188 arm_parse_extension (char *str
, const arm_feature_set
**opt_p
)
25190 arm_feature_set
*ext_set
= (arm_feature_set
*)
25191 xmalloc (sizeof (arm_feature_set
));
25193 /* We insist on extensions being specified in alphabetical order, and with
25194 extensions being added before being removed. We achieve this by having
25195 the global ARM_EXTENSIONS table in alphabetical order, and using the
25196 ADDING_VALUE variable to indicate whether we are adding an extension (1)
25197 or removing it (0) and only allowing it to change in the order
25199 const struct arm_option_extension_value_table
* opt
= NULL
;
25200 int adding_value
= -1;
25202 /* Copy the feature set, so that we can modify it. */
25203 *ext_set
= **opt_p
;
25206 while (str
!= NULL
&& *str
!= 0)
25213 as_bad (_("invalid architectural extension"));
25218 ext
= strchr (str
, '+');
25223 len
= strlen (str
);
25225 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
25227 if (adding_value
!= 0)
25230 opt
= arm_extensions
;
25238 if (adding_value
== -1)
25241 opt
= arm_extensions
;
25243 else if (adding_value
!= 1)
25245 as_bad (_("must specify extensions to add before specifying "
25246 "those to remove"));
25253 as_bad (_("missing architectural extension"));
25257 gas_assert (adding_value
!= -1);
25258 gas_assert (opt
!= NULL
);
25260 /* Scan over the options table trying to find an exact match. */
25261 for (; opt
->name
!= NULL
; opt
++)
25262 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25264 /* Check we can apply the extension to this architecture. */
25265 if (!ARM_CPU_HAS_FEATURE (*ext_set
, opt
->allowed_archs
))
25267 as_bad (_("extension does not apply to the base architecture"));
25271 /* Add or remove the extension. */
25273 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
25275 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
25280 if (opt
->name
== NULL
)
25282 /* Did we fail to find an extension because it wasn't specified in
25283 alphabetical order, or because it does not exist? */
25285 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
25286 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25289 if (opt
->name
== NULL
)
25290 as_bad (_("unknown architectural extension `%s'"), str
);
25292 as_bad (_("architectural extensions must be specified in "
25293 "alphabetical order"));
25299 /* We should skip the extension we've just matched the next time
25311 arm_parse_cpu (char *str
)
25313 const struct arm_cpu_option_table
*opt
;
25314 char *ext
= strchr (str
, '+');
25320 len
= strlen (str
);
25324 as_bad (_("missing cpu name `%s'"), str
);
25328 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
25329 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25331 mcpu_cpu_opt
= &opt
->value
;
25332 mcpu_fpu_opt
= &opt
->default_fpu
;
25333 if (opt
->canonical_name
)
25335 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
25336 strcpy (selected_cpu_name
, opt
->canonical_name
);
25342 if (len
>= sizeof selected_cpu_name
)
25343 len
= (sizeof selected_cpu_name
) - 1;
25345 for (i
= 0; i
< len
; i
++)
25346 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
25347 selected_cpu_name
[i
] = 0;
25351 return arm_parse_extension (ext
, &mcpu_cpu_opt
);
25356 as_bad (_("unknown cpu `%s'"), str
);
25361 arm_parse_arch (char *str
)
25363 const struct arm_arch_option_table
*opt
;
25364 char *ext
= strchr (str
, '+');
25370 len
= strlen (str
);
25374 as_bad (_("missing architecture name `%s'"), str
);
25378 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
25379 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25381 march_cpu_opt
= &opt
->value
;
25382 march_fpu_opt
= &opt
->default_fpu
;
25383 strcpy (selected_cpu_name
, opt
->name
);
25386 return arm_parse_extension (ext
, &march_cpu_opt
);
25391 as_bad (_("unknown architecture `%s'\n"), str
);
25396 arm_parse_fpu (char * str
)
25398 const struct arm_option_fpu_value_table
* opt
;
25400 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
25401 if (streq (opt
->name
, str
))
25403 mfpu_opt
= &opt
->value
;
25407 as_bad (_("unknown floating point format `%s'\n"), str
);
25412 arm_parse_float_abi (char * str
)
25414 const struct arm_option_value_table
* opt
;
25416 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
25417 if (streq (opt
->name
, str
))
25419 mfloat_abi_opt
= opt
->value
;
25423 as_bad (_("unknown floating point abi `%s'\n"), str
);
25429 arm_parse_eabi (char * str
)
25431 const struct arm_option_value_table
*opt
;
25433 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
25434 if (streq (opt
->name
, str
))
25436 meabi_flags
= opt
->value
;
25439 as_bad (_("unknown EABI `%s'\n"), str
);
25445 arm_parse_it_mode (char * str
)
25447 bfd_boolean ret
= TRUE
;
25449 if (streq ("arm", str
))
25450 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
25451 else if (streq ("thumb", str
))
25452 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
25453 else if (streq ("always", str
))
25454 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
25455 else if (streq ("never", str
))
25456 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
25459 as_bad (_("unknown implicit IT mode `%s', should be "\
25460 "arm, thumb, always, or never."), str
);
25468 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED
)
25470 codecomposer_syntax
= TRUE
;
25471 arm_comment_chars
[0] = ';';
25472 arm_line_separator_chars
[0] = 0;
25476 struct arm_long_option_table arm_long_opts
[] =
25478 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
25479 arm_parse_cpu
, NULL
},
25480 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
25481 arm_parse_arch
, NULL
},
25482 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
25483 arm_parse_fpu
, NULL
},
25484 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
25485 arm_parse_float_abi
, NULL
},
25487 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
25488 arm_parse_eabi
, NULL
},
25490 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
25491 arm_parse_it_mode
, NULL
},
25492 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
25493 arm_ccs_mode
, NULL
},
25494 {NULL
, NULL
, 0, NULL
}
25498 md_parse_option (int c
, char * arg
)
25500 struct arm_option_table
*opt
;
25501 const struct arm_legacy_option_table
*fopt
;
25502 struct arm_long_option_table
*lopt
;
25508 target_big_endian
= 1;
25514 target_big_endian
= 0;
25518 case OPTION_FIX_V4BX
:
25523 /* Listing option. Just ignore these, we don't support additional
25528 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
25530 if (c
== opt
->option
[0]
25531 && ((arg
== NULL
&& opt
->option
[1] == 0)
25532 || streq (arg
, opt
->option
+ 1)))
25534 /* If the option is deprecated, tell the user. */
25535 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
25536 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
25537 arg
? arg
: "", _(opt
->deprecated
));
25539 if (opt
->var
!= NULL
)
25540 *opt
->var
= opt
->value
;
25546 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
25548 if (c
== fopt
->option
[0]
25549 && ((arg
== NULL
&& fopt
->option
[1] == 0)
25550 || streq (arg
, fopt
->option
+ 1)))
25552 /* If the option is deprecated, tell the user. */
25553 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
25554 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
25555 arg
? arg
: "", _(fopt
->deprecated
));
25557 if (fopt
->var
!= NULL
)
25558 *fopt
->var
= &fopt
->value
;
25564 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
25566 /* These options are expected to have an argument. */
25567 if (c
== lopt
->option
[0]
25569 && strncmp (arg
, lopt
->option
+ 1,
25570 strlen (lopt
->option
+ 1)) == 0)
25572 /* If the option is deprecated, tell the user. */
25573 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
25574 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
25575 _(lopt
->deprecated
));
25577 /* Call the sup-option parser. */
25578 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
25589 md_show_usage (FILE * fp
)
25591 struct arm_option_table
*opt
;
25592 struct arm_long_option_table
*lopt
;
25594 fprintf (fp
, _(" ARM-specific assembler options:\n"));
25596 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
25597 if (opt
->help
!= NULL
)
25598 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
25600 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
25601 if (lopt
->help
!= NULL
)
25602 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
25606 -EB assemble code for a big-endian cpu\n"));
25611 -EL assemble code for a little-endian cpu\n"));
25615 --fix-v4bx Allow BX in ARMv4 code\n"));
25623 arm_feature_set flags
;
25624 } cpu_arch_ver_table
;
25626 /* Mapping from CPU features to EABI CPU arch values. As a general rule, table
25627 must be sorted least features first but some reordering is needed, eg. for
25628 Thumb-2 instructions to be detected as coming from ARMv6T2. */
25629 static const cpu_arch_ver_table cpu_arch_ver
[] =
25635 {4, ARM_ARCH_V5TE
},
25636 {5, ARM_ARCH_V5TEJ
},
25640 {11, ARM_ARCH_V6M
},
25641 {12, ARM_ARCH_V6SM
},
25642 {8, ARM_ARCH_V6T2
},
25643 {10, ARM_ARCH_V7VE
},
25644 {10, ARM_ARCH_V7R
},
25645 {10, ARM_ARCH_V7M
},
25646 {14, ARM_ARCH_V8A
},
25647 {16, ARM_ARCH_V8M_BASE
},
25648 {17, ARM_ARCH_V8M_MAIN
},
25652 /* Set an attribute if it has not already been set by the user. */
25654 aeabi_set_attribute_int (int tag
, int value
)
25657 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
25658 || !attributes_set_explicitly
[tag
])
25659 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
25663 aeabi_set_attribute_string (int tag
, const char *value
)
25666 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
25667 || !attributes_set_explicitly
[tag
])
25668 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
25671 /* Set the public EABI object attributes. */
25673 aeabi_set_public_attributes (void)
25678 int fp16_optional
= 0;
25679 arm_feature_set flags
;
25680 arm_feature_set tmp
;
25681 arm_feature_set arm_arch_v8m_base
= ARM_ARCH_V8M_BASE
;
25682 const cpu_arch_ver_table
*p
;
25684 /* Choose the architecture based on the capabilities of the requested cpu
25685 (if any) and/or the instructions actually used. */
25686 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
25687 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
25688 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
25690 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
25691 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
25693 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
25694 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
25696 selected_cpu
= flags
;
25698 /* Allow the user to override the reported architecture. */
25701 ARM_CLEAR_FEATURE (flags
, flags
, arm_arch_any
);
25702 ARM_MERGE_FEATURE_SETS (flags
, flags
, *object_arch
);
25705 /* We need to make sure that the attributes do not identify us as v6S-M
25706 when the only v6S-M feature in use is the Operating System Extensions. */
25707 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_os
))
25708 if (!ARM_CPU_HAS_FEATURE (flags
, arm_arch_v6m_only
))
25709 ARM_CLEAR_FEATURE (flags
, flags
, arm_ext_os
);
25713 for (p
= cpu_arch_ver
; p
->val
; p
++)
25715 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
25718 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
25722 /* The table lookup above finds the last architecture to contribute
25723 a new feature. Unfortunately, Tag13 is a subset of the union of
25724 v6T2 and v7-M, so it is never seen as contributing a new feature.
25725 We can not search for the last entry which is entirely used,
25726 because if no CPU is specified we build up only those flags
25727 actually used. Perhaps we should separate out the specified
25728 and implicit cases. Avoid taking this path for -march=all by
25729 checking for contradictory v7-A / v7-M features. */
25730 if (arch
== TAG_CPU_ARCH_V7
25731 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
25732 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
)
25733 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v6_dsp
))
25734 arch
= TAG_CPU_ARCH_V7E_M
;
25736 ARM_CLEAR_FEATURE (tmp
, flags
, arm_arch_v8m_base
);
25737 if (arch
== TAG_CPU_ARCH_V8M_BASE
&& ARM_CPU_HAS_FEATURE (tmp
, arm_arch_any
))
25738 arch
= TAG_CPU_ARCH_V8M_MAIN
;
25740 /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
25741 coming from ARMv8-A. However, since ARMv8-A has more instructions than
25742 ARMv8-M, -march=all must be detected as ARMv8-A. */
25743 if (arch
== TAG_CPU_ARCH_V8M_MAIN
25744 && ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
25745 arch
= TAG_CPU_ARCH_V8
;
25747 /* Tag_CPU_name. */
25748 if (selected_cpu_name
[0])
25752 q
= selected_cpu_name
;
25753 if (strncmp (q
, "armv", 4) == 0)
25758 for (i
= 0; q
[i
]; i
++)
25759 q
[i
] = TOUPPER (q
[i
]);
25761 aeabi_set_attribute_string (Tag_CPU_name
, q
);
25764 /* Tag_CPU_arch. */
25765 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
25767 /* Tag_CPU_arch_profile. */
25768 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
25769 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
25770 || (ARM_CPU_HAS_FEATURE (flags
, arm_ext_atomics
)
25771 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
)))
25773 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
25775 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_m
))
25780 if (profile
!= '\0')
25781 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
25783 /* Tag_ARM_ISA_use. */
25784 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
25786 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
25788 /* Tag_THUMB_ISA_use. */
25789 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
25794 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
25795 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
25797 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
25801 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
25804 /* Tag_VFP_arch. */
25805 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
25806 aeabi_set_attribute_int (Tag_VFP_arch
,
25807 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
25809 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
25810 aeabi_set_attribute_int (Tag_VFP_arch
,
25811 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
25813 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
25816 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
25818 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
25820 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
25823 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
25824 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
25825 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
25826 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
25827 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
25829 /* Tag_ABI_HardFP_use. */
25830 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
25831 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
25832 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
25834 /* Tag_WMMX_arch. */
25835 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
25836 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
25837 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
25838 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
25840 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
25841 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
25842 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
25843 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
25845 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
25847 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
25851 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
25856 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
25857 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
25858 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
25862 We set Tag_DIV_use to two when integer divide instructions have been used
25863 in ARM state, or when Thumb integer divide instructions have been used,
25864 but we have no architecture profile set, nor have we any ARM instructions.
25866 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
25867 by the base architecture.
25869 For new architectures we will have to check these tests. */
25870 gas_assert (arch
<= TAG_CPU_ARCH_V8
25871 || (arch
>= TAG_CPU_ARCH_V8M_BASE
25872 && arch
<= TAG_CPU_ARCH_V8M_MAIN
));
25873 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
25874 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
25875 aeabi_set_attribute_int (Tag_DIV_use
, 0);
25876 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
25877 || (profile
== '\0'
25878 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
25879 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
25880 aeabi_set_attribute_int (Tag_DIV_use
, 2);
25882 /* Tag_MP_extension_use. */
25883 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
25884 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
25886 /* Tag Virtualization_use. */
25887 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
25889 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
25892 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
25895 /* Add the default contents for the .ARM.attributes section. */
25899 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
25902 aeabi_set_public_attributes ();
25904 #endif /* OBJ_ELF */
25907 /* Parse a .cpu directive. */
25910 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
25912 const struct arm_cpu_option_table
*opt
;
25916 name
= input_line_pointer
;
25917 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
25918 input_line_pointer
++;
25919 saved_char
= *input_line_pointer
;
25920 *input_line_pointer
= 0;
25922 /* Skip the first "all" entry. */
25923 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
25924 if (streq (opt
->name
, name
))
25926 mcpu_cpu_opt
= &opt
->value
;
25927 selected_cpu
= opt
->value
;
25928 if (opt
->canonical_name
)
25929 strcpy (selected_cpu_name
, opt
->canonical_name
);
25933 for (i
= 0; opt
->name
[i
]; i
++)
25934 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
25936 selected_cpu_name
[i
] = 0;
25938 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
25939 *input_line_pointer
= saved_char
;
25940 demand_empty_rest_of_line ();
25943 as_bad (_("unknown cpu `%s'"), name
);
25944 *input_line_pointer
= saved_char
;
25945 ignore_rest_of_line ();
25949 /* Parse a .arch directive. */
25952 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
25954 const struct arm_arch_option_table
*opt
;
25958 name
= input_line_pointer
;
25959 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
25960 input_line_pointer
++;
25961 saved_char
= *input_line_pointer
;
25962 *input_line_pointer
= 0;
25964 /* Skip the first "all" entry. */
25965 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
25966 if (streq (opt
->name
, name
))
25968 mcpu_cpu_opt
= &opt
->value
;
25969 selected_cpu
= opt
->value
;
25970 strcpy (selected_cpu_name
, opt
->name
);
25971 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
25972 *input_line_pointer
= saved_char
;
25973 demand_empty_rest_of_line ();
25977 as_bad (_("unknown architecture `%s'\n"), name
);
25978 *input_line_pointer
= saved_char
;
25979 ignore_rest_of_line ();
25983 /* Parse a .object_arch directive. */
25986 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
25988 const struct arm_arch_option_table
*opt
;
25992 name
= input_line_pointer
;
25993 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
25994 input_line_pointer
++;
25995 saved_char
= *input_line_pointer
;
25996 *input_line_pointer
= 0;
25998 /* Skip the first "all" entry. */
25999 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
26000 if (streq (opt
->name
, name
))
26002 object_arch
= &opt
->value
;
26003 *input_line_pointer
= saved_char
;
26004 demand_empty_rest_of_line ();
26008 as_bad (_("unknown architecture `%s'\n"), name
);
26009 *input_line_pointer
= saved_char
;
26010 ignore_rest_of_line ();
26013 /* Parse a .arch_extension directive. */
26016 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
26018 const struct arm_option_extension_value_table
*opt
;
26021 int adding_value
= 1;
26023 name
= input_line_pointer
;
26024 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26025 input_line_pointer
++;
26026 saved_char
= *input_line_pointer
;
26027 *input_line_pointer
= 0;
26029 if (strlen (name
) >= 2
26030 && strncmp (name
, "no", 2) == 0)
26036 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
26037 if (streq (opt
->name
, name
))
26039 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt
, opt
->allowed_archs
))
26041 as_bad (_("architectural extension `%s' is not allowed for the "
26042 "current base architecture"), name
);
26047 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_cpu
,
26050 ARM_CLEAR_FEATURE (selected_cpu
, selected_cpu
, opt
->clear_value
);
26052 mcpu_cpu_opt
= &selected_cpu
;
26053 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26054 *input_line_pointer
= saved_char
;
26055 demand_empty_rest_of_line ();
26059 if (opt
->name
== NULL
)
26060 as_bad (_("unknown architecture extension `%s'\n"), name
);
26062 *input_line_pointer
= saved_char
;
26063 ignore_rest_of_line ();
26066 /* Parse a .fpu directive. */
26069 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
26071 const struct arm_option_fpu_value_table
*opt
;
26075 name
= input_line_pointer
;
26076 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26077 input_line_pointer
++;
26078 saved_char
= *input_line_pointer
;
26079 *input_line_pointer
= 0;
26081 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
26082 if (streq (opt
->name
, name
))
26084 mfpu_opt
= &opt
->value
;
26085 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26086 *input_line_pointer
= saved_char
;
26087 demand_empty_rest_of_line ();
26091 as_bad (_("unknown floating point format `%s'\n"), name
);
26092 *input_line_pointer
= saved_char
;
26093 ignore_rest_of_line ();
26096 /* Copy symbol information. */
26099 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
26101 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
26105 /* Given a symbolic attribute NAME, return the proper integer value.
26106 Returns -1 if the attribute is not known. */
26109 arm_convert_symbolic_attribute (const char *name
)
26111 static const struct
26116 attribute_table
[] =
26118 /* When you modify this table you should
26119 also modify the list in doc/c-arm.texi. */
26120 #define T(tag) {#tag, tag}
26121 T (Tag_CPU_raw_name
),
26124 T (Tag_CPU_arch_profile
),
26125 T (Tag_ARM_ISA_use
),
26126 T (Tag_THUMB_ISA_use
),
26130 T (Tag_Advanced_SIMD_arch
),
26131 T (Tag_PCS_config
),
26132 T (Tag_ABI_PCS_R9_use
),
26133 T (Tag_ABI_PCS_RW_data
),
26134 T (Tag_ABI_PCS_RO_data
),
26135 T (Tag_ABI_PCS_GOT_use
),
26136 T (Tag_ABI_PCS_wchar_t
),
26137 T (Tag_ABI_FP_rounding
),
26138 T (Tag_ABI_FP_denormal
),
26139 T (Tag_ABI_FP_exceptions
),
26140 T (Tag_ABI_FP_user_exceptions
),
26141 T (Tag_ABI_FP_number_model
),
26142 T (Tag_ABI_align_needed
),
26143 T (Tag_ABI_align8_needed
),
26144 T (Tag_ABI_align_preserved
),
26145 T (Tag_ABI_align8_preserved
),
26146 T (Tag_ABI_enum_size
),
26147 T (Tag_ABI_HardFP_use
),
26148 T (Tag_ABI_VFP_args
),
26149 T (Tag_ABI_WMMX_args
),
26150 T (Tag_ABI_optimization_goals
),
26151 T (Tag_ABI_FP_optimization_goals
),
26152 T (Tag_compatibility
),
26153 T (Tag_CPU_unaligned_access
),
26154 T (Tag_FP_HP_extension
),
26155 T (Tag_VFP_HP_extension
),
26156 T (Tag_ABI_FP_16bit_format
),
26157 T (Tag_MPextension_use
),
26159 T (Tag_nodefaults
),
26160 T (Tag_also_compatible_with
),
26161 T (Tag_conformance
),
26163 T (Tag_Virtualization_use
),
26164 /* We deliberately do not include Tag_MPextension_use_legacy. */
26172 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
26173 if (streq (name
, attribute_table
[i
].name
))
26174 return attribute_table
[i
].tag
;
26180 /* Apply sym value for relocations only in the case that they are for
26181 local symbols in the same segment as the fixup and you have the
26182 respective architectural feature for blx and simple switches. */
26184 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
26187 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
26188 /* PR 17444: If the local symbol is in a different section then a reloc
26189 will always be generated for it, so applying the symbol value now
26190 will result in a double offset being stored in the relocation. */
26191 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
26192 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
26194 switch (fixP
->fx_r_type
)
26196 case BFD_RELOC_ARM_PCREL_BLX
:
26197 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26198 if (ARM_IS_FUNC (fixP
->fx_addsy
))
26202 case BFD_RELOC_ARM_PCREL_CALL
:
26203 case BFD_RELOC_THUMB_PCREL_BLX
:
26204 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
26215 #endif /* OBJ_ELF */