1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2016 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
80 /* Results from operand parsing worker functions. */
84 PARSE_OPERAND_SUCCESS
,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result
;
96 /* Types of processor to assemble for. */
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
122 #endif /* ifndef FPU_DEFAULT */
124 #define streq(a, b) (strcmp (a, b) == 0)
126 static arm_feature_set cpu_variant
;
127 static arm_feature_set arm_arch_used
;
128 static arm_feature_set thumb_arch_used
;
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26
= FALSE
;
132 static int atpcs
= FALSE
;
133 static int support_interwork
= FALSE
;
134 static int uses_apcs_float
= FALSE
;
135 static int pic_code
= FALSE
;
136 static int fix_v4bx
= FALSE
;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated
= TRUE
;
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax
= FALSE
;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set
*legacy_cpu
= NULL
;
147 static const arm_feature_set
*legacy_fpu
= NULL
;
149 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
150 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
151 static const arm_feature_set
*march_cpu_opt
= NULL
;
152 static const arm_feature_set
*march_fpu_opt
= NULL
;
153 static const arm_feature_set
*mfpu_opt
= NULL
;
154 static const arm_feature_set
*object_arch
= NULL
;
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
158 static const arm_feature_set fpu_arch_vfp_v1
= FPU_ARCH_VFP_V1
;
159 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
160 static const arm_feature_set fpu_arch_vfp_v3
= FPU_ARCH_VFP_V3
;
161 static const arm_feature_set fpu_arch_neon_v1
= FPU_ARCH_NEON_V1
;
162 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
163 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
164 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
165 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
168 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
171 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
172 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
173 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
174 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
175 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
176 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
177 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
178 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
179 static const arm_feature_set arm_ext_v4t_5
=
180 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
181 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
182 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
183 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
184 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
185 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
186 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
187 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
188 static const arm_feature_set arm_ext_v6m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
);
189 static const arm_feature_set arm_ext_v6_notm
=
190 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
191 static const arm_feature_set arm_ext_v6_dsp
=
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
193 static const arm_feature_set arm_ext_barrier
=
194 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
195 static const arm_feature_set arm_ext_msr
=
196 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
197 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
198 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
199 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
200 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
201 static const arm_feature_set arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
202 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
203 static const arm_feature_set arm_ext_m
=
204 ARM_FEATURE_CORE (ARM_EXT_V6M
| ARM_EXT_OS
| ARM_EXT_V7M
, ARM_EXT2_V8M
);
205 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
206 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
207 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
208 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
209 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
210 static const arm_feature_set arm_ext_pan
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
);
211 static const arm_feature_set arm_ext_v8m
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
);
212 static const arm_feature_set arm_ext_v6t2_v8m
=
213 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M
);
214 /* Instructions shared between ARMv8-A and ARMv8-M. */
215 static const arm_feature_set arm_ext_atomics
=
216 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS
);
217 static const arm_feature_set arm_ext_v8_2
=
218 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A
);
220 static const arm_feature_set arm_arch_any
= ARM_ANY
;
221 static const arm_feature_set arm_arch_full
= ARM_FEATURE (-1, -1, -1);
222 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
223 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
224 static const arm_feature_set arm_arch_v6m_only
= ARM_ARCH_V6M_ONLY
;
226 static const arm_feature_set arm_cext_iwmmxt2
=
227 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
228 static const arm_feature_set arm_cext_iwmmxt
=
229 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
230 static const arm_feature_set arm_cext_xscale
=
231 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
232 static const arm_feature_set arm_cext_maverick
=
233 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
234 static const arm_feature_set fpu_fpa_ext_v1
=
235 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
236 static const arm_feature_set fpu_fpa_ext_v2
=
237 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
238 static const arm_feature_set fpu_vfp_ext_v1xd
=
239 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
240 static const arm_feature_set fpu_vfp_ext_v1
=
241 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
242 static const arm_feature_set fpu_vfp_ext_v2
=
243 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
244 static const arm_feature_set fpu_vfp_ext_v3xd
=
245 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
246 static const arm_feature_set fpu_vfp_ext_v3
=
247 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
248 static const arm_feature_set fpu_vfp_ext_d32
=
249 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
250 static const arm_feature_set fpu_neon_ext_v1
=
251 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
252 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
253 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
254 static const arm_feature_set fpu_vfp_fp16
=
255 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
256 static const arm_feature_set fpu_neon_ext_fma
=
257 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
258 static const arm_feature_set fpu_vfp_ext_fma
=
259 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
260 static const arm_feature_set fpu_vfp_ext_armv8
=
261 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
262 static const arm_feature_set fpu_vfp_ext_armv8xd
=
263 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
264 static const arm_feature_set fpu_neon_ext_armv8
=
265 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
266 static const arm_feature_set fpu_crypto_ext_armv8
=
267 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
268 static const arm_feature_set crc_ext_armv8
=
269 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
);
270 static const arm_feature_set fpu_neon_ext_v8_1
=
271 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
| FPU_NEON_EXT_RDMA
);
273 static int mfloat_abi_opt
= -1;
274 /* Record user cpu selection for object attributes. */
275 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
276 /* Must be long enough to hold any of the names in arm_cpus. */
277 static char selected_cpu_name
[20];
279 extern FLONUM_TYPE generic_floating_point_number
;
281 /* Return if no cpu was selected on command-line. */
283 no_cpu_selected (void)
285 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
290 static int meabi_flags
= EABI_DEFAULT
;
292 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
295 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
300 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
305 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
306 symbolS
* GOT_symbol
;
309 /* 0: assemble for ARM,
310 1: assemble for Thumb,
311 2: assemble for Thumb even though target CPU does not support thumb
313 static int thumb_mode
= 0;
314 /* A value distinct from the possible values for thumb_mode that we
315 can use to record whether thumb_mode has been copied into the
316 tc_frag_data field of a frag. */
317 #define MODE_RECORDED (1 << 4)
319 /* Specifies the intrinsic IT insn behavior mode. */
320 enum implicit_it_mode
322 IMPLICIT_IT_MODE_NEVER
= 0x00,
323 IMPLICIT_IT_MODE_ARM
= 0x01,
324 IMPLICIT_IT_MODE_THUMB
= 0x02,
325 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
327 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
329 /* If unified_syntax is true, we are processing the new unified
330 ARM/Thumb syntax. Important differences from the old ARM mode:
332 - Immediate operands do not require a # prefix.
333 - Conditional affixes always appear at the end of the
334 instruction. (For backward compatibility, those instructions
335 that formerly had them in the middle, continue to accept them
337 - The IT instruction may appear, and if it does is validated
338 against subsequent conditional affixes. It does not generate
341 Important differences from the old Thumb mode:
343 - Immediate operands do not require a # prefix.
344 - Most of the V6T2 instructions are only available in unified mode.
345 - The .N and .W suffixes are recognized and honored (it is an error
346 if they cannot be honored).
347 - All instructions set the flags if and only if they have an 's' affix.
348 - Conditional affixes may be used. They are validated against
349 preceding IT instructions. Unlike ARM mode, you cannot use a
350 conditional affix except in the scope of an IT instruction. */
352 static bfd_boolean unified_syntax
= FALSE
;
354 /* An immediate operand can start with #, and ld*, st*, pld operands
355 can contain [ and ]. We need to tell APP not to elide whitespace
356 before a [, which can appear as the first operand for pld.
357 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
358 const char arm_symbol_chars
[] = "#[]{}";
373 enum neon_el_type type
;
377 #define NEON_MAX_TYPE_ELS 4
381 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
385 enum it_instruction_type
390 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
391 if inside, should be the last one. */
392 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
393 i.e. BKPT and NOP. */
394 IT_INSN
/* The IT insn has been parsed. */
397 /* The maximum number of operands we need. */
398 #define ARM_IT_MAX_OPERANDS 6
403 unsigned long instruction
;
407 /* "uncond_value" is set to the value in place of the conditional field in
408 unconditional versions of the instruction, or -1 if nothing is
411 struct neon_type vectype
;
412 /* This does not indicate an actual NEON instruction, only that
413 the mnemonic accepts neon-style type suffixes. */
415 /* Set to the opcode if the instruction needs relaxation.
416 Zero if the instruction is not relaxed. */
420 bfd_reloc_code_real_type type
;
425 enum it_instruction_type it_insn_type
;
431 struct neon_type_el vectype
;
432 unsigned present
: 1; /* Operand present. */
433 unsigned isreg
: 1; /* Operand was a register. */
434 unsigned immisreg
: 1; /* .imm field is a second register. */
435 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
436 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
437 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
438 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
439 instructions. This allows us to disambiguate ARM <-> vector insns. */
440 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
441 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
442 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
443 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
444 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
445 unsigned writeback
: 1; /* Operand has trailing ! */
446 unsigned preind
: 1; /* Preindexed address. */
447 unsigned postind
: 1; /* Postindexed address. */
448 unsigned negative
: 1; /* Index register was negated. */
449 unsigned shifted
: 1; /* Shift applied to operation. */
450 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
451 } operands
[ARM_IT_MAX_OPERANDS
];
454 static struct arm_it inst
;
456 #define NUM_FLOAT_VALS 8
458 const char * fp_const
[] =
460 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
463 /* Number of littlenums required to hold an extended precision number. */
464 #define MAX_LITTLENUMS 6
466 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
476 #define CP_T_X 0x00008000
477 #define CP_T_Y 0x00400000
479 #define CONDS_BIT 0x00100000
480 #define LOAD_BIT 0x00100000
482 #define DOUBLE_LOAD_FLAG 0x00000001
486 const char * template_name
;
490 #define COND_ALWAYS 0xE
494 const char * template_name
;
498 struct asm_barrier_opt
500 const char * template_name
;
502 const arm_feature_set arch
;
505 /* The bit that distinguishes CPSR and SPSR. */
506 #define SPSR_BIT (1 << 22)
508 /* The individual PSR flag bits. */
509 #define PSR_c (1 << 16)
510 #define PSR_x (1 << 17)
511 #define PSR_s (1 << 18)
512 #define PSR_f (1 << 19)
517 bfd_reloc_code_real_type reloc
;
522 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
523 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
528 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
531 /* Bits for DEFINED field in neon_typed_alias. */
532 #define NTA_HASTYPE 1
533 #define NTA_HASINDEX 2
535 struct neon_typed_alias
537 unsigned char defined
;
539 struct neon_type_el eltype
;
542 /* ARM register categories. This includes coprocessor numbers and various
543 architecture extensions' registers. */
570 /* Structure for a hash table entry for a register.
571 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
572 information which states whether a vector type or index is specified (for a
573 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
579 unsigned char builtin
;
580 struct neon_typed_alias
* neon
;
583 /* Diagnostics used when we don't get a register of the expected type. */
584 const char * const reg_expected_msgs
[] =
586 N_("ARM register expected"),
587 N_("bad or missing co-processor number"),
588 N_("co-processor register expected"),
589 N_("FPA register expected"),
590 N_("VFP single precision register expected"),
591 N_("VFP/Neon double precision register expected"),
592 N_("Neon quad precision register expected"),
593 N_("VFP single or double precision register expected"),
594 N_("Neon double or quad precision register expected"),
595 N_("VFP single, double or Neon quad precision register expected"),
596 N_("VFP system register expected"),
597 N_("Maverick MVF register expected"),
598 N_("Maverick MVD register expected"),
599 N_("Maverick MVFX register expected"),
600 N_("Maverick MVDX register expected"),
601 N_("Maverick MVAX register expected"),
602 N_("Maverick DSPSC register expected"),
603 N_("iWMMXt data register expected"),
604 N_("iWMMXt control register expected"),
605 N_("iWMMXt scalar register expected"),
606 N_("XScale accumulator register expected"),
609 /* Some well known registers that we refer to directly elsewhere. */
615 /* ARM instructions take 4bytes in the object file, Thumb instructions
621 /* Basic string to match. */
622 const char * template_name
;
624 /* Parameters to instruction. */
625 unsigned int operands
[8];
627 /* Conditional tag - see opcode_lookup. */
628 unsigned int tag
: 4;
630 /* Basic instruction code. */
631 unsigned int avalue
: 28;
633 /* Thumb-format instruction code. */
636 /* Which architecture variant provides this instruction. */
637 const arm_feature_set
* avariant
;
638 const arm_feature_set
* tvariant
;
640 /* Function to call to encode instruction in ARM format. */
641 void (* aencode
) (void);
643 /* Function to call to encode instruction in Thumb format. */
644 void (* tencode
) (void);
647 /* Defines for various bits that we will want to toggle. */
648 #define INST_IMMEDIATE 0x02000000
649 #define OFFSET_REG 0x02000000
650 #define HWOFFSET_IMM 0x00400000
651 #define SHIFT_BY_REG 0x00000010
652 #define PRE_INDEX 0x01000000
653 #define INDEX_UP 0x00800000
654 #define WRITE_BACK 0x00200000
655 #define LDM_TYPE_2_OR_3 0x00400000
656 #define CPSI_MMOD 0x00020000
658 #define LITERAL_MASK 0xf000f000
659 #define OPCODE_MASK 0xfe1fffff
660 #define V4_STR_BIT 0x00000020
661 #define VLDR_VMOV_SAME 0x0040f000
663 #define T2_SUBS_PC_LR 0xf3de8f00
665 #define DATA_OP_SHIFT 21
667 #define T2_OPCODE_MASK 0xfe1fffff
668 #define T2_DATA_OP_SHIFT 21
670 #define A_COND_MASK 0xf0000000
671 #define A_PUSH_POP_OP_MASK 0x0fff0000
673 /* Opcodes for pushing/poping registers to/from the stack. */
674 #define A1_OPCODE_PUSH 0x092d0000
675 #define A2_OPCODE_PUSH 0x052d0004
676 #define A2_OPCODE_POP 0x049d0004
678 /* Codes to distinguish the arithmetic instructions. */
689 #define OPCODE_CMP 10
690 #define OPCODE_CMN 11
691 #define OPCODE_ORR 12
692 #define OPCODE_MOV 13
693 #define OPCODE_BIC 14
694 #define OPCODE_MVN 15
696 #define T2_OPCODE_AND 0
697 #define T2_OPCODE_BIC 1
698 #define T2_OPCODE_ORR 2
699 #define T2_OPCODE_ORN 3
700 #define T2_OPCODE_EOR 4
701 #define T2_OPCODE_ADD 8
702 #define T2_OPCODE_ADC 10
703 #define T2_OPCODE_SBC 11
704 #define T2_OPCODE_SUB 13
705 #define T2_OPCODE_RSB 14
707 #define T_OPCODE_MUL 0x4340
708 #define T_OPCODE_TST 0x4200
709 #define T_OPCODE_CMN 0x42c0
710 #define T_OPCODE_NEG 0x4240
711 #define T_OPCODE_MVN 0x43c0
713 #define T_OPCODE_ADD_R3 0x1800
714 #define T_OPCODE_SUB_R3 0x1a00
715 #define T_OPCODE_ADD_HI 0x4400
716 #define T_OPCODE_ADD_ST 0xb000
717 #define T_OPCODE_SUB_ST 0xb080
718 #define T_OPCODE_ADD_SP 0xa800
719 #define T_OPCODE_ADD_PC 0xa000
720 #define T_OPCODE_ADD_I8 0x3000
721 #define T_OPCODE_SUB_I8 0x3800
722 #define T_OPCODE_ADD_I3 0x1c00
723 #define T_OPCODE_SUB_I3 0x1e00
725 #define T_OPCODE_ASR_R 0x4100
726 #define T_OPCODE_LSL_R 0x4080
727 #define T_OPCODE_LSR_R 0x40c0
728 #define T_OPCODE_ROR_R 0x41c0
729 #define T_OPCODE_ASR_I 0x1000
730 #define T_OPCODE_LSL_I 0x0000
731 #define T_OPCODE_LSR_I 0x0800
733 #define T_OPCODE_MOV_I8 0x2000
734 #define T_OPCODE_CMP_I8 0x2800
735 #define T_OPCODE_CMP_LR 0x4280
736 #define T_OPCODE_MOV_HR 0x4600
737 #define T_OPCODE_CMP_HR 0x4500
739 #define T_OPCODE_LDR_PC 0x4800
740 #define T_OPCODE_LDR_SP 0x9800
741 #define T_OPCODE_STR_SP 0x9000
742 #define T_OPCODE_LDR_IW 0x6800
743 #define T_OPCODE_STR_IW 0x6000
744 #define T_OPCODE_LDR_IH 0x8800
745 #define T_OPCODE_STR_IH 0x8000
746 #define T_OPCODE_LDR_IB 0x7800
747 #define T_OPCODE_STR_IB 0x7000
748 #define T_OPCODE_LDR_RW 0x5800
749 #define T_OPCODE_STR_RW 0x5000
750 #define T_OPCODE_LDR_RH 0x5a00
751 #define T_OPCODE_STR_RH 0x5200
752 #define T_OPCODE_LDR_RB 0x5c00
753 #define T_OPCODE_STR_RB 0x5400
755 #define T_OPCODE_PUSH 0xb400
756 #define T_OPCODE_POP 0xbc00
758 #define T_OPCODE_BRANCH 0xe000
760 #define THUMB_SIZE 2 /* Size of thumb instruction. */
761 #define THUMB_PP_PC_LR 0x0100
762 #define THUMB_LOAD_BIT 0x0800
763 #define THUMB2_LOAD_BIT 0x00100000
765 #define BAD_ARGS _("bad arguments to instruction")
766 #define BAD_SP _("r13 not allowed here")
767 #define BAD_PC _("r15 not allowed here")
768 #define BAD_COND _("instruction cannot be conditional")
769 #define BAD_OVERLAP _("registers may not be the same")
770 #define BAD_HIREG _("lo register required")
771 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
772 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode");
773 #define BAD_BRANCH _("branch must be last instruction in IT block")
774 #define BAD_NOT_IT _("instruction not allowed in IT block")
775 #define BAD_FPU _("selected FPU does not support instruction")
776 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
777 #define BAD_IT_COND _("incorrect condition in IT block")
778 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
779 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
780 #define BAD_PC_ADDRESSING \
781 _("cannot use register index with PC-relative addressing")
782 #define BAD_PC_WRITEBACK \
783 _("cannot use writeback with PC-relative addressing")
784 #define BAD_RANGE _("branch out of range")
785 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
787 static struct hash_control
* arm_ops_hsh
;
788 static struct hash_control
* arm_cond_hsh
;
789 static struct hash_control
* arm_shift_hsh
;
790 static struct hash_control
* arm_psr_hsh
;
791 static struct hash_control
* arm_v7m_psr_hsh
;
792 static struct hash_control
* arm_reg_hsh
;
793 static struct hash_control
* arm_reloc_hsh
;
794 static struct hash_control
* arm_barrier_opt_hsh
;
796 /* Stuff needed to resolve the label ambiguity
805 symbolS
* last_label_seen
;
806 static int label_is_thumb_function_name
= FALSE
;
808 /* Literal pool structure. Held on a per-section
809 and per-sub-section basis. */
811 #define MAX_LITERAL_POOL_SIZE 1024
812 typedef struct literal_pool
814 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
815 unsigned int next_free_entry
;
821 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
823 struct literal_pool
* next
;
824 unsigned int alignment
;
827 /* Pointer to a linked list of literal pools. */
828 literal_pool
* list_of_pools
= NULL
;
830 typedef enum asmfunc_states
833 WAITING_ASMFUNC_NAME
,
837 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
840 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
842 static struct current_it now_it
;
846 now_it_compatible (int cond
)
848 return (cond
& ~1) == (now_it
.cc
& ~1);
852 conditional_insn (void)
854 return inst
.cond
!= COND_ALWAYS
;
857 static int in_it_block (void);
859 static int handle_it_state (void);
861 static void force_automatic_it_block_close (void);
863 static void it_fsm_post_encode (void);
865 #define set_it_insn_type(type) \
868 inst.it_insn_type = type; \
869 if (handle_it_state () == FAIL) \
874 #define set_it_insn_type_nonvoid(type, failret) \
877 inst.it_insn_type = type; \
878 if (handle_it_state () == FAIL) \
883 #define set_it_insn_type_last() \
886 if (inst.cond == COND_ALWAYS) \
887 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
889 set_it_insn_type (INSIDE_IT_LAST_INSN); \
895 /* This array holds the chars that always start a comment. If the
896 pre-processor is disabled, these aren't very useful. */
897 char arm_comment_chars
[] = "@";
899 /* This array holds the chars that only start a comment at the beginning of
900 a line. If the line seems to have the form '# 123 filename'
901 .line and .file directives will appear in the pre-processed output. */
902 /* Note that input_file.c hand checks for '#' at the beginning of the
903 first line of the input file. This is because the compiler outputs
904 #NO_APP at the beginning of its output. */
905 /* Also note that comments like this one will always work. */
906 const char line_comment_chars
[] = "#";
908 char arm_line_separator_chars
[] = ";";
910 /* Chars that can be used to separate mant
911 from exp in floating point numbers. */
912 const char EXP_CHARS
[] = "eE";
914 /* Chars that mean this number is a floating point constant. */
918 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
920 /* Prefix characters that indicate the start of an immediate
922 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
924 /* Separator character handling. */
926 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
929 skip_past_char (char ** str
, char c
)
931 /* PR gas/14987: Allow for whitespace before the expected character. */
932 skip_whitespace (*str
);
943 #define skip_past_comma(str) skip_past_char (str, ',')
945 /* Arithmetic expressions (possibly involving symbols). */
947 /* Return TRUE if anything in the expression is a bignum. */
950 walk_no_bignums (symbolS
* sp
)
952 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
955 if (symbol_get_value_expression (sp
)->X_add_symbol
)
957 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
958 || (symbol_get_value_expression (sp
)->X_op_symbol
959 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
965 static int in_my_get_expression
= 0;
967 /* Third argument to my_get_expression. */
968 #define GE_NO_PREFIX 0
969 #define GE_IMM_PREFIX 1
970 #define GE_OPT_PREFIX 2
971 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
972 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
973 #define GE_OPT_PREFIX_BIG 3
976 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
981 /* In unified syntax, all prefixes are optional. */
983 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
988 case GE_NO_PREFIX
: break;
990 if (!is_immediate_prefix (**str
))
992 inst
.error
= _("immediate expression requires a # prefix");
998 case GE_OPT_PREFIX_BIG
:
999 if (is_immediate_prefix (**str
))
1005 memset (ep
, 0, sizeof (expressionS
));
1007 save_in
= input_line_pointer
;
1008 input_line_pointer
= *str
;
1009 in_my_get_expression
= 1;
1010 seg
= expression (ep
);
1011 in_my_get_expression
= 0;
1013 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1015 /* We found a bad or missing expression in md_operand(). */
1016 *str
= input_line_pointer
;
1017 input_line_pointer
= save_in
;
1018 if (inst
.error
== NULL
)
1019 inst
.error
= (ep
->X_op
== O_absent
1020 ? _("missing expression") :_("bad expression"));
1025 if (seg
!= absolute_section
1026 && seg
!= text_section
1027 && seg
!= data_section
1028 && seg
!= bss_section
1029 && seg
!= undefined_section
)
1031 inst
.error
= _("bad segment");
1032 *str
= input_line_pointer
;
1033 input_line_pointer
= save_in
;
1040 /* Get rid of any bignums now, so that we don't generate an error for which
1041 we can't establish a line number later on. Big numbers are never valid
1042 in instructions, which is where this routine is always called. */
1043 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1044 && (ep
->X_op
== O_big
1045 || (ep
->X_add_symbol
1046 && (walk_no_bignums (ep
->X_add_symbol
)
1048 && walk_no_bignums (ep
->X_op_symbol
))))))
1050 inst
.error
= _("invalid constant");
1051 *str
= input_line_pointer
;
1052 input_line_pointer
= save_in
;
1056 *str
= input_line_pointer
;
1057 input_line_pointer
= save_in
;
1061 /* Turn a string in input_line_pointer into a floating point constant
1062 of type TYPE, and store the appropriate bytes in *LITP. The number
1063 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1064 returned, or NULL on OK.
1066 Note that fp constants aren't represent in the normal way on the ARM.
1067 In big endian mode, things are as expected. However, in little endian
1068 mode fp constants are big-endian word-wise, and little-endian byte-wise
1069 within the words. For example, (double) 1.1 in big endian mode is
1070 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1071 the byte sequence 99 99 f1 3f 9a 99 99 99.
1073 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1076 md_atof (int type
, char * litP
, int * sizeP
)
1079 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1111 return _("Unrecognized or unsupported floating point constant");
1114 t
= atof_ieee (input_line_pointer
, type
, words
);
1116 input_line_pointer
= t
;
1117 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1119 if (target_big_endian
)
1121 for (i
= 0; i
< prec
; i
++)
1123 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1124 litP
+= sizeof (LITTLENUM_TYPE
);
1129 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1130 for (i
= prec
- 1; i
>= 0; i
--)
1132 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1133 litP
+= sizeof (LITTLENUM_TYPE
);
1136 /* For a 4 byte float the order of elements in `words' is 1 0.
1137 For an 8 byte float the order is 1 0 3 2. */
1138 for (i
= 0; i
< prec
; i
+= 2)
1140 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1141 sizeof (LITTLENUM_TYPE
));
1142 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1143 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1144 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1151 /* We handle all bad expressions here, so that we can report the faulty
1152 instruction in the error message. */
1154 md_operand (expressionS
* exp
)
1156 if (in_my_get_expression
)
1157 exp
->X_op
= O_illegal
;
1160 /* Immediate values. */
1162 /* Generic immediate-value read function for use in directives.
1163 Accepts anything that 'expression' can fold to a constant.
1164 *val receives the number. */
1167 immediate_for_directive (int *val
)
1170 exp
.X_op
= O_illegal
;
1172 if (is_immediate_prefix (*input_line_pointer
))
1174 input_line_pointer
++;
1178 if (exp
.X_op
!= O_constant
)
1180 as_bad (_("expected #constant"));
1181 ignore_rest_of_line ();
1184 *val
= exp
.X_add_number
;
1189 /* Register parsing. */
1191 /* Generic register parser. CCP points to what should be the
1192 beginning of a register name. If it is indeed a valid register
1193 name, advance CCP over it and return the reg_entry structure;
1194 otherwise return NULL. Does not issue diagnostics. */
1196 static struct reg_entry
*
1197 arm_reg_parse_multi (char **ccp
)
1201 struct reg_entry
*reg
;
1203 skip_whitespace (start
);
1205 #ifdef REGISTER_PREFIX
1206 if (*start
!= REGISTER_PREFIX
)
1210 #ifdef OPTIONAL_REGISTER_PREFIX
1211 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1216 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1221 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1223 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1233 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1234 enum arm_reg_type type
)
1236 /* Alternative syntaxes are accepted for a few register classes. */
1243 /* Generic coprocessor register names are allowed for these. */
1244 if (reg
&& reg
->type
== REG_TYPE_CN
)
1249 /* For backward compatibility, a bare number is valid here. */
1251 unsigned long processor
= strtoul (start
, ccp
, 10);
1252 if (*ccp
!= start
&& processor
<= 15)
1256 case REG_TYPE_MMXWC
:
1257 /* WC includes WCG. ??? I'm not sure this is true for all
1258 instructions that take WC registers. */
1259 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1270 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1271 return value is the register number or FAIL. */
1274 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1277 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1280 /* Do not allow a scalar (reg+index) to parse as a register. */
1281 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1284 if (reg
&& reg
->type
== type
)
1287 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1294 /* Parse a Neon type specifier. *STR should point at the leading '.'
1295 character. Does no verification at this stage that the type fits the opcode
1302 Can all be legally parsed by this function.
1304 Fills in neon_type struct pointer with parsed information, and updates STR
1305 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1306 type, FAIL if not. */
1309 parse_neon_type (struct neon_type
*type
, char **str
)
1316 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1318 enum neon_el_type thistype
= NT_untyped
;
1319 unsigned thissize
= -1u;
1326 /* Just a size without an explicit type. */
1330 switch (TOLOWER (*ptr
))
1332 case 'i': thistype
= NT_integer
; break;
1333 case 'f': thistype
= NT_float
; break;
1334 case 'p': thistype
= NT_poly
; break;
1335 case 's': thistype
= NT_signed
; break;
1336 case 'u': thistype
= NT_unsigned
; break;
1338 thistype
= NT_float
;
1343 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1349 /* .f is an abbreviation for .f32. */
1350 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1355 thissize
= strtoul (ptr
, &ptr
, 10);
1357 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1360 as_bad (_("bad size %d in type specifier"), thissize
);
1368 type
->el
[type
->elems
].type
= thistype
;
1369 type
->el
[type
->elems
].size
= thissize
;
1374 /* Empty/missing type is not a successful parse. */
1375 if (type
->elems
== 0)
1383 /* Errors may be set multiple times during parsing or bit encoding
1384 (particularly in the Neon bits), but usually the earliest error which is set
1385 will be the most meaningful. Avoid overwriting it with later (cascading)
1386 errors by calling this function. */
1389 first_error (const char *err
)
1395 /* Parse a single type, e.g. ".s32", leading period included. */
1397 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1400 struct neon_type optype
;
1404 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1406 if (optype
.elems
== 1)
1407 *vectype
= optype
.el
[0];
1410 first_error (_("only one type should be specified for operand"));
1416 first_error (_("vector type expected"));
1428 /* Special meanings for indices (which have a range of 0-7), which will fit into
1431 #define NEON_ALL_LANES 15
1432 #define NEON_INTERLEAVE_LANES 14
1434 /* Parse either a register or a scalar, with an optional type. Return the
1435 register number, and optionally fill in the actual type of the register
1436 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1437 type/index information in *TYPEINFO. */
1440 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1441 enum arm_reg_type
*rtype
,
1442 struct neon_typed_alias
*typeinfo
)
1445 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1446 struct neon_typed_alias atype
;
1447 struct neon_type_el parsetype
;
1451 atype
.eltype
.type
= NT_invtype
;
1452 atype
.eltype
.size
= -1;
1454 /* Try alternate syntax for some types of register. Note these are mutually
1455 exclusive with the Neon syntax extensions. */
1458 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1466 /* Undo polymorphism when a set of register types may be accepted. */
1467 if ((type
== REG_TYPE_NDQ
1468 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1469 || (type
== REG_TYPE_VFSD
1470 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1471 || (type
== REG_TYPE_NSDQ
1472 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1473 || reg
->type
== REG_TYPE_NQ
))
1474 || (type
== REG_TYPE_MMXWC
1475 && (reg
->type
== REG_TYPE_MMXWCG
)))
1476 type
= (enum arm_reg_type
) reg
->type
;
1478 if (type
!= reg
->type
)
1484 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1486 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1488 first_error (_("can't redefine type for operand"));
1491 atype
.defined
|= NTA_HASTYPE
;
1492 atype
.eltype
= parsetype
;
1495 if (skip_past_char (&str
, '[') == SUCCESS
)
1497 if (type
!= REG_TYPE_VFD
)
1499 first_error (_("only D registers may be indexed"));
1503 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1505 first_error (_("can't change index for operand"));
1509 atype
.defined
|= NTA_HASINDEX
;
1511 if (skip_past_char (&str
, ']') == SUCCESS
)
1512 atype
.index
= NEON_ALL_LANES
;
1517 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1519 if (exp
.X_op
!= O_constant
)
1521 first_error (_("constant expression required"));
1525 if (skip_past_char (&str
, ']') == FAIL
)
1528 atype
.index
= exp
.X_add_number
;
1543 /* Like arm_reg_parse, but allow allow the following extra features:
1544 - If RTYPE is non-zero, return the (possibly restricted) type of the
1545 register (e.g. Neon double or quad reg when either has been requested).
1546 - If this is a Neon vector type with additional type information, fill
1547 in the struct pointed to by VECTYPE (if non-NULL).
1548 This function will fault on encountering a scalar. */
1551 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1552 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1554 struct neon_typed_alias atype
;
1556 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1561 /* Do not allow regname(... to parse as a register. */
1565 /* Do not allow a scalar (reg+index) to parse as a register. */
1566 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1568 first_error (_("register operand expected, but got scalar"));
1573 *vectype
= atype
.eltype
;
1580 #define NEON_SCALAR_REG(X) ((X) >> 4)
1581 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1583 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1584 have enough information to be able to do a good job bounds-checking. So, we
1585 just do easy checks here, and do further checks later. */
1588 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1592 struct neon_typed_alias atype
;
1594 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1596 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1599 if (atype
.index
== NEON_ALL_LANES
)
1601 first_error (_("scalar must have an index"));
1604 else if (atype
.index
>= 64 / elsize
)
1606 first_error (_("scalar index out of range"));
1611 *type
= atype
.eltype
;
1615 return reg
* 16 + atype
.index
;
1618 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1621 parse_reg_list (char ** strp
)
1623 char * str
= * strp
;
1627 /* We come back here if we get ranges concatenated by '+' or '|'. */
1630 skip_whitespace (str
);
1644 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1646 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1656 first_error (_("bad range in register list"));
1660 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1662 if (range
& (1 << i
))
1664 (_("Warning: duplicated register (r%d) in register list"),
1672 if (range
& (1 << reg
))
1673 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1675 else if (reg
<= cur_reg
)
1676 as_tsktsk (_("Warning: register range not in ascending order"));
1681 while (skip_past_comma (&str
) != FAIL
1682 || (in_range
= 1, *str
++ == '-'));
1685 if (skip_past_char (&str
, '}') == FAIL
)
1687 first_error (_("missing `}'"));
1695 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1698 if (exp
.X_op
== O_constant
)
1700 if (exp
.X_add_number
1701 != (exp
.X_add_number
& 0x0000ffff))
1703 inst
.error
= _("invalid register mask");
1707 if ((range
& exp
.X_add_number
) != 0)
1709 int regno
= range
& exp
.X_add_number
;
1712 regno
= (1 << regno
) - 1;
1714 (_("Warning: duplicated register (r%d) in register list"),
1718 range
|= exp
.X_add_number
;
1722 if (inst
.reloc
.type
!= 0)
1724 inst
.error
= _("expression too complex");
1728 memcpy (&inst
.reloc
.exp
, &exp
, sizeof (expressionS
));
1729 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1730 inst
.reloc
.pc_rel
= 0;
1734 if (*str
== '|' || *str
== '+')
1740 while (another_range
);
1746 /* Types of registers in a list. */
1755 /* Parse a VFP register list. If the string is invalid return FAIL.
1756 Otherwise return the number of registers, and set PBASE to the first
1757 register. Parses registers of type ETYPE.
1758 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1759 - Q registers can be used to specify pairs of D registers
1760 - { } can be omitted from around a singleton register list
1761 FIXME: This is not implemented, as it would require backtracking in
1764 This could be done (the meaning isn't really ambiguous), but doesn't
1765 fit in well with the current parsing framework.
1766 - 32 D registers may be used (also true for VFPv3).
1767 FIXME: Types are ignored in these register lists, which is probably a
1771 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1776 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1780 unsigned long mask
= 0;
1783 if (skip_past_char (&str
, '{') == FAIL
)
1785 inst
.error
= _("expecting {");
1792 regtype
= REG_TYPE_VFS
;
1797 regtype
= REG_TYPE_VFD
;
1800 case REGLIST_NEON_D
:
1801 regtype
= REG_TYPE_NDQ
;
1805 if (etype
!= REGLIST_VFP_S
)
1807 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1808 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1812 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1815 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1822 base_reg
= max_regs
;
1826 int setmask
= 1, addregs
= 1;
1828 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1830 if (new_base
== FAIL
)
1832 first_error (_(reg_expected_msgs
[regtype
]));
1836 if (new_base
>= max_regs
)
1838 first_error (_("register out of range in list"));
1842 /* Note: a value of 2 * n is returned for the register Q<n>. */
1843 if (regtype
== REG_TYPE_NQ
)
1849 if (new_base
< base_reg
)
1850 base_reg
= new_base
;
1852 if (mask
& (setmask
<< new_base
))
1854 first_error (_("invalid register list"));
1858 if ((mask
>> new_base
) != 0 && ! warned
)
1860 as_tsktsk (_("register list not in ascending order"));
1864 mask
|= setmask
<< new_base
;
1867 if (*str
== '-') /* We have the start of a range expression */
1873 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1876 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1880 if (high_range
>= max_regs
)
1882 first_error (_("register out of range in list"));
1886 if (regtype
== REG_TYPE_NQ
)
1887 high_range
= high_range
+ 1;
1889 if (high_range
<= new_base
)
1891 inst
.error
= _("register range not in ascending order");
1895 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1897 if (mask
& (setmask
<< new_base
))
1899 inst
.error
= _("invalid register list");
1903 mask
|= setmask
<< new_base
;
1908 while (skip_past_comma (&str
) != FAIL
);
1912 /* Sanity check -- should have raised a parse error above. */
1913 if (count
== 0 || count
> max_regs
)
1918 /* Final test -- the registers must be consecutive. */
1920 for (i
= 0; i
< count
; i
++)
1922 if ((mask
& (1u << i
)) == 0)
1924 inst
.error
= _("non-contiguous register range");
1934 /* True if two alias types are the same. */
1937 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1945 if (a
->defined
!= b
->defined
)
1948 if ((a
->defined
& NTA_HASTYPE
) != 0
1949 && (a
->eltype
.type
!= b
->eltype
.type
1950 || a
->eltype
.size
!= b
->eltype
.size
))
1953 if ((a
->defined
& NTA_HASINDEX
) != 0
1954 && (a
->index
!= b
->index
))
1960 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1961 The base register is put in *PBASE.
1962 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1964 The register stride (minus one) is put in bit 4 of the return value.
1965 Bits [6:5] encode the list length (minus one).
1966 The type of the list elements is put in *ELTYPE, if non-NULL. */
1968 #define NEON_LANE(X) ((X) & 0xf)
1969 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1970 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1973 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
1974 struct neon_type_el
*eltype
)
1981 int leading_brace
= 0;
1982 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
1983 const char *const incr_error
= _("register stride must be 1 or 2");
1984 const char *const type_error
= _("mismatched element/structure types in list");
1985 struct neon_typed_alias firsttype
;
1987 if (skip_past_char (&ptr
, '{') == SUCCESS
)
1992 struct neon_typed_alias atype
;
1993 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
1997 first_error (_(reg_expected_msgs
[rtype
]));
2004 if (rtype
== REG_TYPE_NQ
)
2010 else if (reg_incr
== -1)
2012 reg_incr
= getreg
- base_reg
;
2013 if (reg_incr
< 1 || reg_incr
> 2)
2015 first_error (_(incr_error
));
2019 else if (getreg
!= base_reg
+ reg_incr
* count
)
2021 first_error (_(incr_error
));
2025 if (! neon_alias_types_same (&atype
, &firsttype
))
2027 first_error (_(type_error
));
2031 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2035 struct neon_typed_alias htype
;
2036 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2038 lane
= NEON_INTERLEAVE_LANES
;
2039 else if (lane
!= NEON_INTERLEAVE_LANES
)
2041 first_error (_(type_error
));
2046 else if (reg_incr
!= 1)
2048 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2052 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2055 first_error (_(reg_expected_msgs
[rtype
]));
2058 if (! neon_alias_types_same (&htype
, &firsttype
))
2060 first_error (_(type_error
));
2063 count
+= hireg
+ dregs
- getreg
;
2067 /* If we're using Q registers, we can't use [] or [n] syntax. */
2068 if (rtype
== REG_TYPE_NQ
)
2074 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2078 else if (lane
!= atype
.index
)
2080 first_error (_(type_error
));
2084 else if (lane
== -1)
2085 lane
= NEON_INTERLEAVE_LANES
;
2086 else if (lane
!= NEON_INTERLEAVE_LANES
)
2088 first_error (_(type_error
));
2093 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2095 /* No lane set by [x]. We must be interleaving structures. */
2097 lane
= NEON_INTERLEAVE_LANES
;
2100 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2101 || (count
> 1 && reg_incr
== -1))
2103 first_error (_("error parsing element/structure list"));
2107 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2109 first_error (_("expected }"));
2117 *eltype
= firsttype
.eltype
;
2122 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2125 /* Parse an explicit relocation suffix on an expression. This is
2126 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2127 arm_reloc_hsh contains no entries, so this function can only
2128 succeed if there is no () after the word. Returns -1 on error,
2129 BFD_RELOC_UNUSED if there wasn't any suffix. */
2132 parse_reloc (char **str
)
2134 struct reloc_entry
*r
;
2138 return BFD_RELOC_UNUSED
;
2143 while (*q
&& *q
!= ')' && *q
!= ',')
2148 if ((r
= (struct reloc_entry
*)
2149 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2156 /* Directives: register aliases. */
2158 static struct reg_entry
*
2159 insert_reg_alias (char *str
, unsigned number
, int type
)
2161 struct reg_entry
*new_reg
;
2164 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2166 if (new_reg
->builtin
)
2167 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2169 /* Only warn about a redefinition if it's not defined as the
2171 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2172 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2177 name
= xstrdup (str
);
2178 new_reg
= (struct reg_entry
*) xmalloc (sizeof (struct reg_entry
));
2180 new_reg
->name
= name
;
2181 new_reg
->number
= number
;
2182 new_reg
->type
= type
;
2183 new_reg
->builtin
= FALSE
;
2184 new_reg
->neon
= NULL
;
2186 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2193 insert_neon_reg_alias (char *str
, int number
, int type
,
2194 struct neon_typed_alias
*atype
)
2196 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2200 first_error (_("attempt to redefine typed alias"));
2206 reg
->neon
= (struct neon_typed_alias
*)
2207 xmalloc (sizeof (struct neon_typed_alias
));
2208 *reg
->neon
= *atype
;
2212 /* Look for the .req directive. This is of the form:
2214 new_register_name .req existing_register_name
2216 If we find one, or if it looks sufficiently like one that we want to
2217 handle any error here, return TRUE. Otherwise return FALSE. */
2220 create_register_alias (char * newname
, char *p
)
2222 struct reg_entry
*old
;
2223 char *oldname
, *nbuf
;
2226 /* The input scrubber ensures that whitespace after the mnemonic is
2227 collapsed to single spaces. */
2229 if (strncmp (oldname
, " .req ", 6) != 0)
2233 if (*oldname
== '\0')
2236 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2239 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2243 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2244 the desired alias name, and p points to its end. If not, then
2245 the desired alias name is in the global original_case_string. */
2246 #ifdef TC_CASE_SENSITIVE
2249 newname
= original_case_string
;
2250 nlen
= strlen (newname
);
2253 nbuf
= (char *) alloca (nlen
+ 1);
2254 memcpy (nbuf
, newname
, nlen
);
2257 /* Create aliases under the new name as stated; an all-lowercase
2258 version of the new name; and an all-uppercase version of the new
2260 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2262 for (p
= nbuf
; *p
; p
++)
2265 if (strncmp (nbuf
, newname
, nlen
))
2267 /* If this attempt to create an additional alias fails, do not bother
2268 trying to create the all-lower case alias. We will fail and issue
2269 a second, duplicate error message. This situation arises when the
2270 programmer does something like:
2273 The second .req creates the "Foo" alias but then fails to create
2274 the artificial FOO alias because it has already been created by the
2276 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2280 for (p
= nbuf
; *p
; p
++)
2283 if (strncmp (nbuf
, newname
, nlen
))
2284 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2290 /* Create a Neon typed/indexed register alias using directives, e.g.:
2295 These typed registers can be used instead of the types specified after the
2296 Neon mnemonic, so long as all operands given have types. Types can also be
2297 specified directly, e.g.:
2298 vadd d0.s32, d1.s32, d2.s32 */
2301 create_neon_reg_alias (char *newname
, char *p
)
2303 enum arm_reg_type basetype
;
2304 struct reg_entry
*basereg
;
2305 struct reg_entry mybasereg
;
2306 struct neon_type ntype
;
2307 struct neon_typed_alias typeinfo
;
2308 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2311 typeinfo
.defined
= 0;
2312 typeinfo
.eltype
.type
= NT_invtype
;
2313 typeinfo
.eltype
.size
= -1;
2314 typeinfo
.index
= -1;
2318 if (strncmp (p
, " .dn ", 5) == 0)
2319 basetype
= REG_TYPE_VFD
;
2320 else if (strncmp (p
, " .qn ", 5) == 0)
2321 basetype
= REG_TYPE_NQ
;
2330 basereg
= arm_reg_parse_multi (&p
);
2332 if (basereg
&& basereg
->type
!= basetype
)
2334 as_bad (_("bad type for register"));
2338 if (basereg
== NULL
)
2341 /* Try parsing as an integer. */
2342 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2343 if (exp
.X_op
!= O_constant
)
2345 as_bad (_("expression must be constant"));
2348 basereg
= &mybasereg
;
2349 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2355 typeinfo
= *basereg
->neon
;
2357 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2359 /* We got a type. */
2360 if (typeinfo
.defined
& NTA_HASTYPE
)
2362 as_bad (_("can't redefine the type of a register alias"));
2366 typeinfo
.defined
|= NTA_HASTYPE
;
2367 if (ntype
.elems
!= 1)
2369 as_bad (_("you must specify a single type only"));
2372 typeinfo
.eltype
= ntype
.el
[0];
2375 if (skip_past_char (&p
, '[') == SUCCESS
)
2378 /* We got a scalar index. */
2380 if (typeinfo
.defined
& NTA_HASINDEX
)
2382 as_bad (_("can't redefine the index of a scalar alias"));
2386 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2388 if (exp
.X_op
!= O_constant
)
2390 as_bad (_("scalar index must be constant"));
2394 typeinfo
.defined
|= NTA_HASINDEX
;
2395 typeinfo
.index
= exp
.X_add_number
;
2397 if (skip_past_char (&p
, ']') == FAIL
)
2399 as_bad (_("expecting ]"));
2404 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2405 the desired alias name, and p points to its end. If not, then
2406 the desired alias name is in the global original_case_string. */
2407 #ifdef TC_CASE_SENSITIVE
2408 namelen
= nameend
- newname
;
2410 newname
= original_case_string
;
2411 namelen
= strlen (newname
);
2414 namebuf
= (char *) alloca (namelen
+ 1);
2415 strncpy (namebuf
, newname
, namelen
);
2416 namebuf
[namelen
] = '\0';
2418 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2419 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2421 /* Insert name in all uppercase. */
2422 for (p
= namebuf
; *p
; p
++)
2425 if (strncmp (namebuf
, newname
, namelen
))
2426 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2427 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2429 /* Insert name in all lowercase. */
2430 for (p
= namebuf
; *p
; p
++)
2433 if (strncmp (namebuf
, newname
, namelen
))
2434 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2435 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2440 /* Should never be called, as .req goes between the alias and the
2441 register name, not at the beginning of the line. */
2444 s_req (int a ATTRIBUTE_UNUSED
)
2446 as_bad (_("invalid syntax for .req directive"));
2450 s_dn (int a ATTRIBUTE_UNUSED
)
2452 as_bad (_("invalid syntax for .dn directive"));
2456 s_qn (int a ATTRIBUTE_UNUSED
)
2458 as_bad (_("invalid syntax for .qn directive"));
2461 /* The .unreq directive deletes an alias which was previously defined
2462 by .req. For example:
2468 s_unreq (int a ATTRIBUTE_UNUSED
)
2473 name
= input_line_pointer
;
2475 while (*input_line_pointer
!= 0
2476 && *input_line_pointer
!= ' '
2477 && *input_line_pointer
!= '\n')
2478 ++input_line_pointer
;
2480 saved_char
= *input_line_pointer
;
2481 *input_line_pointer
= 0;
2484 as_bad (_("invalid syntax for .unreq directive"));
2487 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2491 as_bad (_("unknown register alias '%s'"), name
);
2492 else if (reg
->builtin
)
2493 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2500 hash_delete (arm_reg_hsh
, name
, FALSE
);
2501 free ((char *) reg
->name
);
2506 /* Also locate the all upper case and all lower case versions.
2507 Do not complain if we cannot find one or the other as it
2508 was probably deleted above. */
2510 nbuf
= strdup (name
);
2511 for (p
= nbuf
; *p
; p
++)
2513 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2516 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2517 free ((char *) reg
->name
);
2523 for (p
= nbuf
; *p
; p
++)
2525 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2528 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2529 free ((char *) reg
->name
);
2539 *input_line_pointer
= saved_char
;
2540 demand_empty_rest_of_line ();
2543 /* Directives: Instruction set selection. */
2546 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2547 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2548 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2549 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2551 /* Create a new mapping symbol for the transition to STATE. */
2554 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2557 const char * symname
;
2564 type
= BSF_NO_FLAGS
;
2568 type
= BSF_NO_FLAGS
;
2572 type
= BSF_NO_FLAGS
;
2578 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2579 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2584 THUMB_SET_FUNC (symbolP
, 0);
2585 ARM_SET_THUMB (symbolP
, 0);
2586 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2590 THUMB_SET_FUNC (symbolP
, 1);
2591 ARM_SET_THUMB (symbolP
, 1);
2592 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2600 /* Save the mapping symbols for future reference. Also check that
2601 we do not place two mapping symbols at the same offset within a
2602 frag. We'll handle overlap between frags in
2603 check_mapping_symbols.
2605 If .fill or other data filling directive generates zero sized data,
2606 the mapping symbol for the following code will have the same value
2607 as the one generated for the data filling directive. In this case,
2608 we replace the old symbol with the new one at the same address. */
2611 if (frag
->tc_frag_data
.first_map
!= NULL
)
2613 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2614 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2616 frag
->tc_frag_data
.first_map
= symbolP
;
2618 if (frag
->tc_frag_data
.last_map
!= NULL
)
2620 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2621 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2622 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2624 frag
->tc_frag_data
.last_map
= symbolP
;
2627 /* We must sometimes convert a region marked as code to data during
2628 code alignment, if an odd number of bytes have to be padded. The
2629 code mapping symbol is pushed to an aligned address. */
2632 insert_data_mapping_symbol (enum mstate state
,
2633 valueT value
, fragS
*frag
, offsetT bytes
)
2635 /* If there was already a mapping symbol, remove it. */
2636 if (frag
->tc_frag_data
.last_map
!= NULL
2637 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2639 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2643 know (frag
->tc_frag_data
.first_map
== symp
);
2644 frag
->tc_frag_data
.first_map
= NULL
;
2646 frag
->tc_frag_data
.last_map
= NULL
;
2647 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2650 make_mapping_symbol (MAP_DATA
, value
, frag
);
2651 make_mapping_symbol (state
, value
+ bytes
, frag
);
2654 static void mapping_state_2 (enum mstate state
, int max_chars
);
2656 /* Set the mapping state to STATE. Only call this when about to
2657 emit some STATE bytes to the file. */
2659 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2661 mapping_state (enum mstate state
)
2663 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2665 if (mapstate
== state
)
2666 /* The mapping symbol has already been emitted.
2667 There is nothing else to do. */
2670 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2672 All ARM instructions require 4-byte alignment.
2673 (Almost) all Thumb instructions require 2-byte alignment.
2675 When emitting instructions into any section, mark the section
2678 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2679 but themselves require 2-byte alignment; this applies to some
2680 PC- relative forms. However, these cases will invovle implicit
2681 literal pool generation or an explicit .align >=2, both of
2682 which will cause the section to me marked with sufficient
2683 alignment. Thus, we don't handle those cases here. */
2684 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2686 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2687 /* This case will be evaluated later. */
2690 mapping_state_2 (state
, 0);
2693 /* Same as mapping_state, but MAX_CHARS bytes have already been
2694 allocated. Put the mapping symbol that far back. */
2697 mapping_state_2 (enum mstate state
, int max_chars
)
2699 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2701 if (!SEG_NORMAL (now_seg
))
2704 if (mapstate
== state
)
2705 /* The mapping symbol has already been emitted.
2706 There is nothing else to do. */
2709 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2710 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2712 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2713 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2716 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2719 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2720 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2724 #define mapping_state(x) ((void)0)
2725 #define mapping_state_2(x, y) ((void)0)
2728 /* Find the real, Thumb encoded start of a Thumb function. */
/* Find the real, Thumb encoded start of a Thumb function: look up the
   ".real_start_of<name>" companion symbol for SYMBOLP.  Falls back to
   SYMBOLP itself (with a warning) when the stub symbol does not exist.
   NOTE(review): extraction artifact — statements are split across lines
   and some original lines are elided; tokens preserved verbatim.  */
2732 find_real_start (symbolS
* symbolP
)
2735 const char * name
= S_GET_NAME (symbolP
);
2736 symbolS
* new_target
;
2738 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2739 #define STUB_NAME ".real_start_of"
2744 /* The compiler may generate BL instructions to local labels because
2745 it needs to perform a branch to a far away location. These labels
2746 do not have a corresponding ".real_start_of" label. We check
2747 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2748 the ".real_start_of" convention for nonlocal branches. */
2749 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
/* Build "<STUB_NAME><name>" and look it up in the symbol table.  */
2752 real_start
= ACONCAT ((STUB_NAME
, name
, NULL
));
2753 new_target
= symbol_find (real_start
);
2755 if (new_target
== NULL
)
2757 as_warn (_("Failed to find real start of function: %s\n"), name
);
2758 new_target
= symbolP
;
/* Switch the assembler between Thumb (width 16) and ARM (width 32)
   instruction modes, diagnosing CPUs that lack the requested ISA.
   NOTE(review): extraction artifact — some original lines elided;
   tokens preserved verbatim.  */
2766 opcode_select (int width
)
/* Thumb requested but CPU lacks v4t Thumb support.  */
2773 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2774 as_bad (_("selected processor does not support THUMB opcodes"));
2777 /* No need to force the alignment, since we will have been
2778 coming from ARM mode, which is word-aligned. */
2779 record_alignment (now_seg
, 1);
/* ARM requested but CPU lacks base ARM ISA.  */
2786 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2787 as_bad (_("selected processor does not support ARM opcodes"));
/* Coming from Thumb mode: force word alignment for ARM code.  */
2792 frag_align (2, 0, 0);
2794 record_alignment (now_seg
, 1);
/* Any width other than 16/32 is rejected.  */
2799 as_bad (_("invalid instruction size selected (%d)"), width
);
/* Handle the .arm directive (body partially elided by extraction).  */
2804 s_arm (int ignore ATTRIBUTE_UNUSED
)
2807 demand_empty_rest_of_line ();
/* Handle the .thumb directive (body partially elided by extraction).  */
2811 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2814 demand_empty_rest_of_line ();
/* Handle the .code directive: reads an absolute expression and selects
   the opcode width via opcode_select; only 16 or 32 are accepted.
   NOTE(review): some original lines elided; tokens preserved verbatim.  */
2818 s_code (int unused ATTRIBUTE_UNUSED
)
2822 temp
= get_absolute_expression ();
2827 opcode_select (temp
);
2831 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
/* Handle the .force_thumb directive: enter Thumb mode unconditionally,
   even on processors without Thumb support (see comment below).  */
2836 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2838 /* If we are not already in thumb mode go into it, EVEN if
2839 the target processor does not support thumb instructions.
2840 This is used by gcc/config/arm/lib1funcs.asm for example
2841 to compile interworking support functions even if the
2842 target processor should not support interworking. */
2846 record_alignment (now_seg
, 1);
2849 demand_empty_rest_of_line ();
/* Handle the .thumb_func directive: flag that the next label names the
   start of a Thumb function (needed for interworking support).  */
2853 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2857 /* The following label is the name/address of the start of a Thumb function.
2858 We need to know this for the interworking support. */
2859 label_is_thumb_function_name
= TRUE
;
2862 /* Perform a .set directive, but also mark the alias as
2863 being a thumb function. */
/* Handle the .thumb_set directive: perform a .set, then additionally mark
   the alias as a Thumb function (THUMB_SET_FUNC / ARM_SET_THUMB below).
   NOTE(review): extraction artifact — statements are split across physical
   lines and a number of original lines are elided; tokens preserved
   verbatim.  */
2866 s_thumb_set (int equiv
)
2868 /* XXX the following is a duplicate of the code for s_set() in read.c
2869 We cannot just call that code as we need to get at the symbol that
2876 /* Especial apologies for the random logic:
2877 This just grew, and could be parsed much more simply!
/* Parse the symbol name and remember where it ended.  */
2879 delim
= get_symbol_name (& name
);
2880 end_name
= input_line_pointer
;
2881 (void) restore_line_pointer (delim
);
/* A comma must separate the name from the value expression.  */
2883 if (*input_line_pointer
!= ',')
2886 as_bad (_("expected comma after name \"%s\""), name
);
2888 ignore_rest_of_line ();
2892 input_line_pointer
++;
/* A bare "." name is special-cased (not valid for .thumb_set).  */
2895 if (name
[0] == '.' && name
[1] == '\0')
2897 /* XXX - this should not happen to .thumb_set. */
/* Look the symbol up, or create it if it does not exist yet.  */
2901 if ((symbolP
= symbol_find (name
)) == NULL
2902 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2905 /* When doing symbol listings, play games with dummy fragments living
2906 outside the normal fragment chain to record the file and line info
2908 if (listing
& LISTING_SYMBOLS
)
2910 extern struct list_info_struct
* listing_tail
;
/* Dummy frag carries the listing line info for the new symbol.  */
2911 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
2913 memset (dummy_frag
, 0, sizeof (fragS
));
2914 dummy_frag
->fr_type
= rs_fill
;
2915 dummy_frag
->line
= listing_tail
;
2916 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2917 dummy_frag
->fr_symbol
= symbolP
;
/* Non-listing path: plain new symbol on the zero-address frag.  */
2921 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2924 /* "set" symbols are local unless otherwise specified. */
2925 SF_SET_LOCAL (symbolP
);
2926 #endif /* OBJ_COFF */
2927 } /* Make a new symbol. */
2929 symbol_table_insert (symbolP
);
/* Redefinition of an already-defined non-register symbol is an error.  */
2934 && S_IS_DEFINED (symbolP
)
2935 && S_GET_SEGMENT (symbolP
) != reg_section
)
2936 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
/* Assign the parsed value expression to the symbol.  */
2938 pseudo_set (symbolP
);
2940 demand_empty_rest_of_line ();
2942 /* XXX Now we come to the Thumb specific bit of code. */
2944 THUMB_SET_FUNC (symbolP
, 1);
2945 ARM_SET_THUMB (symbolP
, 1);
2946 #if defined OBJ_ELF || defined OBJ_COFF
2947 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2951 /* Directives: Mode selection. */
2953 /* .syntax [unified|divided] - choose the new unified syntax
2954 (same for Arm and Thumb encoding, modulo slight differences in what
2955 can be represented) or the old divergent syntax for each mode. */
/* Handle the .syntax directive: set unified_syntax TRUE for "unified",
   FALSE for "divided", otherwise diagnose (case-insensitive compare).  */
2957 s_syntax (int unused ATTRIBUTE_UNUSED
)
2961 delim
= get_symbol_name (& name
);
2963 if (!strcasecmp (name
, "unified"))
2964 unified_syntax
= TRUE
;
2965 else if (!strcasecmp (name
, "divided"))
2966 unified_syntax
= FALSE
;
2969 as_bad (_("unrecognized syntax mode \"%s\""), name
);
2972 (void) restore_line_pointer (delim
);
2973 demand_empty_rest_of_line ();
2976 /* Directives: sectioning and alignment. */
/* Handle the .bss directive: switch to the bss section.  */
2979 s_bss (int ignore ATTRIBUTE_UNUSED
)
2981 /* We don't support putting frags in the BSS segment, we fake it by
2982 marking in_bss, then looking at s_skip for clues. */
2983 subseg_set (bss_section
, 0);
2984 demand_empty_rest_of_line ();
2986 #ifdef md_elf_section_change_hook
2987 md_elf_section_change_hook ();
/* Handle the .even directive: 2-byte align the current location.  */
2992 s_even (int ignore ATTRIBUTE_UNUSED
)
2994 /* Never make frag if expect extra pass. */
2996 frag_align (1, 0, 0);
2998 record_alignment (now_seg
, 1);
3000 demand_empty_rest_of_line ();
3003 /* Directives: CodeComposer Studio. */
3005 /* .ref (for CodeComposer Studio syntax only). */
/* Handle .ref (CodeComposer Studio syntax only): a no-op under -mccs,
   an error otherwise.  */
3007 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3009 if (codecomposer_syntax
)
3010 ignore_rest_of_line ();
3012 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3015 /* If name is not NULL, then it is used for marking the beginning of a
3016 function, wherease if it is NULL then it means the function end. */
/* Emit STABS function begin/end debug info for a CCS .asmfunc block.
   Non-NULL NAME marks a function start; NULL marks the end (the previous
   name is remembered in LAST_NAME between the two calls).
   NOTE(review): some original lines elided; tokens preserved verbatim.  */
3018 asmfunc_debug (const char * name
)
/* Name recorded at function start, consumed at function end.  */
3020 static const char * last_name
= NULL
;
3024 gas_assert (last_name
== NULL
);
3027 if (debug_type
== DEBUG_STABS
)
3028 stabs_generate_asm_func (name
, name
);
/* End-of-function path: LAST_NAME must have been set by the start call.  */
3032 gas_assert (last_name
!= NULL
);
3034 if (debug_type
== DEBUG_STABS
)
3035 stabs_generate_asm_endfunc (last_name
, last_name
);
/* Handle .asmfunc (CCS syntax): advance the asmfunc state machine from
   OUTSIDE_ASMFUNC to WAITING_ASMFUNC_NAME; other states are errors.  */
3042 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3044 if (codecomposer_syntax
)
3046 switch (asmfunc_state
)
3048 case OUTSIDE_ASMFUNC
:
3049 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3052 case WAITING_ASMFUNC_NAME
:
3053 as_bad (_(".asmfunc repeated."));
3056 case WAITING_ENDASMFUNC
:
3057 as_bad (_(".asmfunc without function."));
3060 demand_empty_rest_of_line ();
3063 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
/* Handle .endasmfunc (CCS syntax): close a WAITING_ENDASMFUNC block
   (returning the state machine to OUTSIDE_ASMFUNC and emitting the
   function-end debug info); other states are errors.  */
3067 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3069 if (codecomposer_syntax
)
3071 switch (asmfunc_state
)
3073 case OUTSIDE_ASMFUNC
:
3074 as_bad (_(".endasmfunc without a .asmfunc."));
3077 case WAITING_ASMFUNC_NAME
:
3078 as_bad (_(".endasmfunc without function."));
3081 case WAITING_ENDASMFUNC
:
3082 asmfunc_state
= OUTSIDE_ASMFUNC
;
3083 asmfunc_debug (NULL
);
3086 demand_empty_rest_of_line ();
3089 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
/* Handle .def (CCS syntax only); an error without -mccs.  */
3093 s_ccs_def (int name
)
3095 if (codecomposer_syntax
)
3098 as_bad (_(".def pseudo-op only available with -mccs flag."));
3101 /* Directives: Literal pools. */
/* Return the literal pool associated with the current (section,
   subsection) pair, or NULL when none exists (NULL return path elided
   by extraction).  */
3103 static literal_pool
*
3104 find_literal_pool (void)
3106 literal_pool
* pool
;
/* Linear scan of the global pool list.  */
3108 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3110 if (pool
->section
== now_seg
3111 && pool
->sub_section
== now_subseg
)
/* Return the literal pool for the current section/subsection, creating
   and list-linking a fresh one when none exists; also (re)creates the
   pool's fake label symbol and assigns a unique id.
   NOTE(review): some original lines elided; tokens preserved verbatim.  */
3118 static literal_pool
*
3119 find_or_make_literal_pool (void)
3121 /* Next literal pool ID number. */
3122 static unsigned int latest_pool_num
= 1;
3123 literal_pool
* pool
;
3125 pool
= find_literal_pool ();
3129 /* Create a new pool. */
3130 pool
= (literal_pool
*) xmalloc (sizeof (* pool
));
/* Initialize the new pool's fields; default alignment is 2 (word).  */
3134 pool
->next_free_entry
= 0;
3135 pool
->section
= now_seg
;
3136 pool
->sub_section
= now_subseg
;
3137 pool
->next
= list_of_pools
;
3138 pool
->symbol
= NULL
;
3139 pool
->alignment
= 2;
3141 /* Add it to the list. */
3142 list_of_pools
= pool
;
3145 /* New pools, and emptied pools, will have a NULL symbol. */
3146 if (pool
->symbol
== NULL
)
3148 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3149 (valueT
) 0, &zero_address_frag
);
3150 pool
->id
= latest_pool_num
++;
3157 /* Add the literal in the global 'inst'
3158 structure to the relevant literal pool. */
3161 add_to_lit_pool (unsigned int nbytes
)
3163 #define PADDING_SLOT 0x1
3164 #define LIT_ENTRY_SIZE_MASK 0xFF
3165 literal_pool
* pool
;
3166 unsigned int entry
, pool_size
= 0;
3167 bfd_boolean padding_slot_p
= FALSE
;
3173 imm1
= inst
.operands
[1].imm
;
3174 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3175 : inst
.reloc
.exp
.X_unsigned
? 0
3176 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3177 if (target_big_endian
)
3180 imm2
= inst
.operands
[1].imm
;
3184 pool
= find_or_make_literal_pool ();
3186 /* Check if this literal value is already in the pool. */
3187 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3191 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3192 && (inst
.reloc
.exp
.X_op
== O_constant
)
3193 && (pool
->literals
[entry
].X_add_number
3194 == inst
.reloc
.exp
.X_add_number
)
3195 && (pool
->literals
[entry
].X_md
== nbytes
)
3196 && (pool
->literals
[entry
].X_unsigned
3197 == inst
.reloc
.exp
.X_unsigned
))
3200 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3201 && (inst
.reloc
.exp
.X_op
== O_symbol
)
3202 && (pool
->literals
[entry
].X_add_number
3203 == inst
.reloc
.exp
.X_add_number
)
3204 && (pool
->literals
[entry
].X_add_symbol
3205 == inst
.reloc
.exp
.X_add_symbol
)
3206 && (pool
->literals
[entry
].X_op_symbol
3207 == inst
.reloc
.exp
.X_op_symbol
)
3208 && (pool
->literals
[entry
].X_md
== nbytes
))
3211 else if ((nbytes
== 8)
3212 && !(pool_size
& 0x7)
3213 && ((entry
+ 1) != pool
->next_free_entry
)
3214 && (pool
->literals
[entry
].X_op
== O_constant
)
3215 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3216 && (pool
->literals
[entry
].X_unsigned
3217 == inst
.reloc
.exp
.X_unsigned
)
3218 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3219 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3220 && (pool
->literals
[entry
+ 1].X_unsigned
3221 == inst
.reloc
.exp
.X_unsigned
))
3224 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3225 if (padding_slot_p
&& (nbytes
== 4))
3231 /* Do we need to create a new entry? */
3232 if (entry
== pool
->next_free_entry
)
3234 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3236 inst
.error
= _("literal pool overflow");
3242 /* For 8-byte entries, we align to an 8-byte boundary,
3243 and split it into two 4-byte entries, because on 32-bit
3244 host, 8-byte constants are treated as big num, thus
3245 saved in "generic_bignum" which will be overwritten
3246 by later assignments.
3248 We also need to make sure there is enough space for
3251 We also check to make sure the literal operand is a
3253 if (!(inst
.reloc
.exp
.X_op
== O_constant
3254 || inst
.reloc
.exp
.X_op
== O_big
))
3256 inst
.error
= _("invalid type for literal pool");
3259 else if (pool_size
& 0x7)
3261 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3263 inst
.error
= _("literal pool overflow");
3267 pool
->literals
[entry
] = inst
.reloc
.exp
;
3268 pool
->literals
[entry
].X_add_number
= 0;
3269 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3270 pool
->next_free_entry
+= 1;
3273 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3275 inst
.error
= _("literal pool overflow");
3279 pool
->literals
[entry
] = inst
.reloc
.exp
;
3280 pool
->literals
[entry
].X_op
= O_constant
;
3281 pool
->literals
[entry
].X_add_number
= imm1
;
3282 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3283 pool
->literals
[entry
++].X_md
= 4;
3284 pool
->literals
[entry
] = inst
.reloc
.exp
;
3285 pool
->literals
[entry
].X_op
= O_constant
;
3286 pool
->literals
[entry
].X_add_number
= imm2
;
3287 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3288 pool
->literals
[entry
].X_md
= 4;
3289 pool
->alignment
= 3;
3290 pool
->next_free_entry
+= 1;
3294 pool
->literals
[entry
] = inst
.reloc
.exp
;
3295 pool
->literals
[entry
].X_md
= 4;
3299 /* PR ld/12974: Record the location of the first source line to reference
3300 this entry in the literal pool. If it turns out during linking that the
3301 symbol does not exist we will be able to give an accurate line number for
3302 the (first use of the) missing reference. */
3303 if (debug_type
== DEBUG_DWARF2
)
3304 dwarf2_where (pool
->locs
+ entry
);
3306 pool
->next_free_entry
+= 1;
3308 else if (padding_slot_p
)
3310 pool
->literals
[entry
] = inst
.reloc
.exp
;
3311 pool
->literals
[entry
].X_md
= nbytes
;
3314 inst
.reloc
.exp
.X_op
= O_symbol
;
3315 inst
.reloc
.exp
.X_add_number
= pool_size
;
3316 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
/* CCS-syntax hook: when a label appears while waiting for a .asmfunc
   name, validate it, emit the function-start debug info, and move the
   asmfunc state machine to WAITING_ENDASMFUNC.
   NOTE(review): some original lines elided; tokens preserved verbatim.  */
3322 tc_start_label_without_colon (void)
3324 bfd_boolean ret
= TRUE
;
3326 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3328 const char *label
= input_line_pointer
;
/* Scan back/forward over the label text (bounds logic partially
   elided by extraction).  */
3330 while (!is_end_of_line
[(int) label
[-1]])
3335 as_bad (_("Invalid label '%s'"), label
);
3339 asmfunc_debug (label
);
3341 asmfunc_state
= WAITING_ENDASMFUNC
;
3347 /* Can't use symbol_new here, so have to create a symbol and then at
3348 a later date assign it a value. Thats what these functions do. */
/* Give an existing symbol SYMBOLP a name, segment, value and fragment,
   then insert it into the symbol chain and run the object/target hooks.
   (Used because symbol_new cannot be called for pre-created symbols —
   see the comment preceding this function in the original file.)
   NOTE(review): extraction artifact — "¬es" below is almost certainly
   a mis-encoded "&notes" (HTML-entity mangling of the notes obstack);
   preserved verbatim here — TODO confirm against pristine tc-arm.c.  */
3351 symbol_locate (symbolS
* symbolP
,
3352 const char * name
, /* It is copied, the caller can modify. */
3353 segT segment
, /* Segment identifier (SEG_<something>). */
3354 valueT valu
, /* Symbol value. */
3355 fragS
* frag
) /* Associated fragment. */
3358 char * preserved_copy_of_name
;
/* Copy NAME into the notes obstack so the caller may reuse its buffer.  */
3360 name_length
= strlen (name
) + 1; /* +1 for \0. */
3361 obstack_grow (¬es
, name
, name_length
)
;
3362 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3364 #ifdef tc_canonicalize_symbol_name
3365 preserved_copy_of_name
=
3366 tc_canonicalize_symbol_name (preserved_copy_of_name
);
/* Fill in the symbol's identity.  */
3369 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3371 S_SET_SEGMENT (symbolP
, segment
);
3372 S_SET_VALUE (symbolP
, valu
);
3373 symbol_clear_list_pointers (symbolP
);
3375 symbol_set_frag (symbolP
, frag
);
3377 /* Link to end of symbol chain. */
3379 extern int symbol_table_frozen
;
3381 if (symbol_table_frozen
)
3385 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
/* Run object-format and target-specific new-symbol hooks.  */
3387 obj_symbol_new_hook (symbolP
);
3389 #ifdef tc_symbol_new_hook
3390 tc_symbol_new_hook (symbolP
);
3394 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3395 #endif /* DEBUG_SYMS */
/* Handle the .ltorg directive: dump the current literal pool at this
   point — align, emit a $d mapping symbol, locate and insert the pool's
   label symbol, emit every literal, then mark the pool empty.
   NOTE(review): some original lines elided; tokens preserved verbatim.  */
3399 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3402 literal_pool
* pool
;
3405 pool
= find_literal_pool ();
/* Nothing to do when there is no pool, no symbol, or no entries.  */
3407 || pool
->symbol
== NULL
3408 || pool
->next_free_entry
== 0)
3411 /* Align pool as you have word accesses.
3412 Only make a frag if we have to. */
3414 frag_align (pool
->alignment
, 0, 0);
3416 record_alignment (now_seg
, 2);
/* Pool contents are data: update mapstate and emit a $d symbol.  */
3419 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3420 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
/* Name the pool label "$$lit_\002<id>".  */
3422 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3424 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3425 (valueT
) frag_now_fix (), frag_now
);
3426 symbol_table_insert (pool
->symbol
);
3428 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3430 #if defined OBJ_COFF || defined OBJ_ELF
3431 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
/* Emit every literal entry, with DWARF line info when enabled.  */
3434 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3437 if (debug_type
== DEBUG_DWARF2
)
3438 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3440 /* First output the expression in the instruction to the pool. */
3441 emit_expr (&(pool
->literals
[entry
]),
3442 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3445 /* Mark the pool as empty. */
3446 pool
->next_free_entry
= 0;
3447 pool
->symbol
= NULL
;
3451 /* Forward declarations for functions below, in the MD interface
3453 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3454 static valueT
create_unwind_entry (int);
3455 static void start_unwind_section (const segT
, int);
3456 static void add_unwind_opcode (valueT
, int);
3457 static void flush_pending_unwind (void);
3459 /* Directives: Data. */
3462 s_arm_elf_cons (int nbytes
)
3466 #ifdef md_flush_pending_output
3467 md_flush_pending_output ();
3470 if (is_it_end_of_statement ())
3472 demand_empty_rest_of_line ();
3476 #ifdef md_cons_align
3477 md_cons_align (nbytes
);
3480 mapping_state (MAP_DATA
);
3484 char *base
= input_line_pointer
;
3488 if (exp
.X_op
!= O_symbol
)
3489 emit_expr (&exp
, (unsigned int) nbytes
);
3492 char *before_reloc
= input_line_pointer
;
3493 reloc
= parse_reloc (&input_line_pointer
);
3496 as_bad (_("unrecognized relocation suffix"));
3497 ignore_rest_of_line ();
3500 else if (reloc
== BFD_RELOC_UNUSED
)
3501 emit_expr (&exp
, (unsigned int) nbytes
);
3504 reloc_howto_type
*howto
= (reloc_howto_type
*)
3505 bfd_reloc_type_lookup (stdoutput
,
3506 (bfd_reloc_code_real_type
) reloc
);
3507 int size
= bfd_get_reloc_size (howto
);
3509 if (reloc
== BFD_RELOC_ARM_PLT32
)
3511 as_bad (_("(plt) is only valid on branch targets"));
3512 reloc
= BFD_RELOC_UNUSED
;
3517 as_bad (_("%s relocations do not fit in %d bytes"),
3518 howto
->name
, nbytes
);
3521 /* We've parsed an expression stopping at O_symbol.
3522 But there may be more expression left now that we
3523 have parsed the relocation marker. Parse it again.
3524 XXX Surely there is a cleaner way to do this. */
3525 char *p
= input_line_pointer
;
3527 char *save_buf
= (char *) alloca (input_line_pointer
- base
);
3528 memcpy (save_buf
, base
, input_line_pointer
- base
);
3529 memmove (base
+ (input_line_pointer
- before_reloc
),
3530 base
, before_reloc
- base
);
3532 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3534 memcpy (base
, save_buf
, p
- base
);
3536 offset
= nbytes
- size
;
3537 p
= frag_more (nbytes
);
3538 memset (p
, 0, nbytes
);
3539 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3540 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3545 while (*input_line_pointer
++ == ',');
3547 /* Put terminator back into stream. */
3548 input_line_pointer
--;
3549 demand_empty_rest_of_line ();
3552 /* Emit an expression containing a 32-bit thumb instruction.
3553 Implementation based on put_thumb32_insn. */
/* Emit an expression containing a 32-bit Thumb instruction as two
   16-bit halfwords, high half first (based on put_thumb32_insn).  */
3556 emit_thumb32_expr (expressionS
* exp
)
/* Copy of EXP carrying the upper 16 bits.  */
3558 expressionS exp_high
= *exp
;
3560 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3561 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
/* Then the lower 16 bits via the original expression.  */
3562 exp
->X_add_number
&= 0xffff;
3563 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
3566 /* Guess the instruction size based on the opcode. */
/* Guess the Thumb instruction size (in bytes) from OPCODE's value range;
   return values elided by extraction — presumably 2 / 4 / error.  */
3569 thumb_insn_size (int opcode
)
3571 if ((unsigned int) opcode
< 0xe800u
)
3573 else if ((unsigned int) opcode
>= 0xe8000000u
)
/* Emit a .inst-style constant instruction EXP of NBYTES (0 = guess size
   from the opcode).  Validates size, updates the IT-block state machine,
   and byte-swaps 32-bit Thumb on little-endian via emit_thumb32_expr.
   NOTE(review): some original lines elided; tokens preserved verbatim.  */
3580 emit_insn (expressionS
*exp
, int nbytes
)
3584 if (exp
->X_op
== O_constant
)
/* NBYTES == 0: infer the Thumb size from the opcode value.  */
3589 size
= thumb_insn_size (exp
->X_add_number
);
3593 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3595 as_bad (_(".inst.n operand too big. "\
3596 "Use .inst.w instead"));
/* Keep the IT-block FSM consistent with a hand-emitted instruction.  */
3601 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3602 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3604 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
/* 32-bit Thumb on little-endian needs halfword-swapped emission.  */
3606 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3607 emit_thumb32_expr (exp
);
3609 emit_expr (exp
, (unsigned int) size
);
3611 it_fsm_post_encode ();
3615 as_bad (_("cannot determine Thumb instruction size. " \
3616 "Use .inst.n/.inst.w instead"));
3619 as_bad (_("constant expression required"));
3624 /* Like s_arm_elf_cons but do not use md_cons_align and
3625 set the mapping state to MAP_ARM/MAP_THUMB. */
/* Handle .inst/.inst.n/.inst.w: like s_arm_elf_cons but without
   md_cons_align, setting the mapping state to MAP_ARM/MAP_THUMB and
   emitting each comma-separated expression via emit_insn.
   NOTE(review): some original lines elided; tokens preserved verbatim.  */
3628 s_arm_elf_inst (int nbytes
)
3630 if (is_it_end_of_statement ())
3632 demand_empty_rest_of_line ();
3636 /* Calling mapping_state () here will not change ARM/THUMB,
3637 but will ensure not to be in DATA state. */
3640 mapping_state (MAP_THUMB
);
/* Width suffixes (.n/.w) only make sense in Thumb mode.  */
3645 as_bad (_("width suffixes are invalid in ARM mode"));
3646 ignore_rest_of_line ();
3652 mapping_state (MAP_ARM
);
3661 if (! emit_insn (& exp
, nbytes
))
3663 ignore_rest_of_line ();
/* Loop over comma-separated operands.  */
3667 while (*input_line_pointer
++ == ',');
3669 /* Put terminator back into stream. */
3670 input_line_pointer
--;
3671 demand_empty_rest_of_line ();
3674 /* Parse a .rel31 directive. */
/* Parse a .rel31 directive: "<0|1>, <expression>"; emits a 4-byte datum
   whose top bit is the parsed flag, fixed up with BFD_RELOC_ARM_PREL31.
   NOTE(review): some original lines elided; tokens preserved verbatim.  */
3677 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
/* First operand: literal '0' or '1' selecting the high bit.  */
3684 if (*input_line_pointer
== '1')
3685 highbit
= 0x80000000;
3686 else if (*input_line_pointer
!= '0')
3687 as_bad (_("expected 0 or 1"));
3689 input_line_pointer
++;
3690 if (*input_line_pointer
!= ',')
3691 as_bad (_("missing comma"));
3692 input_line_pointer
++;
3694 #ifdef md_flush_pending_output
3695 md_flush_pending_output ();
3698 #ifdef md_cons_align
/* The emitted word is data.  */
3702 mapping_state (MAP_DATA
);
3707 md_number_to_chars (p
, highbit
, 4);
3708 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3709 BFD_RELOC_ARM_PREL31
);
3711 demand_empty_rest_of_line ();
3714 /* Directives: AEABI stack-unwind tables. */
3716 /* Parse an unwind_fnstart directive. Simply records the current location. */
/* Parse an unwind_fnstart directive: record the current location as the
   function start and reset all per-function unwind state.  */
3719 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3721 demand_empty_rest_of_line ();
3722 if (unwind
.proc_start
)
3724 as_bad (_("duplicate .fnstart directive"));
3728 /* Mark the start of the function. */
3729 unwind
.proc_start
= expr_build_dot ();
3731 /* Reset the rest of the unwind info. */
3732 unwind
.opcode_count
= 0;
3733 unwind
.table_entry
= NULL
;
3734 unwind
.personality_routine
= NULL
;
3735 unwind
.personality_index
= -1;
3736 unwind
.frame_size
= 0;
3737 unwind
.fp_offset
= 0;
3738 unwind
.fp_reg
= REG_SP
;
3740 unwind
.sp_restored
= 0;
3744 /* Parse a handlerdata directive. Creates the exception handling table entry
3745 for the function. */
/* Parse a .handlerdata directive: create the exception-handling table
   entry for the current function (requires a preceding .fnstart).  */
3748 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3750 demand_empty_rest_of_line ();
3751 if (!unwind
.proc_start
)
3752 as_bad (MISSING_FNSTART
);
3754 if (unwind
.table_entry
)
3755 as_bad (_("duplicate .handlerdata directive"));
3757 create_unwind_entry (1);
3760 /* Parse an unwind_fnend directive. Generates the index table entry. */
/* Parse an unwind_fnend directive: generate the two-word index table
   entry for the function (PREL31 offset of the start plus either an
   inline unwind word or a PREL31 pointer to the table entry), and mark
   linker dependencies on the EHABI personality routines.
   NOTE(review): some original lines elided; tokens preserved verbatim.  */
3763 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3768 unsigned int marked_pr_dependency
;
3770 demand_empty_rest_of_line ();
3772 if (!unwind
.proc_start
)
3774 as_bad (_(".fnend directive without .fnstart"));
3778 /* Add eh table entry. */
3779 if (unwind
.table_entry
== NULL
)
3780 val
= create_unwind_entry (0);
3784 /* Add index table entry. This is two words. */
3785 start_unwind_section (unwind
.saved_seg
, 1);
3786 frag_align (2, 0, 0);
3787 record_alignment (now_seg
, 2);
3789 ptr
= frag_more (8);
3791 where
= frag_now_fix () - 8;
3793 /* Self relative offset of the function start. */
3794 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3795 BFD_RELOC_ARM_PREL31
);
3797 /* Indicate dependency on EHABI-defined personality routines to the
3798 linker, if it hasn't been done already. */
3799 marked_pr_dependency
3800 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3801 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3802 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3804 static const char *const name
[] =
3806 "__aeabi_unwind_cpp_pr0",
3807 "__aeabi_unwind_cpp_pr1",
3808 "__aeabi_unwind_cpp_pr2"
/* A BFD_RELOC_NONE fix just records the reference for the linker.  */
3810 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3811 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3812 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3813 |= 1 << unwind
.personality_index
;
3817 /* Inline exception table entry. */
3818 md_number_to_chars (ptr
+ 4, val
, 4);
3820 /* Self relative offset of the table entry. */
3821 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3822 BFD_RELOC_ARM_PREL31
);
3824 /* Restore the original section. */
3825 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3827 unwind
.proc_start
= NULL
;
3831 /* Parse an unwind_cantunwind directive. */
/* Parse an unwind_cantunwind directive: mark the frame as not unwindable
   by setting personality_index to -2 (EXIDX_CANTUNWIND).  */
3834 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3836 demand_empty_rest_of_line ();
3837 if (!unwind
.proc_start
)
3838 as_bad (MISSING_FNSTART
);
3840 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3841 as_bad (_("personality routine specified for cantunwind frame"));
3843 unwind
.personality_index
= -2;
3847 /* Parse a personalityindex directive. */
/* Parse a personalityindex directive: accept a constant 0..15 and record
   it as the personality routine index for the current frame.  */
3850 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3854 if (!unwind
.proc_start
)
3855 as_bad (MISSING_FNSTART
);
3857 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3858 as_bad (_("duplicate .personalityindex directive"));
3862 if (exp
.X_op
!= O_constant
3863 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3865 as_bad (_("bad personality routine number"));
3866 ignore_rest_of_line ();
3870 unwind
.personality_index
= exp
.X_add_number
;
3872 demand_empty_rest_of_line ();
3876 /* Parse a personality directive. */
/* Parse a personality directive: read a symbol name and record it as the
   personality routine for the current frame.  */
3879 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3883 if (!unwind
.proc_start
)
3884 as_bad (MISSING_FNSTART
);
3886 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3887 as_bad (_("duplicate .personality directive"));
3889 c
= get_symbol_name (& name
);
3890 p
= input_line_pointer
;
3892 ++ input_line_pointer
;
3893 unwind
.personality_routine
= symbol_find_or_make (name
);
3895 demand_empty_rest_of_line ();
3899 /* Parse a directive saving core registers. */
/* Parse a directive saving core registers: parse the register list,
   fold a preceding .unwind_movsp ip into an sp save, then emit either
   the short pop-r4..r(4+n)[,r14] opcode or the long bitmask form, plus
   a separate opcode for r0-r3, and account the pushed bytes.
   NOTE(review): some original lines elided; tokens preserved verbatim.  */
3902 s_arm_unwind_save_core (void)
3908 range
= parse_reg_list (&input_line_pointer
);
3911 as_bad (_("expected register list"));
3912 ignore_rest_of_line ();
3916 demand_empty_rest_of_line ();
3918 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3919 into .unwind_save {..., sp...}. We aren't bothered about the value of
3920 ip because it is clobbered by calls. */
3921 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3922 && (range
& 0x3000) == 0x1000)
/* Drop the movsp opcode and substitute sp (bit 13) for ip (bit 12).  */
3924 unwind
.opcode_count
--;
3925 unwind
.sp_restored
= 0;
3926 range
= (range
| 0x2000) & ~0x1000;
3927 unwind
.pending_offset
= 0;
3933 /* See if we can use the short opcodes. These pop a block of up to 8
3934 registers starting with r4, plus maybe r14. */
3935 for (n
= 0; n
< 8; n
++)
3937 /* Break at the first non-saved register. */
3938 if ((range
& (1 << (n
+ 4))) == 0)
3941 /* See if there are any other bits set. */
3942 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3944 /* Use the long form. */
3945 op
= 0x8000 | ((range
>> 4) & 0xfff);
3946 add_unwind_opcode (op
, 2);
3950 /* Use the short form. */
3952 op
= 0xa8; /* Pop r14. */
3954 op
= 0xa0; /* Do not pop r14. */
3956 add_unwind_opcode (op
, 1);
/* Separate opcode for any of r0-r3.  */
3963 op
= 0xb100 | (range
& 0xf);
3964 add_unwind_opcode (op
, 2);
3967 /* Record the number of bytes pushed. */
3968 for (n
= 0; n
< 16; n
++)
3970 if (range
& (1 << n
))
3971 unwind
.frame_size
+= 4;
3976 /* Parse a directive saving FPA registers. */
/* Parse a directive saving FPA registers: REG is the first register,
   an optional ", <constant>" gives the count (1..4); emits the short
   (0xb4|n-1) or long (0xc800 form) opcode and accounts 12 bytes/reg.
   NOTE(review): some original lines elided; tokens preserved verbatim.  */
3979 s_arm_unwind_save_fpa (int reg
)
3985 /* Get Number of registers to transfer. */
3986 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3989 exp
.X_op
= O_illegal
;
3991 if (exp
.X_op
!= O_constant
)
3993 as_bad (_("expected , <constant>"));
3994 ignore_rest_of_line ();
3998 num_regs
= exp
.X_add_number
;
4000 if (num_regs
< 1 || num_regs
> 4)
4002 as_bad (_("number of registers must be in the range [1:4]"));
4003 ignore_rest_of_line ();
4007 demand_empty_rest_of_line ();
/* Short form.  */
4012 op
= 0xb4 | (num_regs
- 1);
4013 add_unwind_opcode (op
, 1);
/* Long form encodes the start register too.  */
4018 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4019 add_unwind_opcode (op
, 2);
/* FPA registers are 12 bytes each.  */
4021 unwind
.frame_size
+= num_regs
* 12;
4025 /* Parse a directive saving VFP registers for ARMv6 and above. */
/* Parse a directive saving VFP registers for ARMv6 and above: parse a
   D-register list and emit FSTMD-style unwind opcodes, splitting the
   range into a VFPv3 (d16-d31, 0xc800) part and a d0-d15 (0xc900) part.
   NOTE(review): some original lines elided; tokens preserved verbatim.  */
4028 s_arm_unwind_save_vfp_armv6 (void)
4033 int num_vfpv3_regs
= 0;
4034 int num_regs_below_16
;
4036 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
4039 as_bad (_("expected register list"));
4040 ignore_rest_of_line ();
4044 demand_empty_rest_of_line ();
4046 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4047 than FSTMX/FLDMX-style ones). */
4049 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4051 num_vfpv3_regs
= count
;
4052 else if (start
+ count
> 16)
4053 num_vfpv3_regs
= start
+ count
- 16;
4055 if (num_vfpv3_regs
> 0)
4057 int start_offset
= start
> 16 ? start
- 16 : 0;
4058 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4059 add_unwind_opcode (op
, 2);
4062 /* Generate opcode for registers numbered in the range 0 .. 15. */
4063 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4064 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4065 if (num_regs_below_16
> 0)
4067 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4068 add_unwind_opcode (op
, 2);
/* D registers are 8 bytes each.  */
4071 unwind
.frame_size
+= count
* 8;
4075 /* Parse a directive saving VFP registers for pre-ARMv6. */
/* Parse a directive saving VFP registers for pre-ARMv6 (FSTMX-style):
   short opcode (0xb8) for d8-based lists, long form (0xb300) otherwise;
   FSTMX pads with an extra word, hence "+ 4" below.
   NOTE(review): extraction artifact — "®" below is almost certainly a
   mis-encoded "&reg," (HTML-entity mangling); preserved verbatim —
   TODO confirm against pristine tc-arm.c.  */
4078 s_arm_unwind_save_vfp (void)
4084 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
4087 as_bad (_("expected register list"));
4088 ignore_rest_of_line ();
4092 demand_empty_rest_of_line ();
/* Short form.  */
4097 op
= 0xb8 | (count
- 1);
4098 add_unwind_opcode (op
, 1);
/* Long form encodes the start register too.  */
4103 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4104 add_unwind_opcode (op
, 2);
4106 unwind
.frame_size
+= count
* 8 + 4;
4110 /* Parse a directive saving iWMMXt data registers. */
4113 s_arm_unwind_save_mmxwr (void)
4121 if (*input_line_pointer
== '{')
4122 input_line_pointer
++;
4126 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4130 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4135 as_tsktsk (_("register list not in ascending order"));
4138 if (*input_line_pointer
== '-')
4140 input_line_pointer
++;
4141 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4144 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4147 else if (reg
>= hi_reg
)
4149 as_bad (_("bad register range"));
4152 for (; reg
< hi_reg
; reg
++)
4156 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4158 skip_past_char (&input_line_pointer
, '}');
4160 demand_empty_rest_of_line ();
4162 /* Generate any deferred opcodes because we're going to be looking at
4164 flush_pending_unwind ();
4166 for (i
= 0; i
< 16; i
++)
4168 if (mask
& (1 << i
))
4169 unwind
.frame_size
+= 8;
4172 /* Attempt to combine with a previous opcode. We do this because gcc
4173 likes to output separate unwind directives for a single block of
4175 if (unwind
.opcode_count
> 0)
4177 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4178 if ((i
& 0xf8) == 0xc0)
4181 /* Only merge if the blocks are contiguous. */
4184 if ((mask
& 0xfe00) == (1 << 9))
4186 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4187 unwind
.opcode_count
--;
4190 else if (i
== 6 && unwind
.opcode_count
>= 2)
4192 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4196 op
= 0xffff << (reg
- 1);
4198 && ((mask
& op
) == (1u << (reg
- 1))))
4200 op
= (1 << (reg
+ i
+ 1)) - 1;
4201 op
&= ~((1 << reg
) - 1);
4203 unwind
.opcode_count
-= 2;
4210 /* We want to generate opcodes in the order the registers have been
4211 saved, ie. descending order. */
4212 for (reg
= 15; reg
>= -1; reg
--)
4214 /* Save registers in blocks. */
4216 || !(mask
& (1 << reg
)))
4218 /* We found an unsaved reg. Generate opcodes to save the
4225 op
= 0xc0 | (hi_reg
- 10);
4226 add_unwind_opcode (op
, 1);
4231 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4232 add_unwind_opcode (op
, 2);
4241 ignore_rest_of_line ();
/* Parse a directive saving iWMMXt control-group (wCGR) registers:
   accepts an optional brace-enclosed, comma/range-separated list, builds
   a register mask, accounts 4 bytes per saved register, and emits one
   2-byte unwind opcode.
   NOTE(review): some original lines elided; tokens preserved verbatim.  */
4245 s_arm_unwind_save_mmxwcg (void)
4252 if (*input_line_pointer
== '{')
4253 input_line_pointer
++;
4255 skip_whitespace (input_line_pointer
);
4259 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4263 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4269 as_tsktsk (_("register list not in ascending order"));
/* Optional "reg-reg" range syntax.  */
4272 if (*input_line_pointer
== '-')
4274 input_line_pointer
++;
4275 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4278 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4281 else if (reg
>= hi_reg
)
4283 as_bad (_("bad register range"));
4286 for (; reg
< hi_reg
; reg
++)
4290 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4292 skip_past_char (&input_line_pointer
, '}');
4294 demand_empty_rest_of_line ();
4296 /* Generate any deferred opcodes because we're going to be looking at
4298 flush_pending_unwind ();
/* Account 4 bytes for each register present in the mask.  */
4300 for (reg
= 0; reg
< 16; reg
++)
4302 if (mask
& (1 << reg
))
4303 unwind
.frame_size
+= 4;
4306 add_unwind_opcode (op
, 2);
4309 ignore_rest_of_line ();
4313 /* Parse an unwind_save directive.
4314 If the argument is non-zero, this is a .vsave directive. */
4317 s_arm_unwind_save (int arch_v6
)
4320 struct reg_entry
*reg
;
4321 bfd_boolean had_brace
= FALSE
;
4323 if (!unwind
.proc_start
)
4324 as_bad (MISSING_FNSTART
);
4326 /* Figure out what sort of save we have. */
4327 peek
= input_line_pointer
;
4335 reg
= arm_reg_parse_multi (&peek
);
4339 as_bad (_("register expected"));
4340 ignore_rest_of_line ();
4349 as_bad (_("FPA .unwind_save does not take a register list"));
4350 ignore_rest_of_line ();
4353 input_line_pointer
= peek
;
4354 s_arm_unwind_save_fpa (reg
->number
);
4358 s_arm_unwind_save_core ();
4363 s_arm_unwind_save_vfp_armv6 ();
4365 s_arm_unwind_save_vfp ();
4368 case REG_TYPE_MMXWR
:
4369 s_arm_unwind_save_mmxwr ();
4372 case REG_TYPE_MMXWCG
:
4373 s_arm_unwind_save_mmxwcg ();
4377 as_bad (_(".unwind_save does not support this kind of register"));
4378 ignore_rest_of_line ();
4383 /* Parse an unwind_movsp directive. */
4386 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4392 if (!unwind
.proc_start
)
4393 as_bad (MISSING_FNSTART
);
4395 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4398 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4399 ignore_rest_of_line ();
4403 /* Optional constant. */
4404 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4406 if (immediate_for_directive (&offset
) == FAIL
)
4412 demand_empty_rest_of_line ();
4414 if (reg
== REG_SP
|| reg
== REG_PC
)
4416 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4420 if (unwind
.fp_reg
!= REG_SP
)
4421 as_bad (_("unexpected .unwind_movsp directive"));
4423 /* Generate opcode to restore the value. */
4425 add_unwind_opcode (op
, 1);
4427 /* Record the information for later. */
4428 unwind
.fp_reg
= reg
;
4429 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4430 unwind
.sp_restored
= 1;
4433 /* Parse an unwind_pad directive. */
4436 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4440 if (!unwind
.proc_start
)
4441 as_bad (MISSING_FNSTART
);
4443 if (immediate_for_directive (&offset
) == FAIL
)
4448 as_bad (_("stack increment must be multiple of 4"));
4449 ignore_rest_of_line ();
4453 /* Don't generate any opcodes, just record the details for later. */
4454 unwind
.frame_size
+= offset
;
4455 unwind
.pending_offset
+= offset
;
4457 demand_empty_rest_of_line ();
4460 /* Parse an unwind_setfp directive. */
4463 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4469 if (!unwind
.proc_start
)
4470 as_bad (MISSING_FNSTART
);
4472 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4473 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4476 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4478 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4480 as_bad (_("expected <reg>, <reg>"));
4481 ignore_rest_of_line ();
4485 /* Optional constant. */
4486 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4488 if (immediate_for_directive (&offset
) == FAIL
)
4494 demand_empty_rest_of_line ();
4496 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4498 as_bad (_("register must be either sp or set by a previous"
4499 "unwind_movsp directive"));
4503 /* Don't generate any opcodes, just record the information for later. */
4504 unwind
.fp_reg
= fp_reg
;
4506 if (sp_reg
== REG_SP
)
4507 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4509 unwind
.fp_offset
-= offset
;
4512 /* Parse an unwind_raw directive. */
4515 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4518 /* This is an arbitrary limit. */
4519 unsigned char op
[16];
4522 if (!unwind
.proc_start
)
4523 as_bad (MISSING_FNSTART
);
4526 if (exp
.X_op
== O_constant
4527 && skip_past_comma (&input_line_pointer
) != FAIL
)
4529 unwind
.frame_size
+= exp
.X_add_number
;
4533 exp
.X_op
= O_illegal
;
4535 if (exp
.X_op
!= O_constant
)
4537 as_bad (_("expected <offset>, <opcode>"));
4538 ignore_rest_of_line ();
4544 /* Parse the opcode. */
4549 as_bad (_("unwind opcode too long"));
4550 ignore_rest_of_line ();
4552 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4554 as_bad (_("invalid unwind opcode"));
4555 ignore_rest_of_line ();
4558 op
[count
++] = exp
.X_add_number
;
4560 /* Parse the next byte. */
4561 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4567 /* Add the opcode bytes in reverse order. */
4569 add_unwind_opcode (op
[count
], 1);
4571 demand_empty_rest_of_line ();
4575 /* Parse a .eabi_attribute directive. */
4578 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4580 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4582 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4583 attributes_set_explicitly
[tag
] = 1;
4586 /* Emit a tls fix for the symbol. */
4589 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4593 #ifdef md_flush_pending_output
4594 md_flush_pending_output ();
4597 #ifdef md_cons_align
4601 /* Since we're just labelling the code, there's no need to define a
4604 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4605 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4606 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4607 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4609 #endif /* OBJ_ELF */
4611 static void s_arm_arch (int);
4612 static void s_arm_object_arch (int);
4613 static void s_arm_cpu (int);
4614 static void s_arm_fpu (int);
4615 static void s_arm_arch_extension (int);
4620 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4627 if (exp
.X_op
== O_symbol
)
4628 exp
.X_op
= O_secrel
;
4630 emit_expr (&exp
, 4);
4632 while (*input_line_pointer
++ == ',');
4634 input_line_pointer
--;
4635 demand_empty_rest_of_line ();
4639 /* This table describes all the machine specific pseudo-ops the assembler
4640 has to support. The fields are:
4641 pseudo-op name without dot
4642 function to call to execute this pseudo-op
4643 Integer arg to pass to the function. */
4645 const pseudo_typeS md_pseudo_table
[] =
4647 /* Never called because '.req' does not start a line. */
4648 { "req", s_req
, 0 },
4649 /* Following two are likewise never called. */
4652 { "unreq", s_unreq
, 0 },
4653 { "bss", s_bss
, 0 },
4654 { "align", s_align_ptwo
, 2 },
4655 { "arm", s_arm
, 0 },
4656 { "thumb", s_thumb
, 0 },
4657 { "code", s_code
, 0 },
4658 { "force_thumb", s_force_thumb
, 0 },
4659 { "thumb_func", s_thumb_func
, 0 },
4660 { "thumb_set", s_thumb_set
, 0 },
4661 { "even", s_even
, 0 },
4662 { "ltorg", s_ltorg
, 0 },
4663 { "pool", s_ltorg
, 0 },
4664 { "syntax", s_syntax
, 0 },
4665 { "cpu", s_arm_cpu
, 0 },
4666 { "arch", s_arm_arch
, 0 },
4667 { "object_arch", s_arm_object_arch
, 0 },
4668 { "fpu", s_arm_fpu
, 0 },
4669 { "arch_extension", s_arm_arch_extension
, 0 },
4671 { "word", s_arm_elf_cons
, 4 },
4672 { "long", s_arm_elf_cons
, 4 },
4673 { "inst.n", s_arm_elf_inst
, 2 },
4674 { "inst.w", s_arm_elf_inst
, 4 },
4675 { "inst", s_arm_elf_inst
, 0 },
4676 { "rel31", s_arm_rel31
, 0 },
4677 { "fnstart", s_arm_unwind_fnstart
, 0 },
4678 { "fnend", s_arm_unwind_fnend
, 0 },
4679 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4680 { "personality", s_arm_unwind_personality
, 0 },
4681 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4682 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4683 { "save", s_arm_unwind_save
, 0 },
4684 { "vsave", s_arm_unwind_save
, 1 },
4685 { "movsp", s_arm_unwind_movsp
, 0 },
4686 { "pad", s_arm_unwind_pad
, 0 },
4687 { "setfp", s_arm_unwind_setfp
, 0 },
4688 { "unwind_raw", s_arm_unwind_raw
, 0 },
4689 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4690 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4694 /* These are used for dwarf. */
4698 /* These are used for dwarf2. */
4699 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
4700 { "loc", dwarf2_directive_loc
, 0 },
4701 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4703 { "extend", float_cons
, 'x' },
4704 { "ldouble", float_cons
, 'x' },
4705 { "packed", float_cons
, 'p' },
4707 {"secrel32", pe_directive_secrel
, 0},
4710 /* These are for compatibility with CodeComposer Studio. */
4711 {"ref", s_ccs_ref
, 0},
4712 {"def", s_ccs_def
, 0},
4713 {"asmfunc", s_ccs_asmfunc
, 0},
4714 {"endasmfunc", s_ccs_endasmfunc
, 0},
4719 /* Parser functions used exclusively in instruction operands. */
4721 /* Generic immediate-value read function for use in insn parsing.
4722 STR points to the beginning of the immediate (the leading #);
4723 VAL receives the value; if the value is outside [MIN, MAX]
4724 issue an error. PREFIX_OPT is true if the immediate prefix is
4728 parse_immediate (char **str
, int *val
, int min
, int max
,
4729 bfd_boolean prefix_opt
)
4732 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4733 if (exp
.X_op
!= O_constant
)
4735 inst
.error
= _("constant expression required");
4739 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4741 inst
.error
= _("immediate value out of range");
4745 *val
= exp
.X_add_number
;
4749 /* Less-generic immediate-value read function with the possibility of loading a
4750 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4751 instructions. Puts the result directly in inst.operands[i]. */
4754 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
4755 bfd_boolean allow_symbol_p
)
4758 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
4761 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
4763 if (exp_p
->X_op
== O_constant
)
4765 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
4766 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4767 O_constant. We have to be careful not to break compilation for
4768 32-bit X_add_number, though. */
4769 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
4771 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4772 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
4774 inst
.operands
[i
].regisimm
= 1;
4777 else if (exp_p
->X_op
== O_big
4778 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
4780 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4782 /* Bignums have their least significant bits in
4783 generic_bignum[0]. Make sure we put 32 bits in imm and
4784 32 bits in reg, in a (hopefully) portable way. */
4785 gas_assert (parts
!= 0);
4787 /* Make sure that the number is not too big.
4788 PR 11972: Bignums can now be sign-extended to the
4789 size of a .octa so check that the out of range bits
4790 are all zero or all one. */
4791 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
4793 LITTLENUM_TYPE m
= -1;
4795 if (generic_bignum
[parts
* 2] != 0
4796 && generic_bignum
[parts
* 2] != m
)
4799 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
4800 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4804 inst
.operands
[i
].imm
= 0;
4805 for (j
= 0; j
< parts
; j
++, idx
++)
4806 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4807 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4808 inst
.operands
[i
].reg
= 0;
4809 for (j
= 0; j
< parts
; j
++, idx
++)
4810 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4811 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4812 inst
.operands
[i
].regisimm
= 1;
4814 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
4822 /* Returns the pseudo-register number of an FPA immediate constant,
4823 or FAIL if there isn't a valid constant here. */
4826 parse_fpa_immediate (char ** str
)
4828 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4834 /* First try and match exact strings, this is to guarantee
4835 that some formats will work even for cross assembly. */
4837 for (i
= 0; fp_const
[i
]; i
++)
4839 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4843 *str
+= strlen (fp_const
[i
]);
4844 if (is_end_of_line
[(unsigned char) **str
])
4850 /* Just because we didn't get a match doesn't mean that the constant
4851 isn't valid, just that it is in a format that we don't
4852 automatically recognize. Try parsing it with the standard
4853 expression routines. */
4855 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4857 /* Look for a raw floating point number. */
4858 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4859 && is_end_of_line
[(unsigned char) *save_in
])
4861 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4863 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4865 if (words
[j
] != fp_values
[i
][j
])
4869 if (j
== MAX_LITTLENUMS
)
4877 /* Try and parse a more complex expression, this will probably fail
4878 unless the code uses a floating point prefix (eg "0f"). */
4879 save_in
= input_line_pointer
;
4880 input_line_pointer
= *str
;
4881 if (expression (&exp
) == absolute_section
4882 && exp
.X_op
== O_big
4883 && exp
.X_add_number
< 0)
4885 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4887 #define X_PRECISION 5
4888 #define E_PRECISION 15L
4889 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
4891 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4893 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4895 if (words
[j
] != fp_values
[i
][j
])
4899 if (j
== MAX_LITTLENUMS
)
4901 *str
= input_line_pointer
;
4902 input_line_pointer
= save_in
;
4909 *str
= input_line_pointer
;
4910 input_line_pointer
= save_in
;
4911 inst
.error
= _("invalid FPA immediate expression");
4915 /* Returns 1 if a number has "quarter-precision" float format
4916 0baBbbbbbc defgh000 00000000 00000000. */
4919 is_quarter_float (unsigned imm
)
4921 int bs
= (imm
& 0x20000000) ? 0x3e000000 : 0x40000000;
4922 return (imm
& 0x7ffff) == 0 && ((imm
& 0x7e000000) ^ bs
) == 0;
4926 /* Detect the presence of a floating point or integer zero constant,
4930 parse_ifimm_zero (char **in
)
4934 if (!is_immediate_prefix (**in
))
4939 /* Accept #0x0 as a synonym for #0. */
4940 if (strncmp (*in
, "0x", 2) == 0)
4943 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
4948 error_code
= atof_generic (in
, ".", EXP_CHARS
,
4949 &generic_floating_point_number
);
4952 && generic_floating_point_number
.sign
== '+'
4953 && (generic_floating_point_number
.low
4954 > generic_floating_point_number
.leader
))
4960 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4961 0baBbbbbbc defgh000 00000000 00000000.
4962 The zero and minus-zero cases need special handling, since they can't be
4963 encoded in the "quarter-precision" float format, but can nonetheless be
4964 loaded as integer constants. */
4967 parse_qfloat_immediate (char **ccp
, int *immed
)
4971 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4972 int found_fpchar
= 0;
4974 skip_past_char (&str
, '#');
4976 /* We must not accidentally parse an integer as a floating-point number. Make
4977 sure that the value we parse is not an integer by checking for special
4978 characters '.' or 'e'.
4979 FIXME: This is a horrible hack, but doing better is tricky because type
4980 information isn't in a very usable state at parse time. */
4982 skip_whitespace (fpnum
);
4984 if (strncmp (fpnum
, "0x", 2) == 0)
4988 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
4989 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
4999 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5001 unsigned fpword
= 0;
5004 /* Our FP word must be 32 bits (single-precision FP). */
5005 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5007 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5011 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5024 /* Shift operands. */
5027 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5030 struct asm_shift_name
5033 enum shift_kind kind
;
5036 /* Third argument to parse_shift. */
5037 enum parse_shift_mode
5039 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5040 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5041 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5042 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5043 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5046 /* Parse a <shift> specifier on an ARM data processing instruction.
5047 This has three forms:
5049 (LSL|LSR|ASL|ASR|ROR) Rs
5050 (LSL|LSR|ASL|ASR|ROR) #imm
5053 Note that ASL is assimilated to LSL in the instruction encoding, and
5054 RRX to ROR #0 (which cannot be written as such). */
5057 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5059 const struct asm_shift_name
*shift_name
;
5060 enum shift_kind shift
;
5065 for (p
= *str
; ISALPHA (*p
); p
++)
5070 inst
.error
= _("shift expression expected");
5074 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5077 if (shift_name
== NULL
)
5079 inst
.error
= _("shift expression expected");
5083 shift
= shift_name
->kind
;
5087 case NO_SHIFT_RESTRICT
:
5088 case SHIFT_IMMEDIATE
: break;
5090 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5091 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5093 inst
.error
= _("'LSL' or 'ASR' required");
5098 case SHIFT_LSL_IMMEDIATE
:
5099 if (shift
!= SHIFT_LSL
)
5101 inst
.error
= _("'LSL' required");
5106 case SHIFT_ASR_IMMEDIATE
:
5107 if (shift
!= SHIFT_ASR
)
5109 inst
.error
= _("'ASR' required");
5117 if (shift
!= SHIFT_RRX
)
5119 /* Whitespace can appear here if the next thing is a bare digit. */
5120 skip_whitespace (p
);
5122 if (mode
== NO_SHIFT_RESTRICT
5123 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5125 inst
.operands
[i
].imm
= reg
;
5126 inst
.operands
[i
].immisreg
= 1;
5128 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5131 inst
.operands
[i
].shift_kind
= shift
;
5132 inst
.operands
[i
].shifted
= 1;
5137 /* Parse a <shifter_operand> for an ARM data processing instruction:
5140 #<immediate>, <rotate>
5144 where <shift> is defined by parse_shift above, and <rotate> is a
5145 multiple of 2 between 0 and 30. Validation of immediate operands
5146 is deferred to md_apply_fix. */
5149 parse_shifter_operand (char **str
, int i
)
5154 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5156 inst
.operands
[i
].reg
= value
;
5157 inst
.operands
[i
].isreg
= 1;
5159 /* parse_shift will override this if appropriate */
5160 inst
.reloc
.exp
.X_op
= O_constant
;
5161 inst
.reloc
.exp
.X_add_number
= 0;
5163 if (skip_past_comma (str
) == FAIL
)
5166 /* Shift operation on register. */
5167 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5170 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
5173 if (skip_past_comma (str
) == SUCCESS
)
5175 /* #x, y -- ie explicit rotation by Y. */
5176 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5179 if (exp
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
5181 inst
.error
= _("constant expression expected");
5185 value
= exp
.X_add_number
;
5186 if (value
< 0 || value
> 30 || value
% 2 != 0)
5188 inst
.error
= _("invalid rotation");
5191 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
5193 inst
.error
= _("invalid constant");
5197 /* Encode as specified. */
5198 inst
.operands
[i
].imm
= inst
.reloc
.exp
.X_add_number
| value
<< 7;
5202 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
5203 inst
.reloc
.pc_rel
= 0;
5207 /* Group relocation information. Each entry in the table contains the
5208 textual name of the relocation as may appear in assembler source
5209 and must end with a colon.
5210 Along with this textual name are the relocation codes to be used if
5211 the corresponding instruction is an ALU instruction (ADD or SUB only),
5212 an LDR, an LDRS, or an LDC. */
5214 struct group_reloc_table_entry
5225 /* Varieties of non-ALU group relocation. */
5232 static struct group_reloc_table_entry group_reloc_table
[] =
5233 { /* Program counter relative: */
5235 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5240 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5241 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5242 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5243 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5245 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5250 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5251 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5252 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5253 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5255 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5256 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5257 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5258 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5259 /* Section base relative */
5261 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5266 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5267 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5268 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5269 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5271 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5276 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5277 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5278 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5279 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5281 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5282 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5283 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5284 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5285 /* Absolute thumb alu relocations. */
5287 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5292 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5297 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5302 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5307 /* Given the address of a pointer pointing to the textual name of a group
5308 relocation as may appear in assembler source, attempt to find its details
5309 in group_reloc_table. The pointer will be updated to the character after
5310 the trailing colon. On failure, FAIL will be returned; SUCCESS
5311 otherwise. On success, *entry will be updated to point at the relevant
5312 group_reloc_table entry. */
5315 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5318 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5320 int length
= strlen (group_reloc_table
[i
].name
);
5322 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5323 && (*str
)[length
] == ':')
5325 *out
= &group_reloc_table
[i
];
5326 *str
+= (length
+ 1);
5334 /* Parse a <shifter_operand> for an ARM data processing instruction
5335 (as for parse_shifter_operand) where group relocations are allowed:
5338 #<immediate>, <rotate>
5339 #:<group_reloc>:<expression>
5343 where <group_reloc> is one of the strings defined in group_reloc_table.
5344 The hashes are optional.
5346 Everything else is as for parse_shifter_operand. */
5348 static parse_operand_result
5349 parse_shifter_operand_group_reloc (char **str
, int i
)
5351 /* Determine if we have the sequence of characters #: or just :
5352 coming next. If we do, then we check for a group relocation.
5353 If we don't, punt the whole lot to parse_shifter_operand. */
5355 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5356 || (*str
)[0] == ':')
5358 struct group_reloc_table_entry
*entry
;
5360 if ((*str
)[0] == '#')
5365 /* Try to parse a group relocation. Anything else is an error. */
5366 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5368 inst
.error
= _("unknown group relocation");
5369 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5372 /* We now have the group relocation table entry corresponding to
5373 the name in the assembler source. Next, we parse the expression. */
5374 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
5375 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5377 /* Record the relocation type (always the ALU variant here). */
5378 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5379 gas_assert (inst
.reloc
.type
!= 0);
5381 return PARSE_OPERAND_SUCCESS
;
5384 return parse_shifter_operand (str
, i
) == SUCCESS
5385 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5387 /* Never reached. */
5390 /* Parse a Neon alignment expression. Information is written to
5391 inst.operands[i]. We assume the initial ':' has been skipped.
5393 align .imm = align << 8, .immisalign=1, .preind=0 */
5394 static parse_operand_result
5395 parse_neon_alignment (char **str
, int i
)
5400 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5402 if (exp
.X_op
!= O_constant
)
5404 inst
.error
= _("alignment must be constant");
5405 return PARSE_OPERAND_FAIL
;
5408 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5409 inst
.operands
[i
].immisalign
= 1;
5410 /* Alignments are not pre-indexes. */
5411 inst
.operands
[i
].preind
= 0;
5414 return PARSE_OPERAND_SUCCESS
;
5417 /* Parse all forms of an ARM address expression. Information is written
5418 to inst.operands[i] and/or inst.reloc.
5420 Preindexed addressing (.preind=1):
5422 [Rn, #offset] .reg=Rn .reloc.exp=offset
5423 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5424 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5425 .shift_kind=shift .reloc.exp=shift_imm
5427 These three may have a trailing ! which causes .writeback to be set also.
5429 Postindexed addressing (.postind=1, .writeback=1):
5431 [Rn], #offset .reg=Rn .reloc.exp=offset
5432 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5433 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5434 .shift_kind=shift .reloc.exp=shift_imm
5436 Unindexed addressing (.preind=0, .postind=0):
5438 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5442 [Rn]{!} shorthand for [Rn,#0]{!}
5443 =immediate .isreg=0 .reloc.exp=immediate
5444 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5446 It is the caller's responsibility to check for addressing modes not
5447 supported by the instruction, and to set inst.reloc.type. */
5449 static parse_operand_result
5450 parse_address_main (char **str
, int i
, int group_relocations
,
5451 group_reloc_type group_type
)
5456 if (skip_past_char (&p
, '[') == FAIL
)
5458 if (skip_past_char (&p
, '=') == FAIL
)
5460 /* Bare address - translate to PC-relative offset. */
5461 inst
.reloc
.pc_rel
= 1;
5462 inst
.operands
[i
].reg
= REG_PC
;
5463 inst
.operands
[i
].isreg
= 1;
5464 inst
.operands
[i
].preind
= 1;
5466 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_OPT_PREFIX_BIG
))
5467 return PARSE_OPERAND_FAIL
;
5469 else if (parse_big_immediate (&p
, i
, &inst
.reloc
.exp
,
5470 /*allow_symbol_p=*/TRUE
))
5471 return PARSE_OPERAND_FAIL
;
5474 return PARSE_OPERAND_SUCCESS
;
5477 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5478 skip_whitespace (p
);
5480 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5482 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5483 return PARSE_OPERAND_FAIL
;
5485 inst
.operands
[i
].reg
= reg
;
5486 inst
.operands
[i
].isreg
= 1;
5488 if (skip_past_comma (&p
) == SUCCESS
)
5490 inst
.operands
[i
].preind
= 1;
5493 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5495 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5497 inst
.operands
[i
].imm
= reg
;
5498 inst
.operands
[i
].immisreg
= 1;
5500 if (skip_past_comma (&p
) == SUCCESS
)
5501 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5502 return PARSE_OPERAND_FAIL
;
5504 else if (skip_past_char (&p
, ':') == SUCCESS
)
5506 /* FIXME: '@' should be used here, but it's filtered out by generic
5507 code before we get to see it here. This may be subject to
5509 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5511 if (result
!= PARSE_OPERAND_SUCCESS
)
5516 if (inst
.operands
[i
].negative
)
5518 inst
.operands
[i
].negative
= 0;
5522 if (group_relocations
5523 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5525 struct group_reloc_table_entry
*entry
;
5527 /* Skip over the #: or : sequence. */
5533 /* Try to parse a group relocation. Anything else is an
5535 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5537 inst
.error
= _("unknown group relocation");
5538 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5541 /* We now have the group relocation table entry corresponding to
5542 the name in the assembler source. Next, we parse the
5544 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5545 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5547 /* Record the relocation type. */
5551 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldr_code
;
5555 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5559 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldc_code
;
5566 if (inst
.reloc
.type
== 0)
5568 inst
.error
= _("this group relocation is not allowed on this instruction");
5569 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5575 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5576 return PARSE_OPERAND_FAIL
;
5577 /* If the offset is 0, find out if it's a +0 or -0. */
5578 if (inst
.reloc
.exp
.X_op
== O_constant
5579 && inst
.reloc
.exp
.X_add_number
== 0)
5581 skip_whitespace (q
);
5585 skip_whitespace (q
);
5588 inst
.operands
[i
].negative
= 1;
5593 else if (skip_past_char (&p
, ':') == SUCCESS
)
5595 /* FIXME: '@' should be used here, but it's filtered out by generic code
5596 before we get to see it here. This may be subject to change. */
5597 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5599 if (result
!= PARSE_OPERAND_SUCCESS
)
5603 if (skip_past_char (&p
, ']') == FAIL
)
5605 inst
.error
= _("']' expected");
5606 return PARSE_OPERAND_FAIL
;
5609 if (skip_past_char (&p
, '!') == SUCCESS
)
5610 inst
.operands
[i
].writeback
= 1;
5612 else if (skip_past_comma (&p
) == SUCCESS
)
5614 if (skip_past_char (&p
, '{') == SUCCESS
)
5616 /* [Rn], {expr} - unindexed, with option */
5617 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5618 0, 255, TRUE
) == FAIL
)
5619 return PARSE_OPERAND_FAIL
;
5621 if (skip_past_char (&p
, '}') == FAIL
)
5623 inst
.error
= _("'}' expected at end of 'option' field");
5624 return PARSE_OPERAND_FAIL
;
5626 if (inst
.operands
[i
].preind
)
5628 inst
.error
= _("cannot combine index with option");
5629 return PARSE_OPERAND_FAIL
;
5632 return PARSE_OPERAND_SUCCESS
;
5636 inst
.operands
[i
].postind
= 1;
5637 inst
.operands
[i
].writeback
= 1;
5639 if (inst
.operands
[i
].preind
)
5641 inst
.error
= _("cannot combine pre- and post-indexing");
5642 return PARSE_OPERAND_FAIL
;
5646 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5648 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5650 /* We might be using the immediate for alignment already. If we
5651 are, OR the register number into the low-order bits. */
5652 if (inst
.operands
[i
].immisalign
)
5653 inst
.operands
[i
].imm
|= reg
;
5655 inst
.operands
[i
].imm
= reg
;
5656 inst
.operands
[i
].immisreg
= 1;
5658 if (skip_past_comma (&p
) == SUCCESS
)
5659 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5660 return PARSE_OPERAND_FAIL
;
5665 if (inst
.operands
[i
].negative
)
5667 inst
.operands
[i
].negative
= 0;
5670 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5671 return PARSE_OPERAND_FAIL
;
5672 /* If the offset is 0, find out if it's a +0 or -0. */
5673 if (inst
.reloc
.exp
.X_op
== O_constant
5674 && inst
.reloc
.exp
.X_add_number
== 0)
5676 skip_whitespace (q
);
5680 skip_whitespace (q
);
5683 inst
.operands
[i
].negative
= 1;
5689 /* If at this point neither .preind nor .postind is set, we have a
5690 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5691 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5693 inst
.operands
[i
].preind
= 1;
5694 inst
.reloc
.exp
.X_op
= O_constant
;
5695 inst
.reloc
.exp
.X_add_number
= 0;
5698 return PARSE_OPERAND_SUCCESS
;
5702 parse_address (char **str
, int i
)
5704 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5708 static parse_operand_result
5709 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5711 return parse_address_main (str
, i
, 1, type
);
5714 /* Parse an operand for a MOVW or MOVT instruction. */
5716 parse_half (char **str
)
5721 skip_past_char (&p
, '#');
5722 if (strncasecmp (p
, ":lower16:", 9) == 0)
5723 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
5724 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5725 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
5727 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
5730 skip_whitespace (p
);
5733 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5736 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5738 if (inst
.reloc
.exp
.X_op
!= O_constant
)
5740 inst
.error
= _("constant expression expected");
5743 if (inst
.reloc
.exp
.X_add_number
< 0
5744 || inst
.reloc
.exp
.X_add_number
> 0xffff)
5746 inst
.error
= _("immediate value out of range");
5754 /* Miscellaneous. */
5756 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5757 or a bitmask suitable to be or-ed into the ARM msr instruction. */
/* NOTE(review): this region of the file is mangled — logical lines are
   split apart and several physical lines are missing (the embedded
   original line numbers jump, e.g. 5771 -> 5774).  Code left verbatim;
   only review comments added.  Reconcile against a pristine copy.  */
5759 parse_psr (char **str
, bfd_boolean lhs
)
5762 unsigned long psr_field
;
5763 const struct asm_psr
*psr
;
5765 bfd_boolean is_apsr
= FALSE
;
/* m_profile: whether the selected CPU is an M-profile core; affects
   which PSR spellings are legal below.  */
5766 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
5768 /* PR gas/12698: If the user has specified -march=all then m_profile will
5769 be TRUE, but we want to ignore it in this case as we are building for any
5770 CPU type, including non-m variants. */
5771 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
5774 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5775 feature for ease of use and backwards compatibility. */
/* Recognise the leading SPSR/CPSR/APSR keyword (case-insensitive).
   SPSR/CPSR are rejected (unsupported_psr) on M-profile — presumably
   the missing lines gate on m_profile; TODO confirm.  */
5777 if (strncasecmp (p
, "SPSR", 4) == 0)
5780 goto unsupported_psr
;
5782 psr_field
= SPSR_BIT
;
5784 else if (strncasecmp (p
, "CPSR", 4) == 0)
5787 goto unsupported_psr
;
5791 else if (strncasecmp (p
, "APSR", 4) == 0)
5793 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5794 and ARMv7-R architecture CPUs. */
/* M-profile path: scan the whole register name (alnum or '_'), then
   look it up in the v7-M special-register table.  */
5803 while (ISALNUM (*p
) || *p
== '_');
5805 if (strncasecmp (start
, "iapsr", 5) == 0
5806 || strncasecmp (start
, "eapsr", 5) == 0
5807 || strncasecmp (start
, "xpsr", 4) == 0
5808 || strncasecmp (start
, "psr", 3) == 0)
/* Truncate *psr spellings after the 'r' so e.g. "iapsr_nzcvq" leaves
   the suffix for the bitfield scan below.  */
5809 p
= start
+ strcspn (start
, "rR") + 1;
5811 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5817 /* If APSR is being written, a bitfield may be specified. Note that
5818 APSR itself is handled above. */
5819 if (psr
->field
<= 3)
5821 psr_field
= psr
->field
;
5827 /* M-profile MSR instructions have the mask field set to "10", except
5828 *PSR variants which modify APSR, which may use a different mask (and
5829 have been handled already). Do that by setting the PSR_f field
5831 return psr
->field
| (lhs
? PSR_f
: 0);
5834 goto unsupported_psr
;
5840 /* A suffix follows. */
/* Scan the "_<suffix>" after [CS]PSR/APSR: alnum or '_'.  */
5846 while (ISALNUM (*p
) || *p
== '_');
5850 /* APSR uses a notation for bits, rather than fields. */
5851 unsigned int nzcvq_bits
= 0;
5852 unsigned int g_bit
= 0;
/* Accumulate n/z/c/v/q and g bits; a repeated letter sets the 0x20
   (respectively 0x2) poison bit so the validation below rejects it.  */
5855 for (bit
= start
; bit
!= p
; bit
++)
5857 switch (TOLOWER (*bit
))
5860 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
5864 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
5868 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
5872 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
5876 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
5880 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
5884 inst
.error
= _("unexpected bit specified after APSR");
5889 if (nzcvq_bits
== 0x1f)
/* The 'g' bit requires the DSP extension.  */
5894 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
5896 inst
.error
= _("selected processor does not "
5897 "support DSP extension");
/* Validate: no duplicated bit, nzcvq all-or-nothing, no duplicate g.  */
5904 if ((nzcvq_bits
& 0x20) != 0
5905 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
5906 || (g_bit
& 0x2) != 0)
5908 inst
.error
= _("bad bitmask specified after APSR");
/* Non-APSR suffix: look it up in the generic PSR-suffix table.  */
5914 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
5919 psr_field
|= psr
->field
;
5925 goto error
; /* Garbage after "[CS]PSR". */
5927 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
5928 is deprecated, but allow it anyway. */
5932 as_tsktsk (_("writing to APSR without specifying a bitmask is "
5935 else if (!m_profile
)
5936 /* These bits are never right for M-profile devices: don't set them
5937 (only code paths which read/write APSR reach here). */
5938 psr_field
|= (PSR_c
| PSR_f
);
/* unsupported_psr: register spelling not valid on the selected CPU.  */
5944 inst
.error
= _("selected processor does not support requested special "
5945 "purpose register");
/* error: generic syntax failure.  */
5949 inst
.error
= _("flag for {c}psr instruction expected");
5953 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5954 value suitable for splatting into the AIF field of the instruction. */
5957 parse_cps_flags (char **str
)
5966 case '\0': case ',':
5969 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
5970 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
5971 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
5974 inst
.error
= _("unrecognized CPS flag");
5979 if (saw_a_flag
== 0)
5981 inst
.error
= _("missing CPS flags");
5989 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5990 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5993 parse_endian_specifier (char **str
)
5998 if (strncasecmp (s
, "BE", 2))
6000 else if (strncasecmp (s
, "LE", 2))
6004 inst
.error
= _("valid endian specifiers are be or le");
6008 if (ISALNUM (s
[2]) || s
[2] == '_')
6010 inst
.error
= _("valid endian specifiers are be or le");
6015 return little_endian
;
6018 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6019 value suitable for poking into the rotate field of an sxt or sxta
6020 instruction, or FAIL on error. */
6023 parse_ror (char **str
)
6028 if (strncasecmp (s
, "ROR", 3) == 0)
6032 inst
.error
= _("missing rotation field after comma");
6036 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6041 case 0: *str
= s
; return 0x0;
6042 case 8: *str
= s
; return 0x1;
6043 case 16: *str
= s
; return 0x2;
6044 case 24: *str
= s
; return 0x3;
6047 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6052 /* Parse a conditional code (from conds[] below). The value returned is in the
6053 range 0 .. 14, or FAIL. */
6055 parse_cond (char **str
)
6058 const struct asm_cond
*c
;
6060 /* Condition codes are always 2 characters, so matching up to
6061 3 characters is sufficient. */
6066 while (ISALPHA (*q
) && n
< 3)
6068 cond
[n
] = TOLOWER (*q
);
6073 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6076 inst
.error
= _("condition required");
6084 /* If the given feature available in the selected CPU, mark it as used.
6085 Returns TRUE iff feature is available. */
6087 mark_feature_used (const arm_feature_set
*feature
)
6089 /* Ensure the option is valid on the current architecture. */
6090 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
6093 /* Add the appropriate architecture feature for the barrier option used.
6096 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
6098 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
6103 /* Parse an option for a barrier instruction. Returns the encoding for the
6106 parse_barrier (char **str
)
6109 const struct asm_barrier_opt
*o
;
6112 while (ISALPHA (*q
))
6115 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6120 if (!mark_feature_used (&o
->arch
))
6127 /* Parse the operands of a table branch instruction. Similar to a memory
6130 parse_tb (char **str
)
6135 if (skip_past_char (&p
, '[') == FAIL
)
6137 inst
.error
= _("'[' expected");
6141 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6143 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6146 inst
.operands
[0].reg
= reg
;
6148 if (skip_past_comma (&p
) == FAIL
)
6150 inst
.error
= _("',' expected");
6154 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6156 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6159 inst
.operands
[0].imm
= reg
;
6161 if (skip_past_comma (&p
) == SUCCESS
)
6163 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6165 if (inst
.reloc
.exp
.X_add_number
!= 1)
6167 inst
.error
= _("invalid shift");
6170 inst
.operands
[0].shifted
= 1;
6173 if (skip_past_char (&p
, ']') == FAIL
)
6175 inst
.error
= _("']' expected");
6182 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6183 information on the types the operands can take and how they are encoded.
6184 Up to four operands may be read; this function handles setting the
6185 ".present" field for each read operand itself.
6186 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6187 else returns FAIL. */
6190 parse_neon_mov (char **str
, int *which_operand
)
6192 int i
= *which_operand
, val
;
6193 enum arm_reg_type rtype
;
6195 struct neon_type_el optype
;
6197 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6199 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6200 inst
.operands
[i
].reg
= val
;
6201 inst
.operands
[i
].isscalar
= 1;
6202 inst
.operands
[i
].vectype
= optype
;
6203 inst
.operands
[i
++].present
= 1;
6205 if (skip_past_comma (&ptr
) == FAIL
)
6208 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6211 inst
.operands
[i
].reg
= val
;
6212 inst
.operands
[i
].isreg
= 1;
6213 inst
.operands
[i
].present
= 1;
6215 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6218 /* Cases 0, 1, 2, 3, 5 (D only). */
6219 if (skip_past_comma (&ptr
) == FAIL
)
6222 inst
.operands
[i
].reg
= val
;
6223 inst
.operands
[i
].isreg
= 1;
6224 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6225 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6226 inst
.operands
[i
].isvec
= 1;
6227 inst
.operands
[i
].vectype
= optype
;
6228 inst
.operands
[i
++].present
= 1;
6230 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6232 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6233 Case 13: VMOV <Sd>, <Rm> */
6234 inst
.operands
[i
].reg
= val
;
6235 inst
.operands
[i
].isreg
= 1;
6236 inst
.operands
[i
].present
= 1;
6238 if (rtype
== REG_TYPE_NQ
)
6240 first_error (_("can't use Neon quad register here"));
6243 else if (rtype
!= REG_TYPE_VFS
)
6246 if (skip_past_comma (&ptr
) == FAIL
)
6248 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6250 inst
.operands
[i
].reg
= val
;
6251 inst
.operands
[i
].isreg
= 1;
6252 inst
.operands
[i
].present
= 1;
6255 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6258 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6259 Case 1: VMOV<c><q> <Dd>, <Dm>
6260 Case 8: VMOV.F32 <Sd>, <Sm>
6261 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6263 inst
.operands
[i
].reg
= val
;
6264 inst
.operands
[i
].isreg
= 1;
6265 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6266 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6267 inst
.operands
[i
].isvec
= 1;
6268 inst
.operands
[i
].vectype
= optype
;
6269 inst
.operands
[i
].present
= 1;
6271 if (skip_past_comma (&ptr
) == SUCCESS
)
6276 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6279 inst
.operands
[i
].reg
= val
;
6280 inst
.operands
[i
].isreg
= 1;
6281 inst
.operands
[i
++].present
= 1;
6283 if (skip_past_comma (&ptr
) == FAIL
)
6286 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6289 inst
.operands
[i
].reg
= val
;
6290 inst
.operands
[i
].isreg
= 1;
6291 inst
.operands
[i
].present
= 1;
6294 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6295 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6296 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6297 Case 10: VMOV.F32 <Sd>, #<imm>
6298 Case 11: VMOV.F64 <Dd>, #<imm> */
6299 inst
.operands
[i
].immisfloat
= 1;
6300 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6302 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6303 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6307 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6311 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6314 inst
.operands
[i
].reg
= val
;
6315 inst
.operands
[i
].isreg
= 1;
6316 inst
.operands
[i
++].present
= 1;
6318 if (skip_past_comma (&ptr
) == FAIL
)
6321 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6323 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6324 inst
.operands
[i
].reg
= val
;
6325 inst
.operands
[i
].isscalar
= 1;
6326 inst
.operands
[i
].present
= 1;
6327 inst
.operands
[i
].vectype
= optype
;
6329 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6331 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6332 inst
.operands
[i
].reg
= val
;
6333 inst
.operands
[i
].isreg
= 1;
6334 inst
.operands
[i
++].present
= 1;
6336 if (skip_past_comma (&ptr
) == FAIL
)
6339 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6342 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6346 inst
.operands
[i
].reg
= val
;
6347 inst
.operands
[i
].isreg
= 1;
6348 inst
.operands
[i
].isvec
= 1;
6349 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6350 inst
.operands
[i
].vectype
= optype
;
6351 inst
.operands
[i
].present
= 1;
6353 if (rtype
== REG_TYPE_VFS
)
6357 if (skip_past_comma (&ptr
) == FAIL
)
6359 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6362 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6365 inst
.operands
[i
].reg
= val
;
6366 inst
.operands
[i
].isreg
= 1;
6367 inst
.operands
[i
].isvec
= 1;
6368 inst
.operands
[i
].issingle
= 1;
6369 inst
.operands
[i
].vectype
= optype
;
6370 inst
.operands
[i
].present
= 1;
6373 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6377 inst
.operands
[i
].reg
= val
;
6378 inst
.operands
[i
].isreg
= 1;
6379 inst
.operands
[i
].isvec
= 1;
6380 inst
.operands
[i
].issingle
= 1;
6381 inst
.operands
[i
].vectype
= optype
;
6382 inst
.operands
[i
].present
= 1;
6387 first_error (_("parse error"));
6391 /* Successfully parsed the operands. Update args. */
6397 first_error (_("expected comma"));
6401 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/* 0 .. 15 */
  OP_I16,	/* 1 .. 16 */
  OP_I16z,	/* 0 .. 16 */
  OP_I31,	/* 0 .. 31 */
  OP_I31w,	/* 0 .. 31, optional trailing ! */
  OP_I32,	/* 1 .. 32 */
  OP_I32z,	/* 0 .. 32 */
  OP_I63,	/* 0 .. 63 */
  OP_I63s,	/* -64 .. 63 */
  OP_I64,	/* 1 .. 64 */
  OP_I64z,	/* 0 .. 64 */
  OP_I255,	/* 0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/* 0 .. 7 */
  OP_I15b,	/* 0 .. 15 */
  OP_I31b,	/* 0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /* 0 .. 31 */
  OP_oI32b,	 /* 1 .. 32 */
  OP_oI32z,	 /* 0 .. 32 */
  OP_oIffffb,	 /* 0 .. 65535 */
  OP_oI255c,	 /* curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,	 /* Optional Neon double precision register */
  OP_oRNQ,	 /* Optional Neon quad precision register */
  OP_oRNDQ,	 /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  OP_FIRST_OPTIONAL = OP_oI7b
};
6541 /* Generic instruction operand parser. This does no encoding and no
6542 semantic validation; it merely squirrels values away in the inst
6543 structure. Returns SUCCESS or FAIL depending on whether the
6544 specified grammar matched. */
6546 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6548 unsigned const int *upat
= pattern
;
6549 char *backtrack_pos
= 0;
6550 const char *backtrack_error
= 0;
6551 int i
, val
= 0, backtrack_index
= 0;
6552 enum arm_reg_type rtype
;
6553 parse_operand_result result
;
6554 unsigned int op_parse_code
;
6556 #define po_char_or_fail(chr) \
6559 if (skip_past_char (&str, chr) == FAIL) \
6564 #define po_reg_or_fail(regtype) \
6567 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6568 & inst.operands[i].vectype); \
6571 first_error (_(reg_expected_msgs[regtype])); \
6574 inst.operands[i].reg = val; \
6575 inst.operands[i].isreg = 1; \
6576 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6577 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6578 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6579 || rtype == REG_TYPE_VFD \
6580 || rtype == REG_TYPE_NQ); \
6584 #define po_reg_or_goto(regtype, label) \
6587 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6588 & inst.operands[i].vectype); \
6592 inst.operands[i].reg = val; \
6593 inst.operands[i].isreg = 1; \
6594 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6595 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6596 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6597 || rtype == REG_TYPE_VFD \
6598 || rtype == REG_TYPE_NQ); \
6602 #define po_imm_or_fail(min, max, popt) \
6605 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6607 inst.operands[i].imm = val; \
6611 #define po_scalar_or_goto(elsz, label) \
6614 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6617 inst.operands[i].reg = val; \
6618 inst.operands[i].isscalar = 1; \
6622 #define po_misc_or_fail(expr) \
6630 #define po_misc_or_fail_no_backtrack(expr) \
6634 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6635 backtrack_pos = 0; \
6636 if (result != PARSE_OPERAND_SUCCESS) \
6641 #define po_barrier_or_imm(str) \
6644 val = parse_barrier (&str); \
6645 if (val == FAIL && ! ISALPHA (*str)) \
6648 /* ISB can only take SY as an option. */ \
6649 || ((inst.instruction & 0xf0) == 0x60 \
6652 inst.error = _("invalid barrier type"); \
6653 backtrack_pos = 0; \
6659 skip_whitespace (str
);
6661 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6663 op_parse_code
= upat
[i
];
6664 if (op_parse_code
>= 1<<16)
6665 op_parse_code
= thumb
? (op_parse_code
>> 16)
6666 : (op_parse_code
& ((1<<16)-1));
6668 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6670 /* Remember where we are in case we need to backtrack. */
6671 gas_assert (!backtrack_pos
);
6672 backtrack_pos
= str
;
6673 backtrack_error
= inst
.error
;
6674 backtrack_index
= i
;
6677 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6678 po_char_or_fail (',');
6680 switch (op_parse_code
)
6688 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6689 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6690 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6691 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6692 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6693 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6695 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6697 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6699 /* Also accept generic coprocessor regs for unknown registers. */
6701 po_reg_or_fail (REG_TYPE_CN
);
6703 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6704 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6705 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6706 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6707 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6708 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6709 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6710 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6711 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6712 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6714 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6716 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6717 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6719 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6721 /* Neon scalar. Using an element size of 8 means that some invalid
6722 scalars are accepted here, so deal with those in later code. */
6723 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6727 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6730 po_imm_or_fail (0, 0, TRUE
);
6735 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6740 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
6743 if (parse_ifimm_zero (&str
))
6744 inst
.operands
[i
].imm
= 0;
6748 = _("only floating point zero is allowed as immediate value");
6756 po_scalar_or_goto (8, try_rr
);
6759 po_reg_or_fail (REG_TYPE_RN
);
6765 po_scalar_or_goto (8, try_nsdq
);
6768 po_reg_or_fail (REG_TYPE_NSDQ
);
6774 po_scalar_or_goto (8, try_ndq
);
6777 po_reg_or_fail (REG_TYPE_NDQ
);
6783 po_scalar_or_goto (8, try_vfd
);
6786 po_reg_or_fail (REG_TYPE_VFD
);
6791 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6792 not careful then bad things might happen. */
6793 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
6798 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
6801 /* There's a possibility of getting a 64-bit immediate here, so
6802 we need special handling. */
6803 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6806 inst
.error
= _("immediate value is out of range");
6814 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
6817 po_imm_or_fail (0, 63, TRUE
);
6822 po_char_or_fail ('[');
6823 po_reg_or_fail (REG_TYPE_RN
);
6824 po_char_or_fail (']');
6830 po_reg_or_fail (REG_TYPE_RN
);
6831 if (skip_past_char (&str
, '!') == SUCCESS
)
6832 inst
.operands
[i
].writeback
= 1;
6836 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
6837 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
6838 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
6839 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
6840 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
6841 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
6842 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
6843 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
6844 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
6845 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
6846 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
6847 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
6849 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
6851 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
6852 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
6854 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
6855 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
6856 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
6857 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
6859 /* Immediate variants */
6861 po_char_or_fail ('{');
6862 po_imm_or_fail (0, 255, TRUE
);
6863 po_char_or_fail ('}');
6867 /* The expression parser chokes on a trailing !, so we have
6868 to find it first and zap it. */
6871 while (*s
&& *s
!= ',')
6876 inst
.operands
[i
].writeback
= 1;
6878 po_imm_or_fail (0, 31, TRUE
);
6886 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6891 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6896 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6898 if (inst
.reloc
.exp
.X_op
== O_symbol
)
6900 val
= parse_reloc (&str
);
6903 inst
.error
= _("unrecognized relocation suffix");
6906 else if (val
!= BFD_RELOC_UNUSED
)
6908 inst
.operands
[i
].imm
= val
;
6909 inst
.operands
[i
].hasreloc
= 1;
6914 /* Operand for MOVW or MOVT. */
6916 po_misc_or_fail (parse_half (&str
));
6919 /* Register or expression. */
6920 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
6921 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
6923 /* Register or immediate. */
6924 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
6925 I0
: po_imm_or_fail (0, 0, FALSE
); break;
6927 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
6929 if (!is_immediate_prefix (*str
))
6932 val
= parse_fpa_immediate (&str
);
6935 /* FPA immediates are encoded as registers 8-15.
6936 parse_fpa_immediate has already applied the offset. */
6937 inst
.operands
[i
].reg
= val
;
6938 inst
.operands
[i
].isreg
= 1;
6941 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
6942 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
6944 /* Two kinds of register. */
6947 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6949 || (rege
->type
!= REG_TYPE_MMXWR
6950 && rege
->type
!= REG_TYPE_MMXWC
6951 && rege
->type
!= REG_TYPE_MMXWCG
))
6953 inst
.error
= _("iWMMXt data or control register expected");
6956 inst
.operands
[i
].reg
= rege
->number
;
6957 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
6963 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
6965 || (rege
->type
!= REG_TYPE_MMXWC
6966 && rege
->type
!= REG_TYPE_MMXWCG
))
6968 inst
.error
= _("iWMMXt control register expected");
6971 inst
.operands
[i
].reg
= rege
->number
;
6972 inst
.operands
[i
].isreg
= 1;
6977 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
6978 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
6979 case OP_oROR
: val
= parse_ror (&str
); break;
6980 case OP_COND
: val
= parse_cond (&str
); break;
6981 case OP_oBARRIER_I15
:
6982 po_barrier_or_imm (str
); break;
6984 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
6990 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
6991 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
6993 inst
.error
= _("Banked registers are not available with this "
6999 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7003 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7006 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7008 if (strncasecmp (str
, "APSR_", 5) == 0)
7015 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7016 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7017 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7018 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7019 default: found
= 16;
7023 inst
.operands
[i
].isvec
= 1;
7024 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7025 inst
.operands
[i
].reg
= REG_PC
;
7032 po_misc_or_fail (parse_tb (&str
));
7035 /* Register lists. */
7037 val
= parse_reg_list (&str
);
7040 inst
.operands
[i
].writeback
= 1;
7046 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
7050 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
7054 /* Allow Q registers too. */
7055 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7060 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7062 inst
.operands
[i
].issingle
= 1;
7067 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7072 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7073 &inst
.operands
[i
].vectype
);
7076 /* Addressing modes */
7078 po_misc_or_fail (parse_address (&str
, i
));
7082 po_misc_or_fail_no_backtrack (
7083 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7087 po_misc_or_fail_no_backtrack (
7088 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7092 po_misc_or_fail_no_backtrack (
7093 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7097 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7101 po_misc_or_fail_no_backtrack (
7102 parse_shifter_operand_group_reloc (&str
, i
));
7106 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7110 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7114 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7118 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7121 /* Various value-based sanity checks and shared operations. We
7122 do not signal immediate failures for the register constraints;
7123 this allows a syntax error to take precedence. */
7124 switch (op_parse_code
)
7132 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7133 inst
.error
= BAD_PC
;
7138 if (inst
.operands
[i
].isreg
)
7140 if (inst
.operands
[i
].reg
== REG_PC
)
7141 inst
.error
= BAD_PC
;
7142 else if (inst
.operands
[i
].reg
== REG_SP
)
7143 inst
.error
= BAD_SP
;
7148 if (inst
.operands
[i
].isreg
7149 && inst
.operands
[i
].reg
== REG_PC
7150 && (inst
.operands
[i
].writeback
|| thumb
))
7151 inst
.error
= BAD_PC
;
7160 case OP_oBARRIER_I15
:
7169 inst
.operands
[i
].imm
= val
;
7176 /* If we get here, this operand was successfully parsed. */
7177 inst
.operands
[i
].present
= 1;
7181 inst
.error
= BAD_ARGS
;
7186 /* The parse routine should already have set inst.error, but set a
7187 default here just in case. */
7189 inst
.error
= _("syntax error");
7193 /* Do not backtrack over a trailing optional argument that
7194 absorbed some text. We will only fail again, with the
7195 'garbage following instruction' error message, which is
7196 probably less helpful than the current one. */
7197 if (backtrack_index
== i
&& backtrack_pos
!= str
7198 && upat
[i
+1] == OP_stop
)
7201 inst
.error
= _("syntax error");
7205 /* Try again, skipping the optional argument at backtrack_pos. */
7206 str
= backtrack_pos
;
7207 inst
.error
= backtrack_error
;
7208 inst
.operands
[backtrack_index
].present
= 0;
7209 i
= backtrack_index
;
7213 /* Check that we have parsed all the arguments. */
7214 if (*str
!= '\0' && !inst
.error
)
7215 inst
.error
= _("garbage following instruction");
7217 return inst
.error
? FAIL
: SUCCESS
;
7220 #undef po_char_or_fail
7221 #undef po_reg_or_fail
7222 #undef po_reg_or_goto
7223 #undef po_imm_or_fail
7224 #undef po_scalar_or_fail
7225 #undef po_barrier_or_imm
7227 /* Shorthand macro for instruction encoding functions issuing errors. */
7228 #define constraint(expr, err) \
7239 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7240 instructions are unpredictable if these registers are used. This
7241 is the BadReg predicate in ARM's Thumb-2 documentation. */
7242 #define reject_bad_reg(reg) \
7244 if (reg == REG_SP || reg == REG_PC) \
7246 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
7251 /* If REG is R13 (the stack pointer), warn that its use is
7253 #define warn_deprecated_sp(reg) \
7255 if (warn_on_deprecated && reg == REG_SP) \
7256 as_tsktsk (_("use of r13 is deprecated")); \
7259 /* Functions for operand encoding. ARM, then Thumb. */
7261 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7263 /* If VAL can be encoded in the immediate field of an ARM instruction,
7264 return the encoded form. Otherwise, return FAIL. */
7267 encode_arm_immediate (unsigned int val
)
7274 for (i
= 2; i
< 32; i
+= 2)
7275 if ((a
= rotate_left (val
, i
)) <= 0xff)
7276 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7281 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7282 return the encoded form. Otherwise, return FAIL. */
7284 encode_thumb32_immediate (unsigned int val
)
7291 for (i
= 1; i
<= 24; i
++)
7294 if ((val
& ~(0xff << i
)) == 0)
7295 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7299 if (val
== ((a
<< 16) | a
))
7301 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7305 if (val
== ((a
<< 16) | a
))
7306 return 0x200 | (a
>> 8);
7310 /* Encode a VFP SP or DP register number into inst.instruction. */
7313 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7315 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7318 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7321 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7324 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7329 first_error (_("D register out of range for selected VFP version"));
7337 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7341 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7345 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7349 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7353 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7357 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7365 /* Encode a <shift> in an ARM-format instruction. The immediate,
7366 if any, is handled by md_apply_fix. */
7368 encode_arm_shift (int i
)
7370 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7371 inst
.instruction
|= SHIFT_ROR
<< 5;
7374 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7375 if (inst
.operands
[i
].immisreg
)
7377 inst
.instruction
|= SHIFT_BY_REG
;
7378 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7381 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7386 encode_arm_shifter_operand (int i
)
7388 if (inst
.operands
[i
].isreg
)
7390 inst
.instruction
|= inst
.operands
[i
].reg
;
7391 encode_arm_shift (i
);
7395 inst
.instruction
|= INST_IMMEDIATE
;
7396 if (inst
.reloc
.type
!= BFD_RELOC_ARM_IMMEDIATE
)
7397 inst
.instruction
|= inst
.operands
[i
].imm
;
7401 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7403 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7406 Generate an error if the operand is not a register. */
7407 constraint (!inst
.operands
[i
].isreg
,
7408 _("Instruction does not support =N addresses"));
7410 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7412 if (inst
.operands
[i
].preind
)
7416 inst
.error
= _("instruction does not accept preindexed addressing");
7419 inst
.instruction
|= PRE_INDEX
;
7420 if (inst
.operands
[i
].writeback
)
7421 inst
.instruction
|= WRITE_BACK
;
7424 else if (inst
.operands
[i
].postind
)
7426 gas_assert (inst
.operands
[i
].writeback
);
7428 inst
.instruction
|= WRITE_BACK
;
7430 else /* unindexed - only for coprocessor */
7432 inst
.error
= _("instruction does not accept unindexed addressing");
7436 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7437 && (((inst
.instruction
& 0x000f0000) >> 16)
7438 == ((inst
.instruction
& 0x0000f000) >> 12)))
7439 as_warn ((inst
.instruction
& LOAD_BIT
)
7440 ? _("destination register same as write-back base")
7441 : _("source register same as write-back base"));
7444 /* inst.operands[i] was set up by parse_address. Encode it into an
7445 ARM-format mode 2 load or store instruction. If is_t is true,
7446 reject forms that cannot be used with a T instruction (i.e. not
7449 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7451 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7453 encode_arm_addr_mode_common (i
, is_t
);
7455 if (inst
.operands
[i
].immisreg
)
7457 constraint ((inst
.operands
[i
].imm
== REG_PC
7458 || (is_pc
&& inst
.operands
[i
].writeback
)),
7460 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7461 inst
.instruction
|= inst
.operands
[i
].imm
;
7462 if (!inst
.operands
[i
].negative
)
7463 inst
.instruction
|= INDEX_UP
;
7464 if (inst
.operands
[i
].shifted
)
7466 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7467 inst
.instruction
|= SHIFT_ROR
<< 5;
7470 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7471 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7475 else /* immediate offset in inst.reloc */
7477 if (is_pc
&& !inst
.reloc
.pc_rel
)
7479 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7481 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7482 cannot use PC in addressing.
7483 PC cannot be used in writeback addressing, either. */
7484 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7487 /* Use of PC in str is deprecated for ARMv7. */
7488 if (warn_on_deprecated
7490 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7491 as_tsktsk (_("use of PC in this instruction is deprecated"));
7494 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7496 /* Prefer + for zero encoded value. */
7497 if (!inst
.operands
[i
].negative
)
7498 inst
.instruction
|= INDEX_UP
;
7499 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
7504 /* inst.operands[i] was set up by parse_address. Encode it into an
7505 ARM-format mode 3 load or store instruction. Reject forms that
7506 cannot be used with such instructions. If is_t is true, reject
7507 forms that cannot be used with a T instruction (i.e. not
7510 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7512 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7514 inst
.error
= _("instruction does not accept scaled register index");
7518 encode_arm_addr_mode_common (i
, is_t
);
7520 if (inst
.operands
[i
].immisreg
)
7522 constraint ((inst
.operands
[i
].imm
== REG_PC
7523 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
7525 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
7527 inst
.instruction
|= inst
.operands
[i
].imm
;
7528 if (!inst
.operands
[i
].negative
)
7529 inst
.instruction
|= INDEX_UP
;
7531 else /* immediate offset in inst.reloc */
7533 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.reloc
.pc_rel
7534 && inst
.operands
[i
].writeback
),
7536 inst
.instruction
|= HWOFFSET_IMM
;
7537 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7539 /* Prefer + for zero encoded value. */
7540 if (!inst
.operands
[i
].negative
)
7541 inst
.instruction
|= INDEX_UP
;
7543 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
7548 /* Write immediate bits [7:0] to the following locations:
7550 |28/24|23 19|18 16|15 4|3 0|
7551 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7553 This function is used by VMOV/VMVN/VORR/VBIC. */
7556 neon_write_immbits (unsigned immbits
)
7558 inst
.instruction
|= immbits
& 0xf;
7559 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
7560 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
7563 /* Invert low-order SIZE bits of XHI:XLO. */
7566 neon_invert_size (unsigned *xlo
, unsigned *xhi
, int size
)
7568 unsigned immlo
= xlo
? *xlo
: 0;
7569 unsigned immhi
= xhi
? *xhi
: 0;
7574 immlo
= (~immlo
) & 0xff;
7578 immlo
= (~immlo
) & 0xffff;
7582 immhi
= (~immhi
) & 0xffffffff;
7586 immlo
= (~immlo
) & 0xffffffff;
7600 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
7604 neon_bits_same_in_bytes (unsigned imm
)
7606 return ((imm
& 0x000000ff) == 0 || (imm
& 0x000000ff) == 0x000000ff)
7607 && ((imm
& 0x0000ff00) == 0 || (imm
& 0x0000ff00) == 0x0000ff00)
7608 && ((imm
& 0x00ff0000) == 0 || (imm
& 0x00ff0000) == 0x00ff0000)
7609 && ((imm
& 0xff000000) == 0 || (imm
& 0xff000000) == 0xff000000);
7612 /* For immediate of above form, return 0bABCD. */
7615 neon_squash_bits (unsigned imm
)
7617 return (imm
& 0x01) | ((imm
& 0x0100) >> 7) | ((imm
& 0x010000) >> 14)
7618 | ((imm
& 0x01000000) >> 21);
7621 /* Compress quarter-float representation to 0b...000 abcdefgh. */
7624 neon_qfloat_bits (unsigned imm
)
7626 return ((imm
>> 19) & 0x7f) | ((imm
>> 24) & 0x80);
7629 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7630 the instruction. *OP is passed as the initial value of the op field, and
7631 may be set to a different value depending on the constant (i.e.
7632 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7633 MVN). If the immediate looks like a repeated pattern then also
7634 try smaller element sizes. */
7637 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
7638 unsigned *immbits
, int *op
, int size
,
7639 enum neon_el_type type
)
7641 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7643 if (type
== NT_float
&& !float_p
)
7646 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
7648 if (size
!= 32 || *op
== 1)
7650 *immbits
= neon_qfloat_bits (immlo
);
7656 if (neon_bits_same_in_bytes (immhi
)
7657 && neon_bits_same_in_bytes (immlo
))
7661 *immbits
= (neon_squash_bits (immhi
) << 4)
7662 | neon_squash_bits (immlo
);
7673 if (immlo
== (immlo
& 0x000000ff))
7678 else if (immlo
== (immlo
& 0x0000ff00))
7680 *immbits
= immlo
>> 8;
7683 else if (immlo
== (immlo
& 0x00ff0000))
7685 *immbits
= immlo
>> 16;
7688 else if (immlo
== (immlo
& 0xff000000))
7690 *immbits
= immlo
>> 24;
7693 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
7695 *immbits
= (immlo
>> 8) & 0xff;
7698 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
7700 *immbits
= (immlo
>> 16) & 0xff;
7704 if ((immlo
& 0xffff) != (immlo
>> 16))
7711 if (immlo
== (immlo
& 0x000000ff))
7716 else if (immlo
== (immlo
& 0x0000ff00))
7718 *immbits
= immlo
>> 8;
7722 if ((immlo
& 0xff) != (immlo
>> 8))
7727 if (immlo
== (immlo
& 0x000000ff))
7729 /* Don't allow MVN with 8-bit immediate. */
7739 #if defined BFD_HOST_64_BIT
7740 /* Returns TRUE if double precision value V may be cast
7741 to single precision without loss of accuracy. */
7744 is_double_a_single (bfd_int64_t v
)
7746 int exp
= (int)((v
>> 52) & 0x7FF);
7747 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7749 return (exp
== 0 || exp
== 0x7FF
7750 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
7751 && (mantissa
& 0x1FFFFFFFl
) == 0;
7754 /* Returns a double precision value casted to single precision
7755 (ignoring the least significant bits in exponent and mantissa). */
7758 double_to_single (bfd_int64_t v
)
7760 int sign
= (int) ((v
>> 63) & 1l);
7761 int exp
= (int) ((v
>> 52) & 0x7FF);
7762 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7768 exp
= exp
- 1023 + 127;
7777 /* No denormalized numbers. */
7783 return (sign
<< 31) | (exp
<< 23) | mantissa
;
7785 #endif /* BFD_HOST_64_BIT */
7794 static void do_vfp_nsyn_opcode (const char *);
7796 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7797 Determine whether it can be performed with a move instruction; if
7798 it can, convert inst.instruction to that move instruction and
7799 return TRUE; if it can't, convert inst.instruction to a literal-pool
7800 load and return FALSE. If this is not a valid thing to do in the
7801 current context, set inst.error and return TRUE.
7803 inst.operands[i] describes the destination register. */
7806 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
7809 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
7810 bfd_boolean arm_p
= (t
== CONST_ARM
);
7813 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
7817 if ((inst
.instruction
& tbit
) == 0)
7819 inst
.error
= _("invalid pseudo operation");
7823 if (inst
.reloc
.exp
.X_op
!= O_constant
7824 && inst
.reloc
.exp
.X_op
!= O_symbol
7825 && inst
.reloc
.exp
.X_op
!= O_big
)
7827 inst
.error
= _("constant expression expected");
7831 if (inst
.reloc
.exp
.X_op
== O_constant
7832 || inst
.reloc
.exp
.X_op
== O_big
)
7834 #if defined BFD_HOST_64_BIT
7839 if (inst
.reloc
.exp
.X_op
== O_big
)
7841 LITTLENUM_TYPE w
[X_PRECISION
];
7844 if (inst
.reloc
.exp
.X_add_number
== -1)
7846 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
7848 /* FIXME: Should we check words w[2..5] ? */
7853 #if defined BFD_HOST_64_BIT
7855 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
7856 << LITTLENUM_NUMBER_OF_BITS
)
7857 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
7858 << LITTLENUM_NUMBER_OF_BITS
)
7859 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
7860 << LITTLENUM_NUMBER_OF_BITS
)
7861 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
7863 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
7864 | (l
[0] & LITTLENUM_MASK
);
7868 v
= inst
.reloc
.exp
.X_add_number
;
7870 if (!inst
.operands
[i
].issingle
)
7874 /* This can be encoded only for a low register. */
7875 if ((v
& ~0xFF) == 0 && (inst
.operands
[i
].reg
< 8))
7877 /* This can be done with a mov(1) instruction. */
7878 inst
.instruction
= T_OPCODE_MOV_I8
| (inst
.operands
[i
].reg
<< 8);
7879 inst
.instruction
|= v
;
7883 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
7884 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7886 /* Check if on thumb2 it can be done with a mov.w, mvn or
7887 movw instruction. */
7888 unsigned int newimm
;
7889 bfd_boolean isNegated
;
7891 newimm
= encode_thumb32_immediate (v
);
7892 if (newimm
!= (unsigned int) FAIL
)
7896 newimm
= encode_thumb32_immediate (~v
);
7897 if (newimm
!= (unsigned int) FAIL
)
7901 /* The number can be loaded with a mov.w or mvn
7903 if (newimm
!= (unsigned int) FAIL
7904 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
7906 inst
.instruction
= (0xf04f0000 /* MOV.W. */
7907 | (inst
.operands
[i
].reg
<< 8));
7908 /* Change to MOVN. */
7909 inst
.instruction
|= (isNegated
? 0x200000 : 0);
7910 inst
.instruction
|= (newimm
& 0x800) << 15;
7911 inst
.instruction
|= (newimm
& 0x700) << 4;
7912 inst
.instruction
|= (newimm
& 0x0ff);
7915 /* The number can be loaded with a movw instruction. */
7916 else if ((v
& ~0xFFFF) == 0
7917 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7919 int imm
= v
& 0xFFFF;
7921 inst
.instruction
= 0xf2400000; /* MOVW. */
7922 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
7923 inst
.instruction
|= (imm
& 0xf000) << 4;
7924 inst
.instruction
|= (imm
& 0x0800) << 15;
7925 inst
.instruction
|= (imm
& 0x0700) << 4;
7926 inst
.instruction
|= (imm
& 0x00ff);
7933 int value
= encode_arm_immediate (v
);
7937 /* This can be done with a mov instruction. */
7938 inst
.instruction
&= LITERAL_MASK
;
7939 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
7940 inst
.instruction
|= value
& 0xfff;
7944 value
= encode_arm_immediate (~ v
);
7947 /* This can be done with a mvn instruction. */
7948 inst
.instruction
&= LITERAL_MASK
;
7949 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
7950 inst
.instruction
|= value
& 0xfff;
7954 else if (t
== CONST_VEC
)
7957 unsigned immbits
= 0;
7958 unsigned immlo
= inst
.operands
[1].imm
;
7959 unsigned immhi
= inst
.operands
[1].regisimm
7960 ? inst
.operands
[1].reg
7961 : inst
.reloc
.exp
.X_unsigned
7963 : ((bfd_int64_t
)((int) immlo
)) >> 32;
7964 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
7965 &op
, 64, NT_invtype
);
7969 neon_invert_size (&immlo
, &immhi
, 64);
7971 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
7972 &op
, 64, NT_invtype
);
7977 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
7983 /* Fill other bits in vmov encoding for both thumb and arm. */
7985 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
7987 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
7988 neon_write_immbits (immbits
);
7996 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
7997 if (inst
.operands
[i
].issingle
7998 && is_quarter_float (inst
.operands
[1].imm
)
7999 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8001 inst
.operands
[1].imm
=
8002 neon_qfloat_bits (v
);
8003 do_vfp_nsyn_opcode ("fconsts");
8007 /* If our host does not support a 64-bit type then we cannot perform
8008 the following optimization. This mean that there will be a
8009 discrepancy between the output produced by an assembler built for
8010 a 32-bit-only host and the output produced from a 64-bit host, but
8011 this cannot be helped. */
8012 #if defined BFD_HOST_64_BIT
8013 else if (!inst
.operands
[1].issingle
8014 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8016 if (is_double_a_single (v
)
8017 && is_quarter_float (double_to_single (v
)))
8019 inst
.operands
[1].imm
=
8020 neon_qfloat_bits (double_to_single (v
));
8021 do_vfp_nsyn_opcode ("fconstd");
8029 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8030 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8033 inst
.operands
[1].reg
= REG_PC
;
8034 inst
.operands
[1].isreg
= 1;
8035 inst
.operands
[1].preind
= 1;
8036 inst
.reloc
.pc_rel
= 1;
8037 inst
.reloc
.type
= (thumb_p
8038 ? BFD_RELOC_ARM_THUMB_OFFSET
8040 ? BFD_RELOC_ARM_HWLITERAL
8041 : BFD_RELOC_ARM_LITERAL
));
8045 /* inst.operands[i] was set up by parse_address. Encode it into an
8046 ARM-format instruction. Reject all forms which cannot be encoded
8047 into a coprocessor load/store instruction. If wb_ok is false,
8048 reject use of writeback; if unind_ok is false, reject use of
8049 unindexed addressing. If reloc_override is not 0, use it instead
8050 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8051 (in which case it is preserved). */
8054 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8056 if (!inst
.operands
[i
].isreg
)
8059 if (! inst
.operands
[0].isvec
)
8061 inst
.error
= _("invalid co-processor operand");
8064 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8068 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8070 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8072 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8074 gas_assert (!inst
.operands
[i
].writeback
);
8077 inst
.error
= _("instruction does not support unindexed addressing");
8080 inst
.instruction
|= inst
.operands
[i
].imm
;
8081 inst
.instruction
|= INDEX_UP
;
8085 if (inst
.operands
[i
].preind
)
8086 inst
.instruction
|= PRE_INDEX
;
8088 if (inst
.operands
[i
].writeback
)
8090 if (inst
.operands
[i
].reg
== REG_PC
)
8092 inst
.error
= _("pc may not be used with write-back");
8097 inst
.error
= _("instruction does not support writeback");
8100 inst
.instruction
|= WRITE_BACK
;
8104 inst
.reloc
.type
= (bfd_reloc_code_real_type
) reloc_override
;
8105 else if ((inst
.reloc
.type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8106 || inst
.reloc
.type
> BFD_RELOC_ARM_LDC_SB_G2
)
8107 && inst
.reloc
.type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8110 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8112 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8115 /* Prefer + for zero encoded value. */
8116 if (!inst
.operands
[i
].negative
)
8117 inst
.instruction
|= INDEX_UP
;
8122 /* Functions for instruction encoding, sorted by sub-architecture.
8123 First some generics; their names are taken from the conventional
8124 bit positions for register arguments in ARM format instructions. */
8134 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8140 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8141 inst
.instruction
|= inst
.operands
[1].reg
;
8147 inst
.instruction
|= inst
.operands
[0].reg
;
8148 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8154 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8155 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8161 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8162 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8168 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8169 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8173 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8175 if (ARM_CPU_IS_ANY (cpu_variant
))
8177 as_tsktsk ("%s", msg
);
8180 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8192 unsigned Rn
= inst
.operands
[2].reg
;
8193 /* Enforce restrictions on SWP instruction. */
8194 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8196 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8197 _("Rn must not overlap other operands"));
8199 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8201 if (!check_obsolete (&arm_ext_v8
,
8202 _("swp{b} use is obsoleted for ARMv8 and later"))
8203 && warn_on_deprecated
8204 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8205 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8208 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8209 inst
.instruction
|= inst
.operands
[1].reg
;
8210 inst
.instruction
|= Rn
<< 16;
8216 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8217 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8218 inst
.instruction
|= inst
.operands
[2].reg
;
8224 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8225 constraint (((inst
.reloc
.exp
.X_op
!= O_constant
8226 && inst
.reloc
.exp
.X_op
!= O_illegal
)
8227 || inst
.reloc
.exp
.X_add_number
!= 0),
8229 inst
.instruction
|= inst
.operands
[0].reg
;
8230 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8231 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8237 inst
.instruction
|= inst
.operands
[0].imm
;
8243 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8244 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8247 /* ARM instructions, in alphabetical order by function name (except
8248 that wrapper functions appear immediately after the function they
8251 /* This is a pseudo-op of the form "adr rd, label" to be converted
8252 into a relative address of the form "add rd, pc, #label-.-8". */
8257 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8259 /* Frag hacking will turn this into a sub instruction if the offset turns
8260 out to be negative. */
8261 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
8262 inst
.reloc
.pc_rel
= 1;
8263 inst
.reloc
.exp
.X_add_number
-= 8;
8266 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8267 into a relative address of the form:
8268 add rd, pc, #low(label-.-8)"
8269 add rd, rd, #high(label-.-8)" */
8274 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8276 /* Frag hacking will turn this into a sub instruction if the offset turns
8277 out to be negative. */
8278 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8279 inst
.reloc
.pc_rel
= 1;
8280 inst
.size
= INSN_SIZE
* 2;
8281 inst
.reloc
.exp
.X_add_number
-= 8;
8287 if (!inst
.operands
[1].present
)
8288 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8289 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8290 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8291 encode_arm_shifter_operand (2);
8297 if (inst
.operands
[0].present
)
8298 inst
.instruction
|= inst
.operands
[0].imm
;
8300 inst
.instruction
|= 0xf;
8306 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8307 constraint (msb
> 32, _("bit-field extends past end of register"));
8308 /* The instruction encoding stores the LSB and MSB,
8309 not the LSB and width. */
8310 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8311 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8312 inst
.instruction
|= (msb
- 1) << 16;
8320 /* #0 in second position is alternative syntax for bfc, which is
8321 the same instruction but with REG_PC in the Rm field. */
8322 if (!inst
.operands
[1].isreg
)
8323 inst
.operands
[1].reg
= REG_PC
;
8325 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8326 constraint (msb
> 32, _("bit-field extends past end of register"));
8327 /* The instruction encoding stores the LSB and MSB,
8328 not the LSB and width. */
8329 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8330 inst
.instruction
|= inst
.operands
[1].reg
;
8331 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8332 inst
.instruction
|= (msb
- 1) << 16;
8338 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8339 _("bit-field extends past end of register"));
8340 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8341 inst
.instruction
|= inst
.operands
[1].reg
;
8342 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8343 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8346 /* ARM V5 breakpoint instruction (argument parse)
8347 BKPT <16 bit unsigned immediate>
8348 Instruction is not conditional.
8349 The bit pattern given in insns[] has the COND_ALWAYS condition,
8350 and it is an error if the caller tried to override that. */
8355 /* Top 12 of 16 bits to bits 19:8. */
8356 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8358 /* Bottom 4 of 16 bits to bits 3:0. */
8359 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8363 encode_branch (int default_reloc
)
8365 if (inst
.operands
[0].hasreloc
)
8367 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8368 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8369 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8370 inst
.reloc
.type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8371 ? BFD_RELOC_ARM_PLT32
8372 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8375 inst
.reloc
.type
= (bfd_reloc_code_real_type
) default_reloc
;
8376 inst
.reloc
.pc_rel
= 1;
8383 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8384 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8387 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8394 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8396 if (inst
.cond
== COND_ALWAYS
)
8397 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8399 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8403 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8406 /* ARM V5 branch-link-exchange instruction (argument parse)
8407 BLX <target_addr> ie BLX(1)
8408 BLX{<condition>} <Rm> ie BLX(2)
8409 Unfortunately, there are two different opcodes for this mnemonic.
8410 So, the insns[].value is not used, and the code here zaps values
8411 into inst.instruction.
8412 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8417 if (inst
.operands
[0].isreg
)
8419 /* Arg is a register; the opcode provided by insns[] is correct.
8420 It is not illegal to do "blx pc", just useless. */
8421 if (inst
.operands
[0].reg
== REG_PC
)
8422 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8424 inst
.instruction
|= inst
.operands
[0].reg
;
8428 /* Arg is an address; this instruction cannot be executed
8429 conditionally, and the opcode must be adjusted.
8430 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8431 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8432 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8433 inst
.instruction
= 0xfa000000;
8434 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8441 bfd_boolean want_reloc
;
8443 if (inst
.operands
[0].reg
== REG_PC
)
8444 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8446 inst
.instruction
|= inst
.operands
[0].reg
;
8447 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8448 it is for ARMv4t or earlier. */
8449 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8450 if (object_arch
&& !ARM_CPU_HAS_FEATURE (*object_arch
, arm_ext_v5
))
8454 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8459 inst
.reloc
.type
= BFD_RELOC_ARM_V4BX
;
8463 /* ARM v5TEJ. Jump to Jazelle code. */
8468 if (inst
.operands
[0].reg
== REG_PC
)
8469 as_tsktsk (_("use of r15 in bxj is not really useful"));
8471 inst
.instruction
|= inst
.operands
[0].reg
;
8474 /* Co-processor data operation:
8475 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8476 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8480 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8481 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
8482 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8483 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8484 inst
.instruction
|= inst
.operands
[4].reg
;
8485 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8491 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8492 encode_arm_shifter_operand (1);
8495 /* Transfer between coprocessor and ARM registers.
8496 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8501 No special properties. */
8503 struct deprecated_coproc_regs_s
8510 arm_feature_set deprecated
;
8511 arm_feature_set obsoleted
;
8512 const char *dep_msg
;
8513 const char *obs_msg
;
8516 #define DEPR_ACCESS_V8 \
8517 N_("This coprocessor register access is deprecated in ARMv8")
8519 /* Table of all deprecated coprocessor registers. */
8520 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
8522 {15, 0, 7, 10, 5, /* CP15DMB. */
8523 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8524 DEPR_ACCESS_V8
, NULL
},
8525 {15, 0, 7, 10, 4, /* CP15DSB. */
8526 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8527 DEPR_ACCESS_V8
, NULL
},
8528 {15, 0, 7, 5, 4, /* CP15ISB. */
8529 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8530 DEPR_ACCESS_V8
, NULL
},
8531 {14, 6, 1, 0, 0, /* TEEHBR. */
8532 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8533 DEPR_ACCESS_V8
, NULL
},
8534 {14, 6, 0, 0, 0, /* TEECR. */
8535 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8536 DEPR_ACCESS_V8
, NULL
},
8539 #undef DEPR_ACCESS_V8
8541 static const size_t deprecated_coproc_reg_count
=
8542 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
8550 Rd
= inst
.operands
[2].reg
;
8553 if (inst
.instruction
== 0xee000010
8554 || inst
.instruction
== 0xfe000010)
8556 reject_bad_reg (Rd
);
8559 constraint (Rd
== REG_SP
, BAD_SP
);
8564 if (inst
.instruction
== 0xe000010)
8565 constraint (Rd
== REG_PC
, BAD_PC
);
8568 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
8570 const struct deprecated_coproc_regs_s
*r
=
8571 deprecated_coproc_regs
+ i
;
8573 if (inst
.operands
[0].reg
== r
->cp
8574 && inst
.operands
[1].imm
== r
->opc1
8575 && inst
.operands
[3].reg
== r
->crn
8576 && inst
.operands
[4].reg
== r
->crm
8577 && inst
.operands
[5].imm
== r
->opc2
)
8579 if (! ARM_CPU_IS_ANY (cpu_variant
)
8580 && warn_on_deprecated
8581 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
8582 as_tsktsk ("%s", r
->dep_msg
);
8586 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8587 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
8588 inst
.instruction
|= Rd
<< 12;
8589 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8590 inst
.instruction
|= inst
.operands
[4].reg
;
8591 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8594 /* Transfer between coprocessor register and pair of ARM registers.
8595 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8600 Two XScale instructions are special cases of these:
8602 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8603 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8605 Result unpredictable if Rd or Rn is R15. */
8612 Rd
= inst
.operands
[2].reg
;
8613 Rn
= inst
.operands
[3].reg
;
8617 reject_bad_reg (Rd
);
8618 reject_bad_reg (Rn
);
8622 constraint (Rd
== REG_PC
, BAD_PC
);
8623 constraint (Rn
== REG_PC
, BAD_PC
);
8626 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8627 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
8628 inst
.instruction
|= Rd
<< 12;
8629 inst
.instruction
|= Rn
<< 16;
8630 inst
.instruction
|= inst
.operands
[4].reg
;
8636 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
8637 if (inst
.operands
[1].present
)
8639 inst
.instruction
|= CPSI_MMOD
;
8640 inst
.instruction
|= inst
.operands
[1].imm
;
8647 inst
.instruction
|= inst
.operands
[0].imm
;
8653 unsigned Rd
, Rn
, Rm
;
8655 Rd
= inst
.operands
[0].reg
;
8656 Rn
= (inst
.operands
[1].present
8657 ? inst
.operands
[1].reg
: Rd
);
8658 Rm
= inst
.operands
[2].reg
;
8660 constraint ((Rd
== REG_PC
), BAD_PC
);
8661 constraint ((Rn
== REG_PC
), BAD_PC
);
8662 constraint ((Rm
== REG_PC
), BAD_PC
);
8664 inst
.instruction
|= Rd
<< 16;
8665 inst
.instruction
|= Rn
<< 0;
8666 inst
.instruction
|= Rm
<< 8;
8672 /* There is no IT instruction in ARM mode. We
8673 process it to do the validation as if in
8674 thumb mode, just in case the code gets
8675 assembled for thumb using the unified syntax. */
8680 set_it_insn_type (IT_INSN
);
8681 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
8682 now_it
.cc
= inst
.operands
[0].imm
;
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  /* ffs returns 0 when no bit is set, which makes I equal to -1.
     Guard that case explicitly: evaluating (1 << -1) is undefined
     behaviour, so reject an empty register list up front.  */
  int i = ffs (range) - 1;
  return (i < 0 || i > 15 || range != (1 << i)) ? -1 : i;
}
8696 encode_ldmstm(int from_push_pop_mnem
)
8698 int base_reg
= inst
.operands
[0].reg
;
8699 int range
= inst
.operands
[1].imm
;
8702 inst
.instruction
|= base_reg
<< 16;
8703 inst
.instruction
|= range
;
8705 if (inst
.operands
[1].writeback
)
8706 inst
.instruction
|= LDM_TYPE_2_OR_3
;
8708 if (inst
.operands
[0].writeback
)
8710 inst
.instruction
|= WRITE_BACK
;
8711 /* Check for unpredictable uses of writeback. */
8712 if (inst
.instruction
& LOAD_BIT
)
8714 /* Not allowed in LDM type 2. */
8715 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
8716 && ((range
& (1 << REG_PC
)) == 0))
8717 as_warn (_("writeback of base register is UNPREDICTABLE"));
8718 /* Only allowed if base reg not in list for other types. */
8719 else if (range
& (1 << base_reg
))
8720 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8724 /* Not allowed for type 2. */
8725 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
8726 as_warn (_("writeback of base register is UNPREDICTABLE"));
8727 /* Only allowed if base reg not in list, or first in list. */
8728 else if ((range
& (1 << base_reg
))
8729 && (range
& ((1 << base_reg
) - 1)))
8730 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8734 /* If PUSH/POP has only one register, then use the A2 encoding. */
8735 one_reg
= only_one_reg_in_list (range
);
8736 if (from_push_pop_mnem
&& one_reg
>= 0)
8738 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
8740 inst
.instruction
&= A_COND_MASK
;
8741 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
8742 inst
.instruction
|= one_reg
<< 12;
8749 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
8752 /* ARMv5TE load-consecutive (argument parse)
8761 constraint (inst
.operands
[0].reg
% 2 != 0,
8762 _("first transfer register must be even"));
8763 constraint (inst
.operands
[1].present
8764 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8765 _("can only transfer two consecutive registers"));
8766 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8767 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
8769 if (!inst
.operands
[1].present
)
8770 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8772 /* encode_arm_addr_mode_3 will diagnose overlap between the base
8773 register and the first register written; we have to diagnose
8774 overlap between the base and the second register written here. */
8776 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
8777 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
8778 as_warn (_("base register written back, and overlaps "
8779 "second transfer register"));
8781 if (!(inst
.instruction
& V4_STR_BIT
))
8783 /* For an index-register load, the index register must not overlap the
8784 destination (even if not write-back). */
8785 if (inst
.operands
[2].immisreg
8786 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
8787 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
8788 as_warn (_("index register overlaps transfer register"));
8790 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8791 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
8797 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
8798 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
8799 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
8800 || inst
.operands
[1].negative
8801 /* This can arise if the programmer has written
8803 or if they have mistakenly used a register name as the last
8806 It is very difficult to distinguish between these two cases
8807 because "rX" might actually be a label. ie the register
8808 name has been occluded by a symbol of the same name. So we
8809 just generate a general 'bad addressing mode' type error
8810 message and leave it up to the programmer to discover the
8811 true cause and fix their mistake. */
8812 || (inst
.operands
[1].reg
== REG_PC
),
8815 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8816 || inst
.reloc
.exp
.X_add_number
!= 0,
8817 _("offset must be zero in ARM encoding"));
8819 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
8821 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8822 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8823 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8829 constraint (inst
.operands
[0].reg
% 2 != 0,
8830 _("even register required"));
8831 constraint (inst
.operands
[1].present
8832 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8833 _("can only load two consecutive registers"));
8834 /* If op 1 were present and equal to PC, this function wouldn't
8835 have been called in the first place. */
8836 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8838 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8839 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8842 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8843 which is not a multiple of four is UNPREDICTABLE. */
8845 check_ldr_r15_aligned (void)
8847 constraint (!(inst
.operands
[1].immisreg
)
8848 && (inst
.operands
[0].reg
== REG_PC
8849 && inst
.operands
[1].reg
== REG_PC
8850 && (inst
.reloc
.exp
.X_add_number
& 0x3)),
8851 _("ldr to register 15 must be 4-byte alligned"));
8857 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8858 if (!inst
.operands
[1].isreg
)
8859 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
8861 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
8862 check_ldr_r15_aligned ();
8868 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8870 if (inst
.operands
[1].preind
)
8872 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8873 || inst
.reloc
.exp
.X_add_number
!= 0,
8874 _("this instruction requires a post-indexed address"));
8876 inst
.operands
[1].preind
= 0;
8877 inst
.operands
[1].postind
= 1;
8878 inst
.operands
[1].writeback
= 1;
8880 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8881 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
8884 /* Halfword and signed-byte load/store operations. */
8889 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
8890 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8891 if (!inst
.operands
[1].isreg
)
8892 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
8894 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
8900 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8902 if (inst
.operands
[1].preind
)
8904 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8905 || inst
.reloc
.exp
.X_add_number
!= 0,
8906 _("this instruction requires a post-indexed address"));
8908 inst
.operands
[1].preind
= 0;
8909 inst
.operands
[1].postind
= 1;
8910 inst
.operands
[1].writeback
= 1;
8912 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8913 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
8916 /* Co-processor register load/store.
8917 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8921 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8922 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8923 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
8929 /* This restriction does not apply to mls (nor to mla in v6 or later). */
8930 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
8931 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
8932 && !(inst
.instruction
& 0x00400000))
8933 as_tsktsk (_("Rd and Rm should be different in mla"));
8935 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8936 inst
.instruction
|= inst
.operands
[1].reg
;
8937 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
8938 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
8944 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8945 encode_arm_shifter_operand (1);
8948 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8955 top
= (inst
.instruction
& 0x00400000) != 0;
8956 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
8957 _(":lower16: not allowed this instruction"));
8958 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
8959 _(":upper16: not allowed instruction"));
8960 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8961 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
8963 imm
= inst
.reloc
.exp
.X_add_number
;
8964 /* The value is in two pieces: 0:11, 16:19. */
8965 inst
.instruction
|= (imm
& 0x00000fff);
8966 inst
.instruction
|= (imm
& 0x0000f000) << 4;
8971 do_vfp_nsyn_mrs (void)
8973 if (inst
.operands
[0].isvec
)
8975 if (inst
.operands
[1].reg
!= 1)
8976 first_error (_("operand 1 must be FPSCR"));
8977 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
8978 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
8979 do_vfp_nsyn_opcode ("fmstat");
8981 else if (inst
.operands
[1].isvec
)
8982 do_vfp_nsyn_opcode ("fmrx");
8990 do_vfp_nsyn_msr (void)
8992 if (inst
.operands
[0].isvec
)
8993 do_vfp_nsyn_opcode ("fmxr");
9003 unsigned Rt
= inst
.operands
[0].reg
;
9005 if (thumb_mode
&& Rt
== REG_SP
)
9007 inst
.error
= BAD_SP
;
9011 /* APSR_ sets isvec. All other refs to PC are illegal. */
9012 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9014 inst
.error
= BAD_PC
;
9018 /* If we get through parsing the register name, we just insert the number
9019 generated into the instruction without further validation. */
9020 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9021 inst
.instruction
|= (Rt
<< 12);
9027 unsigned Rt
= inst
.operands
[1].reg
;
9030 reject_bad_reg (Rt
);
9031 else if (Rt
== REG_PC
)
9033 inst
.error
= BAD_PC
;
9037 /* If we get through parsing the register name, we just insert the number
9038 generated into the instruction without further validation. */
9039 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9040 inst
.instruction
|= (Rt
<< 12);
9048 if (do_vfp_nsyn_mrs () == SUCCESS
)
9051 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9052 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9054 if (inst
.operands
[1].isreg
)
9056 br
= inst
.operands
[1].reg
;
9057 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf000))
9058 as_bad (_("bad register for mrs"));
9062 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9063 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9065 _("'APSR', 'CPSR' or 'SPSR' expected"));
9066 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9069 inst
.instruction
|= br
;
9072 /* Two possible forms:
9073 "{C|S}PSR_<field>, Rm",
9074 "{C|S}PSR_f, #expression". */
9079 if (do_vfp_nsyn_msr () == SUCCESS
)
9082 inst
.instruction
|= inst
.operands
[0].imm
;
9083 if (inst
.operands
[1].isreg
)
9084 inst
.instruction
|= inst
.operands
[1].reg
;
9087 inst
.instruction
|= INST_IMMEDIATE
;
9088 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
9089 inst
.reloc
.pc_rel
= 0;
9096 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9098 if (!inst
.operands
[2].present
)
9099 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9100 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9101 inst
.instruction
|= inst
.operands
[1].reg
;
9102 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9104 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9105 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9106 as_tsktsk (_("Rd and Rm should be different in mul"));
9109 /* Long Multiply Parser
9110 UMULL RdLo, RdHi, Rm, Rs
9111 SMULL RdLo, RdHi, Rm, Rs
9112 UMLAL RdLo, RdHi, Rm, Rs
9113 SMLAL RdLo, RdHi, Rm, Rs. */
9118 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9119 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9120 inst
.instruction
|= inst
.operands
[2].reg
;
9121 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9123 /* rdhi and rdlo must be different. */
9124 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9125 as_tsktsk (_("rdhi and rdlo must be different"));
9127 /* rdhi, rdlo and rm must all be different before armv6. */
9128 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9129 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9130 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9131 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9137 if (inst
.operands
[0].present
9138 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9140 /* Architectural NOP hints are CPSR sets with no bits selected. */
9141 inst
.instruction
&= 0xf0000000;
9142 inst
.instruction
|= 0x0320f000;
9143 if (inst
.operands
[0].present
)
9144 inst
.instruction
|= inst
.operands
[0].imm
;
9148 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9149 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9150 Condition defaults to COND_ALWAYS.
9151 Error if Rd, Rn or Rm are R15. */
9156 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9157 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9158 inst
.instruction
|= inst
.operands
[2].reg
;
9159 if (inst
.operands
[3].present
)
9160 encode_arm_shift (3);
9163 /* ARM V6 PKHTB (Argument Parse). */
9168 if (!inst
.operands
[3].present
)
9170 /* If the shift specifier is omitted, turn the instruction
9171 into pkhbt rd, rm, rn. */
9172 inst
.instruction
&= 0xfff00010;
9173 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9174 inst
.instruction
|= inst
.operands
[1].reg
;
9175 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9179 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9180 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9181 inst
.instruction
|= inst
.operands
[2].reg
;
9182 encode_arm_shift (3);
9186 /* ARMv5TE: Preload-Cache
9187 MP Extensions: Preload for write
9191 Syntactically, like LDR with B=1, W=0, L=1. */
9196 constraint (!inst
.operands
[0].isreg
,
9197 _("'[' expected after PLD mnemonic"));
9198 constraint (inst
.operands
[0].postind
,
9199 _("post-indexed expression used in preload instruction"));
9200 constraint (inst
.operands
[0].writeback
,
9201 _("writeback used in preload instruction"));
9202 constraint (!inst
.operands
[0].preind
,
9203 _("unindexed addressing used in preload instruction"));
9204 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9207 /* ARMv7: PLI <addr_mode> */
9211 constraint (!inst
.operands
[0].isreg
,
9212 _("'[' expected after PLI mnemonic"));
9213 constraint (inst
.operands
[0].postind
,
9214 _("post-indexed expression used in preload instruction"));
9215 constraint (inst
.operands
[0].writeback
,
9216 _("writeback used in preload instruction"));
9217 constraint (!inst
.operands
[0].preind
,
9218 _("unindexed addressing used in preload instruction"));
9219 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9220 inst
.instruction
&= ~PRE_INDEX
;
9226 constraint (inst
.operands
[0].writeback
,
9227 _("push/pop do not support {reglist}^"));
9228 inst
.operands
[1] = inst
.operands
[0];
9229 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9230 inst
.operands
[0].isreg
= 1;
9231 inst
.operands
[0].writeback
= 1;
9232 inst
.operands
[0].reg
= REG_SP
;
9233 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9236 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9237 word at the specified address and the following word
9239 Unconditionally executed.
9240 Error if Rn is R15. */
9245 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9246 if (inst
.operands
[0].writeback
)
9247 inst
.instruction
|= WRITE_BACK
;
9250 /* ARM V6 ssat (argument parse). */
9255 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9256 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9257 inst
.instruction
|= inst
.operands
[2].reg
;
9259 if (inst
.operands
[3].present
)
9260 encode_arm_shift (3);
9263 /* ARM V6 usat (argument parse). */
9268 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9269 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9270 inst
.instruction
|= inst
.operands
[2].reg
;
9272 if (inst
.operands
[3].present
)
9273 encode_arm_shift (3);
9276 /* ARM V6 ssat16 (argument parse). */
9281 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9282 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9283 inst
.instruction
|= inst
.operands
[2].reg
;
9289 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9290 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9291 inst
.instruction
|= inst
.operands
[2].reg
;
9294 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9295 preserving the other bits.
9297 setend <endian_specifier>, where <endian_specifier> is either
9303 if (warn_on_deprecated
9304 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9305 as_tsktsk (_("setend use is deprecated for ARMv8"));
9307 if (inst
.operands
[0].imm
)
9308 inst
.instruction
|= 0x200;
9314 unsigned int Rm
= (inst
.operands
[1].present
9315 ? inst
.operands
[1].reg
9316 : inst
.operands
[0].reg
);
9318 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9319 inst
.instruction
|= Rm
;
9320 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9322 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9323 inst
.instruction
|= SHIFT_BY_REG
;
9324 /* PR 12854: Error on extraneous shifts. */
9325 constraint (inst
.operands
[2].shifted
,
9326 _("extraneous shift as part of operand to shift insn"));
9329 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
9335 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
9336 inst
.reloc
.pc_rel
= 0;
9342 inst
.reloc
.type
= BFD_RELOC_ARM_HVC
;
9343 inst
.reloc
.pc_rel
= 0;
9349 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
9350 inst
.reloc
.pc_rel
= 0;
9356 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9357 _("selected processor does not support SETPAN instruction"));
9359 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9365 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9366 _("selected processor does not support SETPAN instruction"));
9368 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9371 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9372 SMLAxy{cond} Rd,Rm,Rs,Rn
9373 SMLAWy{cond} Rd,Rm,Rs,Rn
9374 Error if any register is R15. */
9379 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9380 inst
.instruction
|= inst
.operands
[1].reg
;
9381 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9382 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9385 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9386 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9387 Error if any register is R15.
9388 Warning if Rdlo == Rdhi. */
9393 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9394 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9395 inst
.instruction
|= inst
.operands
[2].reg
;
9396 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9398 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9399 as_tsktsk (_("rdhi and rdlo must be different"));
9402 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9403 SMULxy{cond} Rd,Rm,Rs
9404 Error if any register is R15. */
9409 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9410 inst
.instruction
|= inst
.operands
[1].reg
;
9411 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9414 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9415 the same for both ARM and Thumb-2. */
9422 if (inst
.operands
[0].present
)
9424 reg
= inst
.operands
[0].reg
;
9425 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9430 inst
.instruction
|= reg
<< 16;
9431 inst
.instruction
|= inst
.operands
[1].imm
;
9432 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9433 inst
.instruction
|= WRITE_BACK
;
9436 /* ARM V6 strex (argument parse). */
9441 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9442 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9443 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9444 || inst
.operands
[2].negative
9445 /* See comment in do_ldrex(). */
9446 || (inst
.operands
[2].reg
== REG_PC
),
9449 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9450 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9452 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9453 || inst
.reloc
.exp
.X_add_number
!= 0,
9454 _("offset must be zero in ARM encoding"));
9456 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9457 inst
.instruction
|= inst
.operands
[1].reg
;
9458 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9459 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9465 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9466 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9467 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9468 || inst
.operands
[2].negative
,
9471 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9472 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9480 constraint (inst
.operands
[1].reg
% 2 != 0,
9481 _("even register required"));
9482 constraint (inst
.operands
[2].present
9483 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
9484 _("can only store two consecutive registers"));
9485 /* If op 2 were present and equal to PC, this function wouldn't
9486 have been called in the first place. */
9487 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
9489 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9490 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
9491 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
9494 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9495 inst
.instruction
|= inst
.operands
[1].reg
;
9496 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9503 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9504 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9512 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9513 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9518 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9519 extends it to 32-bits, and adds the result to a value in another
9520 register. You can specify a rotation by 0, 8, 16, or 24 bits
9521 before extracting the 16-bit value.
9522 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9523 Condition defaults to COND_ALWAYS.
9524 Error if any register uses R15. */
9529 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9530 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9531 inst
.instruction
|= inst
.operands
[2].reg
;
9532 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
9537 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9538 Condition defaults to COND_ALWAYS.
9539 Error if any register uses R15. */
9544 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9545 inst
.instruction
|= inst
.operands
[1].reg
;
9546 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
9549 /* VFP instructions. In a logical order: SP variant first, monad
9550 before dyad, arithmetic then move then load/store. */
9553 do_vfp_sp_monadic (void)
9555 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9556 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9560 do_vfp_sp_dyadic (void)
9562 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9563 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9564 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9568 do_vfp_sp_compare_z (void)
9570 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9574 do_vfp_dp_sp_cvt (void)
9576 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9577 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9581 do_vfp_sp_dp_cvt (void)
9583 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9584 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9588 do_vfp_reg_from_sp (void)
9590 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9591 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9595 do_vfp_reg2_from_sp2 (void)
9597 constraint (inst
.operands
[2].imm
!= 2,
9598 _("only two consecutive VFP SP registers allowed here"));
9599 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9600 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9601 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9605 do_vfp_sp_from_reg (void)
9607 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
9608 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9612 do_vfp_sp2_from_reg2 (void)
9614 constraint (inst
.operands
[0].imm
!= 2,
9615 _("only two consecutive VFP SP registers allowed here"));
9616 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
9617 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9618 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9622 do_vfp_sp_ldst (void)
9624 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9625 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9629 do_vfp_dp_ldst (void)
9631 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9632 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9637 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
9639 if (inst
.operands
[0].writeback
)
9640 inst
.instruction
|= WRITE_BACK
;
9642 constraint (ldstm_type
!= VFP_LDSTMIA
,
9643 _("this addressing mode requires base-register writeback"));
9644 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9645 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
9646 inst
.instruction
|= inst
.operands
[1].imm
;
9650 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
9654 if (inst
.operands
[0].writeback
)
9655 inst
.instruction
|= WRITE_BACK
;
9657 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
9658 _("this addressing mode requires base-register writeback"));
9660 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9661 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9663 count
= inst
.operands
[1].imm
<< 1;
9664 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
9667 inst
.instruction
|= count
;
9671 do_vfp_sp_ldstmia (void)
9673 vfp_sp_ldstm (VFP_LDSTMIA
);
9677 do_vfp_sp_ldstmdb (void)
9679 vfp_sp_ldstm (VFP_LDSTMDB
);
9683 do_vfp_dp_ldstmia (void)
9685 vfp_dp_ldstm (VFP_LDSTMIA
);
9689 do_vfp_dp_ldstmdb (void)
9691 vfp_dp_ldstm (VFP_LDSTMDB
);
9695 do_vfp_xp_ldstmia (void)
9697 vfp_dp_ldstm (VFP_LDSTMIAX
);
9701 do_vfp_xp_ldstmdb (void)
9703 vfp_dp_ldstm (VFP_LDSTMDBX
);
9707 do_vfp_dp_rd_rm (void)
9709 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9710 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9714 do_vfp_dp_rn_rd (void)
9716 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
9717 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9721 do_vfp_dp_rd_rn (void)
9723 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9724 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9728 do_vfp_dp_rd_rn_rm (void)
9730 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9731 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9732 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
9738 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9742 do_vfp_dp_rm_rd_rn (void)
9744 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
9745 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9746 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
9749 /* VFPv3 instructions. */
9751 do_vfp_sp_const (void)
9753 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9754 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9755 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9759 do_vfp_dp_const (void)
9761 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9762 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9763 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9767 vfp_conv (int srcsize
)
9769 int immbits
= srcsize
- inst
.operands
[1].imm
;
9771 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
9773 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9774 i.e. immbits must be in range 0 - 16. */
9775 inst
.error
= _("immediate value out of range, expected range [0, 16]");
9778 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
9780 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9781 i.e. immbits must be in range 0 - 31. */
9782 inst
.error
= _("immediate value out of range, expected range [1, 32]");
9786 inst
.instruction
|= (immbits
& 1) << 5;
9787 inst
.instruction
|= (immbits
>> 1);
9791 do_vfp_sp_conv_16 (void)
9793 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9798 do_vfp_dp_conv_16 (void)
9800 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9805 do_vfp_sp_conv_32 (void)
9807 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9812 do_vfp_dp_conv_32 (void)
9814 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9818 /* FPA instructions. Also in a logical order. */
9823 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9824 inst
.instruction
|= inst
.operands
[1].reg
;
9828 do_fpa_ldmstm (void)
9830 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9831 switch (inst
.operands
[1].imm
)
9833 case 1: inst
.instruction
|= CP_T_X
; break;
9834 case 2: inst
.instruction
|= CP_T_Y
; break;
9835 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
9840 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
9842 /* The instruction specified "ea" or "fd", so we can only accept
9843 [Rn]{!}. The instruction does not really support stacking or
9844 unstacking, so we have to emulate these by setting appropriate
9845 bits and offsets. */
9846 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9847 || inst
.reloc
.exp
.X_add_number
!= 0,
9848 _("this instruction does not support indexing"));
9850 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
9851 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
9853 if (!(inst
.instruction
& INDEX_UP
))
9854 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
9856 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
9858 inst
.operands
[2].preind
= 0;
9859 inst
.operands
[2].postind
= 1;
9863 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9866 /* iWMMXt instructions: strictly in alphabetical order. */
9869 do_iwmmxt_tandorc (void)
9871 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
9875 do_iwmmxt_textrc (void)
9877 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9878 inst
.instruction
|= inst
.operands
[1].imm
;
9882 do_iwmmxt_textrm (void)
9884 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9885 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9886 inst
.instruction
|= inst
.operands
[2].imm
;
9890 do_iwmmxt_tinsr (void)
9892 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9893 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9894 inst
.instruction
|= inst
.operands
[2].imm
;
9898 do_iwmmxt_tmia (void)
9900 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
9901 inst
.instruction
|= inst
.operands
[1].reg
;
9902 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
9906 do_iwmmxt_waligni (void)
9908 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9909 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9910 inst
.instruction
|= inst
.operands
[2].reg
;
9911 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
9915 do_iwmmxt_wmerge (void)
9917 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9918 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9919 inst
.instruction
|= inst
.operands
[2].reg
;
9920 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
9924 do_iwmmxt_wmov (void)
9926 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
9927 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9928 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9929 inst
.instruction
|= inst
.operands
[1].reg
;
9933 do_iwmmxt_wldstbh (void)
9936 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9938 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
9940 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
9941 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
9945 do_iwmmxt_wldstw (void)
9947 /* RIWR_RIWC clears .isreg for a control register. */
9948 if (!inst
.operands
[0].isreg
)
9950 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
9951 inst
.instruction
|= 0xf0000000;
9954 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9955 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
9959 do_iwmmxt_wldstd (void)
9961 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9962 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
9963 && inst
.operands
[1].immisreg
)
9965 inst
.instruction
&= ~0x1a000ff;
9966 inst
.instruction
|= (0xfU
<< 28);
9967 if (inst
.operands
[1].preind
)
9968 inst
.instruction
|= PRE_INDEX
;
9969 if (!inst
.operands
[1].negative
)
9970 inst
.instruction
|= INDEX_UP
;
9971 if (inst
.operands
[1].writeback
)
9972 inst
.instruction
|= WRITE_BACK
;
9973 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9974 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
9975 inst
.instruction
|= inst
.operands
[1].imm
;
9978 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
9982 do_iwmmxt_wshufh (void)
9984 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9985 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9986 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
9987 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
9991 do_iwmmxt_wzero (void)
9993 /* WZERO reg is an alias for WANDN reg, reg, reg. */
9994 inst
.instruction
|= inst
.operands
[0].reg
;
9995 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9996 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10000 do_iwmmxt_wrwrwr_or_imm5 (void)
10002 if (inst
.operands
[2].isreg
)
10005 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
10006 _("immediate operand requires iWMMXt2"));
10008 if (inst
.operands
[2].imm
== 0)
10010 switch ((inst
.instruction
>> 20) & 0xf)
10016 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10017 inst
.operands
[2].imm
= 16;
10018 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
10024 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10025 inst
.operands
[2].imm
= 32;
10026 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
10033 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10035 wrn
= (inst
.instruction
>> 16) & 0xf;
10036 inst
.instruction
&= 0xff0fff0f;
10037 inst
.instruction
|= wrn
;
10038 /* Bail out here; the instruction is now assembled. */
10043 /* Map 32 -> 0, etc. */
10044 inst
.operands
[2].imm
&= 0x1f;
10045 inst
.instruction
|= (0xfU
<< 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
10049 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10050 operations first, then control, shift, and load/store. */
10052 /* Insns like "foo X,Y,Z". */
10055 do_mav_triple (void)
10057 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10058 inst
.instruction
|= inst
.operands
[1].reg
;
10059 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10062 /* Insns like "foo W,X,Y,Z".
10063 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10068 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10069 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10070 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10071 inst
.instruction
|= inst
.operands
[3].reg
;
10074 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10076 do_mav_dspsc (void)
10078 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10081 /* Maverick shift immediate instructions.
10082 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10083 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10086 do_mav_shift (void)
10088 int imm
= inst
.operands
[2].imm
;
10090 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10091 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10093 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10094 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10095 Bit 4 should be 0. */
10096 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
10098 inst
.instruction
|= imm
;
10101 /* XScale instructions. Also sorted arithmetic before move. */
10103 /* Xscale multiply-accumulate (argument parse)
10106 MIAxycc acc0,Rm,Rs. */
10111 inst
.instruction
|= inst
.operands
[1].reg
;
10112 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10115 /* Xscale move-accumulator-register (argument parse)
10117 MARcc acc0,RdLo,RdHi. */
10122 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10123 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10126 /* Xscale move-register-accumulator (argument parse)
10128 MRAcc RdLo,RdHi,acc0. */
10133 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10134 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10135 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10138 /* Encoding functions relevant only to Thumb. */
10140 /* inst.operands[i] is a shifted-register operand; encode
10141 it into inst.instruction in the format used by Thumb32. */
10144 encode_thumb32_shifted_operand (int i
)
10146 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10147 unsigned int shift
= inst
.operands
[i
].shift_kind
;
10149 constraint (inst
.operands
[i
].immisreg
,
10150 _("shift by register not allowed in thumb mode"));
10151 inst
.instruction
|= inst
.operands
[i
].reg
;
10152 if (shift
== SHIFT_RRX
)
10153 inst
.instruction
|= SHIFT_ROR
<< 4;
10156 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10157 _("expression too complex"));
10159 constraint (value
> 32
10160 || (value
== 32 && (shift
== SHIFT_LSL
10161 || shift
== SHIFT_ROR
)),
10162 _("shift expression is too large"));
10166 else if (value
== 32)
10169 inst
.instruction
|= shift
<< 4;
10170 inst
.instruction
|= (value
& 0x1c) << 10;
10171 inst
.instruction
|= (value
& 0x03) << 6;
10176 /* inst.operands[i] was set up by parse_address. Encode it into a
10177 Thumb32 format load or store instruction. Reject forms that cannot
10178 be used with such instructions. If is_t is true, reject forms that
10179 cannot be used with a T instruction; if is_d is true, reject forms
10180 that cannot be used with a D instruction. If it is a store insn,
10181 reject PC in Rn. */
10184 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
10186 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
10188 constraint (!inst
.operands
[i
].isreg
,
10189 _("Instruction does not support =N addresses"));
10191 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
10192 if (inst
.operands
[i
].immisreg
)
10194 constraint (is_pc
, BAD_PC_ADDRESSING
);
10195 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
10196 constraint (inst
.operands
[i
].negative
,
10197 _("Thumb does not support negative register indexing"));
10198 constraint (inst
.operands
[i
].postind
,
10199 _("Thumb does not support register post-indexing"));
10200 constraint (inst
.operands
[i
].writeback
,
10201 _("Thumb does not support register indexing with writeback"));
10202 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
10203 _("Thumb supports only LSL in shifted register indexing"));
10205 inst
.instruction
|= inst
.operands
[i
].imm
;
10206 if (inst
.operands
[i
].shifted
)
10208 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10209 _("expression too complex"));
10210 constraint (inst
.reloc
.exp
.X_add_number
< 0
10211 || inst
.reloc
.exp
.X_add_number
> 3,
10212 _("shift out of range"));
10213 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
10215 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10217 else if (inst
.operands
[i
].preind
)
10219 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
10220 constraint (is_t
&& inst
.operands
[i
].writeback
,
10221 _("cannot use writeback with this instruction"));
10222 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
10223 BAD_PC_ADDRESSING
);
10227 inst
.instruction
|= 0x01000000;
10228 if (inst
.operands
[i
].writeback
)
10229 inst
.instruction
|= 0x00200000;
10233 inst
.instruction
|= 0x00000c00;
10234 if (inst
.operands
[i
].writeback
)
10235 inst
.instruction
|= 0x00000100;
10237 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10239 else if (inst
.operands
[i
].postind
)
10241 gas_assert (inst
.operands
[i
].writeback
);
10242 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
10243 constraint (is_t
, _("cannot use post-indexing with this instruction"));
10246 inst
.instruction
|= 0x00200000;
10248 inst
.instruction
|= 0x00000900;
10249 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10251 else /* unindexed - only for coprocessor */
10252 inst
.error
= _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)
/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* 16-bit opcode for each T16_32 entry.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* 32-bit opcode for each T16_32 entry.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
10361 /* Thumb instruction encoders, in alphabetical order. */
10363 /* ADDW or SUBW. */
10366 do_t_add_sub_w (void)
10370 Rd
= inst
.operands
[0].reg
;
10371 Rn
= inst
.operands
[1].reg
;
10373 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10374 is the SP-{plus,minus}-immediate form of the instruction. */
10376 constraint (Rd
== REG_PC
, BAD_PC
);
10378 reject_bad_reg (Rd
);
10380 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
10381 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10384 /* Parse an add or subtract instruction. We get here with inst.instruction
10385 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10388 do_t_add_sub (void)
10392 Rd
= inst
.operands
[0].reg
;
10393 Rs
= (inst
.operands
[1].present
10394 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10395 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10398 set_it_insn_type_last ();
10400 if (unified_syntax
)
10403 bfd_boolean narrow
;
10406 flags
= (inst
.instruction
== T_MNEM_adds
10407 || inst
.instruction
== T_MNEM_subs
);
10409 narrow
= !in_it_block ();
10411 narrow
= in_it_block ();
10412 if (!inst
.operands
[2].isreg
)
10416 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10418 add
= (inst
.instruction
== T_MNEM_add
10419 || inst
.instruction
== T_MNEM_adds
);
10421 if (inst
.size_req
!= 4)
10423 /* Attempt to use a narrow opcode, with relaxation if
10425 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
10426 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
10427 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
10428 opcode
= T_MNEM_add_sp
;
10429 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
10430 opcode
= T_MNEM_add_pc
;
10431 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
10434 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
10436 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
10440 inst
.instruction
= THUMB_OP16(opcode
);
10441 inst
.instruction
|= (Rd
<< 4) | Rs
;
10442 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10443 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
10444 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10445 if (inst
.size_req
!= 2)
10446 inst
.relax
= opcode
;
10449 constraint (inst
.size_req
== 2, BAD_HIREG
);
10451 if (inst
.size_req
== 4
10452 || (inst
.size_req
!= 2 && !opcode
))
10456 constraint (add
, BAD_PC
);
10457 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
10458 _("only SUBS PC, LR, #const allowed"));
10459 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10460 _("expression too complex"));
10461 constraint (inst
.reloc
.exp
.X_add_number
< 0
10462 || inst
.reloc
.exp
.X_add_number
> 0xff,
10463 _("immediate value out of range"));
10464 inst
.instruction
= T2_SUBS_PC_LR
10465 | inst
.reloc
.exp
.X_add_number
;
10466 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10469 else if (Rs
== REG_PC
)
10471 /* Always use addw/subw. */
10472 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
10473 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10477 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10478 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
10481 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10483 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
10485 inst
.instruction
|= Rd
<< 8;
10486 inst
.instruction
|= Rs
<< 16;
10491 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10492 unsigned int shift
= inst
.operands
[2].shift_kind
;
10494 Rn
= inst
.operands
[2].reg
;
10495 /* See if we can do this with a 16-bit instruction. */
10496 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
10498 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10503 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
10504 || inst
.instruction
== T_MNEM_add
)
10506 : T_OPCODE_SUB_R3
);
10507 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10511 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
10513 /* Thumb-1 cores (except v6-M) require at least one high
10514 register in a narrow non flag setting add. */
10515 if (Rd
> 7 || Rn
> 7
10516 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
10517 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
10524 inst
.instruction
= T_OPCODE_ADD_HI
;
10525 inst
.instruction
|= (Rd
& 8) << 4;
10526 inst
.instruction
|= (Rd
& 7);
10527 inst
.instruction
|= Rn
<< 3;
10533 constraint (Rd
== REG_PC
, BAD_PC
);
10534 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10535 constraint (Rs
== REG_PC
, BAD_PC
);
10536 reject_bad_reg (Rn
);
10538 /* If we get here, it can't be done in 16 bits. */
10539 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
10540 _("shift must be constant"));
10541 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10542 inst
.instruction
|= Rd
<< 8;
10543 inst
.instruction
|= Rs
<< 16;
10544 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
10545 _("shift value over 3 not allowed in thumb mode"));
10546 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
10547 _("only LSL shift allowed in thumb mode"));
10548 encode_thumb32_shifted_operand (2);
10553 constraint (inst
.instruction
== T_MNEM_adds
10554 || inst
.instruction
== T_MNEM_subs
,
10557 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
10559 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
10560 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
10563 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10564 ? 0x0000 : 0x8000);
10565 inst
.instruction
|= (Rd
<< 4) | Rs
;
10566 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10570 Rn
= inst
.operands
[2].reg
;
10571 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
10573 /* We now have Rd, Rs, and Rn set to registers. */
10574 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10576 /* Can't do this for SUB. */
10577 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
10578 inst
.instruction
= T_OPCODE_ADD_HI
;
10579 inst
.instruction
|= (Rd
& 8) << 4;
10580 inst
.instruction
|= (Rd
& 7);
10582 inst
.instruction
|= Rn
<< 3;
10584 inst
.instruction
|= Rs
<< 3;
10586 constraint (1, _("dest must overlap one source register"));
10590 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10591 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
10592 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10602 Rd
= inst
.operands
[0].reg
;
10603 reject_bad_reg (Rd
);
10605 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
10607 /* Defer to section relaxation. */
10608 inst
.relax
= inst
.instruction
;
10609 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10610 inst
.instruction
|= Rd
<< 4;
10612 else if (unified_syntax
&& inst
.size_req
!= 2)
10614 /* Generate a 32-bit opcode. */
10615 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10616 inst
.instruction
|= Rd
<< 8;
10617 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
10618 inst
.reloc
.pc_rel
= 1;
10622 /* Generate a 16-bit opcode. */
10623 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10624 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10625 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
10626 inst
.reloc
.pc_rel
= 1;
10628 inst
.instruction
|= Rd
<< 4;
10632 /* Arithmetic instructions for which there is just one 16-bit
10633 instruction encoding, and it allows only two low registers.
10634 For maximal compatibility with ARM syntax, we allow three register
10635 operands even when Thumb-32 instructions are not available, as long
10636 as the first two are identical. For instance, both "sbc r0,r1" and
10637 "sbc r0,r0,r1" are allowed. */
10643 Rd
= inst
.operands
[0].reg
;
10644 Rs
= (inst
.operands
[1].present
10645 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10646 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10647 Rn
= inst
.operands
[2].reg
;
10649 reject_bad_reg (Rd
);
10650 reject_bad_reg (Rs
);
10651 if (inst
.operands
[2].isreg
)
10652 reject_bad_reg (Rn
);
10654 if (unified_syntax
)
10656 if (!inst
.operands
[2].isreg
)
10658 /* For an immediate, we always generate a 32-bit opcode;
10659 section relaxation will shrink it later if possible. */
10660 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10661 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10662 inst
.instruction
|= Rd
<< 8;
10663 inst
.instruction
|= Rs
<< 16;
10664 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10668 bfd_boolean narrow
;
10670 /* See if we can do this with a 16-bit instruction. */
10671 if (THUMB_SETS_FLAGS (inst
.instruction
))
10672 narrow
= !in_it_block ();
10674 narrow
= in_it_block ();
10676 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10678 if (inst
.operands
[2].shifted
)
10680 if (inst
.size_req
== 4)
10686 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10687 inst
.instruction
|= Rd
;
10688 inst
.instruction
|= Rn
<< 3;
10692 /* If we get here, it can't be done in 16 bits. */
10693 constraint (inst
.operands
[2].shifted
10694 && inst
.operands
[2].immisreg
,
10695 _("shift must be constant"));
10696 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10697 inst
.instruction
|= Rd
<< 8;
10698 inst
.instruction
|= Rs
<< 16;
10699 encode_thumb32_shifted_operand (2);
10704 /* On its face this is a lie - the instruction does set the
10705 flags. However, the only supported mnemonic in this mode
10706 says it doesn't. */
10707 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10709 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10710 _("unshifted register required"));
10711 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10712 constraint (Rd
!= Rs
,
10713 _("dest and source1 must be the same register"));
10715 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10716 inst
.instruction
|= Rd
;
10717 inst
.instruction
|= Rn
<< 3;
10721 /* Similarly, but for instructions where the arithmetic operation is
10722 commutative, so we can allow either of them to be different from
10723 the destination operand in a 16-bit instruction. For instance, all
10724 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10731 Rd
= inst
.operands
[0].reg
;
10732 Rs
= (inst
.operands
[1].present
10733 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10734 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10735 Rn
= inst
.operands
[2].reg
;
10737 reject_bad_reg (Rd
);
10738 reject_bad_reg (Rs
);
10739 if (inst
.operands
[2].isreg
)
10740 reject_bad_reg (Rn
);
10742 if (unified_syntax
)
10744 if (!inst
.operands
[2].isreg
)
10746 /* For an immediate, we always generate a 32-bit opcode;
10747 section relaxation will shrink it later if possible. */
10748 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10749 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10750 inst
.instruction
|= Rd
<< 8;
10751 inst
.instruction
|= Rs
<< 16;
10752 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10756 bfd_boolean narrow
;
10758 /* See if we can do this with a 16-bit instruction. */
10759 if (THUMB_SETS_FLAGS (inst
.instruction
))
10760 narrow
= !in_it_block ();
10762 narrow
= in_it_block ();
10764 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10766 if (inst
.operands
[2].shifted
)
10768 if (inst
.size_req
== 4)
10775 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10776 inst
.instruction
|= Rd
;
10777 inst
.instruction
|= Rn
<< 3;
10782 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10783 inst
.instruction
|= Rd
;
10784 inst
.instruction
|= Rs
<< 3;
10789 /* If we get here, it can't be done in 16 bits. */
10790 constraint (inst
.operands
[2].shifted
10791 && inst
.operands
[2].immisreg
,
10792 _("shift must be constant"));
10793 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10794 inst
.instruction
|= Rd
<< 8;
10795 inst
.instruction
|= Rs
<< 16;
10796 encode_thumb32_shifted_operand (2);
10801 /* On its face this is a lie - the instruction does set the
10802 flags. However, the only supported mnemonic in this mode
10803 says it doesn't. */
10804 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10806 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10807 _("unshifted register required"));
10808 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10810 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10811 inst
.instruction
|= Rd
;
10814 inst
.instruction
|= Rn
<< 3;
10816 inst
.instruction
|= Rs
<< 3;
10818 constraint (1, _("dest must overlap one source register"));
10826 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
10827 constraint (msb
> 32, _("bit-field extends past end of register"));
10828 /* The instruction encoding stores the LSB and MSB,
10829 not the LSB and width. */
10830 Rd
= inst
.operands
[0].reg
;
10831 reject_bad_reg (Rd
);
10832 inst
.instruction
|= Rd
<< 8;
10833 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
10834 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
10835 inst
.instruction
|= msb
- 1;
10844 Rd
= inst
.operands
[0].reg
;
10845 reject_bad_reg (Rd
);
10847 /* #0 in second position is alternative syntax for bfc, which is
10848 the same instruction but with REG_PC in the Rm field. */
10849 if (!inst
.operands
[1].isreg
)
10853 Rn
= inst
.operands
[1].reg
;
10854 reject_bad_reg (Rn
);
10857 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
10858 constraint (msb
> 32, _("bit-field extends past end of register"));
10859 /* The instruction encoding stores the LSB and MSB,
10860 not the LSB and width. */
10861 inst
.instruction
|= Rd
<< 8;
10862 inst
.instruction
|= Rn
<< 16;
10863 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10864 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10865 inst
.instruction
|= msb
- 1;
10873 Rd
= inst
.operands
[0].reg
;
10874 Rn
= inst
.operands
[1].reg
;
10876 reject_bad_reg (Rd
);
10877 reject_bad_reg (Rn
);
10879 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
10880 _("bit-field extends past end of register"));
10881 inst
.instruction
|= Rd
<< 8;
10882 inst
.instruction
|= Rn
<< 16;
10883 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
10884 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
10885 inst
.instruction
|= inst
.operands
[3].imm
- 1;
10888 /* ARM V5 Thumb BLX (argument parse)
10889 BLX <target_addr> which is BLX(1)
10890 BLX <Rm> which is BLX(2)
10891 Unfortunately, there are two different opcodes for this mnemonic.
10892 So, the insns[].value is not used, and the code here zaps values
10893 into inst.instruction.
10895 ??? How to take advantage of the additional two bits of displacement
10896 available in Thumb32 mode? Need new relocation? */
10901 set_it_insn_type_last ();
10903 if (inst
.operands
[0].isreg
)
10905 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
10906 /* We have a register, so this is BLX(2). */
10907 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
10911 /* No register. This must be BLX(1). */
10912 inst
.instruction
= 0xf000e800;
10913 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
10925 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
10927 if (in_it_block ())
10929 /* Conditional branches inside IT blocks are encoded as unconditional
10931 cond
= COND_ALWAYS
;
10936 if (cond
!= COND_ALWAYS
)
10937 opcode
= T_MNEM_bcond
;
10939 opcode
= inst
.instruction
;
10942 && (inst
.size_req
== 4
10943 || (inst
.size_req
!= 2
10944 && (inst
.operands
[0].hasreloc
10945 || inst
.reloc
.exp
.X_op
== O_constant
))))
10947 inst
.instruction
= THUMB_OP32(opcode
);
10948 if (cond
== COND_ALWAYS
)
10949 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
10952 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
10953 _("selected architecture does not support "
10954 "wide conditional branch instruction"));
10956 gas_assert (cond
!= 0xF);
10957 inst
.instruction
|= cond
<< 22;
10958 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
10963 inst
.instruction
= THUMB_OP16(opcode
);
10964 if (cond
== COND_ALWAYS
)
10965 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
10968 inst
.instruction
|= cond
<< 8;
10969 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
10971 /* Allow section relaxation. */
10972 if (unified_syntax
&& inst
.size_req
!= 2)
10973 inst
.relax
= opcode
;
10975 inst
.reloc
.type
= reloc
;
10976 inst
.reloc
.pc_rel
= 1;
10979 /* Actually do the work for Thumb state bkpt and hlt. The only difference
10980 between the two is the maximum immediate allowed - which is passed in
10983 do_t_bkpt_hlt1 (int range
)
10985 constraint (inst
.cond
!= COND_ALWAYS
,
10986 _("instruction is always unconditional"));
10987 if (inst
.operands
[0].present
)
10989 constraint (inst
.operands
[0].imm
> range
,
10990 _("immediate value out of range"));
10991 inst
.instruction
|= inst
.operands
[0].imm
;
10994 set_it_insn_type (NEUTRAL_IT_INSN
);
/* Thumb HLT: immediate limited to 63.  NOTE(review): wrapper header
   lost in extraction; reconstructed from upstream tc-arm.c.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
/* Thumb BKPT: immediate limited to 255.  NOTE(review): wrapper header
   lost in extraction; reconstructed from upstream tc-arm.c.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
/* Encode a Thumb BL/BLX-style branch with a 23-bit PC-relative offset.  */

static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
/* Encode Thumb BX <Rm>: register goes in bits [6:3].  */

static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
/* Encode Thumb-2 BXJ <Rm>: register goes in bits [19:16].  */

static void
do_t_bxj (void)
{
  int Rm;

  set_it_insn_type_last ();
  Rm = inst.operands[0].reg;
  /* SP and PC are not valid here.  */
  reject_bad_reg (Rm);
  inst.instruction |= Rm << 16;
}
/* Encode Thumb-2 CLZ Rd, Rm.  Rm is encoded twice (bits [19:16] and
   [3:0]), as the T32 encoding requires.  */

static void
do_t_clz (void)
{
  unsigned Rd;
  unsigned Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rm << 16;
  inst.instruction |= Rm;
}
/* Encode Thumb CPS: OR the parsed effect/flags immediate into the opcode.  */

static void
do_t_cps (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
/* Encode Thumb CPSIE/CPSID, choosing between the 32-bit form (when a
   mode operand is given or .w is requested) and the 16-bit form.  */

static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* 32-bit CPS: extract the imod field from the parsed 16-bit
	 template before rebuilding the T32 opcode.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
/* THUMB CPY instruction (argument parse).  Wide form is encoded as a
   T32 MOV; narrow form splits Rd across bits [7] and [2:0].  */

static void
do_t_cpy (void)
{
  if (inst.size_req == 4)
    {
      inst.instruction = THUMB_OP32 (T_MNEM_mov);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg;
    }
  else
    {
      inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
      inst.instruction |= (inst.operands[0].reg & 0x7);
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
/* Encode Thumb CBZ/CBNZ: low register only, 7-bit PC-relative branch.  */

static void
do_t_cbz (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
  inst.instruction |= inst.operands[0].reg;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
}
/* Encode Thumb DBG #<option>: OR the 4-bit option into the opcode.  */

static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
/* Encode Thumb-2 SDIV/UDIV Rd, Rn, Rm; Rn defaults to Rd when the
   two-operand form is used.  */

static void
do_t_div (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = (inst.operands[1].present
	? inst.operands[1].reg : Rd);
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
/* Encode a Thumb hint instruction (e.g. NOP-compatible hints): pick the
   16-bit or 32-bit template as requested.  */

static void
do_t_hint (void)
{
  if (unified_syntax && inst.size_req == 4)
    inst.instruction = THUMB_OP32 (inst.instruction);
  else
    inst.instruction = THUMB_OP16 (inst.instruction);
}
/* Encode the Thumb IT instruction and record the new IT block state.  */

static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
/* Helper function used for both push/pop and ldm/stm.  Validates the
   register list, then encodes either a real T32 LDM/STM or - for a
   single-register list - the equivalent LDR/STR form.  */

static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error =  _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC ends the IT block (it is a branch).  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Sole register number moves to the Rt field (bits [15:12]).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
/* Encode Thumb LDM/STM, preferring a 16-bit encoding (including the
   push/pop and single-register str/ldr conversions) when possible,
   else falling back to the 32-bit form via encode_thumb2_ldmstm.  */

static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr. The SP cases are handled below.  */
		  unsigned long opcode;

		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
							     : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
/* Encode Thumb-2 LDREX: immediate-offset [Rn, #imm] addressing only.  */

static void
do_t_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
/* Encode Thumb-2 LDREXD: second register defaults to Rt+1 when omitted.  */

static void
do_t_ldrexd (void)
{
  if (!inst.operands[1].present)
    {
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed as first register "
		    "when second register is omitted"));
      inst.operands[1].reg = inst.operands[0].reg + 1;
    }
  constraint (inst.operands[0].reg == inst.operands[1].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
}
/* Encode a Thumb single-register load/store (ldr/str and the byte,
   halfword and signed variants), choosing among the many 16-bit forms
   and the 32-bit form, and handling literal-pool loads.  */

static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* ldr Rt, ... where Rt is PC is a branch for IT purposes.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Allow section relaxation.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert an immediate-offset template to the register-offset form.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
/* Encode Thumb-2 LDRD/STRD; second transfer register defaults to Rt+1.  */

static void
do_t_ldstd (void)
{
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed here"));
      constraint (inst.operands[0].reg == REG_R12,
		  _("r12 not allowed here"));
    }

  if (inst.operands[2].writeback
      && (inst.operands[0].reg == inst.operands[2].reg
	  || inst.operands[1].reg == inst.operands[2].reg))
    as_warn (_("base register written back, and overlaps "
	       "one of transfer registers"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
}
/* Encode Thumb-2 unprivileged load/store (ldrt/strt family).  */

static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
/* Encode Thumb-2 MLA/MLS Rd, Rn, Rm, Ra.  */

static void
do_t_mla (void)
{
  unsigned Rd, Rn, Rm, Ra;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;
  Ra = inst.operands[3].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);
  reject_bad_reg (Ra);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  inst.instruction |= Ra << 12;
}
/* Encode Thumb-2 long multiply-accumulate (smlal/umlal family).  */

static void
do_t_mlal (void)
{
  unsigned RdLo, RdHi, Rn, Rm;

  RdLo = inst.operands[0].reg;
  RdHi = inst.operands[1].reg;
  Rn = inst.operands[2].reg;
  Rm = inst.operands[3].reg;

  reject_bad_reg (RdLo);
  reject_bad_reg (RdHi);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= RdLo << 12;
  inst.instruction |= RdHi << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
/* Encode Thumb MOV/MOVS/CMP (register, immediate and shifted forms),
   selecting between the various 16-bit encodings and the 32-bit
   encodings, with the special cases those mnemonics accumulate.  */

static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				 "deprecated when r%u is the destination "
				 "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.size_req == 2)
		{
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
		}
	      else
		inst.relax = opcode;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results. Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
/* Encode Thumb-2 MOVW/MOVT, scattering the 16-bit immediate across the
   imm4:i:imm3:imm8 fields and mapping :lower16:/:upper16: relocs.  */

static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
/* Encode Thumb MVN/TST/CMN-style two-operand flag or negate
   instructions, choosing 16-bit or 32-bit encodings.  */

static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
/* Encode Thumb-2 MRS, including banked-register and M-profile
   special-register forms.  */

static void
do_t_mrs (void)
{
  unsigned Rd;

  /* VFP system registers (fpscr etc.) are handled elsewhere.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register: the parser packs SYSm plus flag bits into the
	 register field.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
/* Encode Thumb-2 MSR, validating the special-register mask against the
   selected architecture (M-profile vs. A/R-profile).  */

static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
/* Encode Thumb MUL/MULS.  The 16-bit form requires the destination to
   overlap one source; otherwise the 32-bit MUL is used.  */

static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
/* Encode Thumb-2 SMULL/UMULL; RdLo and RdHi must differ.  */

static void
do_t_mull (void)
{
  unsigned RdLo, RdHi, Rn, Rm;

  RdLo = inst.operands[0].reg;
  RdHi = inst.operands[1].reg;
  Rn = inst.operands[2].reg;
  Rm = inst.operands[3].reg;

  reject_bad_reg (RdLo);
  reject_bad_reg (RdHi);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= RdLo << 12;
  inst.instruction |= RdHi << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;

  if (RdLo == RdHi)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
/* Encode Thumb NOP (and hint-argument forms), falling back to the plain
   16-bit NOP encoding (mov r8, r8 == 0x46c0) where Thumb-2 is absent.  */

static void
do_t_nop (void)
{
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
12325 if (unified_syntax
)
12327 bfd_boolean narrow
;
12329 if (THUMB_SETS_FLAGS (inst
.instruction
))
12330 narrow
= !in_it_block ();
12332 narrow
= in_it_block ();
12333 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12335 if (inst
.size_req
== 4)
12340 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12341 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12342 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12346 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12347 inst
.instruction
|= inst
.operands
[0].reg
;
12348 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12353 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12355 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12357 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12358 inst
.instruction
|= inst
.operands
[0].reg
;
12359 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12368 Rd
= inst
.operands
[0].reg
;
12369 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12371 reject_bad_reg (Rd
);
12372 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12373 reject_bad_reg (Rn
);
12375 inst
.instruction
|= Rd
<< 8;
12376 inst
.instruction
|= Rn
<< 16;
12378 if (!inst
.operands
[2].isreg
)
12380 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12381 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12387 Rm
= inst
.operands
[2].reg
;
12388 reject_bad_reg (Rm
);
12390 constraint (inst
.operands
[2].shifted
12391 && inst
.operands
[2].immisreg
,
12392 _("shift must be constant"));
12393 encode_thumb32_shifted_operand (2);
12400 unsigned Rd
, Rn
, Rm
;
12402 Rd
= inst
.operands
[0].reg
;
12403 Rn
= inst
.operands
[1].reg
;
12404 Rm
= inst
.operands
[2].reg
;
12406 reject_bad_reg (Rd
);
12407 reject_bad_reg (Rn
);
12408 reject_bad_reg (Rm
);
12410 inst
.instruction
|= Rd
<< 8;
12411 inst
.instruction
|= Rn
<< 16;
12412 inst
.instruction
|= Rm
;
12413 if (inst
.operands
[3].present
)
12415 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
12416 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12417 _("expression too complex"));
12418 inst
.instruction
|= (val
& 0x1c) << 10;
12419 inst
.instruction
|= (val
& 0x03) << 6;
12426 if (!inst
.operands
[3].present
)
12430 inst
.instruction
&= ~0x00000020;
12432 /* PR 10168. Swap the Rm and Rn registers. */
12433 Rtmp
= inst
.operands
[1].reg
;
12434 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
12435 inst
.operands
[2].reg
= Rtmp
;
12443 if (inst
.operands
[0].immisreg
)
12444 reject_bad_reg (inst
.operands
[0].imm
);
12446 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12450 do_t_push_pop (void)
12454 constraint (inst
.operands
[0].writeback
,
12455 _("push/pop do not support {reglist}^"));
12456 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
12457 _("expression too complex"));
12459 mask
= inst
.operands
[0].imm
;
12460 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
12461 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
12462 else if (inst
.size_req
!= 4
12463 && (mask
& ~0xff) == (1 << (inst
.instruction
== T_MNEM_push
12464 ? REG_LR
: REG_PC
)))
12466 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12467 inst
.instruction
|= THUMB_PP_PC_LR
;
12468 inst
.instruction
|= mask
& 0xff;
12470 else if (unified_syntax
)
12472 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12473 encode_thumb2_ldmstm (13, mask
, TRUE
);
12477 inst
.error
= _("invalid register list to push/pop instruction");
12487 Rd
= inst
.operands
[0].reg
;
12488 Rm
= inst
.operands
[1].reg
;
12490 reject_bad_reg (Rd
);
12491 reject_bad_reg (Rm
);
12493 inst
.instruction
|= Rd
<< 8;
12494 inst
.instruction
|= Rm
<< 16;
12495 inst
.instruction
|= Rm
;
12503 Rd
= inst
.operands
[0].reg
;
12504 Rm
= inst
.operands
[1].reg
;
12506 reject_bad_reg (Rd
);
12507 reject_bad_reg (Rm
);
12509 if (Rd
<= 7 && Rm
<= 7
12510 && inst
.size_req
!= 4)
12512 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12513 inst
.instruction
|= Rd
;
12514 inst
.instruction
|= Rm
<< 3;
12516 else if (unified_syntax
)
12518 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12519 inst
.instruction
|= Rd
<< 8;
12520 inst
.instruction
|= Rm
<< 16;
12521 inst
.instruction
|= Rm
;
12524 inst
.error
= BAD_HIREG
;
12532 Rd
= inst
.operands
[0].reg
;
12533 Rm
= inst
.operands
[1].reg
;
12535 reject_bad_reg (Rd
);
12536 reject_bad_reg (Rm
);
12538 inst
.instruction
|= Rd
<< 8;
12539 inst
.instruction
|= Rm
;
12547 Rd
= inst
.operands
[0].reg
;
12548 Rs
= (inst
.operands
[1].present
12549 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
12550 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
12552 reject_bad_reg (Rd
);
12553 reject_bad_reg (Rs
);
12554 if (inst
.operands
[2].isreg
)
12555 reject_bad_reg (inst
.operands
[2].reg
);
12557 inst
.instruction
|= Rd
<< 8;
12558 inst
.instruction
|= Rs
<< 16;
12559 if (!inst
.operands
[2].isreg
)
12561 bfd_boolean narrow
;
12563 if ((inst
.instruction
& 0x00100000) != 0)
12564 narrow
= !in_it_block ();
12566 narrow
= in_it_block ();
12568 if (Rd
> 7 || Rs
> 7)
12571 if (inst
.size_req
== 4 || !unified_syntax
)
12574 if (inst
.reloc
.exp
.X_op
!= O_constant
12575 || inst
.reloc
.exp
.X_add_number
!= 0)
12578 /* Turn rsb #0 into 16-bit neg. We should probably do this via
12579 relaxation, but it doesn't seem worth the hassle. */
12582 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12583 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
12584 inst
.instruction
|= Rs
<< 3;
12585 inst
.instruction
|= Rd
;
12589 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12590 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12594 encode_thumb32_shifted_operand (2);
12600 if (warn_on_deprecated
12601 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12602 as_tsktsk (_("setend use is deprecated for ARMv8"));
12604 set_it_insn_type (OUTSIDE_IT_INSN
);
12605 if (inst
.operands
[0].imm
)
12606 inst
.instruction
|= 0x8;
12612 if (!inst
.operands
[1].present
)
12613 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
12615 if (unified_syntax
)
12617 bfd_boolean narrow
;
12620 switch (inst
.instruction
)
12623 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
12625 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
12627 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
12629 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
12633 if (THUMB_SETS_FLAGS (inst
.instruction
))
12634 narrow
= !in_it_block ();
12636 narrow
= in_it_block ();
12637 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12639 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
12641 if (inst
.operands
[2].isreg
12642 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
12643 || inst
.operands
[2].reg
> 7))
12645 if (inst
.size_req
== 4)
12648 reject_bad_reg (inst
.operands
[0].reg
);
12649 reject_bad_reg (inst
.operands
[1].reg
);
12653 if (inst
.operands
[2].isreg
)
12655 reject_bad_reg (inst
.operands
[2].reg
);
12656 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12657 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12658 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12659 inst
.instruction
|= inst
.operands
[2].reg
;
12661 /* PR 12854: Error on extraneous shifts. */
12662 constraint (inst
.operands
[2].shifted
,
12663 _("extraneous shift as part of operand to shift insn"));
12667 inst
.operands
[1].shifted
= 1;
12668 inst
.operands
[1].shift_kind
= shift_kind
;
12669 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
12670 ? T_MNEM_movs
: T_MNEM_mov
);
12671 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12672 encode_thumb32_shifted_operand (1);
12673 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
12674 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12679 if (inst
.operands
[2].isreg
)
12681 switch (shift_kind
)
12683 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12684 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12685 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12686 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12690 inst
.instruction
|= inst
.operands
[0].reg
;
12691 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12693 /* PR 12854: Error on extraneous shifts. */
12694 constraint (inst
.operands
[2].shifted
,
12695 _("extraneous shift as part of operand to shift insn"));
12699 switch (shift_kind
)
12701 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12702 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12703 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12706 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12707 inst
.instruction
|= inst
.operands
[0].reg
;
12708 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12714 constraint (inst
.operands
[0].reg
> 7
12715 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
12716 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12718 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
12720 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
12721 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
12722 _("source1 and dest must be same register"));
12724 switch (inst
.instruction
)
12726 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12727 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12728 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12729 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12733 inst
.instruction
|= inst
.operands
[0].reg
;
12734 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12736 /* PR 12854: Error on extraneous shifts. */
12737 constraint (inst
.operands
[2].shifted
,
12738 _("extraneous shift as part of operand to shift insn"));
12742 switch (inst
.instruction
)
12744 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12745 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12746 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12747 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
12750 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12751 inst
.instruction
|= inst
.operands
[0].reg
;
12752 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12760 unsigned Rd
, Rn
, Rm
;
12762 Rd
= inst
.operands
[0].reg
;
12763 Rn
= inst
.operands
[1].reg
;
12764 Rm
= inst
.operands
[2].reg
;
12766 reject_bad_reg (Rd
);
12767 reject_bad_reg (Rn
);
12768 reject_bad_reg (Rm
);
12770 inst
.instruction
|= Rd
<< 8;
12771 inst
.instruction
|= Rn
<< 16;
12772 inst
.instruction
|= Rm
;
12778 unsigned Rd
, Rn
, Rm
;
12780 Rd
= inst
.operands
[0].reg
;
12781 Rm
= inst
.operands
[1].reg
;
12782 Rn
= inst
.operands
[2].reg
;
12784 reject_bad_reg (Rd
);
12785 reject_bad_reg (Rn
);
12786 reject_bad_reg (Rm
);
12788 inst
.instruction
|= Rd
<< 8;
12789 inst
.instruction
|= Rn
<< 16;
12790 inst
.instruction
|= Rm
;
12796 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12797 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
12798 _("SMC is not permitted on this architecture"));
12799 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12800 _("expression too complex"));
12801 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12802 inst
.instruction
|= (value
& 0xf000) >> 12;
12803 inst
.instruction
|= (value
& 0x0ff0);
12804 inst
.instruction
|= (value
& 0x000f) << 16;
12805 /* PR gas/15623: SMC instructions must be last in an IT block. */
12806 set_it_insn_type_last ();
12812 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12814 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12815 inst
.instruction
|= (value
& 0x0fff);
12816 inst
.instruction
|= (value
& 0xf000) << 4;
12820 do_t_ssat_usat (int bias
)
12824 Rd
= inst
.operands
[0].reg
;
12825 Rn
= inst
.operands
[2].reg
;
12827 reject_bad_reg (Rd
);
12828 reject_bad_reg (Rn
);
12830 inst
.instruction
|= Rd
<< 8;
12831 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
12832 inst
.instruction
|= Rn
<< 16;
12834 if (inst
.operands
[3].present
)
12836 offsetT shift_amount
= inst
.reloc
.exp
.X_add_number
;
12838 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12840 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12841 _("expression too complex"));
12843 if (shift_amount
!= 0)
12845 constraint (shift_amount
> 31,
12846 _("shift expression is too large"));
12848 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
12849 inst
.instruction
|= 0x00200000; /* sh bit. */
12851 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
12852 inst
.instruction
|= (shift_amount
& 0x03) << 6;
/* Thumb-2 SSAT: delegate to the common worker with bias 1.  */

static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
12868 Rd
= inst
.operands
[0].reg
;
12869 Rn
= inst
.operands
[2].reg
;
12871 reject_bad_reg (Rd
);
12872 reject_bad_reg (Rn
);
12874 inst
.instruction
|= Rd
<< 8;
12875 inst
.instruction
|= inst
.operands
[1].imm
- 1;
12876 inst
.instruction
|= Rn
<< 16;
12882 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
12883 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
12884 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
12885 || inst
.operands
[2].negative
,
12888 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
12890 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12891 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12892 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12893 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
12899 if (!inst
.operands
[2].present
)
12900 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
12902 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
12903 || inst
.operands
[0].reg
== inst
.operands
[2].reg
12904 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
12907 inst
.instruction
|= inst
.operands
[0].reg
;
12908 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12909 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
12910 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
12916 unsigned Rd
, Rn
, Rm
;
12918 Rd
= inst
.operands
[0].reg
;
12919 Rn
= inst
.operands
[1].reg
;
12920 Rm
= inst
.operands
[2].reg
;
12922 reject_bad_reg (Rd
);
12923 reject_bad_reg (Rn
);
12924 reject_bad_reg (Rm
);
12926 inst
.instruction
|= Rd
<< 8;
12927 inst
.instruction
|= Rn
<< 16;
12928 inst
.instruction
|= Rm
;
12929 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
12937 Rd
= inst
.operands
[0].reg
;
12938 Rm
= inst
.operands
[1].reg
;
12940 reject_bad_reg (Rd
);
12941 reject_bad_reg (Rm
);
12943 if (inst
.instruction
<= 0xffff
12944 && inst
.size_req
!= 4
12945 && Rd
<= 7 && Rm
<= 7
12946 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
12948 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12949 inst
.instruction
|= Rd
;
12950 inst
.instruction
|= Rm
<< 3;
12952 else if (unified_syntax
)
12954 if (inst
.instruction
<= 0xffff)
12955 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12956 inst
.instruction
|= Rd
<< 8;
12957 inst
.instruction
|= Rm
;
12958 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
12962 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
12963 _("Thumb encoding does not support rotation"));
12964 constraint (1, BAD_HIREG
);
12971 /* We have to do the following check manually as ARM_EXT_OS only applies
12973 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6m
))
12975 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_os
)
12976 /* This only applies to the v6m howver, not later architectures. */
12977 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
))
12978 as_bad (_("SVC is not permitted on this architecture"));
12979 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, arm_ext_os
);
12982 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
12991 half
= (inst
.instruction
& 0x10) != 0;
12992 set_it_insn_type_last ();
12993 constraint (inst
.operands
[0].immisreg
,
12994 _("instruction requires register index"));
12996 Rn
= inst
.operands
[0].reg
;
12997 Rm
= inst
.operands
[0].imm
;
12999 constraint (Rn
== REG_SP
, BAD_SP
);
13000 reject_bad_reg (Rm
);
13002 constraint (!half
&& inst
.operands
[0].shifted
,
13003 _("instruction does not allow shifted index"));
13004 inst
.instruction
|= (Rn
<< 16) | Rm
;
13010 if (!inst
.operands
[0].present
)
13011 inst
.operands
[0].imm
= 0;
13013 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13015 constraint (inst
.size_req
== 2,
13016 _("immediate value out of range"));
13017 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13018 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13019 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13023 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13024 inst
.instruction
|= inst
.operands
[0].imm
;
13027 set_it_insn_type (NEUTRAL_IT_INSN
);
/* Thumb-2 USAT: delegate to the common worker with bias 0.  */

static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13042 Rd
= inst
.operands
[0].reg
;
13043 Rn
= inst
.operands
[2].reg
;
13045 reject_bad_reg (Rd
);
13046 reject_bad_reg (Rn
);
13048 inst
.instruction
|= Rd
<< 8;
13049 inst
.instruction
|= inst
.operands
[1].imm
;
13050 inst
.instruction
|= Rn
<< 16;
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

struct neon_tab_entry
{
  unsigned integer;		/* Integer / interleave / ARM-reg variant.  */
  unsigned float_or_poly;	/* Float / polynomial / lane variant.  */
  unsigned scalar_or_imm;	/* Scalar / immediate / dup variant.  */
};

/* Map overloaded Neon opcodes to their respective encodings.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
13147 #define X(OPC,I,F,S) N_MNEM_##OPC
13152 static const struct neon_tab_entry neon_enc_tab
[] =
13154 #define X(OPC,I,F,S) { (I), (F), (S) }
13159 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
13160 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13161 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13162 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13163 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13164 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13165 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13166 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13167 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13168 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13169 #define NEON_ENC_SINGLE_(X) \
13170 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13171 #define NEON_ENC_DOUBLE_(X) \
13172 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13173 #define NEON_ENC_FPV8_(X) \
13174 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13176 #define NEON_ENCODE(type, inst) \
13179 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13180 inst.is_neon = 1; \
13184 #define check_neon_suffixes \
13187 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13189 as_bad (_("invalid neon suffix for non neon instruction")); \
/* Define shapes for instruction operands.  The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE)

#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* One NS_* enumerator per shape, plus NS_NULL as a terminator for
   neon_select_shape's variadic argument list.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
13271 enum neon_shape_class
13279 #define X(N, L, C) SC_##C
13281 static enum neon_shape_class neon_shape_class
[] =
13299 /* Register widths of above. */
13300 static unsigned neon_shape_el_size
[] =
13311 struct neon_shape_info
13314 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
13317 #define S2(A,B) { SE_##A, SE_##B }
13318 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13319 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13321 #define X(N, L, C) { N, S##N L }
13323 static struct neon_shape_info neon_shape_tab
[] =
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13391 /* Select a "shape" for the current instruction (describing register types or
13392 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13393 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13394 function of operand parsing, so this function doesn't need to be called.
13395 Shapes should be listed in order of decreasing length. */
13397 static enum neon_shape
13398 neon_select_shape (enum neon_shape shape
, ...)
13401 enum neon_shape first_shape
= shape
;
13403 /* Fix missing optional operands. FIXME: we don't know at this point how
13404 many arguments we should have, so this makes the assumption that we have
13405 > 1. This is true of all current Neon opcodes, I think, but may not be
13406 true in the future. */
13407 if (!inst
.operands
[1].present
)
13408 inst
.operands
[1] = inst
.operands
[0];
13410 va_start (ap
, shape
);
13412 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
13417 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
13419 if (!inst
.operands
[j
].present
)
13425 switch (neon_shape_tab
[shape
].el
[j
])
13428 if (!(inst
.operands
[j
].isreg
13429 && inst
.operands
[j
].isvec
13430 && inst
.operands
[j
].issingle
13431 && !inst
.operands
[j
].isquad
))
13436 if (!(inst
.operands
[j
].isreg
13437 && inst
.operands
[j
].isvec
13438 && !inst
.operands
[j
].isquad
13439 && !inst
.operands
[j
].issingle
))
13444 if (!(inst
.operands
[j
].isreg
13445 && !inst
.operands
[j
].isvec
))
13450 if (!(inst
.operands
[j
].isreg
13451 && inst
.operands
[j
].isvec
13452 && inst
.operands
[j
].isquad
13453 && !inst
.operands
[j
].issingle
))
13458 if (!(!inst
.operands
[j
].isreg
13459 && !inst
.operands
[j
].isscalar
))
13464 if (!(!inst
.operands
[j
].isreg
13465 && inst
.operands
[j
].isscalar
))
13475 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
13476 /* We've matched all the entries in the shape table, and we don't
13477 have any left over operands which have not been matched. */
13483 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
13484 first_error (_("invalid instruction shape"));
13489 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13490 means the Q bit should be set). */
13493 neon_quad (enum neon_shape shape
)
13495 return neon_shape_class
[shape
] == SC_QUAD
;
13499 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
13502 /* Allow modification to be made to types which are constrained to be
13503 based on the key element, based on bits set alongside N_EQK. */
13504 if ((typebits
& N_EQK
) != 0)
13506 if ((typebits
& N_HLF
) != 0)
13508 else if ((typebits
& N_DBL
) != 0)
13510 if ((typebits
& N_SGN
) != 0)
13511 *g_type
= NT_signed
;
13512 else if ((typebits
& N_UNS
) != 0)
13513 *g_type
= NT_unsigned
;
13514 else if ((typebits
& N_INT
) != 0)
13515 *g_type
= NT_integer
;
13516 else if ((typebits
& N_FLT
) != 0)
13517 *g_type
= NT_float
;
13518 else if ((typebits
& N_SIZ
) != 0)
13519 *g_type
= NT_untyped
;
13523 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13524 operand type, i.e. the single type specified in a Neon instruction when it
13525 is the only one given. */
13527 static struct neon_type_el
13528 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
13530 struct neon_type_el dest
= *key
;
13532 gas_assert ((thisarg
& N_EQK
) != 0);
13534 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
13539 /* Convert Neon type and size into compact bitmask representation. */
13541 static enum neon_type_mask
13542 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
13549 case 8: return N_8
;
13550 case 16: return N_16
;
13551 case 32: return N_32
;
13552 case 64: return N_64
;
13560 case 8: return N_I8
;
13561 case 16: return N_I16
;
13562 case 32: return N_I32
;
13563 case 64: return N_I64
;
13571 case 16: return N_F16
;
13572 case 32: return N_F32
;
13573 case 64: return N_F64
;
13581 case 8: return N_P8
;
13582 case 16: return N_P16
;
13583 case 64: return N_P64
;
13591 case 8: return N_S8
;
13592 case 16: return N_S16
;
13593 case 32: return N_S32
;
13594 case 64: return N_S64
;
13602 case 8: return N_U8
;
13603 case 16: return N_U16
;
13604 case 32: return N_U32
;
13605 case 64: return N_U64
;
13616 /* Convert compact Neon bitmask type representation to a type and size. Only
13617 handles the case where a single bit is set in the mask. */
13620 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
13621 enum neon_type_mask mask
)
13623 if ((mask
& N_EQK
) != 0)
13626 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
13628 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
13630 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
13632 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
13637 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
13639 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
13640 *type
= NT_unsigned
;
13641 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
13642 *type
= NT_integer
;
13643 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
13644 *type
= NT_untyped
;
13645 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
13647 else if ((mask
& (N_F16
| N_F32
| N_F64
)) != 0)
13655 /* Modify a bitmask of allowed types. This is only needed for type
13659 modify_types_allowed (unsigned allowed
, unsigned mods
)
13662 enum neon_el_type type
;
13668 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
13670 if (el_type_of_type_chk (&type
, &size
,
13671 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
13673 neon_modify_type_size (mods
, &type
, &size
);
13674 destmask
|= type_chk_of_el_type (type
, size
);
13681 /* Check type and return type classification.
13682 The manual states (paraphrase): If one datatype is given, it indicates the
13684 - the second operand, if there is one
13685 - the operand, if there is no second operand
13686 - the result, if there are no operands.
13687 This isn't quite good enough though, so we use a concept of a "key" datatype
13688 which is set on a per-instruction basis, which is the one which matters when
13689 only one data type is written.
13690 Note: this function has side-effects (e.g. filling in missing operands). All
13691 Neon instructions should call it before performing bit encoding. */
13693 static struct neon_type_el
13694 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
13697 unsigned i
, pass
, key_el
= 0;
13698 unsigned types
[NEON_MAX_TYPE_ELS
];
13699 enum neon_el_type k_type
= NT_invtype
;
13700 unsigned k_size
= -1u;
13701 struct neon_type_el badtype
= {NT_invtype
, -1};
13702 unsigned key_allowed
= 0;
13704 /* Optional registers in Neon instructions are always (not) in operand 1.
13705 Fill in the missing operand here, if it was omitted. */
13706 if (els
> 1 && !inst
.operands
[1].present
)
13707 inst
.operands
[1] = inst
.operands
[0];
13709 /* Suck up all the varargs. */
13711 for (i
= 0; i
< els
; i
++)
13713 unsigned thisarg
= va_arg (ap
, unsigned);
13714 if (thisarg
== N_IGNORE_TYPE
)
13719 types
[i
] = thisarg
;
13720 if ((thisarg
& N_KEY
) != 0)
13725 if (inst
.vectype
.elems
> 0)
13726 for (i
= 0; i
< els
; i
++)
13727 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
13729 first_error (_("types specified in both the mnemonic and operands"));
13733 /* Duplicate inst.vectype elements here as necessary.
13734 FIXME: No idea if this is exactly the same as the ARM assembler,
13735 particularly when an insn takes one register and one non-register
13737 if (inst
.vectype
.elems
== 1 && els
> 1)
13740 inst
.vectype
.elems
= els
;
13741 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
13742 for (j
= 0; j
< els
; j
++)
13744 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13747 else if (inst
.vectype
.elems
== 0 && els
> 0)
13750 /* No types were given after the mnemonic, so look for types specified
13751 after each operand. We allow some flexibility here; as long as the
13752 "key" operand has a type, we can infer the others. */
13753 for (j
= 0; j
< els
; j
++)
13754 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
13755 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
13757 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
13759 for (j
= 0; j
< els
; j
++)
13760 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
13761 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13766 first_error (_("operand types can't be inferred"));
13770 else if (inst
.vectype
.elems
!= els
)
13772 first_error (_("type specifier has the wrong number of parts"));
13776 for (pass
= 0; pass
< 2; pass
++)
13778 for (i
= 0; i
< els
; i
++)
13780 unsigned thisarg
= types
[i
];
13781 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
13782 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
13783 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
13784 unsigned g_size
= inst
.vectype
.el
[i
].size
;
13786 /* Decay more-specific signed & unsigned types to sign-insensitive
13787 integer types if sign-specific variants are unavailable. */
13788 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
13789 && (types_allowed
& N_SU_ALL
) == 0)
13790 g_type
= NT_integer
;
13792 /* If only untyped args are allowed, decay any more specific types to
13793 them. Some instructions only care about signs for some element
13794 sizes, so handle that properly. */
13795 if (((types_allowed
& N_UNT
) == 0)
13796 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
13797 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
13798 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
13799 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
13800 g_type
= NT_untyped
;
13804 if ((thisarg
& N_KEY
) != 0)
13808 key_allowed
= thisarg
& ~N_KEY
;
13813 if ((thisarg
& N_VFP
) != 0)
13815 enum neon_shape_el regshape
;
13816 unsigned regwidth
, match
;
13818 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
13821 first_error (_("invalid instruction shape"));
13824 regshape
= neon_shape_tab
[ns
].el
[i
];
13825 regwidth
= neon_shape_el_size
[regshape
];
13827 /* In VFP mode, operands must match register widths. If we
13828 have a key operand, use its width, else use the width of
13829 the current operand. */
13835 if (regwidth
!= match
)
13837 first_error (_("operand size must match register width"));
13842 if ((thisarg
& N_EQK
) == 0)
13844 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
13846 if ((given_type
& types_allowed
) == 0)
13848 first_error (_("bad type in Neon instruction"));
13854 enum neon_el_type mod_k_type
= k_type
;
13855 unsigned mod_k_size
= k_size
;
13856 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
13857 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
13859 first_error (_("inconsistent types in Neon instruction"));
13867 return inst
.vectype
.el
[key_el
];
13870 /* Neon-style VFP instruction forwarding. */
13872 /* Thumb VFP instructions have 0xE in the condition field. */
13875 do_vfp_cond_or_thumb (void)
13880 inst
.instruction
|= 0xe0000000;
13882 inst
.instruction
|= inst
.cond
<< 28;
13885 /* Look up and encode a simple mnemonic, for use as a helper function for the
13886 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13887 etc. It is assumed that operand parsing has already been done, and that the
13888 operands are in the form expected by the given opcode (this isn't necessarily
13889 the same as the form in which they were parsed, hence some massaging must
13890 take place before this function is called).
13891 Checks current arch version against that in the looked-up opcode. */
13894 do_vfp_nsyn_opcode (const char *opname
)
13896 const struct asm_opcode
*opcode
;
13898 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
13903 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
13904 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
13911 inst
.instruction
= opcode
->tvalue
;
13912 opcode
->tencode ();
13916 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
13917 opcode
->aencode ();
13922 do_vfp_nsyn_add_sub (enum neon_shape rs
)
13924 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
13929 do_vfp_nsyn_opcode ("fadds");
13931 do_vfp_nsyn_opcode ("fsubs");
13936 do_vfp_nsyn_opcode ("faddd");
13938 do_vfp_nsyn_opcode ("fsubd");
13942 /* Check operand types to see if this is a VFP instruction, and if so call
13946 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
13948 enum neon_shape rs
;
13949 struct neon_type_el et
;
13954 rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
13955 et
= neon_check_type (2, rs
,
13956 N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
13960 rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
13961 et
= neon_check_type (3, rs
,
13962 N_EQK
| N_VFP
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
13969 if (et
.type
!= NT_invtype
)
13980 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
13982 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
13987 do_vfp_nsyn_opcode ("fmacs");
13989 do_vfp_nsyn_opcode ("fnmacs");
13994 do_vfp_nsyn_opcode ("fmacd");
13996 do_vfp_nsyn_opcode ("fnmacd");
14001 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
14003 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14008 do_vfp_nsyn_opcode ("ffmas");
14010 do_vfp_nsyn_opcode ("ffnmas");
14015 do_vfp_nsyn_opcode ("ffmad");
14017 do_vfp_nsyn_opcode ("ffnmad");
14022 do_vfp_nsyn_mul (enum neon_shape rs
)
14025 do_vfp_nsyn_opcode ("fmuls");
14027 do_vfp_nsyn_opcode ("fmuld");
14031 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14033 int is_neg
= (inst
.instruction
& 0x80) != 0;
14034 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_VFP
| N_KEY
);
14039 do_vfp_nsyn_opcode ("fnegs");
14041 do_vfp_nsyn_opcode ("fabss");
14046 do_vfp_nsyn_opcode ("fnegd");
14048 do_vfp_nsyn_opcode ("fabsd");
14052 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14053 insns belong to Neon, and are handled elsewhere. */
14056 do_vfp_nsyn_ldm_stm (int is_dbmode
)
14058 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
14062 do_vfp_nsyn_opcode ("fldmdbs");
14064 do_vfp_nsyn_opcode ("fldmias");
14069 do_vfp_nsyn_opcode ("fstmdbs");
14071 do_vfp_nsyn_opcode ("fstmias");
14076 do_vfp_nsyn_sqrt (void)
14078 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
14079 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
14082 do_vfp_nsyn_opcode ("fsqrts");
14084 do_vfp_nsyn_opcode ("fsqrtd");
14088 do_vfp_nsyn_div (void)
14090 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
14091 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14092 N_F32
| N_F64
| N_KEY
| N_VFP
);
14095 do_vfp_nsyn_opcode ("fdivs");
14097 do_vfp_nsyn_opcode ("fdivd");
14101 do_vfp_nsyn_nmul (void)
14103 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
14104 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14105 N_F32
| N_F64
| N_KEY
| N_VFP
);
14109 NEON_ENCODE (SINGLE
, inst
);
14110 do_vfp_sp_dyadic ();
14114 NEON_ENCODE (DOUBLE
, inst
);
14115 do_vfp_dp_rd_rn_rm ();
14117 do_vfp_cond_or_thumb ();
14121 do_vfp_nsyn_cmp (void)
14123 if (inst
.operands
[1].isreg
)
14125 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
14126 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
14130 NEON_ENCODE (SINGLE
, inst
);
14131 do_vfp_sp_monadic ();
14135 NEON_ENCODE (DOUBLE
, inst
);
14136 do_vfp_dp_rd_rm ();
14141 enum neon_shape rs
= neon_select_shape (NS_FI
, NS_DI
, NS_NULL
);
14142 neon_check_type (2, rs
, N_F32
| N_F64
| N_KEY
| N_VFP
, N_EQK
);
14144 switch (inst
.instruction
& 0x0fffffff)
14147 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
14150 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
14158 NEON_ENCODE (SINGLE
, inst
);
14159 do_vfp_sp_compare_z ();
14163 NEON_ENCODE (DOUBLE
, inst
);
14167 do_vfp_cond_or_thumb ();
14171 nsyn_insert_sp (void)
14173 inst
.operands
[1] = inst
.operands
[0];
14174 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
14175 inst
.operands
[0].reg
= REG_SP
;
14176 inst
.operands
[0].isreg
= 1;
14177 inst
.operands
[0].writeback
= 1;
14178 inst
.operands
[0].present
= 1;
14182 do_vfp_nsyn_push (void)
14185 if (inst
.operands
[1].issingle
)
14186 do_vfp_nsyn_opcode ("fstmdbs");
14188 do_vfp_nsyn_opcode ("fstmdbd");
14192 do_vfp_nsyn_pop (void)
14195 if (inst
.operands
[1].issingle
)
14196 do_vfp_nsyn_opcode ("fldmias");
14198 do_vfp_nsyn_opcode ("fldmiad");
14201 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14202 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14205 neon_dp_fixup (struct arm_it
* insn
)
14207 unsigned int i
= insn
->instruction
;
14212 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14223 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  X must be a power of two.  */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}

/* Low 4 bits / high (5th) bit of a Neon register number.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
14238 /* Encode insns with bit pattern:
14240 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14241 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14243 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14244 different meaning for some instruction. */
14247 neon_three_same (int isquad
, int ubit
, int size
)
14249 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14250 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14251 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14252 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14253 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14254 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14255 inst
.instruction
|= (isquad
!= 0) << 6;
14256 inst
.instruction
|= (ubit
!= 0) << 24;
14258 inst
.instruction
|= neon_logbits (size
) << 20;
14260 neon_dp_fixup (&inst
);
14263 /* Encode instructions of the form:
14265 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14266 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14268 Don't write size if SIZE == -1. */
14271 neon_two_same (int qbit
, int ubit
, int size
)
14273 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14274 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14275 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14276 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14277 inst
.instruction
|= (qbit
!= 0) << 6;
14278 inst
.instruction
|= (ubit
!= 0) << 24;
14281 inst
.instruction
|= neon_logbits (size
) << 18;
14283 neon_dp_fixup (&inst
);
14286 /* Neon instruction encoders, in approximate order of appearance. */
14289 do_neon_dyadic_i_su (void)
14291 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14292 struct neon_type_el et
= neon_check_type (3, rs
,
14293 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
14294 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14298 do_neon_dyadic_i64_su (void)
14300 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14301 struct neon_type_el et
= neon_check_type (3, rs
,
14302 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14303 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14307 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
14310 unsigned size
= et
.size
>> 3;
14311 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14312 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14313 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14314 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14315 inst
.instruction
|= (isquad
!= 0) << 6;
14316 inst
.instruction
|= immbits
<< 16;
14317 inst
.instruction
|= (size
>> 3) << 7;
14318 inst
.instruction
|= (size
& 0x7) << 19;
14320 inst
.instruction
|= (uval
!= 0) << 24;
14322 neon_dp_fixup (&inst
);
14326 do_neon_shl_imm (void)
14328 if (!inst
.operands
[2].isreg
)
14330 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14331 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
14332 int imm
= inst
.operands
[2].imm
;
14334 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14335 _("immediate out of range for shift"));
14336 NEON_ENCODE (IMMED
, inst
);
14337 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14341 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14342 struct neon_type_el et
= neon_check_type (3, rs
,
14343 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14346 /* VSHL/VQSHL 3-register variants have syntax such as:
14348 whereas other 3-register operations encoded by neon_three_same have
14351 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14353 tmp
= inst
.operands
[2].reg
;
14354 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14355 inst
.operands
[1].reg
= tmp
;
14356 NEON_ENCODE (INTEGER
, inst
);
14357 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14362 do_neon_qshl_imm (void)
14364 if (!inst
.operands
[2].isreg
)
14366 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14367 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
14368 int imm
= inst
.operands
[2].imm
;
14370 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14371 _("immediate out of range for shift"));
14372 NEON_ENCODE (IMMED
, inst
);
14373 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
14377 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14378 struct neon_type_el et
= neon_check_type (3, rs
,
14379 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14382 /* See note in do_neon_shl_imm. */
14383 tmp
= inst
.operands
[2].reg
;
14384 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14385 inst
.operands
[1].reg
= tmp
;
14386 NEON_ENCODE (INTEGER
, inst
);
14387 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14392 do_neon_rshl (void)
14394 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14395 struct neon_type_el et
= neon_check_type (3, rs
,
14396 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14399 tmp
= inst
.operands
[2].reg
;
14400 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14401 inst
.operands
[1].reg
= tmp
;
14402 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14406 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
14408 /* Handle .I8 pseudo-instructions. */
14411 /* Unfortunately, this will make everything apart from zero out-of-range.
14412 FIXME is this the intended semantics? There doesn't seem much point in
14413 accepting .I8 if so. */
14414 immediate
|= immediate
<< 8;
14420 if (immediate
== (immediate
& 0x000000ff))
14422 *immbits
= immediate
;
14425 else if (immediate
== (immediate
& 0x0000ff00))
14427 *immbits
= immediate
>> 8;
14430 else if (immediate
== (immediate
& 0x00ff0000))
14432 *immbits
= immediate
>> 16;
14435 else if (immediate
== (immediate
& 0xff000000))
14437 *immbits
= immediate
>> 24;
14440 if ((immediate
& 0xffff) != (immediate
>> 16))
14441 goto bad_immediate
;
14442 immediate
&= 0xffff;
14445 if (immediate
== (immediate
& 0x000000ff))
14447 *immbits
= immediate
;
14450 else if (immediate
== (immediate
& 0x0000ff00))
14452 *immbits
= immediate
>> 8;
14457 first_error (_("immediate value out of range"));
14462 do_neon_logic (void)
14464 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
14466 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14467 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14468 /* U bit and size field were set as part of the bitmask. */
14469 NEON_ENCODE (INTEGER
, inst
);
14470 neon_three_same (neon_quad (rs
), 0, -1);
14474 const int three_ops_form
= (inst
.operands
[2].present
14475 && !inst
.operands
[2].isreg
);
14476 const int immoperand
= (three_ops_form
? 2 : 1);
14477 enum neon_shape rs
= (three_ops_form
14478 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
14479 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
14480 struct neon_type_el et
= neon_check_type (2, rs
,
14481 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
14482 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
14486 if (et
.type
== NT_invtype
)
14489 if (three_ops_form
)
14490 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
14491 _("first and second operands shall be the same register"));
14493 NEON_ENCODE (IMMED
, inst
);
14495 immbits
= inst
.operands
[immoperand
].imm
;
14498 /* .i64 is a pseudo-op, so the immediate must be a repeating
14500 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
14501 inst
.operands
[immoperand
].reg
: 0))
14503 /* Set immbits to an invalid constant. */
14504 immbits
= 0xdeadbeef;
14511 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14515 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14519 /* Pseudo-instruction for VBIC. */
14520 neon_invert_size (&immbits
, 0, et
.size
);
14521 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14525 /* Pseudo-instruction for VORR. */
14526 neon_invert_size (&immbits
, 0, et
.size
);
14527 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14537 inst
.instruction
|= neon_quad (rs
) << 6;
14538 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14539 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14540 inst
.instruction
|= cmode
<< 8;
14541 neon_write_immbits (immbits
);
14543 neon_dp_fixup (&inst
);
14548 do_neon_bitfield (void)
14550 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14551 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14552 neon_three_same (neon_quad (rs
), 0, -1);
14556 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
14559 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14560 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
14562 if (et
.type
== NT_float
)
14564 NEON_ENCODE (FLOAT
, inst
);
14565 neon_three_same (neon_quad (rs
), 0, -1);
14569 NEON_ENCODE (INTEGER
, inst
);
14570 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
14575 do_neon_dyadic_if_su (void)
14577 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14581 do_neon_dyadic_if_su_d (void)
14583 /* This version only allow D registers, but that constraint is enforced during
14584 operand parsing so we don't need to do anything extra here. */
14585 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14589 do_neon_dyadic_if_i_d (void)
14591 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14592 affected if we specify unsigned args. */
14593 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* Flags for vfp_or_neon_is_neon, combinable as a bitmask.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2,
  NEON_CHECK_ARCH8 = 4
};
14603 /* Call this function if an instruction which may have belonged to the VFP or
14604 Neon instruction sets, but turned out to be a Neon instruction (due to the
14605 operand types involved, etc.). We have to check and/or fix-up a couple of
14608 - Make sure the user hasn't attempted to make a Neon instruction
14610 - Alter the value in the condition code field if necessary.
14611 - Make sure that the arch supports Neon instructions.
14613 Which of these operations take place depends on bits from enum
14614 vfp_or_neon_is_neon_bits.
14616 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14617 current instruction's condition is COND_ALWAYS, the condition field is
14618 changed to inst.uncond_value. This is necessary because instructions shared
14619 between VFP and Neon may be conditional for the VFP variants only, and the
14620 unconditional Neon version must have, e.g., 0xF in the condition field. */
14623 vfp_or_neon_is_neon (unsigned check
)
14625 /* Conditions are always legal in Thumb mode (IT blocks). */
14626 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
14628 if (inst
.cond
!= COND_ALWAYS
)
14630 first_error (_(BAD_COND
));
14633 if (inst
.uncond_value
!= -1)
14634 inst
.instruction
|= inst
.uncond_value
<< 28;
14637 if ((check
& NEON_CHECK_ARCH
)
14638 && !mark_feature_used (&fpu_neon_ext_v1
))
14640 first_error (_(BAD_FPU
));
14644 if ((check
& NEON_CHECK_ARCH8
)
14645 && !mark_feature_used (&fpu_neon_ext_armv8
))
14647 first_error (_(BAD_FPU
));
14655 do_neon_addsub_if_i (void)
14657 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
14660 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14663 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14664 affected if we specify unsigned args. */
14665 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
14668 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14670 V<op> A,B (A is operand 0, B is operand 2)
14675 so handle that case specially. */
14678 neon_exchange_operands (void)
14680 void *scratch
= alloca (sizeof (inst
.operands
[0]));
14681 if (inst
.operands
[1].present
)
14683 /* Swap operands[1] and operands[2]. */
14684 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
14685 inst
.operands
[1] = inst
.operands
[2];
14686 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
14690 inst
.operands
[1] = inst
.operands
[2];
14691 inst
.operands
[2] = inst
.operands
[0];
14696 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
14698 if (inst
.operands
[2].isreg
)
14701 neon_exchange_operands ();
14702 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
14706 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14707 struct neon_type_el et
= neon_check_type (2, rs
,
14708 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
14710 NEON_ENCODE (IMMED
, inst
);
14711 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14712 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14713 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14714 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14715 inst
.instruction
|= neon_quad (rs
) << 6;
14716 inst
.instruction
|= (et
.type
== NT_float
) << 10;
14717 inst
.instruction
|= neon_logbits (et
.size
) << 18;
14719 neon_dp_fixup (&inst
);
14726 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
14730 do_neon_cmp_inv (void)
14732 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
14738 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
14773 /* Encode multiply / multiply-accumulate scalar instructions. */
14776 neon_mul_mac (struct neon_type_el et
, int ubit
)
14780 /* Give a more helpful error message if we have an invalid type. */
14781 if (et
.type
== NT_invtype
)
14784 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
14785 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14786 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14787 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14788 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14789 inst
.instruction
|= LOW4 (scalar
);
14790 inst
.instruction
|= HI1 (scalar
) << 5;
14791 inst
.instruction
|= (et
.type
== NT_float
) << 8;
14792 inst
.instruction
|= neon_logbits (et
.size
) << 20;
14793 inst
.instruction
|= (ubit
!= 0) << 24;
14795 neon_dp_fixup (&inst
);
14799 do_neon_mac_maybe_scalar (void)
14801 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
14804 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14807 if (inst
.operands
[2].isscalar
)
14809 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
14810 struct neon_type_el et
= neon_check_type (3, rs
,
14811 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
14812 NEON_ENCODE (SCALAR
, inst
);
14813 neon_mul_mac (et
, neon_quad (rs
));
14817 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14818 affected if we specify unsigned args. */
14819 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
14824 do_neon_fmac (void)
14826 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
14829 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14832 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
14838 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14839 struct neon_type_el et
= neon_check_type (3, rs
,
14840 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14841 neon_three_same (neon_quad (rs
), 0, et
.size
);
14844 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14845 same types as the MAC equivalents. The polynomial type for this instruction
14846 is encoded the same as the integer type. */
14851 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
14854 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14857 if (inst
.operands
[2].isscalar
)
14858 do_neon_mac_maybe_scalar ();
14860 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
14864 do_neon_qdmulh (void)
14866 if (inst
.operands
[2].isscalar
)
14868 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
14869 struct neon_type_el et
= neon_check_type (3, rs
,
14870 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
14871 NEON_ENCODE (SCALAR
, inst
);
14872 neon_mul_mac (et
, neon_quad (rs
));
14876 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14877 struct neon_type_el et
= neon_check_type (3, rs
,
14878 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
14879 NEON_ENCODE (INTEGER
, inst
);
14880 /* The U bit (rounding) comes from bit mask. */
14881 neon_three_same (neon_quad (rs
), 0, et
.size
);
14886 do_neon_fcmp_absolute (void)
14888 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14889 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
14890 /* Size field comes from bit mask. */
14891 neon_three_same (neon_quad (rs
), 1, -1);
14895 do_neon_fcmp_absolute_inv (void)
14897 neon_exchange_operands ();
14898 do_neon_fcmp_absolute ();
14902 do_neon_step (void)
14904 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14905 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
14906 neon_three_same (neon_quad (rs
), 0, -1);
14910 do_neon_abs_neg (void)
14912 enum neon_shape rs
;
14913 struct neon_type_el et
;
14915 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
14918 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14921 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14922 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
14924 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14925 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14926 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14927 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14928 inst
.instruction
|= neon_quad (rs
) << 6;
14929 inst
.instruction
|= (et
.type
== NT_float
) << 10;
14930 inst
.instruction
|= neon_logbits (et
.size
) << 18;
14932 neon_dp_fixup (&inst
);
14938 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14939 struct neon_type_el et
= neon_check_type (2, rs
,
14940 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
14941 int imm
= inst
.operands
[2].imm
;
14942 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14943 _("immediate out of range for insert"));
14944 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14950 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14951 struct neon_type_el et
= neon_check_type (2, rs
,
14952 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
14953 int imm
= inst
.operands
[2].imm
;
14954 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
14955 _("immediate out of range for insert"));
14956 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
14960 do_neon_qshlu_imm (void)
14962 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14963 struct neon_type_el et
= neon_check_type (2, rs
,
14964 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
14965 int imm
= inst
.operands
[2].imm
;
14966 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14967 _("immediate out of range for shift"));
14968 /* Only encodes the 'U present' variant of the instruction.
14969 In this case, signed types have OP (bit 8) set to 0.
14970 Unsigned types have OP set to 1. */
14971 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
14972 /* The rest of the bits are the same as other immediate shifts. */
14973 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14977 do_neon_qmovn (void)
14979 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
14980 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
14981 /* Saturating move where operands can be signed or unsigned, and the
14982 destination has the same signedness. */
14983 NEON_ENCODE (INTEGER
, inst
);
14984 if (et
.type
== NT_unsigned
)
14985 inst
.instruction
|= 0xc0;
14987 inst
.instruction
|= 0x80;
14988 neon_two_same (0, 1, et
.size
/ 2);
14992 do_neon_qmovun (void)
14994 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
14995 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
14996 /* Saturating move with unsigned results. Operands must be signed. */
14997 NEON_ENCODE (INTEGER
, inst
);
14998 neon_two_same (0, 1, et
.size
/ 2);
15002 do_neon_rshift_sat_narrow (void)
15004 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15005 or unsigned. If operands are unsigned, results must also be unsigned. */
15006 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15007 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15008 int imm
= inst
.operands
[2].imm
;
15009 /* This gets the bounds check, size encoding and immediate bits calculation
15013 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15014 VQMOVN.I<size> <Dd>, <Qm>. */
15017 inst
.operands
[2].present
= 0;
15018 inst
.instruction
= N_MNEM_vqmovn
;
15023 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15024 _("immediate out of range"));
15025 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
15029 do_neon_rshift_sat_narrow_u (void)
15031 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15032 or unsigned. If operands are unsigned, results must also be unsigned. */
15033 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15034 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15035 int imm
= inst
.operands
[2].imm
;
15036 /* This gets the bounds check, size encoding and immediate bits calculation
15040 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15041 VQMOVUN.I<size> <Dd>, <Qm>. */
15044 inst
.operands
[2].present
= 0;
15045 inst
.instruction
= N_MNEM_vqmovun
;
15050 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15051 _("immediate out of range"));
15052 /* FIXME: The manual is kind of unclear about what value U should have in
15053 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15055 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
15059 do_neon_movn (void)
15061 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15062 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15063 NEON_ENCODE (INTEGER
, inst
);
15064 neon_two_same (0, 1, et
.size
/ 2);
15068 do_neon_rshift_narrow (void)
15070 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15071 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15072 int imm
= inst
.operands
[2].imm
;
15073 /* This gets the bounds check, size encoding and immediate bits calculation
15077 /* If immediate is zero then we are a pseudo-instruction for
15078 VMOVN.I<size> <Dd>, <Qm> */
15081 inst
.operands
[2].present
= 0;
15082 inst
.instruction
= N_MNEM_vmovn
;
15087 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15088 _("immediate out of range for narrowing operation"));
15089 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
15093 do_neon_shll (void)
15095 /* FIXME: Type checking when lengthening. */
15096 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
15097 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
15098 unsigned imm
= inst
.operands
[2].imm
;
15100 if (imm
== et
.size
)
15102 /* Maximum shift variant. */
15103 NEON_ENCODE (INTEGER
, inst
);
15104 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15105 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15106 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15107 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15108 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15110 neon_dp_fixup (&inst
);
15114 /* A more-specific type check for non-max versions. */
15115 et
= neon_check_type (2, NS_QDI
,
15116 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15117 NEON_ENCODE (IMMED
, inst
);
15118 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
15122 /* Check the various types for the VCVT instruction, and return which version
15123 the current instruction is. */
15125 #define CVT_FLAVOUR_VAR \
15126 CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
15127 CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
15128 CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
15129 CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
15130 /* Half-precision conversions. */ \
15131 CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
15132 CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
15133 /* VFP instructions. */ \
15134 CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
15135 CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
15136 CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
15137 CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
15138 CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
15139 CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
15140 /* VFP instructions with bitshift. */ \
15141 CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
15142 CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
15143 CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
15144 CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
15145 CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
15146 CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
15147 CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
15148 CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
15150 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
15151 neon_cvt_flavour_##C,
15153 /* The different types of conversions we can do. */
15154 enum neon_cvt_flavour
15157 neon_cvt_flavour_invalid
,
15158 neon_cvt_flavour_first_fp
= neon_cvt_flavour_f32_f64
15163 static enum neon_cvt_flavour
15164 get_neon_cvt_flavour (enum neon_shape rs
)
15166 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
15167 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
15168 if (et.type != NT_invtype) \
15170 inst.error = NULL; \
15171 return (neon_cvt_flavour_##C); \
15174 struct neon_type_el et
;
15175 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
15176 || rs
== NS_FF
) ? N_VFP
: 0;
15177 /* The instruction versions which take an immediate take one register
15178 argument, which is extended to the width of the full register. Thus the
15179 "source" and "destination" registers must have the same width. Hack that
15180 here by making the size equal to the key (wider, in this case) operand. */
15181 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
15185 return neon_cvt_flavour_invalid
;
15200 /* Neon-syntax VFP conversions. */
15203 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
15205 const char *opname
= 0;
15207 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
)
15209 /* Conversions with immediate bitshift. */
15210 const char *enc
[] =
15212 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15218 if (flavour
< (int) ARRAY_SIZE (enc
))
15220 opname
= enc
[flavour
];
15221 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15222 _("operands 0 and 1 must be the same register"));
15223 inst
.operands
[1] = inst
.operands
[2];
15224 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
15229 /* Conversions without bitshift. */
15230 const char *enc
[] =
15232 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15238 if (flavour
< (int) ARRAY_SIZE (enc
))
15239 opname
= enc
[flavour
];
15243 do_vfp_nsyn_opcode (opname
);
15247 do_vfp_nsyn_cvtz (void)
15249 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_NULL
);
15250 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15251 const char *enc
[] =
15253 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15259 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
15260 do_vfp_nsyn_opcode (enc
[flavour
]);
15264 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
15265 enum neon_cvt_mode mode
)
15270 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15271 D register operands. */
15272 if (flavour
== neon_cvt_flavour_s32_f64
15273 || flavour
== neon_cvt_flavour_u32_f64
)
15274 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15277 set_it_insn_type (OUTSIDE_IT_INSN
);
15281 case neon_cvt_flavour_s32_f64
:
15285 case neon_cvt_flavour_s32_f32
:
15289 case neon_cvt_flavour_u32_f64
:
15293 case neon_cvt_flavour_u32_f32
:
15298 first_error (_("invalid instruction shape"));
15304 case neon_cvt_mode_a
: rm
= 0; break;
15305 case neon_cvt_mode_n
: rm
= 1; break;
15306 case neon_cvt_mode_p
: rm
= 2; break;
15307 case neon_cvt_mode_m
: rm
= 3; break;
15308 default: first_error (_("invalid rounding mode")); return;
15311 NEON_ENCODE (FPV8
, inst
);
15312 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
15313 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
15314 inst
.instruction
|= sz
<< 8;
15315 inst
.instruction
|= op
<< 7;
15316 inst
.instruction
|= rm
<< 16;
15317 inst
.instruction
|= 0xf0000000;
15318 inst
.is_neon
= TRUE
;
15322 do_neon_cvt_1 (enum neon_cvt_mode mode
)
15324 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
15325 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
, NS_NULL
);
15326 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15328 /* PR11109: Handle round-to-zero for VCVT conversions. */
15329 if (mode
== neon_cvt_mode_z
15330 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
15331 && (flavour
== neon_cvt_flavour_s32_f32
15332 || flavour
== neon_cvt_flavour_u32_f32
15333 || flavour
== neon_cvt_flavour_s32_f64
15334 || flavour
== neon_cvt_flavour_u32_f64
)
15335 && (rs
== NS_FD
|| rs
== NS_FF
))
15337 do_vfp_nsyn_cvtz ();
15341 /* VFP rather than Neon conversions. */
15342 if (flavour
>= neon_cvt_flavour_first_fp
)
15344 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15345 do_vfp_nsyn_cvt (rs
, flavour
);
15347 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15358 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
15360 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15363 /* Fixed-point conversion with #0 immediate is encoded as an
15364 integer conversion. */
15365 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
15367 immbits
= 32 - inst
.operands
[2].imm
;
15368 NEON_ENCODE (IMMED
, inst
);
15369 if (flavour
!= neon_cvt_flavour_invalid
)
15370 inst
.instruction
|= enctab
[flavour
];
15371 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15372 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15373 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15374 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15375 inst
.instruction
|= neon_quad (rs
) << 6;
15376 inst
.instruction
|= 1 << 21;
15377 inst
.instruction
|= immbits
<< 16;
15379 neon_dp_fixup (&inst
);
15385 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
15387 NEON_ENCODE (FLOAT
, inst
);
15388 set_it_insn_type (OUTSIDE_IT_INSN
);
15390 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
15393 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15394 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15395 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15396 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15397 inst
.instruction
|= neon_quad (rs
) << 6;
15398 inst
.instruction
|= (flavour
== neon_cvt_flavour_u32_f32
) << 7;
15399 inst
.instruction
|= mode
<< 8;
15401 inst
.instruction
|= 0xfc000000;
15403 inst
.instruction
|= 0xf0000000;
15409 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
15411 NEON_ENCODE (INTEGER
, inst
);
15413 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15416 if (flavour
!= neon_cvt_flavour_invalid
)
15417 inst
.instruction
|= enctab
[flavour
];
15419 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15420 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15421 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15422 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15423 inst
.instruction
|= neon_quad (rs
) << 6;
15424 inst
.instruction
|= 2 << 18;
15426 neon_dp_fixup (&inst
);
15431 /* Half-precision conversions for Advanced SIMD -- neon. */
15436 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
15438 as_bad (_("operand size must match register width"));
15443 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
15445 as_bad (_("operand size must match register width"));
15450 inst
.instruction
= 0x3b60600;
15452 inst
.instruction
= 0x3b60700;
15454 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15455 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15456 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15457 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15458 neon_dp_fixup (&inst
);
15462 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
15463 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15464 do_vfp_nsyn_cvt (rs
, flavour
);
15466 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15471 do_neon_cvtr (void)
15473 do_neon_cvt_1 (neon_cvt_mode_x
);
15479 do_neon_cvt_1 (neon_cvt_mode_z
);
15483 do_neon_cvta (void)
15485 do_neon_cvt_1 (neon_cvt_mode_a
);
15489 do_neon_cvtn (void)
15491 do_neon_cvt_1 (neon_cvt_mode_n
);
15495 do_neon_cvtp (void)
15497 do_neon_cvt_1 (neon_cvt_mode_p
);
15501 do_neon_cvtm (void)
15503 do_neon_cvt_1 (neon_cvt_mode_m
);
15507 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
15510 mark_feature_used (&fpu_vfp_ext_armv8
);
15512 encode_arm_vfp_reg (inst
.operands
[0].reg
,
15513 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
15514 encode_arm_vfp_reg (inst
.operands
[1].reg
,
15515 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
15516 inst
.instruction
|= to
? 0x10000 : 0;
15517 inst
.instruction
|= t
? 0x80 : 0;
15518 inst
.instruction
|= is_double
? 0x100 : 0;
15519 do_vfp_cond_or_thumb ();
15523 do_neon_cvttb_1 (bfd_boolean t
)
15525 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_DF
, NS_NULL
);
15529 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
15532 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
15534 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
15537 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
15539 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
15541 /* The VCVTB and VCVTT instructions with D-register operands
15542 don't work for SP only targets. */
15543 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15547 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
15549 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
15551 /* The VCVTB and VCVTT instructions with D-register operands
15552 don't work for SP only targets. */
15553 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15557 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
15564 do_neon_cvtb (void)
15566 do_neon_cvttb_1 (FALSE
);
15571 do_neon_cvtt (void)
15573 do_neon_cvttb_1 (TRUE
);
15577 neon_move_immediate (void)
15579 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
15580 struct neon_type_el et
= neon_check_type (2, rs
,
15581 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15582 unsigned immlo
, immhi
= 0, immbits
;
15583 int op
, cmode
, float_p
;
15585 constraint (et
.type
== NT_invtype
,
15586 _("operand size must be specified for immediate VMOV"));
15588 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
15589 op
= (inst
.instruction
& (1 << 5)) != 0;
15591 immlo
= inst
.operands
[1].imm
;
15592 if (inst
.operands
[1].regisimm
)
15593 immhi
= inst
.operands
[1].reg
;
15595 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
15596 _("immediate has bits set outside the operand size"));
15598 float_p
= inst
.operands
[1].immisfloat
;
15600 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
15601 et
.size
, et
.type
)) == FAIL
)
15603 /* Invert relevant bits only. */
15604 neon_invert_size (&immlo
, &immhi
, et
.size
);
15605 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
15606 with one or the other; those cases are caught by
15607 neon_cmode_for_move_imm. */
15609 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
15610 &op
, et
.size
, et
.type
)) == FAIL
)
15612 first_error (_("immediate out of range"));
15617 inst
.instruction
&= ~(1 << 5);
15618 inst
.instruction
|= op
<< 5;
15620 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15621 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15622 inst
.instruction
|= neon_quad (rs
) << 6;
15623 inst
.instruction
|= cmode
<< 8;
15625 neon_write_immbits (immbits
);
15631 if (inst
.operands
[1].isreg
)
15633 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15635 NEON_ENCODE (INTEGER
, inst
);
15636 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15637 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15638 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15639 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15640 inst
.instruction
|= neon_quad (rs
) << 6;
15644 NEON_ENCODE (IMMED
, inst
);
15645 neon_move_immediate ();
15648 neon_dp_fixup (&inst
);
15651 /* Encode instructions of form:
15653 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15654 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15657 neon_mixed_length (struct neon_type_el et
, unsigned size
)
15659 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15660 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15661 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15662 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15663 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15664 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15665 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
15666 inst
.instruction
|= neon_logbits (size
) << 20;
15668 neon_dp_fixup (&inst
);
15672 do_neon_dyadic_long (void)
15674 /* FIXME: Type checking for lengthening op. */
15675 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15676 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
15677 neon_mixed_length (et
, et
.size
);
15681 do_neon_abal (void)
15683 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15684 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
15685 neon_mixed_length (et
, et
.size
);
15689 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
15691 if (inst
.operands
[2].isscalar
)
15693 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
15694 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
15695 NEON_ENCODE (SCALAR
, inst
);
15696 neon_mul_mac (et
, et
.type
== NT_unsigned
);
15700 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15701 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
15702 NEON_ENCODE (INTEGER
, inst
);
15703 neon_mixed_length (et
, et
.size
);
15708 do_neon_mac_maybe_scalar_long (void)
15710 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
15714 do_neon_dyadic_wide (void)
15716 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
15717 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15718 neon_mixed_length (et
, et
.size
);
15722 do_neon_dyadic_narrow (void)
15724 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15725 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
15726 /* Operand sign is unimportant, and the U bit is part of the opcode,
15727 so force the operand type to integer. */
15728 et
.type
= NT_integer
;
15729 neon_mixed_length (et
, et
.size
/ 2);
15733 do_neon_mul_sat_scalar_long (void)
15735 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
15739 do_neon_vmull (void)
15741 if (inst
.operands
[2].isscalar
)
15742 do_neon_mac_maybe_scalar_long ();
15745 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
15746 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
15748 if (et
.type
== NT_poly
)
15749 NEON_ENCODE (POLY
, inst
);
15751 NEON_ENCODE (INTEGER
, inst
);
15753 /* For polynomial encoding the U bit must be zero, and the size must
15754 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
15755 obviously, as 0b10). */
15758 /* Check we're on the correct architecture. */
15759 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
15761 _("Instruction form not available on this architecture.");
15766 neon_mixed_length (et
, et
.size
);
15773 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
15774 struct neon_type_el et
= neon_check_type (3, rs
,
15775 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15776 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
15778 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
15779 _("shift out of range"));
15780 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15781 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15782 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15783 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15784 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15785 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15786 inst
.instruction
|= neon_quad (rs
) << 6;
15787 inst
.instruction
|= imm
<< 8;
15789 neon_dp_fixup (&inst
);
15795 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15796 struct neon_type_el et
= neon_check_type (2, rs
,
15797 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15798 unsigned op
= (inst
.instruction
>> 7) & 3;
15799 /* N (width of reversed regions) is encoded as part of the bitmask. We
15800 extract it here to check the elements to be reversed are smaller.
15801 Otherwise we'd get a reserved instruction. */
15802 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
15803 gas_assert (elsize
!= 0);
15804 constraint (et
.size
>= elsize
,
15805 _("elements must be smaller than reversal region"));
15806 neon_two_same (neon_quad (rs
), 1, et
.size
);
15812 if (inst
.operands
[1].isscalar
)
15814 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
15815 struct neon_type_el et
= neon_check_type (2, rs
,
15816 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15817 unsigned sizebits
= et
.size
>> 3;
15818 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
15819 int logsize
= neon_logbits (et
.size
);
15820 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
15822 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
15825 NEON_ENCODE (SCALAR
, inst
);
15826 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15827 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15828 inst
.instruction
|= LOW4 (dm
);
15829 inst
.instruction
|= HI1 (dm
) << 5;
15830 inst
.instruction
|= neon_quad (rs
) << 6;
15831 inst
.instruction
|= x
<< 17;
15832 inst
.instruction
|= sizebits
<< 16;
15834 neon_dp_fixup (&inst
);
15838 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
15839 struct neon_type_el et
= neon_check_type (2, rs
,
15840 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
15841 /* Duplicate ARM register to lanes of vector. */
15842 NEON_ENCODE (ARMREG
, inst
);
15845 case 8: inst
.instruction
|= 0x400000; break;
15846 case 16: inst
.instruction
|= 0x000020; break;
15847 case 32: inst
.instruction
|= 0x000000; break;
15850 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
15851 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
15852 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
15853 inst
.instruction
|= neon_quad (rs
) << 21;
15854 /* The encoding for this instruction is identical for the ARM and Thumb
15855 variants, except for the condition field. */
15856 do_vfp_cond_or_thumb ();
15860 /* VMOV has particularly many variations. It can be one of:
15861 0. VMOV<c><q> <Qd>, <Qm>
15862 1. VMOV<c><q> <Dd>, <Dm>
15863 (Register operations, which are VORR with Rm = Rn.)
15864 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15865 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15867 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15868 (ARM register to scalar.)
15869 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15870 (Two ARM registers to vector.)
15871 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15872 (Scalar to ARM register.)
15873 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15874 (Vector to two ARM registers.)
15875 8. VMOV.F32 <Sd>, <Sm>
15876 9. VMOV.F64 <Dd>, <Dm>
15877 (VFP register moves.)
15878 10. VMOV.F32 <Sd>, #imm
15879 11. VMOV.F64 <Dd>, #imm
15880 (VFP float immediate load.)
15881 12. VMOV <Rd>, <Sm>
15882 (VFP single to ARM reg.)
15883 13. VMOV <Sd>, <Rm>
15884 (ARM reg to VFP single.)
15885 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15886 (Two ARM regs to two VFP singles.)
15887 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15888 (Two VFP singles to two ARM regs.)
15890 These cases can be disambiguated using neon_select_shape, except cases 1/9
15891 and 3/11 which depend on the operand type too.
15893 All the encoded bits are hardcoded by this function.
15895 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15896 Cases 5, 7 may be used with VFPv2 and above.
15898 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15899 can specify a type where it doesn't make sense to, and is ignored). */
15904 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
15905 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
, NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
15907 struct neon_type_el et
;
15908 const char *ldconst
= 0;
15912 case NS_DD
: /* case 1/9. */
15913 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
15914 /* It is not an error here if no type is given. */
15916 if (et
.type
== NT_float
&& et
.size
== 64)
15918 do_vfp_nsyn_opcode ("fcpyd");
15921 /* fall through. */
15923 case NS_QQ
: /* case 0/1. */
15925 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15927 /* The architecture manual I have doesn't explicitly state which
15928 value the U bit should have for register->register moves, but
15929 the equivalent VORR instruction has U = 0, so do that. */
15930 inst
.instruction
= 0x0200110;
15931 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15932 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15933 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15934 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15935 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15936 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15937 inst
.instruction
|= neon_quad (rs
) << 6;
15939 neon_dp_fixup (&inst
);
15943 case NS_DI
: /* case 3/11. */
15944 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
15946 if (et
.type
== NT_float
&& et
.size
== 64)
15948 /* case 11 (fconstd). */
15949 ldconst
= "fconstd";
15950 goto encode_fconstd
;
15952 /* fall through. */
15954 case NS_QI
: /* case 2/3. */
15955 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15957 inst
.instruction
= 0x0800010;
15958 neon_move_immediate ();
15959 neon_dp_fixup (&inst
);
15962 case NS_SR
: /* case 4. */
15964 unsigned bcdebits
= 0;
15966 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
15967 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
15969 /* .<size> is optional here, defaulting to .32. */
15970 if (inst
.vectype
.elems
== 0
15971 && inst
.operands
[0].vectype
.type
== NT_invtype
15972 && inst
.operands
[1].vectype
.type
== NT_invtype
)
15974 inst
.vectype
.el
[0].type
= NT_untyped
;
15975 inst
.vectype
.el
[0].size
= 32;
15976 inst
.vectype
.elems
= 1;
15979 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
15980 logsize
= neon_logbits (et
.size
);
15982 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
15984 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
15985 && et
.size
!= 32, _(BAD_FPU
));
15986 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
15987 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
15991 case 8: bcdebits
= 0x8; break;
15992 case 16: bcdebits
= 0x1; break;
15993 case 32: bcdebits
= 0x0; break;
15997 bcdebits
|= x
<< logsize
;
15999 inst
.instruction
= 0xe000b10;
16000 do_vfp_cond_or_thumb ();
16001 inst
.instruction
|= LOW4 (dn
) << 16;
16002 inst
.instruction
|= HI1 (dn
) << 7;
16003 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16004 inst
.instruction
|= (bcdebits
& 3) << 5;
16005 inst
.instruction
|= (bcdebits
>> 2) << 21;
16009 case NS_DRR
: /* case 5 (fmdrr). */
16010 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16013 inst
.instruction
= 0xc400b10;
16014 do_vfp_cond_or_thumb ();
16015 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
16016 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
16017 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16018 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
16021 case NS_RS
: /* case 6. */
16024 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16025 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
16026 unsigned abcdebits
= 0;
16028 /* .<dt> is optional here, defaulting to .32. */
16029 if (inst
.vectype
.elems
== 0
16030 && inst
.operands
[0].vectype
.type
== NT_invtype
16031 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16033 inst
.vectype
.el
[0].type
= NT_untyped
;
16034 inst
.vectype
.el
[0].size
= 32;
16035 inst
.vectype
.elems
= 1;
16038 et
= neon_check_type (2, NS_NULL
,
16039 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
16040 logsize
= neon_logbits (et
.size
);
16042 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16044 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16045 && et
.size
!= 32, _(BAD_FPU
));
16046 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16047 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16051 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
16052 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
16053 case 32: abcdebits
= 0x00; break;
16057 abcdebits
|= x
<< logsize
;
16058 inst
.instruction
= 0xe100b10;
16059 do_vfp_cond_or_thumb ();
16060 inst
.instruction
|= LOW4 (dn
) << 16;
16061 inst
.instruction
|= HI1 (dn
) << 7;
16062 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16063 inst
.instruction
|= (abcdebits
& 3) << 5;
16064 inst
.instruction
|= (abcdebits
>> 2) << 21;
16068 case NS_RRD
: /* case 7 (fmrrd). */
16069 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16072 inst
.instruction
= 0xc500b10;
16073 do_vfp_cond_or_thumb ();
16074 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16075 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16076 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16077 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16080 case NS_FF
: /* case 8 (fcpys). */
16081 do_vfp_nsyn_opcode ("fcpys");
16084 case NS_FI
: /* case 10 (fconsts). */
16085 ldconst
= "fconsts";
16087 if (is_quarter_float (inst
.operands
[1].imm
))
16089 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
16090 do_vfp_nsyn_opcode (ldconst
);
16093 first_error (_("immediate out of range"));
16096 case NS_RF
: /* case 12 (fmrs). */
16097 do_vfp_nsyn_opcode ("fmrs");
16100 case NS_FR
: /* case 13 (fmsr). */
16101 do_vfp_nsyn_opcode ("fmsr");
16104 /* The encoders for the fmrrs and fmsrr instructions expect three operands
16105 (one of which is a list), but we have parsed four. Do some fiddling to
16106 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
16108 case NS_RRFF
: /* case 14 (fmrrs). */
16109 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
16110 _("VFP registers must be adjacent"));
16111 inst
.operands
[2].imm
= 2;
16112 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16113 do_vfp_nsyn_opcode ("fmrrs");
16116 case NS_FFRR
: /* case 15 (fmsrr). */
16117 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
16118 _("VFP registers must be adjacent"));
16119 inst
.operands
[1] = inst
.operands
[2];
16120 inst
.operands
[2] = inst
.operands
[3];
16121 inst
.operands
[0].imm
= 2;
16122 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16123 do_vfp_nsyn_opcode ("fmsrr");
16127 /* neon_select_shape has determined that the instruction
16128 shape is wrong and has already set the error message. */
16137 do_neon_rshift_round_imm (void)
16139 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16140 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
16141 int imm
= inst
.operands
[2].imm
;
16143 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16146 inst
.operands
[2].present
= 0;
16151 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16152 _("immediate out of range for shift"));
16153 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
16158 do_neon_movl (void)
16160 struct neon_type_el et
= neon_check_type (2, NS_QD
,
16161 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16162 unsigned sizebits
= et
.size
>> 3;
16163 inst
.instruction
|= sizebits
<< 19;
16164 neon_two_same (0, et
.type
== NT_unsigned
, -1);
16170 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16171 struct neon_type_el et
= neon_check_type (2, rs
,
16172 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16173 NEON_ENCODE (INTEGER
, inst
);
16174 neon_two_same (neon_quad (rs
), 1, et
.size
);
16178 do_neon_zip_uzp (void)
16180 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16181 struct neon_type_el et
= neon_check_type (2, rs
,
16182 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16183 if (rs
== NS_DD
&& et
.size
== 32)
16185 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
16186 inst
.instruction
= N_MNEM_vtrn
;
16190 neon_two_same (neon_quad (rs
), 1, et
.size
);
16194 do_neon_sat_abs_neg (void)
16196 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16197 struct neon_type_el et
= neon_check_type (2, rs
,
16198 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16199 neon_two_same (neon_quad (rs
), 1, et
.size
);
16203 do_neon_pair_long (void)
16205 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16206 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
16207 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16208 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
16209 neon_two_same (neon_quad (rs
), 1, et
.size
);
16213 do_neon_recip_est (void)
16215 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16216 struct neon_type_el et
= neon_check_type (2, rs
,
16217 N_EQK
| N_FLT
, N_F32
| N_U32
| N_KEY
);
16218 inst
.instruction
|= (et
.type
== NT_float
) << 8;
16219 neon_two_same (neon_quad (rs
), 1, et
.size
);
16225 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16226 struct neon_type_el et
= neon_check_type (2, rs
,
16227 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16228 neon_two_same (neon_quad (rs
), 1, et
.size
);
16234 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16235 struct neon_type_el et
= neon_check_type (2, rs
,
16236 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
16237 neon_two_same (neon_quad (rs
), 1, et
.size
);
16243 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16244 struct neon_type_el et
= neon_check_type (2, rs
,
16245 N_EQK
| N_INT
, N_8
| N_KEY
);
16246 neon_two_same (neon_quad (rs
), 1, et
.size
);
16252 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16253 neon_two_same (neon_quad (rs
), 1, -1);
16257 do_neon_tbl_tbx (void)
16259 unsigned listlenbits
;
16260 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
16262 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
16264 first_error (_("bad list length for table lookup"));
16268 listlenbits
= inst
.operands
[1].imm
- 1;
16269 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16270 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16271 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16272 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16273 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16274 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16275 inst
.instruction
|= listlenbits
<< 8;
16277 neon_dp_fixup (&inst
);
16281 do_neon_ldm_stm (void)
16283 /* P, U and L bits are part of bitmask. */
16284 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
16285 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
16287 if (inst
.operands
[1].issingle
)
16289 do_vfp_nsyn_ldm_stm (is_dbmode
);
16293 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
16294 _("writeback (!) must be used for VLDMDB and VSTMDB"));
16296 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
16297 _("register list must contain at least 1 and at most 16 "
16300 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
16301 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
16302 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16303 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
16305 inst
.instruction
|= offsetbits
;
16307 do_vfp_cond_or_thumb ();
16311 do_neon_ldr_str (void)
16313 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
16315 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16316 And is UNPREDICTABLE in thumb mode. */
16318 && inst
.operands
[1].reg
== REG_PC
16319 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
16322 inst
.error
= _("Use of PC here is UNPREDICTABLE");
16323 else if (warn_on_deprecated
)
16324 as_tsktsk (_("Use of PC here is deprecated"));
16327 if (inst
.operands
[0].issingle
)
16330 do_vfp_nsyn_opcode ("flds");
16332 do_vfp_nsyn_opcode ("fsts");
16337 do_vfp_nsyn_opcode ("fldd");
16339 do_vfp_nsyn_opcode ("fstd");
16343 /* "interleave" version also handles non-interleaving register VLD1/VST1
16347 do_neon_ld_st_interleave (void)
16349 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
16350 N_8
| N_16
| N_32
| N_64
);
16351 unsigned alignbits
= 0;
16353 /* The bits in this table go:
16354 0: register stride of one (0) or two (1)
16355 1,2: register list length, minus one (1, 2, 3, 4).
16356 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16357 We use -1 for invalid entries. */
16358 const int typetable
[] =
16360 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
16361 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
16362 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
16363 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
16367 if (et
.type
== NT_invtype
)
16370 if (inst
.operands
[1].immisalign
)
16371 switch (inst
.operands
[1].imm
>> 8)
16373 case 64: alignbits
= 1; break;
16375 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
16376 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16377 goto bad_alignment
;
16381 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16382 goto bad_alignment
;
16387 first_error (_("bad alignment"));
16391 inst
.instruction
|= alignbits
<< 4;
16392 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16394 /* Bits [4:6] of the immediate in a list specifier encode register stride
16395 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16396 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16397 up the right value for "type" in a table based on this value and the given
16398 list style, then stick it back. */
16399 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
16400 | (((inst
.instruction
>> 8) & 3) << 3);
16402 typebits
= typetable
[idx
];
16404 constraint (typebits
== -1, _("bad list type for instruction"));
16405 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
16406 _("bad element type for instruction"));
16408 inst
.instruction
&= ~0xf00;
16409 inst
.instruction
|= typebits
<< 8;
16412 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16413 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16414 otherwise. The variable arguments are a list of pairs of legal (size, align)
16415 values, terminated with -1. */
16418 neon_alignment_bit (int size
, int align
, int *do_align
, ...)
16421 int result
= FAIL
, thissize
, thisalign
;
16423 if (!inst
.operands
[1].immisalign
)
16429 va_start (ap
, do_align
);
16433 thissize
= va_arg (ap
, int);
16434 if (thissize
== -1)
16436 thisalign
= va_arg (ap
, int);
16438 if (size
== thissize
&& align
== thisalign
)
16441 while (result
!= SUCCESS
);
16445 if (result
== SUCCESS
)
16448 first_error (_("unsupported alignment for instruction"));
16454 do_neon_ld_st_lane (void)
16456 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16457 int align_good
, do_align
= 0;
16458 int logsize
= neon_logbits (et
.size
);
16459 int align
= inst
.operands
[1].imm
>> 8;
16460 int n
= (inst
.instruction
>> 8) & 3;
16461 int max_el
= 64 / et
.size
;
16463 if (et
.type
== NT_invtype
)
16466 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
16467 _("bad list length"));
16468 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
16469 _("scalar index out of range"));
16470 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
16472 _("stride of 2 unavailable when element size is 8"));
16476 case 0: /* VLD1 / VST1. */
16477 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 16, 16,
16479 if (align_good
== FAIL
)
16483 unsigned alignbits
= 0;
16486 case 16: alignbits
= 0x1; break;
16487 case 32: alignbits
= 0x3; break;
16490 inst
.instruction
|= alignbits
<< 4;
16494 case 1: /* VLD2 / VST2. */
16495 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 16, 16, 32,
16497 if (align_good
== FAIL
)
16500 inst
.instruction
|= 1 << 4;
16503 case 2: /* VLD3 / VST3. */
16504 constraint (inst
.operands
[1].immisalign
,
16505 _("can't use alignment with this instruction"));
16508 case 3: /* VLD4 / VST4. */
16509 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
16510 16, 64, 32, 64, 32, 128, -1);
16511 if (align_good
== FAIL
)
16515 unsigned alignbits
= 0;
16518 case 8: alignbits
= 0x1; break;
16519 case 16: alignbits
= 0x1; break;
16520 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
16523 inst
.instruction
|= alignbits
<< 4;
16530 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
16531 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16532 inst
.instruction
|= 1 << (4 + logsize
);
16534 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
16535 inst
.instruction
|= logsize
<< 10;
16538 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16541 do_neon_ld_dup (void)
16543 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16544 int align_good
, do_align
= 0;
16546 if (et
.type
== NT_invtype
)
16549 switch ((inst
.instruction
>> 8) & 3)
16551 case 0: /* VLD1. */
16552 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
16553 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16554 &do_align
, 16, 16, 32, 32, -1);
16555 if (align_good
== FAIL
)
16557 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
16560 case 2: inst
.instruction
|= 1 << 5; break;
16561 default: first_error (_("bad list length")); return;
16563 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16566 case 1: /* VLD2. */
16567 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
16568 &do_align
, 8, 16, 16, 32, 32, 64, -1);
16569 if (align_good
== FAIL
)
16571 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
16572 _("bad list length"));
16573 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16574 inst
.instruction
|= 1 << 5;
16575 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16578 case 2: /* VLD3. */
16579 constraint (inst
.operands
[1].immisalign
,
16580 _("can't use alignment with this instruction"));
16581 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
16582 _("bad list length"));
16583 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16584 inst
.instruction
|= 1 << 5;
16585 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16588 case 3: /* VLD4. */
16590 int align
= inst
.operands
[1].imm
>> 8;
16591 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
16592 16, 64, 32, 64, 32, 128, -1);
16593 if (align_good
== FAIL
)
16595 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
16596 _("bad list length"));
16597 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16598 inst
.instruction
|= 1 << 5;
16599 if (et
.size
== 32 && align
== 128)
16600 inst
.instruction
|= 0x3 << 6;
16602 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16609 inst
.instruction
|= do_align
<< 4;
16612 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
16613 apart from bits [11:4]. */
16616 do_neon_ldx_stx (void)
16618 if (inst
.operands
[1].isreg
)
16619 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
16621 switch (NEON_LANE (inst
.operands
[0].imm
))
16623 case NEON_INTERLEAVE_LANES
:
16624 NEON_ENCODE (INTERLV
, inst
);
16625 do_neon_ld_st_interleave ();
16628 case NEON_ALL_LANES
:
16629 NEON_ENCODE (DUP
, inst
);
16630 if (inst
.instruction
== N_INV
)
16632 first_error ("only loads support such operands");
16639 NEON_ENCODE (LANE
, inst
);
16640 do_neon_ld_st_lane ();
16643 /* L bit comes from bit mask. */
16644 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16645 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16646 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16648 if (inst
.operands
[1].postind
)
16650 int postreg
= inst
.operands
[1].imm
& 0xf;
16651 constraint (!inst
.operands
[1].immisreg
,
16652 _("post-index must be a register"));
16653 constraint (postreg
== 0xd || postreg
== 0xf,
16654 _("bad register for post-index"));
16655 inst
.instruction
|= postreg
;
16659 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
16660 constraint (inst
.reloc
.exp
.X_op
!= O_constant
16661 || inst
.reloc
.exp
.X_add_number
!= 0,
16664 if (inst
.operands
[1].writeback
)
16666 inst
.instruction
|= 0xd;
16669 inst
.instruction
|= 0xf;
16673 inst
.instruction
|= 0xf9000000;
16675 inst
.instruction
|= 0xf4000000;
16680 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
16682 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16683 D register operands. */
16684 if (neon_shape_class
[rs
] == SC_DOUBLE
)
16685 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16688 NEON_ENCODE (FPV8
, inst
);
16691 do_vfp_sp_dyadic ();
16693 do_vfp_dp_rd_rn_rm ();
16696 inst
.instruction
|= 0x100;
16698 inst
.instruction
|= 0xf0000000;
16704 set_it_insn_type (OUTSIDE_IT_INSN
);
16706 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
16707 first_error (_("invalid instruction shape"));
16713 set_it_insn_type (OUTSIDE_IT_INSN
);
16715 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
16718 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16721 neon_dyadic_misc (NT_untyped
, N_F32
, 0);
16725 do_vrint_1 (enum neon_cvt_mode mode
)
16727 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
16728 struct neon_type_el et
;
16733 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16734 D register operands. */
16735 if (neon_shape_class
[rs
] == SC_DOUBLE
)
16736 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16739 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
16740 if (et
.type
!= NT_invtype
)
16742 /* VFP encodings. */
16743 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
16744 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
16745 set_it_insn_type (OUTSIDE_IT_INSN
);
16747 NEON_ENCODE (FPV8
, inst
);
16749 do_vfp_sp_monadic ();
16751 do_vfp_dp_rd_rm ();
16755 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
16756 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
16757 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
16758 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
16759 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
16760 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
16761 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
16765 inst
.instruction
|= (rs
== NS_DD
) << 8;
16766 do_vfp_cond_or_thumb ();
16770 /* Neon encodings (or something broken...). */
16772 et
= neon_check_type (2, rs
, N_EQK
, N_F32
| N_KEY
);
16774 if (et
.type
== NT_invtype
)
16777 set_it_insn_type (OUTSIDE_IT_INSN
);
16778 NEON_ENCODE (FLOAT
, inst
);
16780 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16783 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16784 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16785 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16786 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16787 inst
.instruction
|= neon_quad (rs
) << 6;
16790 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
16791 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
16792 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
16793 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
16794 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
16795 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
16796 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
16801 inst
.instruction
|= 0xfc000000;
16803 inst
.instruction
|= 0xf0000000;
16810 do_vrint_1 (neon_cvt_mode_x
);
16816 do_vrint_1 (neon_cvt_mode_z
);
16822 do_vrint_1 (neon_cvt_mode_r
);
16828 do_vrint_1 (neon_cvt_mode_a
);
16834 do_vrint_1 (neon_cvt_mode_n
);
16840 do_vrint_1 (neon_cvt_mode_p
);
16846 do_vrint_1 (neon_cvt_mode_m
);
16849 /* Crypto v1 instructions. */
16851 do_crypto_2op_1 (unsigned elttype
, int op
)
16853 set_it_insn_type (OUTSIDE_IT_INSN
);
16855 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
16861 NEON_ENCODE (INTEGER
, inst
);
16862 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16863 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16864 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16865 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16867 inst
.instruction
|= op
<< 6;
16870 inst
.instruction
|= 0xfc000000;
16872 inst
.instruction
|= 0xf0000000;
16876 do_crypto_3op_1 (int u
, int op
)
16878 set_it_insn_type (OUTSIDE_IT_INSN
);
16880 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
16881 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
16886 NEON_ENCODE (INTEGER
, inst
);
16887 neon_three_same (1, u
, 8 << op
);
16893 do_crypto_2op_1 (N_8
, 0);
16899 do_crypto_2op_1 (N_8
, 1);
16905 do_crypto_2op_1 (N_8
, 2);
16911 do_crypto_2op_1 (N_8
, 3);
16917 do_crypto_3op_1 (0, 0);
16923 do_crypto_3op_1 (0, 1);
16929 do_crypto_3op_1 (0, 2);
16935 do_crypto_3op_1 (0, 3);
16941 do_crypto_3op_1 (1, 0);
16947 do_crypto_3op_1 (1, 1);
16951 do_sha256su1 (void)
16953 do_crypto_3op_1 (1, 2);
16959 do_crypto_2op_1 (N_32
, -1);
16965 do_crypto_2op_1 (N_32
, 0);
16969 do_sha256su0 (void)
16971 do_crypto_2op_1 (N_32
, 1);
16975 do_crc32_1 (unsigned int poly
, unsigned int sz
)
16977 unsigned int Rd
= inst
.operands
[0].reg
;
16978 unsigned int Rn
= inst
.operands
[1].reg
;
16979 unsigned int Rm
= inst
.operands
[2].reg
;
16981 set_it_insn_type (OUTSIDE_IT_INSN
);
16982 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
16983 inst
.instruction
|= LOW4 (Rn
) << 16;
16984 inst
.instruction
|= LOW4 (Rm
);
16985 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
16986 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
16988 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
16989 as_warn (UNPRED_REG ("r15"));
16990 if (thumb_mode
&& (Rd
== REG_SP
|| Rn
== REG_SP
|| Rm
== REG_SP
))
16991 as_warn (UNPRED_REG ("r13"));
17031 /* Overall per-instruction processing. */
17033 /* We need to be able to fix up arbitrary expressions in some statements.
17034 This is so that we can handle symbols that are an arbitrary distance from
17035 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17036 which returns part of an address in a form which will be valid for
17037 a data instruction. We do this by pushing the expression into a symbol
17038 in the expr_section, and creating a fix for that. */
17041 fix_new_arm (fragS
* frag
,
17055 /* Create an absolute valued symbol, so we have something to
17056 refer to in the object file. Unfortunately for us, gas's
17057 generic expression parsing will already have folded out
17058 any use of .set foo/.type foo %function that may have
17059 been used to set type information of the target location,
17060 that's being specified symbolically. We have to presume
17061 the user knows what they are doing. */
17065 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
17067 symbol
= symbol_find_or_make (name
);
17068 S_SET_SEGMENT (symbol
, absolute_section
);
17069 symbol_set_frag (symbol
, &zero_address_frag
);
17070 S_SET_VALUE (symbol
, exp
->X_add_number
);
17071 exp
->X_op
= O_symbol
;
17072 exp
->X_add_symbol
= symbol
;
17073 exp
->X_add_number
= 0;
17079 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
17080 (enum bfd_reloc_code_real
) reloc
);
17084 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
17085 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
17089 /* Mark whether the fix is to a THUMB instruction, or an ARM
17091 new_fix
->tc_fix_data
= thumb_mode
;
17094 /* Create a frg for an instruction requiring relaxation. */
17096 output_relax_insn (void)
17102 /* The size of the instruction is unknown, so tie the debug info to the
17103 start of the instruction. */
17104 dwarf2_emit_insn (0);
17106 switch (inst
.reloc
.exp
.X_op
)
17109 sym
= inst
.reloc
.exp
.X_add_symbol
;
17110 offset
= inst
.reloc
.exp
.X_add_number
;
17114 offset
= inst
.reloc
.exp
.X_add_number
;
17117 sym
= make_expr_symbol (&inst
.reloc
.exp
);
17121 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
17122 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
17123 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
17126 /* Write a 32-bit thumb instruction to buf. */
17128 put_thumb32_insn (char * buf
, unsigned long insn
)
17130 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
17131 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
17135 output_inst (const char * str
)
17141 as_bad ("%s -- `%s'", inst
.error
, str
);
17146 output_relax_insn ();
17149 if (inst
.size
== 0)
17152 to
= frag_more (inst
.size
);
17153 /* PR 9814: Record the thumb mode into the current frag so that we know
17154 what type of NOP padding to use, if necessary. We override any previous
17155 setting so that if the mode has changed then the NOPS that we use will
17156 match the encoding of the last instruction in the frag. */
17157 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
17159 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
17161 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
17162 put_thumb32_insn (to
, inst
.instruction
);
17164 else if (inst
.size
> INSN_SIZE
)
17166 gas_assert (inst
.size
== (2 * INSN_SIZE
));
17167 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
17168 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
17171 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
17173 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
17174 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
17175 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
17178 dwarf2_emit_insn (inst
.size
);
17182 output_it_inst (int cond
, int mask
, char * to
)
17184 unsigned long instruction
= 0xbf00;
17187 instruction
|= mask
;
17188 instruction
|= cond
<< 4;
17192 to
= frag_more (2);
17194 dwarf2_emit_insn (2);
17198 md_number_to_chars (to
, instruction
, 2);
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17237 /* Subroutine of md_assemble, responsible for looking up the primary
17238 opcode from the mnemonic the user wrote. STR points to the
17239 beginning of the mnemonic.
17241 This is not simply a hash table lookup, because of conditional
17242 variants. Most instructions have conditional variants, which are
17243 expressed with a _conditional affix_ to the mnemonic. If we were
17244 to encode each conditional variant as a literal string in the opcode
17245 table, it would have approximately 20,000 entries.
17247 Most mnemonics take this affix as a suffix, and in unified syntax,
17248 'most' is upgraded to 'all'. However, in the divided syntax, some
17249 instructions take the affix as an infix, notably the s-variants of
17250 the arithmetic instructions. Of those instructions, all but six
17251 have the infix appear after the third character of the mnemonic.
17253 Accordingly, the algorithm for looking up primary opcodes given
17256 1. Look up the identifier in the opcode table.
17257 If we find a match, go to step U.
17259 2. Look up the last two characters of the identifier in the
17260 conditions table. If we find a match, look up the first N-2
17261 characters of the identifier in the opcode table. If we
17262 find a match, go to step CE.
17264 3. Look up the fourth and fifth characters of the identifier in
17265 the conditions table. If we find a match, extract those
17266 characters from the identifier, and look up the remaining
17267 characters in the opcode table. If we find a match, go
17272 U. Examine the tag field of the opcode structure, in case this is
17273 one of the six instructions with its conditional infix in an
17274 unusual place. If it is, the tag tells us where to find the
17275 infix; look it up in the conditions table and set inst.cond
17276 accordingly. Otherwise, this is an unconditional instruction.
17277 Again set inst.cond accordingly. Return the opcode structure.
17279 CE. Examine the tag field to make sure this is an instruction that
17280 should receive a conditional suffix. If it is not, fail.
17281 Otherwise, set inst.cond from the suffix we already looked up,
17282 and return the opcode structure.
17284 CM. Examine the tag field to make sure this is an instruction that
17285 should receive a conditional infix after the third character.
17286 If it is not, fail. Otherwise, undo the edits to the current
17287 line of input and proceed as for case CE. */
17289 static const struct asm_opcode
*
17290 opcode_lookup (char **str
)
17294 const struct asm_opcode
*opcode
;
17295 const struct asm_cond
*cond
;
17298 /* Scan up to the end of the mnemonic, which must end in white space,
17299 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17300 for (base
= end
= *str
; *end
!= '\0'; end
++)
17301 if (*end
== ' ' || *end
== '.')
17307 /* Handle a possible width suffix and/or Neon type suffix. */
17312 /* The .w and .n suffixes are only valid if the unified syntax is in
17314 if (unified_syntax
&& end
[1] == 'w')
17316 else if (unified_syntax
&& end
[1] == 'n')
17321 inst
.vectype
.elems
= 0;
17323 *str
= end
+ offset
;
17325 if (end
[offset
] == '.')
17327 /* See if we have a Neon type suffix (possible in either unified or
17328 non-unified ARM syntax mode). */
17329 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
17332 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
17338 /* Look for unaffixed or special-case affixed mnemonic. */
17339 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17344 if (opcode
->tag
< OT_odd_infix_0
)
17346 inst
.cond
= COND_ALWAYS
;
17350 if (warn_on_deprecated
&& unified_syntax
)
17351 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17352 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
17353 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17356 inst
.cond
= cond
->value
;
17360 /* Cannot have a conditional suffix on a mnemonic of less than two
17362 if (end
- base
< 3)
17365 /* Look for suffixed mnemonic. */
17367 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17368 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17370 if (opcode
&& cond
)
17373 switch (opcode
->tag
)
17375 case OT_cinfix3_legacy
:
17376 /* Ignore conditional suffixes matched on infix only mnemonics. */
17380 case OT_cinfix3_deprecated
:
17381 case OT_odd_infix_unc
:
17382 if (!unified_syntax
)
17384 /* else fall through */
17388 case OT_csuf_or_in3
:
17389 inst
.cond
= cond
->value
;
17392 case OT_unconditional
:
17393 case OT_unconditionalF
:
17395 inst
.cond
= cond
->value
;
17398 /* Delayed diagnostic. */
17399 inst
.error
= BAD_COND
;
17400 inst
.cond
= COND_ALWAYS
;
17409 /* Cannot have a usual-position infix on a mnemonic of less than
17410 six characters (five would be a suffix). */
17411 if (end
- base
< 6)
17414 /* Look for infixed mnemonic in the usual position. */
17416 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17420 memcpy (save
, affix
, 2);
17421 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
17422 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17424 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
17425 memcpy (affix
, save
, 2);
17428 && (opcode
->tag
== OT_cinfix3
17429 || opcode
->tag
== OT_cinfix3_deprecated
17430 || opcode
->tag
== OT_csuf_or_in3
17431 || opcode
->tag
== OT_cinfix3_legacy
))
17434 if (warn_on_deprecated
&& unified_syntax
17435 && (opcode
->tag
== OT_cinfix3
17436 || opcode
->tag
== OT_cinfix3_deprecated
))
17437 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17439 inst
.cond
= cond
->value
;
17446 /* This function generates an initial IT instruction, leaving its block
17447 virtually open for the new instructions. Eventually,
17448 the mask will be updated by now_it_add_mask () each time
17449 a new instruction needs to be included in the IT block.
17450 Finally, the block is closed with close_automatic_it_block ().
17451 The block closure can be requested either from md_assemble (),
17452 a tencode (), or due to a label hook. */
17455 new_automatic_it_block (int cond
)
17457 now_it
.state
= AUTOMATIC_IT_BLOCK
;
17458 now_it
.mask
= 0x18;
17460 now_it
.block_length
= 1;
17461 mapping_state (MAP_THUMB
);
17462 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
17463 now_it
.warn_deprecated
= FALSE
;
17464 now_it
.insn_cond
= TRUE
;
17467 /* Close an automatic IT block.
17468 See comments in new_automatic_it_block (). */
17471 close_automatic_it_block (void)
17473 now_it
.mask
= 0x10;
17474 now_it
.block_length
= 0;
17477 /* Update the mask of the current automatically-generated IT
17478 instruction. See comments in new_automatic_it_block (). */
17481 now_it_add_mask (int cond
)
17483 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
17484 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
17485 | ((bitvalue) << (nbit)))
17486 const int resulting_bit
= (cond
& 1);
17488 now_it
.mask
&= 0xf;
17489 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17491 (5 - now_it
.block_length
));
17492 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
17494 ((5 - now_it
.block_length
) - 1) );
17495 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
17498 #undef SET_BIT_VALUE
17501 /* The IT blocks handling machinery is accessed through the these functions:
17502 it_fsm_pre_encode () from md_assemble ()
17503 set_it_insn_type () optional, from the tencode functions
17504 set_it_insn_type_last () ditto
17505 in_it_block () ditto
17506 it_fsm_post_encode () from md_assemble ()
17507 force_automatic_it_block_close () from label handling functions
17510 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17511 initializing the IT insn type with a generic initial value depending
17512 on the inst.condition.
17513 2) During the tencode function, two things may happen:
17514 a) The tencode function overrides the IT insn type by
17515 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17516 b) The tencode function queries the IT block state by
17517 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17519 Both set_it_insn_type and in_it_block run the internal FSM state
17520 handling function (handle_it_state), because: a) setting the IT insn
17521 type may incur in an invalid state (exiting the function),
17522 and b) querying the state requires the FSM to be updated.
17523 Specifically we want to avoid creating an IT block for conditional
17524 branches, so it_fsm_pre_encode is actually a guess and we can't
17525 determine whether an IT block is required until the tencode () routine
17526 has decided what type of instruction this actually is.
17527 Because of this, if set_it_insn_type and in_it_block have to be used,
17528 set_it_insn_type has to be called first.
17530 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17531 determines the insn IT type depending on the inst.cond code.
17532 When a tencode () routine encodes an instruction that can be
17533 either outside an IT block, or, in the case of being inside, has to be
17534 the last one, set_it_insn_type_last () will determine the proper
17535 IT instruction type based on the inst.cond code. Otherwise,
17536 set_it_insn_type can be called for overriding that logic or
17537 for covering other cases.
17539 Calling handle_it_state () may not transition the IT block state to
17540 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17541 still queried. Instead, if the FSM determines that the state should
17542 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17543 after the tencode () function: that's what it_fsm_post_encode () does.
17545 Since in_it_block () calls the state handling function to get an
17546 updated state, an error may occur (due to invalid insns combination).
17547 In that case, inst.error is set.
17548 Therefore, inst.error has to be checked after the execution of
17549 the tencode () routine.
17551 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17552 any pending state change (if any) that didn't take place in
17553 handle_it_state () as explained above. */
17556 it_fsm_pre_encode (void)
17558 if (inst
.cond
!= COND_ALWAYS
)
17559 inst
.it_insn_type
= INSIDE_IT_INSN
;
17561 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
17563 now_it
.state_handled
= 0;
17566 /* IT state FSM handling function. */
17569 handle_it_state (void)
17571 now_it
.state_handled
= 1;
17572 now_it
.insn_cond
= FALSE
;
17574 switch (now_it
.state
)
17576 case OUTSIDE_IT_BLOCK
:
17577 switch (inst
.it_insn_type
)
17579 case OUTSIDE_IT_INSN
:
17582 case INSIDE_IT_INSN
:
17583 case INSIDE_IT_LAST_INSN
:
17584 if (thumb_mode
== 0)
17587 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
17588 as_tsktsk (_("Warning: conditional outside an IT block"\
17593 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
17594 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
17596 /* Automatically generate the IT instruction. */
17597 new_automatic_it_block (inst
.cond
);
17598 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
17599 close_automatic_it_block ();
17603 inst
.error
= BAD_OUT_IT
;
17609 case IF_INSIDE_IT_LAST_INSN
:
17610 case NEUTRAL_IT_INSN
:
17614 now_it
.state
= MANUAL_IT_BLOCK
;
17615 now_it
.block_length
= 0;
17620 case AUTOMATIC_IT_BLOCK
:
17621 /* Three things may happen now:
17622 a) We should increment current it block size;
17623 b) We should close current it block (closing insn or 4 insns);
17624 c) We should close current it block and start a new one (due
17625 to incompatible conditions or
17626 4 insns-length block reached). */
17628 switch (inst
.it_insn_type
)
17630 case OUTSIDE_IT_INSN
:
17631 /* The closure of the block shall happen immediatelly,
17632 so any in_it_block () call reports the block as closed. */
17633 force_automatic_it_block_close ();
17636 case INSIDE_IT_INSN
:
17637 case INSIDE_IT_LAST_INSN
:
17638 case IF_INSIDE_IT_LAST_INSN
:
17639 now_it
.block_length
++;
17641 if (now_it
.block_length
> 4
17642 || !now_it_compatible (inst
.cond
))
17644 force_automatic_it_block_close ();
17645 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
17646 new_automatic_it_block (inst
.cond
);
17650 now_it
.insn_cond
= TRUE
;
17651 now_it_add_mask (inst
.cond
);
17654 if (now_it
.state
== AUTOMATIC_IT_BLOCK
17655 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
17656 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
17657 close_automatic_it_block ();
17660 case NEUTRAL_IT_INSN
:
17661 now_it
.block_length
++;
17662 now_it
.insn_cond
= TRUE
;
17664 if (now_it
.block_length
> 4)
17665 force_automatic_it_block_close ();
17667 now_it_add_mask (now_it
.cc
& 1);
17671 close_automatic_it_block ();
17672 now_it
.state
= MANUAL_IT_BLOCK
;
17677 case MANUAL_IT_BLOCK
:
17679 /* Check conditional suffixes. */
17680 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
17683 now_it
.mask
&= 0x1f;
17684 is_last
= (now_it
.mask
== 0x10);
17685 now_it
.insn_cond
= TRUE
;
17687 switch (inst
.it_insn_type
)
17689 case OUTSIDE_IT_INSN
:
17690 inst
.error
= BAD_NOT_IT
;
17693 case INSIDE_IT_INSN
:
17694 if (cond
!= inst
.cond
)
17696 inst
.error
= BAD_IT_COND
;
17701 case INSIDE_IT_LAST_INSN
:
17702 case IF_INSIDE_IT_LAST_INSN
:
17703 if (cond
!= inst
.cond
)
17705 inst
.error
= BAD_IT_COND
;
17710 inst
.error
= BAD_BRANCH
;
17715 case NEUTRAL_IT_INSN
:
17716 /* The BKPT instruction is unconditional even in an IT block. */
17720 inst
.error
= BAD_IT_IT
;
17730 struct depr_insn_mask
17732 unsigned long pattern
;
17733 unsigned long mask
;
17734 const char* description
;
17737 /* List of 16-bit instruction patterns deprecated in an IT block in
17739 static const struct depr_insn_mask depr_it_insns
[] = {
17740 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
17741 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
17742 { 0xa000, 0xb800, N_("ADR") },
17743 { 0x4800, 0xf800, N_("Literal loads") },
17744 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
17745 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
17746 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
17747 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
17748 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
17753 it_fsm_post_encode (void)
17757 if (!now_it
.state_handled
)
17758 handle_it_state ();
17760 if (now_it
.insn_cond
17761 && !now_it
.warn_deprecated
17762 && warn_on_deprecated
17763 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
17765 if (inst
.instruction
>= 0x10000)
17767 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
17768 "deprecated in ARMv8"));
17769 now_it
.warn_deprecated
= TRUE
;
17773 const struct depr_insn_mask
*p
= depr_it_insns
;
17775 while (p
->mask
!= 0)
17777 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
17779 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
17780 "of the following class are deprecated in ARMv8: "
17781 "%s"), p
->description
);
17782 now_it
.warn_deprecated
= TRUE
;
17790 if (now_it
.block_length
> 1)
17792 as_tsktsk (_("IT blocks containing more than one conditional "
17793 "instruction are deprecated in ARMv8"));
17794 now_it
.warn_deprecated
= TRUE
;
17798 is_last
= (now_it
.mask
== 0x10);
17801 now_it
.state
= OUTSIDE_IT_BLOCK
;
17807 force_automatic_it_block_close (void)
17809 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
17811 close_automatic_it_block ();
17812 now_it
.state
= OUTSIDE_IT_BLOCK
;
17820 if (!now_it
.state_handled
)
17821 handle_it_state ();
17823 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
17826 /* Whether OPCODE only has T32 encoding. Since this function is only used by
17827 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
17828 here, hence the "known" in the function name. */
17831 known_t32_only_insn (const struct asm_opcode
*opcode
)
17833 /* Original Thumb-1 wide instruction. */
17834 if (opcode
->tencode
== do_t_blx
17835 || opcode
->tencode
== do_t_branch23
17836 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
17837 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
17840 /* Wide-only instruction added to ARMv8-M. */
17841 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m
)
17842 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
17843 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
17844 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
17850 /* Whether wide instruction variant can be used if available for a valid OPCODE
17854 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
17856 if (known_t32_only_insn (opcode
))
17859 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
17860 of variant T3 of B.W is checked in do_t_branch. */
17861 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
17862 && opcode
->tencode
== do_t_branch
)
17865 /* Wide instruction variants of all instructions with narrow *and* wide
17866 variants become available with ARMv6t2. Other opcodes are either
17867 narrow-only or wide-only and are thus available if OPCODE is valid. */
17868 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
))
17871 /* OPCODE with narrow only instruction variant or wide variant not
17877 md_assemble (char *str
)
17880 const struct asm_opcode
* opcode
;
17882 /* Align the previous label if needed. */
17883 if (last_label_seen
!= NULL
)
17885 symbol_set_frag (last_label_seen
, frag_now
);
17886 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
17887 S_SET_SEGMENT (last_label_seen
, now_seg
);
17890 memset (&inst
, '\0', sizeof (inst
));
17891 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
17893 opcode
= opcode_lookup (&p
);
17896 /* It wasn't an instruction, but it might be a register alias of
17897 the form alias .req reg, or a Neon .dn/.qn directive. */
17898 if (! create_register_alias (str
, p
)
17899 && ! create_neon_reg_alias (str
, p
))
17900 as_bad (_("bad instruction `%s'"), str
);
17905 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
17906 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
17908 /* The value which unconditional instructions should have in place of the
17909 condition field. */
17910 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
17914 arm_feature_set variant
;
17916 variant
= cpu_variant
;
17917 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
17918 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
17919 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
17920 /* Check that this instruction is supported for this CPU. */
17921 if (!opcode
->tvariant
17922 || (thumb_mode
== 1
17923 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
17925 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
17928 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
17929 && opcode
->tencode
!= do_t_branch
)
17931 as_bad (_("Thumb does not support conditional execution"));
17935 /* Two things are addressed here:
17936 1) Implicit require narrow instructions on Thumb-1.
17937 This avoids relaxation accidentally introducing Thumb-2
17939 2) Reject wide instructions in non Thumb-2 cores.
17941 Only instructions with narrow and wide variants need to be handled
17942 but selecting all non wide-only instructions is easier. */
17943 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
17944 && !t32_insn_ok (variant
, opcode
))
17946 if (inst
.size_req
== 0)
17948 else if (inst
.size_req
== 4)
17950 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
17951 as_bad (_("selected processor does not support 32bit wide "
17952 "variant of instruction `%s'"), str
);
17954 as_bad (_("selected processor does not support `%s' in "
17955 "Thumb-2 mode"), str
);
17960 inst
.instruction
= opcode
->tvalue
;
17962 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
17964 /* Prepare the it_insn_type for those encodings that don't set
17966 it_fsm_pre_encode ();
17968 opcode
->tencode ();
17970 it_fsm_post_encode ();
17973 if (!(inst
.error
|| inst
.relax
))
17975 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
17976 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
17977 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
17979 as_bad (_("cannot honor width suffix -- `%s'"), str
);
17984 /* Something has gone badly wrong if we try to relax a fixed size
17986 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
17988 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
17989 *opcode
->tvariant
);
17990 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
17991 set those bits when Thumb-2 32-bit instructions are seen. The impact
17992 of relaxable instructions will be considered later after we finish all
17994 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
17995 variant
= arm_arch_none
;
17997 variant
= cpu_variant
;
17998 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
17999 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
18002 check_neon_suffixes
;
18006 mapping_state (MAP_THUMB
);
18009 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
18013 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
18014 is_bx
= (opcode
->aencode
== do_bx
);
18016 /* Check that this instruction is supported for this CPU. */
18017 if (!(is_bx
&& fix_v4bx
)
18018 && !(opcode
->avariant
&&
18019 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
18021 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
18026 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
18030 inst
.instruction
= opcode
->avalue
;
18031 if (opcode
->tag
== OT_unconditionalF
)
18032 inst
.instruction
|= 0xFU
<< 28;
18034 inst
.instruction
|= inst
.cond
<< 28;
18035 inst
.size
= INSN_SIZE
;
18036 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
18038 it_fsm_pre_encode ();
18039 opcode
->aencode ();
18040 it_fsm_post_encode ();
18042 /* Arm mode bx is marked as both v4T and v5 because it's still required
18043 on a hypothetical non-thumb v5 core. */
18045 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
18047 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
18048 *opcode
->avariant
);
18050 check_neon_suffixes
;
18054 mapping_state (MAP_ARM
);
18059 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
18067 check_it_blocks_finished (void)
18072 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
18073 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
18074 == MANUAL_IT_BLOCK
)
18076 as_warn (_("section '%s' finished with an open IT block."),
18080 if (now_it
.state
== MANUAL_IT_BLOCK
)
18081 as_warn (_("file finished with an open IT block."));
18085 /* Various frobbings of labels and their addresses. */
18088 arm_start_line_hook (void)
18090 last_label_seen
= NULL
;
18094 arm_frob_label (symbolS
* sym
)
18096 last_label_seen
= sym
;
18098 ARM_SET_THUMB (sym
, thumb_mode
);
18100 #if defined OBJ_COFF || defined OBJ_ELF
18101 ARM_SET_INTERWORK (sym
, support_interwork
);
18104 force_automatic_it_block_close ();
18106 /* Note - do not allow local symbols (.Lxxx) to be labelled
18107 as Thumb functions. This is because these labels, whilst
18108 they exist inside Thumb code, are not the entry points for
18109 possible ARM->Thumb calls. Also, these labels can be used
18110 as part of a computed goto or switch statement. eg gcc
18111 can generate code that looks like this:
18113 ldr r2, [pc, .Laaa]
18123 The first instruction loads the address of the jump table.
18124 The second instruction converts a table index into a byte offset.
18125 The third instruction gets the jump address out of the table.
18126 The fourth instruction performs the jump.
18128 If the address stored at .Laaa is that of a symbol which has the
18129 Thumb_Func bit set, then the linker will arrange for this address
18130 to have the bottom bit set, which in turn would mean that the
18131 address computation performed by the third instruction would end
18132 up with the bottom bit set. Since the ARM is capable of unaligned
18133 word loads, the instruction would then load the incorrect address
18134 out of the jump table, and chaos would ensue. */
18135 if (label_is_thumb_function_name
18136 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
18137 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
18139 /* When the address of a Thumb function is taken the bottom
18140 bit of that address should be set. This will allow
18141 interworking between Arm and Thumb functions to work
18144 THUMB_SET_FUNC (sym
, 1);
18146 label_is_thumb_function_name
= FALSE
;
18149 dwarf2_emit_label (sym
);
18153 arm_data_in_code (void)
18155 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
18157 *input_line_pointer
= '/';
18158 input_line_pointer
+= 5;
18159 *input_line_pointer
= 0;
18167 arm_canonicalize_symbol_name (char * name
)
18171 if (thumb_mode
&& (len
= strlen (name
)) > 5
18172 && streq (name
+ len
- 5, "/data"))
18173 *(name
+ len
- 5) = 0;
18178 /* Table of all register names defined by default. The user can
18179 define additional names with .req. Note that all register names
18180 should appear in both upper and lowercase variants. Some registers
18181 also have mixed-case names. */
18183 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
18184 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
18185 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
18186 #define REGSET(p,t) \
18187 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
18188 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
18189 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
18190 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
18191 #define REGSETH(p,t) \
18192 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
18193 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
18194 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
18195 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
18196 #define REGSET2(p,t) \
18197 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
18198 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
18199 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
18200 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
18201 #define SPLRBANK(base,bank,t) \
18202 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
18203 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
18204 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
18205 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
18206 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
18207 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18209 static const struct reg_entry reg_names
[] =
18211 /* ARM integer registers. */
18212 REGSET(r
, RN
), REGSET(R
, RN
),
18214 /* ATPCS synonyms. */
18215 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
18216 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
18217 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
18219 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
18220 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
18221 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
18223 /* Well-known aliases. */
18224 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
18225 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
18227 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
18228 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
18230 /* Coprocessor numbers. */
18231 REGSET(p
, CP
), REGSET(P
, CP
),
18233 /* Coprocessor register numbers. The "cr" variants are for backward
18235 REGSET(c
, CN
), REGSET(C
, CN
),
18236 REGSET(cr
, CN
), REGSET(CR
, CN
),
18238 /* ARM banked registers. */
18239 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
18240 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
18241 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
18242 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
18243 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
18244 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
18245 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
18247 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
18248 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
18249 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
18250 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
18251 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
18252 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
18253 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
18254 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
18256 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
18257 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
18258 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
18259 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
18260 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
18261 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
18262 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
18263 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18264 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18266 /* FPA registers. */
18267 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
18268 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
18270 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
18271 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
18273 /* VFP SP registers. */
18274 REGSET(s
,VFS
), REGSET(S
,VFS
),
18275 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
18277 /* VFP DP Registers. */
18278 REGSET(d
,VFD
), REGSET(D
,VFD
),
18279 /* Extra Neon DP registers. */
18280 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
18282 /* Neon QP registers. */
18283 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
18285 /* VFP control registers. */
18286 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
18287 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
18288 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
18289 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
18290 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
18291 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
18293 /* Maverick DSP coprocessor registers. */
18294 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
18295 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
18297 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
18298 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
18299 REGDEF(dspsc
,0,DSPSC
),
18301 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
18302 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
18303 REGDEF(DSPSC
,0,DSPSC
),
18305 /* iWMMXt data registers - p0, c0-15. */
18306 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
18308 /* iWMMXt control registers - p1, c0-3. */
18309 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
18310 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
18311 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
18312 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
18314 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
18315 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
18316 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
18317 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
18318 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
18320 /* XScale accumulator registers. */
18321 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
18327 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18328 within psr_required_here. */
18329 static const struct asm_psr psrs
[] =
18331 /* Backward compatibility notation. Note that "all" is no longer
18332 truly all possible PSR bits. */
18333 {"all", PSR_c
| PSR_f
},
18337 /* Individual flags. */
18343 /* Combinations of flags. */
18344 {"fs", PSR_f
| PSR_s
},
18345 {"fx", PSR_f
| PSR_x
},
18346 {"fc", PSR_f
| PSR_c
},
18347 {"sf", PSR_s
| PSR_f
},
18348 {"sx", PSR_s
| PSR_x
},
18349 {"sc", PSR_s
| PSR_c
},
18350 {"xf", PSR_x
| PSR_f
},
18351 {"xs", PSR_x
| PSR_s
},
18352 {"xc", PSR_x
| PSR_c
},
18353 {"cf", PSR_c
| PSR_f
},
18354 {"cs", PSR_c
| PSR_s
},
18355 {"cx", PSR_c
| PSR_x
},
18356 {"fsx", PSR_f
| PSR_s
| PSR_x
},
18357 {"fsc", PSR_f
| PSR_s
| PSR_c
},
18358 {"fxs", PSR_f
| PSR_x
| PSR_s
},
18359 {"fxc", PSR_f
| PSR_x
| PSR_c
},
18360 {"fcs", PSR_f
| PSR_c
| PSR_s
},
18361 {"fcx", PSR_f
| PSR_c
| PSR_x
},
18362 {"sfx", PSR_s
| PSR_f
| PSR_x
},
18363 {"sfc", PSR_s
| PSR_f
| PSR_c
},
18364 {"sxf", PSR_s
| PSR_x
| PSR_f
},
18365 {"sxc", PSR_s
| PSR_x
| PSR_c
},
18366 {"scf", PSR_s
| PSR_c
| PSR_f
},
18367 {"scx", PSR_s
| PSR_c
| PSR_x
},
18368 {"xfs", PSR_x
| PSR_f
| PSR_s
},
18369 {"xfc", PSR_x
| PSR_f
| PSR_c
},
18370 {"xsf", PSR_x
| PSR_s
| PSR_f
},
18371 {"xsc", PSR_x
| PSR_s
| PSR_c
},
18372 {"xcf", PSR_x
| PSR_c
| PSR_f
},
18373 {"xcs", PSR_x
| PSR_c
| PSR_s
},
18374 {"cfs", PSR_c
| PSR_f
| PSR_s
},
18375 {"cfx", PSR_c
| PSR_f
| PSR_x
},
18376 {"csf", PSR_c
| PSR_s
| PSR_f
},
18377 {"csx", PSR_c
| PSR_s
| PSR_x
},
18378 {"cxf", PSR_c
| PSR_x
| PSR_f
},
18379 {"cxs", PSR_c
| PSR_x
| PSR_s
},
18380 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
18381 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
18382 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
18383 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
18384 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
18385 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
18386 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
18387 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
18388 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
18389 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
18390 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
18391 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
18392 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
18393 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
18394 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
18395 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
18396 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
18397 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
18398 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
18399 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
18400 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
18401 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
18402 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
18403 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
18406 /* Table of V7M psr names. */
18407 static const struct asm_psr v7m_psrs
[] =
18409 {"apsr", 0 }, {"APSR", 0 },
18410 {"iapsr", 1 }, {"IAPSR", 1 },
18411 {"eapsr", 2 }, {"EAPSR", 2 },
18412 {"psr", 3 }, {"PSR", 3 },
18413 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
18414 {"ipsr", 5 }, {"IPSR", 5 },
18415 {"epsr", 6 }, {"EPSR", 6 },
18416 {"iepsr", 7 }, {"IEPSR", 7 },
18417 {"msp", 8 }, {"MSP", 8 },
18418 {"psp", 9 }, {"PSP", 9 },
18419 {"primask", 16}, {"PRIMASK", 16},
18420 {"basepri", 17}, {"BASEPRI", 17},
18421 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
18422 {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility. */
18423 {"faultmask", 19}, {"FAULTMASK", 19},
18424 {"control", 20}, {"CONTROL", 20}
18427 /* Table of all shift-in-operand names. */
18428 static const struct asm_shift_name shift_names
[] =
18430 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
18431 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
18432 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
18433 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
18434 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
18435 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
18438 /* Table of all explicit relocation names. */
18440 static struct reloc_entry reloc_names
[] =
18442 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
18443 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
18444 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
18445 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
18446 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
18447 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
18448 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
18449 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
18450 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
18451 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
18452 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
18453 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
18454 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
18455 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
18456 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
18457 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
18458 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
18459 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
}
18463 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
18464 static const struct asm_cond conds
[] =
18468 {"cs", 0x2}, {"hs", 0x2},
18469 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
18483 #define UL_BARRIER(L,U,CODE,FEAT) \
18484 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
18485 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
18487 static struct asm_barrier_opt barrier_opt_names
[] =
18489 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
18490 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
18491 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
18492 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
18493 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
18494 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
18495 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
18496 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
18497 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
18498 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
18499 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
18500 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
18501 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
18502 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
18503 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
18504 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
/* Table of ARM-format instructions.	 */

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* Deprecated form of the infix variant (diagnosed when used).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* ARM-only variants of all the above.  Note that CE takes its mnemonic
   as a string literal while C3 stringizes a bare identifier.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
   Thumb opcode is the ARM opcode with 0xE (AL) in the condition field.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
/* One entry of a conditionally-infixed mnemonic: the condition M2 is
   pasted between prefix M1 and suffix M3.  sizeof (#M2) == 1 detects an
   empty condition (just the terminating NUL), which marks the entry as
   the unconditional odd-infix form.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to the full set of entries for mnemonic M1<cond>M2: the bare
   (unconditional) spelling plus one entry per condition affix.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)
/* ARM-only unconditional mnemonics; UF bears 0xF in the condition field.  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
18686 static const struct asm_opcode insns
[] =
18688 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18689 #define THUMB_VARIANT & arm_ext_v4t
18690 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18691 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18692 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18693 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18694 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
18695 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
18696 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
18697 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
18698 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18699 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18700 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18701 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18702 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18703 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
18704 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18705 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
18707 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18708 for setting PSR flag bits. They are obsolete in V6 and do not
18709 have Thumb equivalents. */
18710 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18711 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18712 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
18713 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
18714 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
18715 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
18716 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18717 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18718 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
18720 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
18721 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
18722 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
18723 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
18725 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
18726 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
18727 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
18729 OP_ADDRGLDR
),ldst
, t_ldst
),
18730 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
18732 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18733 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18734 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18735 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18736 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18737 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18739 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
18740 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
18741 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
18742 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
18745 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
18746 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
18747 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
18748 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
18750 /* Thumb-compatibility pseudo ops. */
18751 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18752 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18753 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18754 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18755 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18756 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18757 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18758 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
18759 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
18760 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
18761 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
18762 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
18764 /* These may simplify to neg. */
18765 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
18766 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
18768 #undef THUMB_VARIANT
18769 #define THUMB_VARIANT & arm_ext_v6
18771 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
18773 /* V1 instructions with no Thumb analogue prior to V6T2. */
18774 #undef THUMB_VARIANT
18775 #define THUMB_VARIANT & arm_ext_v6t2
18777 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18778 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
18779 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
18781 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
18782 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
18783 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
18784 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
18786 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18787 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18789 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18790 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
18792 /* V1 instructions with no Thumb analogue at all. */
18793 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
18794 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
18796 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
18797 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
18798 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
18799 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
18800 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
18801 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
18802 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
18803 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
18806 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18807 #undef THUMB_VARIANT
18808 #define THUMB_VARIANT & arm_ext_v4t
18810 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
18811 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
18813 #undef THUMB_VARIANT
18814 #define THUMB_VARIANT & arm_ext_v6t2
18816 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
18817 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
18819 /* Generic coprocessor instructions. */
18820 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
18821 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18822 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18823 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18824 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18825 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18826 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18829 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18831 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
18832 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
18835 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18836 #undef THUMB_VARIANT
18837 #define THUMB_VARIANT & arm_ext_msr
18839 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
18840 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
18843 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18844 #undef THUMB_VARIANT
18845 #define THUMB_VARIANT & arm_ext_v6t2
18847 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18848 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18849 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18850 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18851 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18852 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18853 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
18854 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
18857 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18858 #undef THUMB_VARIANT
18859 #define THUMB_VARIANT & arm_ext_v4t
18861 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18862 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18863 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18864 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18865 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18866 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
18869 #define ARM_VARIANT & arm_ext_v4t_5
18871 /* ARM Architecture 4T. */
18872 /* Note: bx (and blx) are required on V5, even if the processor does
18873 not support Thumb. */
18874 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
18877 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18878 #undef THUMB_VARIANT
18879 #define THUMB_VARIANT & arm_ext_v5t
18881 /* Note: blx has 2 variants; the .value coded here is for
18882 BLX(2). Only this variant has conditional execution. */
18883 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
18884 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
18886 #undef THUMB_VARIANT
18887 #define THUMB_VARIANT & arm_ext_v6t2
18889 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
18890 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18891 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18892 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18893 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
18894 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
18895 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18896 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
18899 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18900 #undef THUMB_VARIANT
18901 #define THUMB_VARIANT & arm_ext_v5exp
18903 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18904 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18905 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18906 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18908 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18909 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
18911 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18912 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18913 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18914 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
18916 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18917 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18918 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18919 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18921 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18922 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
18924 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18925 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18926 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18927 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
18930 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18931 #undef THUMB_VARIANT
18932 #define THUMB_VARIANT & arm_ext_v6t2
18934 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
18935 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
18937 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
18938 ADDRGLDRS
), ldrd
, t_ldstd
),
18940 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
18941 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
18944 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
18946 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
18949 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
18950 #undef THUMB_VARIANT
18951 #define THUMB_VARIANT & arm_ext_v6
18953 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
18954 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
18955 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
18956 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
18957 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
18958 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18959 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18960 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18961 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
18962 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
18964 #undef THUMB_VARIANT
18965 #define THUMB_VARIANT & arm_ext_v6t2_v8m
18967 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
18968 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
18970 #undef THUMB_VARIANT
18971 #define THUMB_VARIANT & arm_ext_v6t2
18973 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
18974 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
18976 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
18977 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
18979 /* ARM V6 not included in V7M. */
18980 #undef THUMB_VARIANT
18981 #define THUMB_VARIANT & arm_ext_v6_notm
18982 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
18983 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
18984 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
18985 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
18986 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
18987 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
18988 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
18989 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
18990 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
18991 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
18992 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
18993 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
18994 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
18995 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
18996 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
18997 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
18998 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
18999 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19000 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
19002 /* ARM V6 not included in V7M (eg. integer SIMD). */
19003 #undef THUMB_VARIANT
19004 #define THUMB_VARIANT & arm_ext_v6_dsp
19005 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
19006 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
19007 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19008 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19009 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19010 /* Old name for QASX. */
19011 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19012 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19013 /* Old name for QSAX. */
19014 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19015 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19016 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19017 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19018 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19019 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19020 /* Old name for SASX. */
19021 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19022 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19023 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19024 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19025 /* Old name for SHASX. */
19026 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19027 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19028 /* Old name for SHSAX. */
19029 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19030 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19031 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19032 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19033 /* Old name for SSAX. */
19034 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19035 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19036 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19037 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19038 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19039 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19040 /* Old name for UASX. */
19041 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19042 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19043 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19044 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19045 /* Old name for UHASX. */
19046 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19047 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19048 /* Old name for UHSAX. */
19049 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19050 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19051 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19052 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19053 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19054 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19055 /* Old name for UQASX. */
19056 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19057 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19058 /* Old name for UQSAX. */
19059 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19060 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19061 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19062 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19063 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19064 /* Old name for USAX. */
19065 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19066 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19067 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19068 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19069 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19070 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19071 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19072 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19073 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19074 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19075 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19076 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19077 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19078 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19079 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19080 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19081 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19082 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19083 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19084 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19085 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19086 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19087 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19088 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19089 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19090 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19091 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19092 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19093 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19094 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
19095 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
19096 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19097 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19098 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
19101 #define ARM_VARIANT & arm_ext_v6k
19102 #undef THUMB_VARIANT
19103 #define THUMB_VARIANT & arm_ext_v6k
19105 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
19106 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
19107 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
19108 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
19110 #undef THUMB_VARIANT
19111 #define THUMB_VARIANT & arm_ext_v6_notm
19112 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
19114 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
19115 RRnpcb
), strexd
, t_strexd
),
19117 #undef THUMB_VARIANT
19118 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19119 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
19121 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
19123 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19125 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19127 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
19130 #define ARM_VARIANT & arm_ext_sec
19131 #undef THUMB_VARIANT
19132 #define THUMB_VARIANT & arm_ext_sec
19134 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
19137 #define ARM_VARIANT & arm_ext_virt
19138 #undef THUMB_VARIANT
19139 #define THUMB_VARIANT & arm_ext_virt
19141 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
19142 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
19145 #define ARM_VARIANT & arm_ext_pan
19146 #undef THUMB_VARIANT
19147 #define THUMB_VARIANT & arm_ext_pan
19149 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
19152 #define ARM_VARIANT & arm_ext_v6t2
19153 #undef THUMB_VARIANT
19154 #define THUMB_VARIANT & arm_ext_v6t2
19156 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
19157 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
19158 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19159 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19161 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
19162 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
19164 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19165 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19166 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19167 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19169 #undef THUMB_VARIANT
19170 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19171 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19172 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19174 /* Thumb-only instructions. */
19176 #define ARM_VARIANT NULL
19177 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
19178 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
19180 /* ARM does not really have an IT instruction, so always allow it.
19181 The opcode is copied from Thumb in order to allow warnings in
19182 -mimplicit-it=[never | arm] modes. */
19184 #define ARM_VARIANT & arm_ext_v1
19185 #undef THUMB_VARIANT
19186 #define THUMB_VARIANT & arm_ext_v6t2
19188 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
19189 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
19190 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
19191 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
19192 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
19193 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
19194 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
19195 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
19196 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
19197 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
19198 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
19199 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
19200 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
19201 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
19202 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
19203 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19204 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19205 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19207 /* Thumb2 only instructions. */
19209 #define ARM_VARIANT NULL
19211 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19212 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19213 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19214 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19215 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
19216 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
19218 /* Hardware division instructions. */
19220 #define ARM_VARIANT & arm_ext_adiv
19221 #undef THUMB_VARIANT
19222 #define THUMB_VARIANT & arm_ext_div
19224 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19225 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19227 /* ARM V6M/V7 instructions. */
19229 #define ARM_VARIANT & arm_ext_barrier
19230 #undef THUMB_VARIANT
19231 #define THUMB_VARIANT & arm_ext_barrier
19233 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
19234 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
19235 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
19237 /* ARM V7 instructions. */
19239 #define ARM_VARIANT & arm_ext_v7
19240 #undef THUMB_VARIANT
19241 #define THUMB_VARIANT & arm_ext_v7
19243 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
19244 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
19247 #define ARM_VARIANT & arm_ext_mp
19248 #undef THUMB_VARIANT
19249 #define THUMB_VARIANT & arm_ext_mp
19251 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
19253 /* AArchv8 instructions. */
19255 #define ARM_VARIANT & arm_ext_v8
19257 /* Instructions shared between armv8-a and armv8-m. */
19258 #undef THUMB_VARIANT
19259 #define THUMB_VARIANT & arm_ext_atomics
19261 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19262 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19263 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19264 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19265 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19266 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19267 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19268 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
19269 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19270 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19272 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19274 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19276 #undef THUMB_VARIANT
19277 #define THUMB_VARIANT & arm_ext_v8
19279 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
19280 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
19281 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
19283 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
19285 /* ARMv8 T32 only. */
19287 #define ARM_VARIANT NULL
19288 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
19289 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
19290 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
19292 /* FP for ARMv8. */
19294 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19295 #undef THUMB_VARIANT
19296 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19298 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19299 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19300 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19301 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19302 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19303 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19304 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
19305 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
19306 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
19307 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
19308 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
19309 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
19310 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
19311 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
19312 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
19313 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
19314 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
19316 /* Crypto v1 extensions. */
19318 #define ARM_VARIANT & fpu_crypto_ext_armv8
19319 #undef THUMB_VARIANT
19320 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19322 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
19323 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
19324 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
19325 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
19326 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
19327 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
19328 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
19329 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
19330 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
19331 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
19332 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
19333 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
19334 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
19335 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
19338 #define ARM_VARIANT & crc_ext_armv8
19339 #undef THUMB_VARIANT
19340 #define THUMB_VARIANT & crc_ext_armv8
19341 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
19342 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
19343 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
19344 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
19345 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
19346 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
19348 /* ARMv8.2 RAS extension. */
19350 #define ARM_VARIANT & arm_ext_v8_2
19351 #undef THUMB_VARIANT
19352 #define THUMB_VARIANT & arm_ext_v8_2
19353 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
19356 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19357 #undef THUMB_VARIANT
19358 #define THUMB_VARIANT NULL
19360 cCE("wfs", e200110
, 1, (RR
), rd
),
19361 cCE("rfs", e300110
, 1, (RR
), rd
),
19362 cCE("wfc", e400110
, 1, (RR
), rd
),
19363 cCE("rfc", e500110
, 1, (RR
), rd
),
19365 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19366 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19367 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19368 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19370 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19371 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19372 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19373 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
19375 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
19376 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
19377 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
19378 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
19379 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
19380 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
19381 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
19382 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
19383 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
19384 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
19385 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
19386 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
19388 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
19389 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
19390 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
19391 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
19392 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
19393 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
19394 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
19395 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
19396 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
19397 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
19398 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
19399 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
19401 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
19402 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
19403 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
19404 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
19405 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
19406 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
19407 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
19408 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
19409 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
19410 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
19411 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
19412 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
19414 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
19415 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
19416 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
19417 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
19418 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
19419 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
19420 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
19421 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
19422 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
19423 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
19424 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
19425 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
19427 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
19428 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
19429 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
19430 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
19431 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
19432 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
19433 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
19434 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
19435 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
19436 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
19437 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
19438 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
19440 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
19441 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
19442 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
19443 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
19444 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
19445 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
19446 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
19447 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
19448 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
19449 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
19450 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
19451 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
19453 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
19454 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
19455 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
19456 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
19457 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
19458 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
19459 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
19460 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
19461 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
19462 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
19463 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
19464 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
19466 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
19467 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
19468 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
19469 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
19470 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
19471 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
19472 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
19473 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
19474 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
19475 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
19476 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
19477 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
19479 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
19480 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
19481 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
19482 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
19483 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
19484 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
19485 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
19486 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
19487 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
19488 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
19489 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
19490 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
19492 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
19493 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
19494 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
19495 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
19496 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
19497 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
19498 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
19499 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
19500 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
19501 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
19502 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
19503 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
19505 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
19506 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
19507 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
19508 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
19509 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
19510 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
19511 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
19512 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
19513 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
19514 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
19515 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
19516 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
19518 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
19519 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
19520 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
19521 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
19522 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
19523 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
19524 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
19525 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
19526 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
19527 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
19528 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
19529 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
19531 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
19532 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
19533 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
19534 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
19535 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
19536 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
19537 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
19538 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
19539 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
19540 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
19541 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
19542 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
19544 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
19545 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
19546 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
19547 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
19548 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
19549 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
19550 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
19551 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
19552 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
19553 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
19554 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
19555 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
19557 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
19558 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
19559 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
19560 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
19561 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
19562 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
19563 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
19564 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
19565 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
19566 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
19567 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
19568 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
19570 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
19571 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
19572 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
19573 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
19574 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
19575 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
19576 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
19577 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
19578 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
19579 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
19580 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
19581 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
19583 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19584 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19585 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19586 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19587 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19588 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19589 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19590 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19591 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19592 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19593 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19594 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19596 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19597 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19598 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19599 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19600 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19601 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19602 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19603 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19604 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19605 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19606 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19607 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19609 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19610 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19611 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19612 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19613 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19614 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19615 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19616 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19617 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19618 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19619 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19620 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19622 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19623 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19624 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19625 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19626 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19627 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19628 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19629 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19630 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19631 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19632 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19633 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19635 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19636 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19637 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19638 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19639 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19640 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19641 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19642 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19643 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19644 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19645 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19646 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19648 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19649 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19650 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19651 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19652 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19653 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19654 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19655 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19656 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19657 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19658 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19659 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19661 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19662 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19663 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19664 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19665 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19666 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19667 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19668 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19669 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19670 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19671 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19672 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19674 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19675 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19676 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19677 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19678 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19679 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19680 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19681 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19682 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19683 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19684 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19685 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19687 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19688 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19689 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19690 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19691 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19692 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19693 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19694 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19695 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19696 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19697 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19698 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19700 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19701 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19702 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19703 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19704 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19705 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19706 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19707 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19708 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19709 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19710 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19711 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19713 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19714 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19715 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19716 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19717 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19718 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19719 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19720 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19721 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19722 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19723 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19724 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19726 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19727 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19728 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19729 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19730 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19731 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19732 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19733 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19734 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19735 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19736 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19737 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19739 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19740 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19741 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19742 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19743 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19744 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19745 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19746 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19747 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19748 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19749 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19750 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
19752 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19753 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19754 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19755 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
19757 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
19758 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
19759 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
19760 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
19761 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
19762 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
19763 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
19764 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
19765 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
19766 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
19767 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
19768 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
19770 /* The implementation of the FIX instruction is broken on some
19771 assemblers, in that it accepts a precision specifier as well as a
19772 rounding specifier, despite the fact that this is meaningless.
19773 To be more compatible, we accept it as well, though of course it
19774 does not set any bits. */
19775 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
19776 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
19777 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
19778 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
19779 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
19780 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
19781 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
19782 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
19783 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
19784 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
19785 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
19786 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
19787 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
19789 /* Instructions that were new with the real FPA, call them V2. */
19791 #define ARM_VARIANT & fpu_fpa_ext_v2
19793 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19794 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19795 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19796 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19797 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19798 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
19801 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
19803 /* Moves and type conversions. */
19804 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19805 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
19806 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
19807 cCE("fmstat", ef1fa10
, 0, (), noargs
),
19808 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
19809 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
19810 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19811 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19812 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19813 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19814 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19815 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19816 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
19817 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
19819 /* Memory operations. */
19820 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
19821 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
19822 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19823 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19824 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19825 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19826 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19827 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19828 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19829 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19830 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19831 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
19832 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19833 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
19834 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19835 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
19836 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19837 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
19839 /* Monadic operations. */
19840 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19841 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19842 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19844 /* Dyadic operations. */
19845 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19846 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19847 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19848 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19849 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19850 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19851 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19852 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19853 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
19856 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19857 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
19858 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
19859 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
19861 /* Double precision load/store are still present on single precision
19862 implementations. */
19863 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
19864 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
19865 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19866 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19867 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19868 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19869 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19870 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
19871 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19872 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
19875 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19877 /* Moves and type conversions. */
19878 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19879 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
19880 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19881 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
19882 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
19883 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
19884 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
19885 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
19886 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
19887 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19888 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19889 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19890 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
19892 /* Monadic operations. */
19893 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19894 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19895 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19897 /* Dyadic operations. */
19898 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19899 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19900 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19901 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19902 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19903 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19904 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19905 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19906 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
19909 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19910 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
19911 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
19912 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
19915 #define ARM_VARIANT & fpu_vfp_ext_v2
19917 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
19918 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
19919 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
19920 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
19922 /* Instructions which may belong to either the Neon or VFP instruction sets.
19923 Individual encoder functions perform additional architecture checks. */
19925 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19926 #undef THUMB_VARIANT
19927 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19929 /* These mnemonics are unique to VFP. */
19930 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
19931 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
19932 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
19933 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
19934 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
19935 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
19936 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
19937 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
19938 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
19939 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
19941 /* Mnemonics shared by Neon and VFP. */
19942 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
19943 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
19944 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
19946 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
19947 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
19949 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
19950 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
19952 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19953 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19954 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19955 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19956 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19957 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
19958 NCE(vldr
, d100b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
19959 NCE(vstr
, d000b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
19961 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
19962 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
19963 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
19964 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
19967 /* NOTE: All VMOV encoding is special-cased! */
19968 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
19969 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
19971 #undef THUMB_VARIANT
19972 #define THUMB_VARIANT & fpu_neon_ext_v1
19974 #define ARM_VARIANT & fpu_neon_ext_v1
19976 /* Data processing with three registers of the same length. */
19977 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
19978 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
19979 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
19980 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
19981 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
19982 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
19983 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
19984 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
19985 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
19986 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
19987 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
19988 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
19989 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
19990 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
19991 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
19992 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
19993 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
19994 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
19995 /* If not immediate, fall back to neon_dyadic_i64_su.
19996 shl_imm should accept I8 I16 I32 I64,
19997 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
19998 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
19999 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
20000 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
20001 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
20002 /* Logic ops, types optional & ignored. */
20003 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20004 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20005 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20006 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20007 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20008 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20009 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20010 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20011 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
20012 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
20013 /* Bitfield ops, untyped. */
20014 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20015 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20016 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20017 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20018 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20019 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20020 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
20021 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20022 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20023 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20024 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20025 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20026 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20027 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20028 back to neon_dyadic_if_su. */
20029 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20030 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20031 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20032 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20033 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20034 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20035 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20036 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20037 /* Comparison. Type I8 I16 I32 F32. */
20038 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
20039 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
20040 /* As above, D registers only. */
20041 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20042 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20043 /* Int and float variants, signedness unimportant. */
20044 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20045 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20046 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
20047 /* Add/sub take types I8 I16 I32 I64 F32. */
20048 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20049 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20050 /* vtst takes sizes 8, 16, 32. */
20051 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
20052 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
20053 /* VMUL takes I8 I16 I32 F32 P8. */
20054 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
20055 /* VQD{R}MULH takes S16 S32. */
20056 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20057 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20058 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20059 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20060 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20061 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20062 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20063 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20064 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20065 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20066 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20067 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20068 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20069 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20070 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20071 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20072 /* ARM v8.1 extension. */
20073 nUF(vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20074 nUF(vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20075 nUF(vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20076 nUF(vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20078 /* Two address, int/float. Types S8 S16 S32 F32. */
20079 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20080 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20082 /* Data processing with two registers and a shift amount. */
20083 /* Right shifts, and variants with rounding.
20084 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20085 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20086 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20087 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20088 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20089 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20090 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20091 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20092 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20093 /* Shift and insert. Sizes accepted 8 16 32 64. */
20094 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
20095 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
20096 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
20097 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
20098 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20099 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
20100 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
20101 /* Right shift immediate, saturating & narrowing, with rounding variants.
20102 Types accepted S16 S32 S64 U16 U32 U64. */
20103 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20104 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20105 /* As above, unsigned. Types accepted S16 S32 S64. */
20106 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20107 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20108 /* Right shift narrowing. Types accepted I16 I32 I64. */
20109 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20110 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20111 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20112 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
20113 /* CVT with optional immediate for fixed-point variant. */
20114 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
20116 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
20117 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
20119 /* Data processing, three registers of different lengths. */
20120 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20121 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
20122 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20123 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20124 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20125 /* If not scalar, fall back to neon_dyadic_long.
20126 Vector types as above, scalar types S16 S32 U16 U32. */
20127 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20128 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20129 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20130 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20131 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20132 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20133 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20134 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20135 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20136 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20137 /* Saturating doubling multiplies. Types S16 S32. */
20138 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20139 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20140 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20141 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20142 S16 S32 U16 U32. */
20143 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
20145 /* Extract. Size 8. */
20146 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
20147 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
20149 /* Two registers, miscellaneous. */
20150 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20151 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
20152 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
20153 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
20154 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
20155 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
20156 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
20157 /* Vector replicate. Sizes 8 16 32. */
20158 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
20159 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
20160 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20161 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
20162 /* VMOVN. Types I16 I32 I64. */
20163 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
20164 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20165 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
20166 /* VQMOVUN. Types S16 S32 S64. */
20167 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
20168 /* VZIP / VUZP. Sizes 8 16 32. */
20169 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20170 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20171 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20172 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20173 /* VQABS / VQNEG. Types S8 S16 S32. */
20174 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20175 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20176 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20177 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20178 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20179 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20180 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
20181 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20182 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
20183 /* Reciprocal estimates. Types U32 F32. */
20184 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20185 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
20186 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20187 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
20188 /* VCLS. Types S8 S16 S32. */
20189 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
20190 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
20191 /* VCLZ. Types I8 I16 I32. */
20192 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
20193 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
20194 /* VCNT. Size 8. */
20195 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
20196 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
20197 /* Two address, untyped. */
20198 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
20199 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
20200 /* VTRN. Sizes 8 16 32. */
20201 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
20202 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
20204 /* Table lookup. Size 8. */
20205 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20206 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20208 #undef THUMB_VARIANT
20209 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20211 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20213 /* Neon element/structure load/store. */
20214 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20215 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20216 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20217 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20218 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20219 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20220 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20221 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20223 #undef THUMB_VARIANT
20224 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20226 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20227 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
20228 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20229 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20230 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20231 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20232 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20233 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20234 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20235 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20237 #undef THUMB_VARIANT
20238 #define THUMB_VARIANT & fpu_vfp_ext_v3
20240 #define ARM_VARIANT & fpu_vfp_ext_v3
20242 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
20243 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20244 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20245 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20246 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20247 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20248 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20249 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20250 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20253 #define ARM_VARIANT & fpu_vfp_ext_fma
20254 #undef THUMB_VARIANT
20255 #define THUMB_VARIANT & fpu_vfp_ext_fma
20256 /* Mnemonics shared by Neon and VFP. These are included in the
20257 VFP FMA variant; NEON and VFP FMA always includes the NEON
20258 FMA instructions. */
20259 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20260 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20261 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20262 the v form should always be used. */
20263 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20264 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20265 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20266 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20267 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20268 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20270 #undef THUMB_VARIANT
20272 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20274 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20275 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20276 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20277 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20278 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20279 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20280 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
20281 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
20284 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20286 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
20287 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
20288 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
20289 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
20290 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
20291 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
20292 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
20293 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
20294 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
20295 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20296 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20297 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20298 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20299 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20300 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20301 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20302 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20303 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20304 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
20305 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
20306 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20307 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20308 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20309 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20310 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20311 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20312 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
20313 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
20314 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
20315 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
20316 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
20317 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
20318 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
20319 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
20320 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20321 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20322 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20323 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20324 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20325 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20326 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20327 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20328 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20329 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20330 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20331 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20332 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
20333 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20334 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20335 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20336 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20337 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20338 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20339 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20340 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20341 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20342 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20343 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20344 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20345 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20346 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20347 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20348 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20349 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20350 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20351 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20352 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20353 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20354 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20355 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20356 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20357 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20358 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20359 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20360 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20361 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20362 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20363 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20364 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20365 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20366 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20367 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20368 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20369 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20370 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20371 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20372 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20373 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20374 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
20375 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20376 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20377 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20378 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20379 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20380 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20381 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20382 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20383 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20384 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20385 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20386 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20387 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20388 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20389 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20390 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20391 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20392 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20393 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20394 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20395 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20396 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
20397 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20398 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20399 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20400 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20401 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20402 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20403 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20404 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20405 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20406 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20407 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20408 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20409 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20410 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20411 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20412 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20413 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
20414 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
20415 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20416 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
20417 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
20418 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
20419 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20420 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20421 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20422 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20423 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20424 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20425 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20426 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20427 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20428 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20429 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20430 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20431 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20432 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20433 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
20434 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20435 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20436 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20437 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20438 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20439 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20440 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20441 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20442 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
20443 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20444 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20445 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20446 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20447 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
20450 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20452 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
20453 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
20454 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
20455 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20456 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20457 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20458 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20459 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20460 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20461 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20462 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20463 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20464 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20465 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20466 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20467 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20468 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20469 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20470 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20471 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20472 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
20473 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20474 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20475 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20476 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20477 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20478 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20479 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20480 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20481 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20482 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20483 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20484 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20485 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20486 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20487 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20488 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20489 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20490 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20491 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20492 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20493 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20494 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20495 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20496 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20497 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20498 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20499 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20500 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20501 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20502 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20503 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20504 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20505 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20506 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20507 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20508 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
20511 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20513 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20514 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20515 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20516 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20517 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
20518 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
20519 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
20520 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
20521 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
20522 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
20523 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
20524 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
20525 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
20526 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
20527 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
20528 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
20529 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
20530 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
20531 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
20532 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
20533 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
20534 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
20535 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
20536 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
20537 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
20538 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
20539 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
20540 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
20541 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
20542 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
20543 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
20544 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
20545 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
20546 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
20547 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
20548 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
20549 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
20550 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
20551 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
20552 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
20553 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
20554 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
20555 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
20556 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
20557 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
20558 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
20559 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
20560 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
20561 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
20562 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
20563 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
20564 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
20565 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
20566 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
20567 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20568 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20569 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20570 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20571 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
20572 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
20573 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
20574 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
20575 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
20576 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
20577 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20578 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20579 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20580 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20581 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20582 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
20583 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20584 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
20585 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
20586 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
20587 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
20588 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
20591 #define ARM_VARIANT NULL
20592 #undef THUMB_VARIANT
20593 #define THUMB_VARIANT & arm_ext_v8m
20594 TUE("tt", 0, e840f000
, 2, (RRnpc
, RRnpc
), 0, tt
),
20595 TUE("ttt", 0, e840f040
, 2, (RRnpc
, RRnpc
), 0, tt
),
20598 #undef THUMB_VARIANT
20624 /* MD interface: bits in the object file. */
20626 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20627 for use in the a.out file, and stores them in the array pointed to by buf.
20628 This knows about the endian-ness of the target machine and does
20629 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
20630 2 (short) and 4 (long) Floating numbers are put out as a series of
20631 LITTLENUMS (shorts, here at least). */
20634 md_number_to_chars (char * buf
, valueT val
, int n
)
20636 if (target_big_endian
)
20637 number_to_chars_bigendian (buf
, val
, n
);
20639 number_to_chars_littleendian (buf
, val
, n
);
20643 md_chars_to_number (char * buf
, int n
)
20646 unsigned char * where
= (unsigned char *) buf
;
20648 if (target_big_endian
)
20653 result
|= (*where
++ & 255);
20661 result
|= (where
[n
] & 255);
20668 /* MD interface: Sections. */
20670 /* Calculate the maximum variable size (i.e., excluding fr_fix)
20671 that an rs_machine_dependent frag may reach. */
20674 arm_frag_max_var (fragS
*fragp
)
20676 /* We only use rs_machine_dependent for variable-size Thumb instructions,
20677 which are either THUMB_SIZE (2) or INSN_SIZE (4).
20679 Note that we generate relaxable instructions even for cases that don't
20680 really need it, like an immediate that's a trivial constant. So we're
20681 overestimating the instruction size for some of those cases. Rather
20682 than putting more intelligence here, it would probably be better to
20683 avoid generating a relaxation frag in the first place when it can be
20684 determined up front that a short instruction will suffice. */
20686 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
20690 /* Estimate the size of a frag before relaxing. Assume everything fits in
20694 md_estimate_size_before_relax (fragS
* fragp
,
20695 segT segtype ATTRIBUTE_UNUSED
)
20701 /* Convert a machine dependent frag. */
20704 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
20706 unsigned long insn
;
20707 unsigned long old_op
;
20715 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
20717 old_op
= bfd_get_16(abfd
, buf
);
20718 if (fragp
->fr_symbol
)
20720 exp
.X_op
= O_symbol
;
20721 exp
.X_add_symbol
= fragp
->fr_symbol
;
20725 exp
.X_op
= O_constant
;
20727 exp
.X_add_number
= fragp
->fr_offset
;
20728 opcode
= fragp
->fr_subtype
;
20731 case T_MNEM_ldr_pc
:
20732 case T_MNEM_ldr_pc2
:
20733 case T_MNEM_ldr_sp
:
20734 case T_MNEM_str_sp
:
20741 if (fragp
->fr_var
== 4)
20743 insn
= THUMB_OP32 (opcode
);
20744 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
20746 insn
|= (old_op
& 0x700) << 4;
20750 insn
|= (old_op
& 7) << 12;
20751 insn
|= (old_op
& 0x38) << 13;
20753 insn
|= 0x00000c00;
20754 put_thumb32_insn (buf
, insn
);
20755 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
20759 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
20761 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
20764 if (fragp
->fr_var
== 4)
20766 insn
= THUMB_OP32 (opcode
);
20767 insn
|= (old_op
& 0xf0) << 4;
20768 put_thumb32_insn (buf
, insn
);
20769 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
20773 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
20774 exp
.X_add_number
-= 4;
20782 if (fragp
->fr_var
== 4)
20784 int r0off
= (opcode
== T_MNEM_mov
20785 || opcode
== T_MNEM_movs
) ? 0 : 8;
20786 insn
= THUMB_OP32 (opcode
);
20787 insn
= (insn
& 0xe1ffffff) | 0x10000000;
20788 insn
|= (old_op
& 0x700) << r0off
;
20789 put_thumb32_insn (buf
, insn
);
20790 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
20794 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
20799 if (fragp
->fr_var
== 4)
20801 insn
= THUMB_OP32(opcode
);
20802 put_thumb32_insn (buf
, insn
);
20803 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
20806 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
20810 if (fragp
->fr_var
== 4)
20812 insn
= THUMB_OP32(opcode
);
20813 insn
|= (old_op
& 0xf00) << 14;
20814 put_thumb32_insn (buf
, insn
);
20815 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
20818 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
20821 case T_MNEM_add_sp
:
20822 case T_MNEM_add_pc
:
20823 case T_MNEM_inc_sp
:
20824 case T_MNEM_dec_sp
:
20825 if (fragp
->fr_var
== 4)
20827 /* ??? Choose between add and addw. */
20828 insn
= THUMB_OP32 (opcode
);
20829 insn
|= (old_op
& 0xf0) << 4;
20830 put_thumb32_insn (buf
, insn
);
20831 if (opcode
== T_MNEM_add_pc
)
20832 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
20834 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
20837 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
20845 if (fragp
->fr_var
== 4)
20847 insn
= THUMB_OP32 (opcode
);
20848 insn
|= (old_op
& 0xf0) << 4;
20849 insn
|= (old_op
& 0xf) << 16;
20850 put_thumb32_insn (buf
, insn
);
20851 if (insn
& (1 << 20))
20852 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
20854 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
20857 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
20863 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
20864 (enum bfd_reloc_code_real
) reloc_type
);
20865 fixp
->fx_file
= fragp
->fr_file
;
20866 fixp
->fx_line
= fragp
->fr_line
;
20867 fragp
->fr_fix
+= fragp
->fr_var
;
20869 /* Set whether we use thumb-2 ISA based on final relaxation results. */
20870 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
20871 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
20872 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
20875 /* Return the size of a relaxable immediate operand instruction.
20876 SHIFT and SIZE specify the form of the allowable immediate. */
20878 relax_immediate (fragS
*fragp
, int size
, int shift
)
20884 /* ??? Should be able to do better than this. */
20885 if (fragp
->fr_symbol
)
20888 low
= (1 << shift
) - 1;
20889 mask
= (1 << (shift
+ size
)) - (1 << shift
);
20890 offset
= fragp
->fr_offset
;
20891 /* Force misaligned offsets to 32-bit variant. */
20894 if (offset
& ~mask
)
20899 /* Get the address of a symbol during relaxation. */
20901 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
20907 sym
= fragp
->fr_symbol
;
20908 sym_frag
= symbol_get_frag (sym
);
20909 know (S_GET_SEGMENT (sym
) != absolute_section
20910 || sym_frag
== &zero_address_frag
);
20911 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
20913 /* If frag has yet to be reached on this pass, assume it will
20914 move by STRETCH just as we did. If this is not so, it will
20915 be because some frag between grows, and that will force
20919 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
20923 /* Adjust stretch for any alignment frag. Note that if have
20924 been expanding the earlier code, the symbol may be
20925 defined in what appears to be an earlier frag. FIXME:
20926 This doesn't handle the fr_subtype field, which specifies
20927 a maximum number of bytes to skip when doing an
20929 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
20931 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
20934 stretch
= - ((- stretch
)
20935 & ~ ((1 << (int) f
->fr_offset
) - 1));
20937 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
20949 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
20952 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
20957 /* Assume worst case for symbols not known to be in the same section. */
20958 if (fragp
->fr_symbol
== NULL
20959 || !S_IS_DEFINED (fragp
->fr_symbol
)
20960 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
20961 || S_IS_WEAK (fragp
->fr_symbol
))
20964 val
= relaxed_symbol_addr (fragp
, stretch
);
20965 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
20966 addr
= (addr
+ 4) & ~3;
20967 /* Force misaligned targets to 32-bit variant. */
20971 if (val
< 0 || val
> 1020)
20976 /* Return the size of a relaxable add/sub immediate instruction. */
20978 relax_addsub (fragS
*fragp
, asection
*sec
)
20983 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
20984 op
= bfd_get_16(sec
->owner
, buf
);
20985 if ((op
& 0xf) == ((op
>> 4) & 0xf))
20986 return relax_immediate (fragp
, 8, 0);
20988 return relax_immediate (fragp
, 3, 0);
20991 /* Return TRUE iff the definition of symbol S could be pre-empted
20992 (overridden) at link or load time. */
20994 symbol_preemptible (symbolS
*s
)
20996 /* Weak symbols can always be pre-empted. */
21000 /* Non-global symbols cannot be pre-empted. */
21001 if (! S_IS_EXTERNAL (s
))
21005 /* In ELF, a global symbol can be marked protected, or private. In that
21006 case it can't be pre-empted (other definitions in the same link unit
21007 would violate the ODR). */
21008 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
21012 /* Other global symbols might be pre-empted. */
21016 /* Return the size of a relaxable branch instruction. BITS is the
21017 size of the offset field in the narrow instruction. */
21020 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
21026 /* Assume worst case for symbols not known to be in the same section. */
21027 if (!S_IS_DEFINED (fragp
->fr_symbol
)
21028 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21029 || S_IS_WEAK (fragp
->fr_symbol
))
21033 /* A branch to a function in ARM state will require interworking. */
21034 if (S_IS_DEFINED (fragp
->fr_symbol
)
21035 && ARM_IS_FUNC (fragp
->fr_symbol
))
21039 if (symbol_preemptible (fragp
->fr_symbol
))
21042 val
= relaxed_symbol_addr (fragp
, stretch
);
21043 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
21046 /* Offset is a signed value *2 */
21048 if (val
>= limit
|| val
< -limit
)
21054 /* Relax a machine dependent frag. This returns the amount by which
21055 the current size of the frag should change. */
21058 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
21063 oldsize
= fragp
->fr_var
;
21064 switch (fragp
->fr_subtype
)
21066 case T_MNEM_ldr_pc2
:
21067 newsize
= relax_adr (fragp
, sec
, stretch
);
21069 case T_MNEM_ldr_pc
:
21070 case T_MNEM_ldr_sp
:
21071 case T_MNEM_str_sp
:
21072 newsize
= relax_immediate (fragp
, 8, 2);
21076 newsize
= relax_immediate (fragp
, 5, 2);
21080 newsize
= relax_immediate (fragp
, 5, 1);
21084 newsize
= relax_immediate (fragp
, 5, 0);
21087 newsize
= relax_adr (fragp
, sec
, stretch
);
21093 newsize
= relax_immediate (fragp
, 8, 0);
21096 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
21099 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
21101 case T_MNEM_add_sp
:
21102 case T_MNEM_add_pc
:
21103 newsize
= relax_immediate (fragp
, 8, 2);
21105 case T_MNEM_inc_sp
:
21106 case T_MNEM_dec_sp
:
21107 newsize
= relax_immediate (fragp
, 7, 2);
21113 newsize
= relax_addsub (fragp
, sec
);
21119 fragp
->fr_var
= newsize
;
21120 /* Freeze wide instructions that are at or before the same location as
21121 in the previous pass. This avoids infinite loops.
21122 Don't freeze them unconditionally because targets may be artificially
21123 misaligned by the expansion of preceding frags. */
21124 if (stretch
<= 0 && newsize
> 2)
21126 md_convert_frag (sec
->owner
, sec
, fragp
);
21130 return newsize
- oldsize
;
21133 /* Round up a section size to the appropriate boundary. */
21136 md_section_align (segT segment ATTRIBUTE_UNUSED
,
21139 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21140 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
21142 /* For a.out, force the section size to be aligned. If we don't do
21143 this, BFD will align it for us, but it will not write out the
21144 final bytes of the section. This may be a bug in BFD, but it is
21145 easier to fix it here since that is how the other a.out targets
21149 align
= bfd_get_section_alignment (stdoutput
, segment
);
21150 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
21157 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
21158 of an rs_align_code fragment. */
21161 arm_handle_align (fragS
* fragP
)
21163 static char const arm_noop
[2][2][4] =
21166 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
21167 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
21170 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
21171 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
21174 static char const thumb_noop
[2][2][2] =
21177 {0xc0, 0x46}, /* LE */
21178 {0x46, 0xc0}, /* BE */
21181 {0x00, 0xbf}, /* LE */
21182 {0xbf, 0x00} /* BE */
21185 static char const wide_thumb_noop
[2][4] =
21186 { /* Wide Thumb-2 */
21187 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
21188 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
21191 unsigned bytes
, fix
, noop_size
;
21194 const char *narrow_noop
= NULL
;
21199 if (fragP
->fr_type
!= rs_align_code
)
21202 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
21203 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
21206 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21207 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
21209 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
21211 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
21213 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21214 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
21216 narrow_noop
= thumb_noop
[1][target_big_endian
];
21217 noop
= wide_thumb_noop
[target_big_endian
];
21220 noop
= thumb_noop
[0][target_big_endian
];
21228 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21229 ? selected_cpu
: arm_arch_none
,
21231 [target_big_endian
];
21238 fragP
->fr_var
= noop_size
;
21240 if (bytes
& (noop_size
- 1))
21242 fix
= bytes
& (noop_size
- 1);
21244 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
21246 memset (p
, 0, fix
);
21253 if (bytes
& noop_size
)
21255 /* Insert a narrow noop. */
21256 memcpy (p
, narrow_noop
, noop_size
);
21258 bytes
-= noop_size
;
21262 /* Use wide noops for the remainder */
21266 while (bytes
>= noop_size
)
21268 memcpy (p
, noop
, noop_size
);
21270 bytes
-= noop_size
;
21274 fragP
->fr_fix
+= fix
;
21277 /* Called from md_do_align. Used to create an alignment
21278 frag in a code section. */
21281 arm_frag_align_code (int n
, int max
)
21285 /* We assume that there will never be a requirement
21286 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21287 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21292 _("alignments greater than %d bytes not supported in .text sections."),
21293 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
21294 as_fatal ("%s", err_msg
);
21297 p
= frag_var (rs_align_code
,
21298 MAX_MEM_FOR_RS_ALIGN_CODE
,
21300 (relax_substateT
) max
,
21307 /* Perform target specific initialisation of a frag.
21308 Note - despite the name this initialisation is not done when the frag
21309 is created, but only when its type is assigned. A frag can be created
21310 and used a long time before its type is set, so beware of assuming that
21311 this initialisationis performed first. */
21315 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
21317 /* Record whether this frag is in an ARM or a THUMB area. */
21318 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21321 #else /* OBJ_ELF is defined. */
21323 arm_init_frag (fragS
* fragP
, int max_chars
)
21325 int frag_thumb_mode
;
21327 /* If the current ARM vs THUMB mode has not already
21328 been recorded into this frag then do so now. */
21329 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
21330 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
21332 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
21334 /* Record a mapping symbol for alignment frags. We will delete this
21335 later if the alignment ends up empty. */
21336 switch (fragP
->fr_type
)
21339 case rs_align_test
:
21341 mapping_state_2 (MAP_DATA
, max_chars
);
21343 case rs_align_code
:
21344 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
21351 /* When we change sections we need to issue a new mapping symbol. */
21354 arm_elf_change_section (void)
21356 /* Link an unlinked unwind index table section to the .text section. */
21357 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
21358 && elf_linked_to_section (now_seg
) == NULL
)
21359 elf_linked_to_section (now_seg
) = text_section
;
21363 arm_elf_section_type (const char * str
, size_t len
)
21365 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
21366 return SHT_ARM_EXIDX
;
21371 /* Code to deal with unwinding tables. */
21373 static void add_unwind_adjustsp (offsetT
);
21375 /* Generate any deferred unwind frame offset. */
21378 flush_pending_unwind (void)
21382 offset
= unwind
.pending_offset
;
21383 unwind
.pending_offset
= 0;
21385 add_unwind_adjustsp (offset
);
21388 /* Add an opcode to this list for this function. Two-byte opcodes should
21389 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21393 add_unwind_opcode (valueT op
, int length
)
21395 /* Add any deferred stack adjustment. */
21396 if (unwind
.pending_offset
)
21397 flush_pending_unwind ();
21399 unwind
.sp_restored
= 0;
21401 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
21403 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
21404 if (unwind
.opcodes
)
21405 unwind
.opcodes
= (unsigned char *) xrealloc (unwind
.opcodes
,
21406 unwind
.opcode_alloc
);
21408 unwind
.opcodes
= (unsigned char *) xmalloc (unwind
.opcode_alloc
);
21413 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
21415 unwind
.opcode_count
++;
21419 /* Add unwind opcodes to adjust the stack pointer. */
21422 add_unwind_adjustsp (offsetT offset
)
21426 if (offset
> 0x200)
21428 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
21433 /* Long form: 0xb2, uleb128. */
21434 /* This might not fit in a word so add the individual bytes,
21435 remembering the list is built in reverse order. */
21436 o
= (valueT
) ((offset
- 0x204) >> 2);
21438 add_unwind_opcode (0, 1);
21440 /* Calculate the uleb128 encoding of the offset. */
21444 bytes
[n
] = o
& 0x7f;
21450 /* Add the insn. */
21452 add_unwind_opcode (bytes
[n
- 1], 1);
21453 add_unwind_opcode (0xb2, 1);
21455 else if (offset
> 0x100)
21457 /* Two short opcodes. */
21458 add_unwind_opcode (0x3f, 1);
21459 op
= (offset
- 0x104) >> 2;
21460 add_unwind_opcode (op
, 1);
21462 else if (offset
> 0)
21464 /* Short opcode. */
21465 op
= (offset
- 4) >> 2;
21466 add_unwind_opcode (op
, 1);
21468 else if (offset
< 0)
21471 while (offset
> 0x100)
21473 add_unwind_opcode (0x7f, 1);
21476 op
= ((offset
- 4) >> 2) | 0x40;
21477 add_unwind_opcode (op
, 1);
21481 /* Finish the list of unwind opcodes for this function. */
21483 finish_unwind_opcodes (void)
21487 if (unwind
.fp_used
)
21489 /* Adjust sp as necessary. */
21490 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
21491 flush_pending_unwind ();
21493 /* After restoring sp from the frame pointer. */
21494 op
= 0x90 | unwind
.fp_reg
;
21495 add_unwind_opcode (op
, 1);
21498 flush_pending_unwind ();
21502 /* Start an exception table entry. If idx is nonzero this is an index table
21506 start_unwind_section (const segT text_seg
, int idx
)
21508 const char * text_name
;
21509 const char * prefix
;
21510 const char * prefix_once
;
21511 const char * group_name
;
21515 size_t sec_name_len
;
21522 prefix
= ELF_STRING_ARM_unwind
;
21523 prefix_once
= ELF_STRING_ARM_unwind_once
;
21524 type
= SHT_ARM_EXIDX
;
21528 prefix
= ELF_STRING_ARM_unwind_info
;
21529 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
21530 type
= SHT_PROGBITS
;
21533 text_name
= segment_name (text_seg
);
21534 if (streq (text_name
, ".text"))
21537 if (strncmp (text_name
, ".gnu.linkonce.t.",
21538 strlen (".gnu.linkonce.t.")) == 0)
21540 prefix
= prefix_once
;
21541 text_name
+= strlen (".gnu.linkonce.t.");
21544 prefix_len
= strlen (prefix
);
21545 text_len
= strlen (text_name
);
21546 sec_name_len
= prefix_len
+ text_len
;
21547 sec_name
= (char *) xmalloc (sec_name_len
+ 1);
21548 memcpy (sec_name
, prefix
, prefix_len
);
21549 memcpy (sec_name
+ prefix_len
, text_name
, text_len
);
21550 sec_name
[prefix_len
+ text_len
] = '\0';
21556 /* Handle COMDAT group. */
21557 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
21559 group_name
= elf_group_name (text_seg
);
21560 if (group_name
== NULL
)
21562 as_bad (_("Group section `%s' has no group signature"),
21563 segment_name (text_seg
));
21564 ignore_rest_of_line ();
21567 flags
|= SHF_GROUP
;
21571 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
21573 /* Set the section link for index tables. */
21575 elf_linked_to_section (now_seg
) = text_seg
;
21579 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
21580 personality routine data. Returns zero, or the index table value for
21581 an inline entry. */
21584 create_unwind_entry (int have_data
)
21589 /* The current word of data. */
21591 /* The number of bytes left in this word. */
21594 finish_unwind_opcodes ();
21596 /* Remember the current text section. */
21597 unwind
.saved_seg
= now_seg
;
21598 unwind
.saved_subseg
= now_subseg
;
21600 start_unwind_section (now_seg
, 0);
21602 if (unwind
.personality_routine
== NULL
)
21604 if (unwind
.personality_index
== -2)
21607 as_bad (_("handlerdata in cantunwind frame"));
21608 return 1; /* EXIDX_CANTUNWIND. */
21611 /* Use a default personality routine if none is specified. */
21612 if (unwind
.personality_index
== -1)
21614 if (unwind
.opcode_count
> 3)
21615 unwind
.personality_index
= 1;
21617 unwind
.personality_index
= 0;
21620 /* Space for the personality routine entry. */
21621 if (unwind
.personality_index
== 0)
21623 if (unwind
.opcode_count
> 3)
21624 as_bad (_("too many unwind opcodes for personality routine 0"));
21628 /* All the data is inline in the index table. */
21631 while (unwind
.opcode_count
> 0)
21633 unwind
.opcode_count
--;
21634 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
21638 /* Pad with "finish" opcodes. */
21640 data
= (data
<< 8) | 0xb0;
21647 /* We get two opcodes "free" in the first word. */
21648 size
= unwind
.opcode_count
- 2;
21652 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
21653 if (unwind
.personality_index
!= -1)
21655 as_bad (_("attempt to recreate an unwind entry"));
21659 /* An extra byte is required for the opcode count. */
21660 size
= unwind
.opcode_count
+ 1;
21663 size
= (size
+ 3) >> 2;
21665 as_bad (_("too many unwind opcodes"));
21667 frag_align (2, 0, 0);
21668 record_alignment (now_seg
, 2);
21669 unwind
.table_entry
= expr_build_dot ();
21671 /* Allocate the table entry. */
21672 ptr
= frag_more ((size
<< 2) + 4);
21673 /* PR 13449: Zero the table entries in case some of them are not used. */
21674 memset (ptr
, 0, (size
<< 2) + 4);
21675 where
= frag_now_fix () - ((size
<< 2) + 4);
21677 switch (unwind
.personality_index
)
21680 /* ??? Should this be a PLT generating relocation? */
21681 /* Custom personality routine. */
21682 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
21683 BFD_RELOC_ARM_PREL31
);
21688 /* Set the first byte to the number of additional words. */
21689 data
= size
> 0 ? size
- 1 : 0;
21693 /* ABI defined personality routines. */
21695 /* Three opcodes bytes are packed into the first word. */
21702 /* The size and first two opcode bytes go in the first word. */
21703 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
21708 /* Should never happen. */
21712 /* Pack the opcodes into words (MSB first), reversing the list at the same
21714 while (unwind
.opcode_count
> 0)
21718 md_number_to_chars (ptr
, data
, 4);
21723 unwind
.opcode_count
--;
21725 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
21728 /* Finish off the last word. */
21731 /* Pad with "finish" opcodes. */
21733 data
= (data
<< 8) | 0xb0;
21735 md_number_to_chars (ptr
, data
, 4);
21740 /* Add an empty descriptor if there is no user-specified data. */
21741 ptr
= frag_more (4);
21742 md_number_to_chars (ptr
, 0, 4);
21749 /* Initialize the DWARF-2 unwind information for this procedure. */
21752 tc_arm_frame_initial_instructions (void)
21754 cfi_add_CFA_def_cfa (REG_SP
, 0);
21756 #endif /* OBJ_ELF */
21758 /* Convert REGNAME to a DWARF-2 register number. */
21761 tc_arm_regname_to_dw2regnum (char *regname
)
21763 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
21767 /* PR 16694: Allow VFP registers as well. */
21768 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
21772 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
21781 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
21785 exp
.X_op
= O_secrel
;
21786 exp
.X_add_symbol
= symbol
;
21787 exp
.X_add_number
= 0;
21788 emit_expr (&exp
, size
);
21792 /* MD interface: Symbol and relocation handling. */
21794 /* Return the address within the segment that a PC-relative fixup is
21795 relative to. For ARM, PC-relative fixups applied to instructions
21796 are generally relative to the location of the fixup plus 8 bytes.
21797 Thumb branches are offset by 4, and Thumb loads relative to PC
21798 require special handling. */
21801 md_pcrel_from_section (fixS
* fixP
, segT seg
)
21803 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21805 /* If this is pc-relative and we are going to emit a relocation
21806 then we just want to put out any pipeline compensation that the linker
21807 will need. Otherwise we want to use the calculated base.
21808 For WinCE we skip the bias for externals as well, since this
21809 is how the MS ARM-CE assembler behaves and we want to be compatible. */
21811 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
21812 || (arm_force_relocation (fixP
)
21814 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
21820 switch (fixP
->fx_r_type
)
21822 /* PC relative addressing on the Thumb is slightly odd as the
21823 bottom two bits of the PC are forced to zero for the
21824 calculation. This happens *after* application of the
21825 pipeline offset. However, Thumb adrl already adjusts for
21826 this, so we need not do it again. */
21827 case BFD_RELOC_ARM_THUMB_ADD
:
21830 case BFD_RELOC_ARM_THUMB_OFFSET
:
21831 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
21832 case BFD_RELOC_ARM_T32_ADD_PC12
:
21833 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
21834 return (base
+ 4) & ~3;
21836 /* Thumb branches are simply offset by +4. */
21837 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
21838 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
21839 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
21840 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
21841 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
21844 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
21846 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21847 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21848 && ARM_IS_FUNC (fixP
->fx_addsy
)
21849 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21850 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21853 /* BLX is like branches above, but forces the low two bits of PC to
21855 case BFD_RELOC_THUMB_PCREL_BLX
:
21857 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21858 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21859 && THUMB_IS_FUNC (fixP
->fx_addsy
)
21860 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21861 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21862 return (base
+ 4) & ~3;
21864 /* ARM mode branches are offset by +8. However, the Windows CE
21865 loader expects the relocation not to take this into account. */
21866 case BFD_RELOC_ARM_PCREL_BLX
:
21868 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21869 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21870 && ARM_IS_FUNC (fixP
->fx_addsy
)
21871 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21872 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21875 case BFD_RELOC_ARM_PCREL_CALL
:
21877 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21878 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
21879 && THUMB_IS_FUNC (fixP
->fx_addsy
)
21880 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
21881 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
21884 case BFD_RELOC_ARM_PCREL_BRANCH
:
21885 case BFD_RELOC_ARM_PCREL_JUMP
:
21886 case BFD_RELOC_ARM_PLT32
:
21888 /* When handling fixups immediately, because we have already
21889 discovered the value of a symbol, or the address of the frag involved
21890 we must account for the offset by +8, as the OS loader will never see the reloc.
21891 see fixup_segment() in write.c
21892 The S_IS_EXTERNAL test handles the case of global symbols.
21893 Those need the calculated base, not just the pipe compensation the linker will need. */
21895 && fixP
->fx_addsy
!= NULL
21896 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
21897 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
21905 /* ARM mode loads relative to PC are also offset by +8. Unlike
21906 branches, the Windows CE loader *does* expect the relocation
21907 to take this into account. */
21908 case BFD_RELOC_ARM_OFFSET_IMM
:
21909 case BFD_RELOC_ARM_OFFSET_IMM8
:
21910 case BFD_RELOC_ARM_HWLITERAL
:
21911 case BFD_RELOC_ARM_LITERAL
:
21912 case BFD_RELOC_ARM_CP_OFF_IMM
:
21916 /* Other PC-relative relocations are un-offset. */
21922 static bfd_boolean flag_warn_syms
= TRUE
;
21925 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
21927 /* PR 18347 - Warn if the user attempts to create a symbol with the same
21928 name as an ARM instruction. Whilst strictly speaking it is allowed, it
21929 does mean that the resulting code might be very confusing to the reader.
21930 Also this warning can be triggered if the user omits an operand before
21931 an immediate address, eg:
21935 GAS treats this as an assignment of the value of the symbol foo to a
21936 symbol LDR, and so (without this code) it will not issue any kind of
21937 warning or error message.
21939 Note - ARM instructions are case-insensitive but the strings in the hash
21940 table are all stored in lower case, so we must first ensure that name is
21942 if (flag_warn_syms
&& arm_ops_hsh
)
21944 char * nbuf
= strdup (name
);
21947 for (p
= nbuf
; *p
; p
++)
21949 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
21951 static struct hash_control
* already_warned
= NULL
;
21953 if (already_warned
== NULL
)
21954 already_warned
= hash_new ();
21955 /* Only warn about the symbol once. To keep the code
21956 simple we let hash_insert do the lookup for us. */
21957 if (hash_insert (already_warned
, name
, NULL
) == NULL
)
21958 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
21967 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
21968 Otherwise we have no need to default values of symbols. */
21971 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
21974 if (name
[0] == '_' && name
[1] == 'G'
21975 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
21979 if (symbol_find (name
))
21980 as_bad (_("GOT already in the symbol table"));
21982 GOT_symbol
= symbol_new (name
, undefined_section
,
21983 (valueT
) 0, & zero_address_frag
);
21993 /* Subroutine of md_apply_fix. Check to see if an immediate can be
21994 computed as two separate immediate values, added together. We
21995 already know that this value cannot be computed by just one ARM
21998 static unsigned int
21999 validate_immediate_twopart (unsigned int val
,
22000 unsigned int * highpart
)
22005 for (i
= 0; i
< 32; i
+= 2)
22006 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
22012 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
22014 else if (a
& 0xff0000)
22016 if (a
& 0xff000000)
22018 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
22022 gas_assert (a
& 0xff000000);
22023 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
22026 return (a
& 0xff) | (i
<< 7);
22033 validate_offset_imm (unsigned int val
, int hwse
)
22035 if ((hwse
&& val
> 255) || val
> 4095)
22040 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22041 negative immediate constant by altering the instruction. A bit of
22046 by inverting the second operand, and
22049 by negating the second operand. */
22052 negate_data_op (unsigned long * instruction
,
22053 unsigned long value
)
22056 unsigned long negated
, inverted
;
22058 negated
= encode_arm_immediate (-value
);
22059 inverted
= encode_arm_immediate (~value
);
22061 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
22064 /* First negates. */
22065 case OPCODE_SUB
: /* ADD <-> SUB */
22066 new_inst
= OPCODE_ADD
;
22071 new_inst
= OPCODE_SUB
;
22075 case OPCODE_CMP
: /* CMP <-> CMN */
22076 new_inst
= OPCODE_CMN
;
22081 new_inst
= OPCODE_CMP
;
22085 /* Now Inverted ops. */
22086 case OPCODE_MOV
: /* MOV <-> MVN */
22087 new_inst
= OPCODE_MVN
;
22092 new_inst
= OPCODE_MOV
;
22096 case OPCODE_AND
: /* AND <-> BIC */
22097 new_inst
= OPCODE_BIC
;
22102 new_inst
= OPCODE_AND
;
22106 case OPCODE_ADC
: /* ADC <-> SBC */
22107 new_inst
= OPCODE_SBC
;
22112 new_inst
= OPCODE_ADC
;
22116 /* We cannot do anything. */
22121 if (value
== (unsigned) FAIL
)
22124 *instruction
&= OPCODE_MASK
;
22125 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
22129 /* Like negate_data_op, but for Thumb-2. */
22131 static unsigned int
22132 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
22136 unsigned int negated
, inverted
;
22138 negated
= encode_thumb32_immediate (-value
);
22139 inverted
= encode_thumb32_immediate (~value
);
22141 rd
= (*instruction
>> 8) & 0xf;
22142 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
22145 /* ADD <-> SUB. Includes CMP <-> CMN. */
22146 case T2_OPCODE_SUB
:
22147 new_inst
= T2_OPCODE_ADD
;
22151 case T2_OPCODE_ADD
:
22152 new_inst
= T2_OPCODE_SUB
;
22156 /* ORR <-> ORN. Includes MOV <-> MVN. */
22157 case T2_OPCODE_ORR
:
22158 new_inst
= T2_OPCODE_ORN
;
22162 case T2_OPCODE_ORN
:
22163 new_inst
= T2_OPCODE_ORR
;
22167 /* AND <-> BIC. TST has no inverted equivalent. */
22168 case T2_OPCODE_AND
:
22169 new_inst
= T2_OPCODE_BIC
;
22176 case T2_OPCODE_BIC
:
22177 new_inst
= T2_OPCODE_AND
;
22182 case T2_OPCODE_ADC
:
22183 new_inst
= T2_OPCODE_SBC
;
22187 case T2_OPCODE_SBC
:
22188 new_inst
= T2_OPCODE_ADC
;
22192 /* We cannot do anything. */
22197 if (value
== (unsigned int)FAIL
)
22200 *instruction
&= T2_OPCODE_MASK
;
22201 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
22205 /* Read a 32-bit thumb instruction from buf. */
22206 static unsigned long
22207 get_thumb32_insn (char * buf
)
22209 unsigned long insn
;
22210 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
22211 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22217 /* We usually want to set the low bit on the address of thumb function
22218 symbols. In particular .word foo - . should have the low bit set.
22219 Generic code tries to fold the difference of two symbols to
22220 a constant. Prevent this and force a relocation when the first symbols
22221 is a thumb function. */
22224 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
22226 if (op
== O_subtract
22227 && l
->X_op
== O_symbol
22228 && r
->X_op
== O_symbol
22229 && THUMB_IS_FUNC (l
->X_add_symbol
))
22231 l
->X_op
= O_subtract
;
22232 l
->X_op_symbol
= r
->X_add_symbol
;
22233 l
->X_add_number
-= r
->X_add_number
;
22237 /* Process as normal. */
22241 /* Encode Thumb2 unconditional branches and calls. The encoding
22242 for the 2 are identical for the immediate values. */
22245 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
22247 #define T2I1I2MASK ((1 << 13) | (1 << 11))
22250 addressT S
, I1
, I2
, lo
, hi
;
22252 S
= (value
>> 24) & 0x01;
22253 I1
= (value
>> 23) & 0x01;
22254 I2
= (value
>> 22) & 0x01;
22255 hi
= (value
>> 12) & 0x3ff;
22256 lo
= (value
>> 1) & 0x7ff;
22257 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22258 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22259 newval
|= (S
<< 10) | hi
;
22260 newval2
&= ~T2I1I2MASK
;
22261 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
22262 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22263 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
22267 md_apply_fix (fixS
* fixP
,
22271 offsetT value
= * valP
;
22273 unsigned int newimm
;
22274 unsigned long temp
;
22276 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
22278 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
22280 /* Note whether this will delete the relocation. */
22282 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
22285 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22286 consistency with the behaviour on 32-bit hosts. Remember value
22288 value
&= 0xffffffff;
22289 value
^= 0x80000000;
22290 value
-= 0x80000000;
22293 fixP
->fx_addnumber
= value
;
22295 /* Same treatment for fixP->fx_offset. */
22296 fixP
->fx_offset
&= 0xffffffff;
22297 fixP
->fx_offset
^= 0x80000000;
22298 fixP
->fx_offset
-= 0x80000000;
22300 switch (fixP
->fx_r_type
)
22302 case BFD_RELOC_NONE
:
22303 /* This will need to go in the object file. */
22307 case BFD_RELOC_ARM_IMMEDIATE
:
22308 /* We claim that this fixup has been processed here,
22309 even if in fact we generate an error because we do
22310 not have a reloc for it, so tc_gen_reloc will reject it. */
22313 if (fixP
->fx_addsy
)
22315 const char *msg
= 0;
22317 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22318 msg
= _("undefined symbol %s used as an immediate value");
22319 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22320 msg
= _("symbol %s is in a different section");
22321 else if (S_IS_WEAK (fixP
->fx_addsy
))
22322 msg
= _("symbol %s is weak and may be overridden later");
22326 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22327 msg
, S_GET_NAME (fixP
->fx_addsy
));
22332 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22334 /* If the offset is negative, we should use encoding A2 for ADR. */
22335 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
22336 newimm
= negate_data_op (&temp
, value
);
22339 newimm
= encode_arm_immediate (value
);
22341 /* If the instruction will fail, see if we can fix things up by
22342 changing the opcode. */
22343 if (newimm
== (unsigned int) FAIL
)
22344 newimm
= negate_data_op (&temp
, value
);
22347 if (newimm
== (unsigned int) FAIL
)
22349 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22350 _("invalid constant (%lx) after fixup"),
22351 (unsigned long) value
);
22355 newimm
|= (temp
& 0xfffff000);
22356 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22359 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
22361 unsigned int highpart
= 0;
22362 unsigned int newinsn
= 0xe1a00000; /* nop. */
22364 if (fixP
->fx_addsy
)
22366 const char *msg
= 0;
22368 if (! S_IS_DEFINED (fixP
->fx_addsy
))
22369 msg
= _("undefined symbol %s used as an immediate value");
22370 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22371 msg
= _("symbol %s is in a different section");
22372 else if (S_IS_WEAK (fixP
->fx_addsy
))
22373 msg
= _("symbol %s is weak and may be overridden later");
22377 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22378 msg
, S_GET_NAME (fixP
->fx_addsy
));
22383 newimm
= encode_arm_immediate (value
);
22384 temp
= md_chars_to_number (buf
, INSN_SIZE
);
22386 /* If the instruction will fail, see if we can fix things up by
22387 changing the opcode. */
22388 if (newimm
== (unsigned int) FAIL
22389 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
22391 /* No ? OK - try using two ADD instructions to generate
22393 newimm
= validate_immediate_twopart (value
, & highpart
);
22395 /* Yes - then make sure that the second instruction is
22397 if (newimm
!= (unsigned int) FAIL
)
22399 /* Still No ? Try using a negated value. */
22400 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
22401 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
22402 /* Otherwise - give up. */
22405 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22406 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22411 /* Replace the first operand in the 2nd instruction (which
22412 is the PC) with the destination register. We have
22413 already added in the PC in the first instruction and we
22414 do not want to do it again. */
22415 newinsn
&= ~ 0xf0000;
22416 newinsn
|= ((newinsn
& 0x0f000) << 4);
22419 newimm
|= (temp
& 0xfffff000);
22420 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
22422 highpart
|= (newinsn
& 0xfffff000);
22423 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
22427 case BFD_RELOC_ARM_OFFSET_IMM
:
22428 if (!fixP
->fx_done
&& seg
->use_rela_p
)
22431 case BFD_RELOC_ARM_LITERAL
:
22437 if (validate_offset_imm (value
, 0) == FAIL
)
22439 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
22440 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22441 _("invalid literal constant: pool needs to be closer"));
22443 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22444 _("bad immediate value for offset (%ld)"),
22449 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22451 newval
&= 0xfffff000;
22454 newval
&= 0xff7ff000;
22455 newval
|= value
| (sign
? INDEX_UP
: 0);
22457 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22460 case BFD_RELOC_ARM_OFFSET_IMM8
:
22461 case BFD_RELOC_ARM_HWLITERAL
:
22467 if (validate_offset_imm (value
, 1) == FAIL
)
22469 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
22470 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22471 _("invalid literal constant: pool needs to be closer"));
22473 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22474 _("bad immediate value for 8-bit offset (%ld)"),
22479 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22481 newval
&= 0xfffff0f0;
22484 newval
&= 0xff7ff0f0;
22485 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
22487 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22490 case BFD_RELOC_ARM_T32_OFFSET_U8
:
22491 if (value
< 0 || value
> 1020 || value
% 4 != 0)
22492 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22493 _("bad immediate value for offset (%ld)"), (long) value
);
22496 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
22498 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
22501 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
22502 /* This is a complicated relocation used for all varieties of Thumb32
22503 load/store instruction with immediate offset:
22505 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22506 *4, optional writeback(W)
22507 (doubleword load/store)
22509 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22510 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22511 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22512 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22513 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22515 Uppercase letters indicate bits that are already encoded at
22516 this point. Lowercase letters are our problem. For the
22517 second block of instructions, the secondary opcode nybble
22518 (bits 8..11) is present, and bit 23 is zero, even if this is
22519 a PC-relative operation. */
22520 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22522 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
22524 if ((newval
& 0xf0000000) == 0xe0000000)
22526 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22528 newval
|= (1 << 23);
22531 if (value
% 4 != 0)
22533 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22534 _("offset not a multiple of 4"));
22540 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22541 _("offset out of range"));
22546 else if ((newval
& 0x000f0000) == 0x000f0000)
22548 /* PC-relative, 12-bit offset. */
22550 newval
|= (1 << 23);
22555 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22556 _("offset out of range"));
22561 else if ((newval
& 0x00000100) == 0x00000100)
22563 /* Writeback: 8-bit, +/- offset. */
22565 newval
|= (1 << 9);
22570 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22571 _("offset out of range"));
22576 else if ((newval
& 0x00000f00) == 0x00000e00)
22578 /* T-instruction: positive 8-bit offset. */
22579 if (value
< 0 || value
> 0xff)
22581 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22582 _("offset out of range"));
22590 /* Positive 12-bit or negative 8-bit offset. */
22594 newval
|= (1 << 23);
22604 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22605 _("offset out of range"));
22612 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
22613 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
22616 case BFD_RELOC_ARM_SHIFT_IMM
:
22617 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22618 if (((unsigned long) value
) > 32
22620 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
22622 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22623 _("shift expression is too large"));
22628 /* Shifts of zero must be done as lsl. */
22630 else if (value
== 32)
22632 newval
&= 0xfffff07f;
22633 newval
|= (value
& 0x1f) << 7;
22634 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22637 case BFD_RELOC_ARM_T32_IMMEDIATE
:
22638 case BFD_RELOC_ARM_T32_ADD_IMM
:
22639 case BFD_RELOC_ARM_T32_IMM12
:
22640 case BFD_RELOC_ARM_T32_ADD_PC12
:
22641 /* We claim that this fixup has been processed here,
22642 even if in fact we generate an error because we do
22643 not have a reloc for it, so tc_gen_reloc will reject it. */
22647 && ! S_IS_DEFINED (fixP
->fx_addsy
))
22649 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22650 _("undefined symbol %s used as an immediate value"),
22651 S_GET_NAME (fixP
->fx_addsy
));
22655 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22657 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
22660 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
22661 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
22663 newimm
= encode_thumb32_immediate (value
);
22664 if (newimm
== (unsigned int) FAIL
)
22665 newimm
= thumb32_negate_data_op (&newval
, value
);
22667 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
22668 && newimm
== (unsigned int) FAIL
)
22670 /* Turn add/sub into addw/subw. */
22671 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
22672 newval
= (newval
& 0xfeffffff) | 0x02000000;
22673 /* No flat 12-bit imm encoding for addsw/subsw. */
22674 if ((newval
& 0x00100000) == 0)
22676 /* 12 bit immediate for addw/subw. */
22680 newval
^= 0x00a00000;
22683 newimm
= (unsigned int) FAIL
;
22689 if (newimm
== (unsigned int)FAIL
)
22691 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22692 _("invalid constant (%lx) after fixup"),
22693 (unsigned long) value
);
22697 newval
|= (newimm
& 0x800) << 15;
22698 newval
|= (newimm
& 0x700) << 4;
22699 newval
|= (newimm
& 0x0ff);
22701 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
22702 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
22705 case BFD_RELOC_ARM_SMC
:
22706 if (((unsigned long) value
) > 0xffff)
22707 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22708 _("invalid smc expression"));
22709 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22710 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
22711 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22714 case BFD_RELOC_ARM_HVC
:
22715 if (((unsigned long) value
) > 0xffff)
22716 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22717 _("invalid hvc expression"));
22718 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22719 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
22720 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22723 case BFD_RELOC_ARM_SWI
:
22724 if (fixP
->tc_fix_data
!= 0)
22726 if (((unsigned long) value
) > 0xff)
22727 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22728 _("invalid swi expression"));
22729 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22731 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22735 if (((unsigned long) value
) > 0x00ffffff)
22736 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22737 _("invalid swi expression"));
22738 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22740 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22744 case BFD_RELOC_ARM_MULTI
:
22745 if (((unsigned long) value
) > 0xffff)
22746 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22747 _("invalid expression in load/store multiple"));
22748 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
22749 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22753 case BFD_RELOC_ARM_PCREL_CALL
:
22755 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
22757 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22758 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22759 && THUMB_IS_FUNC (fixP
->fx_addsy
))
22760 /* Flip the bl to blx. This is a simple flip
22761 bit here because we generate PCREL_CALL for
22762 unconditional bls. */
22764 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22765 newval
= newval
| 0x10000000;
22766 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22772 goto arm_branch_common
;
22774 case BFD_RELOC_ARM_PCREL_JUMP
:
22775 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
22777 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22778 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22779 && THUMB_IS_FUNC (fixP
->fx_addsy
))
22781 /* This would map to a bl<cond>, b<cond>,
22782 b<always> to a Thumb function. We
22783 need to force a relocation for this particular
22785 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22789 case BFD_RELOC_ARM_PLT32
:
22791 case BFD_RELOC_ARM_PCREL_BRANCH
:
22793 goto arm_branch_common
;
22795 case BFD_RELOC_ARM_PCREL_BLX
:
22798 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
22800 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22801 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22802 && ARM_IS_FUNC (fixP
->fx_addsy
))
22804 /* Flip the blx to a bl and warn. */
22805 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
22806 newval
= 0xeb000000;
22807 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
22808 _("blx to '%s' an ARM ISA state function changed to bl"),
22810 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22816 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
22817 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
22821 /* We are going to store value (shifted right by two) in the
22822 instruction, in a 24 bit, signed field. Bits 26 through 32 either
22823 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
22824 also be clear. */
22826 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22827 _("misaligned branch destination"));
22828 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
22829 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
22830 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22832 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22834 newval
= md_chars_to_number (buf
, INSN_SIZE
);
22835 newval
|= (value
>> 2) & 0x00ffffff;
22836 /* Set the H bit on BLX instructions. */
22840 newval
|= 0x01000000;
22842 newval
&= ~0x01000000;
22844 md_number_to_chars (buf
, newval
, INSN_SIZE
);
22848 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
22849 /* CBZ can only branch forward. */
22851 /* Attempts to use CBZ to branch to the next instruction
22852 (which, strictly speaking, are prohibited) will be turned into
22855 FIXME: It may be better to remove the instruction completely and
22856 perform relaxation. */
22859 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22860 newval
= 0xbf00; /* NOP encoding T1 */
22861 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22866 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22868 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22870 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22871 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
22872 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22877 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
22878 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
22879 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22881 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22883 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22884 newval
|= (value
& 0x1ff) >> 1;
22885 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22889 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
22890 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
22891 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22893 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22895 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22896 newval
|= (value
& 0xfff) >> 1;
22897 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22901 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
22903 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22904 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22905 && ARM_IS_FUNC (fixP
->fx_addsy
)
22906 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22908 /* Force a relocation for a branch 20 bits wide. */
22911 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
22912 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22913 _("conditional branch out of range"));
22915 if (fixP
->fx_done
|| !seg
->use_rela_p
)
22918 addressT S
, J1
, J2
, lo
, hi
;
22920 S
= (value
& 0x00100000) >> 20;
22921 J2
= (value
& 0x00080000) >> 19;
22922 J1
= (value
& 0x00040000) >> 18;
22923 hi
= (value
& 0x0003f000) >> 12;
22924 lo
= (value
& 0x00000ffe) >> 1;
22926 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22927 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22928 newval
|= (S
<< 10) | hi
;
22929 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
22930 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22931 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
22935 case BFD_RELOC_THUMB_PCREL_BLX
:
22936 /* If there is a blx from a thumb state function to
22937 another thumb function flip this to a bl and warn
22941 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22942 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22943 && THUMB_IS_FUNC (fixP
->fx_addsy
))
22945 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
22946 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
22947 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
22949 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22950 newval
= newval
| 0x1000;
22951 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
22952 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
22957 goto thumb_bl_common
;
22959 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
22960 /* A bl from Thumb state ISA to an internal ARM state function
22961 is converted to a blx. */
22963 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22964 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22965 && ARM_IS_FUNC (fixP
->fx_addsy
)
22966 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22968 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22969 newval
= newval
& ~0x1000;
22970 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
22971 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
22977 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
22978 /* For a BLX instruction, make sure that the relocation is rounded up
22979 to a word boundary. This follows the semantics of the instruction
22980 which specifies that bit 1 of the target address will come from bit
22981 1 of the base address. */
22982 value
= (value
+ 3) & ~ 3;
22985 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
22986 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
22987 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
22990 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
22992 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
22993 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
22994 else if ((value
& ~0x1ffffff)
22995 && ((value
& ~0x1ffffff) != ~0x1ffffff))
22996 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
22997 _("Thumb2 branch out of range"));
23000 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23001 encode_thumb2_b_bl_offset (buf
, value
);
23005 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23006 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
23007 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23009 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23010 encode_thumb2_b_bl_offset (buf
, value
);
23015 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23020 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23021 md_number_to_chars (buf
, value
, 2);
23025 case BFD_RELOC_ARM_TLS_CALL
:
23026 case BFD_RELOC_ARM_THM_TLS_CALL
:
23027 case BFD_RELOC_ARM_TLS_DESCSEQ
:
23028 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
23029 case BFD_RELOC_ARM_TLS_GOTDESC
:
23030 case BFD_RELOC_ARM_TLS_GD32
:
23031 case BFD_RELOC_ARM_TLS_LE32
:
23032 case BFD_RELOC_ARM_TLS_IE32
:
23033 case BFD_RELOC_ARM_TLS_LDM32
:
23034 case BFD_RELOC_ARM_TLS_LDO32
:
23035 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
23038 case BFD_RELOC_ARM_GOT32
:
23039 case BFD_RELOC_ARM_GOTOFF
:
23042 case BFD_RELOC_ARM_GOT_PREL
:
23043 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23044 md_number_to_chars (buf
, value
, 4);
23047 case BFD_RELOC_ARM_TARGET2
:
23048 /* TARGET2 is not partial-inplace, so we need to write the
23049 addend here for REL targets, because it won't be written out
23050 during reloc processing later. */
23051 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23052 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
23056 case BFD_RELOC_RVA
:
23058 case BFD_RELOC_ARM_TARGET1
:
23059 case BFD_RELOC_ARM_ROSEGREL32
:
23060 case BFD_RELOC_ARM_SBREL32
:
23061 case BFD_RELOC_32_PCREL
:
23063 case BFD_RELOC_32_SECREL
:
23065 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23067 /* For WinCE we only do this for pcrel fixups. */
23068 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
23070 md_number_to_chars (buf
, value
, 4);
23074 case BFD_RELOC_ARM_PREL31
:
23075 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23077 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
23078 if ((value
^ (value
>> 1)) & 0x40000000)
23080 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23081 _("rel31 relocation overflow"));
23083 newval
|= value
& 0x7fffffff;
23084 md_number_to_chars (buf
, newval
, 4);
23089 case BFD_RELOC_ARM_CP_OFF_IMM
:
23090 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
23091 if (value
< -1023 || value
> 1023 || (value
& 3))
23092 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23093 _("co-processor offset out of range"));
23098 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23099 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23100 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23102 newval
= get_thumb32_insn (buf
);
23104 newval
&= 0xffffff00;
23107 newval
&= 0xff7fff00;
23108 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
23110 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23111 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23112 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23114 put_thumb32_insn (buf
, newval
);
23117 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
23118 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
23119 if (value
< -255 || value
> 255)
23120 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23121 _("co-processor offset out of range"));
23123 goto cp_off_common
;
23125 case BFD_RELOC_ARM_THUMB_OFFSET
:
23126 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23127 /* Exactly what ranges, and where the offset is inserted depends
23128 on the type of instruction, we can establish this from the
23130 switch (newval
>> 12)
23132 case 4: /* PC load. */
23133 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23134 forced to zero for these loads; md_pcrel_from has already
23135 compensated for this. */
23137 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23138 _("invalid offset, target not word aligned (0x%08lX)"),
23139 (((unsigned long) fixP
->fx_frag
->fr_address
23140 + (unsigned long) fixP
->fx_where
) & ~3)
23141 + (unsigned long) value
);
23143 if (value
& ~0x3fc)
23144 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23145 _("invalid offset, value too big (0x%08lX)"),
23148 newval
|= value
>> 2;
23151 case 9: /* SP load/store. */
23152 if (value
& ~0x3fc)
23153 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23154 _("invalid offset, value too big (0x%08lX)"),
23156 newval
|= value
>> 2;
23159 case 6: /* Word load/store. */
23161 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23162 _("invalid offset, value too big (0x%08lX)"),
23164 newval
|= value
<< 4; /* 6 - 2. */
23167 case 7: /* Byte load/store. */
23169 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23170 _("invalid offset, value too big (0x%08lX)"),
23172 newval
|= value
<< 6;
23175 case 8: /* Halfword load/store. */
23177 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23178 _("invalid offset, value too big (0x%08lX)"),
23180 newval
|= value
<< 5; /* 6 - 1. */
23184 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23185 "Unable to process relocation for thumb opcode: %lx",
23186 (unsigned long) newval
);
23189 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23192 case BFD_RELOC_ARM_THUMB_ADD
:
23193 /* This is a complicated relocation, since we use it for all of
23194 the following immediate relocations:
23198 9bit ADD/SUB SP word-aligned
23199 10bit ADD PC/SP word-aligned
23201 The type of instruction being processed is encoded in the
23208 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23210 int rd
= (newval
>> 4) & 0xf;
23211 int rs
= newval
& 0xf;
23212 int subtract
= !!(newval
& 0x8000);
23214 /* Check for HI regs, only very restricted cases allowed:
23215 Adjusting SP, and using PC or SP to get an address. */
23216 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
23217 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
23218 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23219 _("invalid Hi register with immediate"));
23221 /* If value is negative, choose the opposite instruction. */
23225 subtract
= !subtract
;
23227 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23228 _("immediate value out of range"));
23233 if (value
& ~0x1fc)
23234 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23235 _("invalid immediate for stack address calculation"));
23236 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
23237 newval
|= value
>> 2;
23239 else if (rs
== REG_PC
|| rs
== REG_SP
)
23241 /* PR gas/18541. If the addition is for a defined symbol
23242 within range of an ADR instruction then accept it. */
23245 && fixP
->fx_addsy
!= NULL
)
23249 if (! S_IS_DEFINED (fixP
->fx_addsy
)
23250 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
23251 || S_IS_WEAK (fixP
->fx_addsy
))
23253 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23254 _("address calculation needs a strongly defined nearby symbol"));
23258 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23260 /* Round up to the next 4-byte boundary. */
23265 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
23269 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23270 _("symbol too far away"));
23280 if (subtract
|| value
& ~0x3fc)
23281 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23282 _("invalid immediate for address calculation (value = 0x%08lX)"),
23283 (unsigned long) (subtract
? - value
: value
));
23284 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
23286 newval
|= value
>> 2;
23291 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23292 _("immediate value out of range"));
23293 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
23294 newval
|= (rd
<< 8) | value
;
23299 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23300 _("immediate value out of range"));
23301 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
23302 newval
|= rd
| (rs
<< 3) | (value
<< 6);
23305 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23308 case BFD_RELOC_ARM_THUMB_IMM
:
23309 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23310 if (value
< 0 || value
> 255)
23311 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23312 _("invalid immediate: %ld is out of range"),
23315 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23318 case BFD_RELOC_ARM_THUMB_SHIFT
:
23319 /* 5bit shift value (0..32). LSL cannot take 32. */
23320 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
23321 temp
= newval
& 0xf800;
23322 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
23323 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23324 _("invalid shift value: %ld"), (long) value
);
23325 /* Shifts of zero must be encoded as LSL. */
23327 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
23328 /* Shifts of 32 are encoded as zero. */
23329 else if (value
== 32)
23331 newval
|= value
<< 6;
23332 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23335 case BFD_RELOC_VTABLE_INHERIT
:
23336 case BFD_RELOC_VTABLE_ENTRY
:
23340 case BFD_RELOC_ARM_MOVW
:
23341 case BFD_RELOC_ARM_MOVT
:
23342 case BFD_RELOC_ARM_THUMB_MOVW
:
23343 case BFD_RELOC_ARM_THUMB_MOVT
:
23344 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23346 /* REL format relocations are limited to a 16-bit addend. */
23347 if (!fixP
->fx_done
)
23349 if (value
< -0x8000 || value
> 0x7fff)
23350 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23351 _("offset out of range"));
23353 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
23354 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23359 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
23360 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
23362 newval
= get_thumb32_insn (buf
);
23363 newval
&= 0xfbf08f00;
23364 newval
|= (value
& 0xf000) << 4;
23365 newval
|= (value
& 0x0800) << 15;
23366 newval
|= (value
& 0x0700) << 4;
23367 newval
|= (value
& 0x00ff);
23368 put_thumb32_insn (buf
, newval
);
23372 newval
= md_chars_to_number (buf
, 4);
23373 newval
&= 0xfff0f000;
23374 newval
|= value
& 0x0fff;
23375 newval
|= (value
& 0xf000) << 4;
23376 md_number_to_chars (buf
, newval
, 4);
23381 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
23382 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
23383 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
23384 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
23385 gas_assert (!fixP
->fx_done
);
23388 bfd_boolean is_mov
;
23389 bfd_vma encoded_addend
= value
;
23391 /* Check that addend can be encoded in instruction. */
23392 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
23393 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23394 _("the offset 0x%08lX is not representable"),
23395 (unsigned long) encoded_addend
);
23397 /* Extract the instruction. */
23398 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
23399 is_mov
= (insn
& 0xf800) == 0x2000;
23404 if (!seg
->use_rela_p
)
23405 insn
|= encoded_addend
;
23411 /* Extract the instruction. */
23412 /* Encoding is the following
23417 /* The following conditions must be true :
23422 rd
= (insn
>> 4) & 0xf;
23424 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
23425 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23426 _("Unable to process relocation for thumb opcode: %lx"),
23427 (unsigned long) insn
);
23429 /* Encode as ADD immediate8 thumb 1 code. */
23430 insn
= 0x3000 | (rd
<< 8);
23432 /* Place the encoded addend into the first 8 bits of the
23434 if (!seg
->use_rela_p
)
23435 insn
|= encoded_addend
;
23438 /* Update the instruction. */
23439 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
23443 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
23444 case BFD_RELOC_ARM_ALU_PC_G0
:
23445 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
23446 case BFD_RELOC_ARM_ALU_PC_G1
:
23447 case BFD_RELOC_ARM_ALU_PC_G2
:
23448 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
23449 case BFD_RELOC_ARM_ALU_SB_G0
:
23450 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
23451 case BFD_RELOC_ARM_ALU_SB_G1
:
23452 case BFD_RELOC_ARM_ALU_SB_G2
:
23453 gas_assert (!fixP
->fx_done
);
23454 if (!seg
->use_rela_p
)
23457 bfd_vma encoded_addend
;
23458 bfd_vma addend_abs
= abs (value
);
23460 /* Check that the absolute value of the addend can be
23461 expressed as an 8-bit constant plus a rotation. */
23462 encoded_addend
= encode_arm_immediate (addend_abs
);
23463 if (encoded_addend
== (unsigned int) FAIL
)
23464 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23465 _("the offset 0x%08lX is not representable"),
23466 (unsigned long) addend_abs
);
23468 /* Extract the instruction. */
23469 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23471 /* If the addend is positive, use an ADD instruction.
23472 Otherwise use a SUB. Take care not to destroy the S bit. */
23473 insn
&= 0xff1fffff;
23479 /* Place the encoded addend into the first 12 bits of the
23481 insn
&= 0xfffff000;
23482 insn
|= encoded_addend
;
23484 /* Update the instruction. */
23485 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23489 case BFD_RELOC_ARM_LDR_PC_G0
:
23490 case BFD_RELOC_ARM_LDR_PC_G1
:
23491 case BFD_RELOC_ARM_LDR_PC_G2
:
23492 case BFD_RELOC_ARM_LDR_SB_G0
:
23493 case BFD_RELOC_ARM_LDR_SB_G1
:
23494 case BFD_RELOC_ARM_LDR_SB_G2
:
23495 gas_assert (!fixP
->fx_done
);
23496 if (!seg
->use_rela_p
)
23499 bfd_vma addend_abs
= abs (value
);
23501 /* Check that the absolute value of the addend can be
23502 encoded in 12 bits. */
23503 if (addend_abs
>= 0x1000)
23504 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23505 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23506 (unsigned long) addend_abs
);
23508 /* Extract the instruction. */
23509 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23511 /* If the addend is negative, clear bit 23 of the instruction.
23512 Otherwise set it. */
23514 insn
&= ~(1 << 23);
23518 /* Place the absolute value of the addend into the first 12 bits
23519 of the instruction. */
23520 insn
&= 0xfffff000;
23521 insn
|= addend_abs
;
23523 /* Update the instruction. */
23524 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23528 case BFD_RELOC_ARM_LDRS_PC_G0
:
23529 case BFD_RELOC_ARM_LDRS_PC_G1
:
23530 case BFD_RELOC_ARM_LDRS_PC_G2
:
23531 case BFD_RELOC_ARM_LDRS_SB_G0
:
23532 case BFD_RELOC_ARM_LDRS_SB_G1
:
23533 case BFD_RELOC_ARM_LDRS_SB_G2
:
23534 gas_assert (!fixP
->fx_done
);
23535 if (!seg
->use_rela_p
)
23538 bfd_vma addend_abs
= abs (value
);
23540 /* Check that the absolute value of the addend can be
23541 encoded in 8 bits. */
23542 if (addend_abs
>= 0x100)
23543 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23544 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23545 (unsigned long) addend_abs
);
23547 /* Extract the instruction. */
23548 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23550 /* If the addend is negative, clear bit 23 of the instruction.
23551 Otherwise set it. */
23553 insn
&= ~(1 << 23);
23557 /* Place the first four bits of the absolute value of the addend
23558 into the first 4 bits of the instruction, and the remaining
23559 four into bits 8 .. 11. */
23560 insn
&= 0xfffff0f0;
23561 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
23563 /* Update the instruction. */
23564 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23568 case BFD_RELOC_ARM_LDC_PC_G0
:
23569 case BFD_RELOC_ARM_LDC_PC_G1
:
23570 case BFD_RELOC_ARM_LDC_PC_G2
:
23571 case BFD_RELOC_ARM_LDC_SB_G0
:
23572 case BFD_RELOC_ARM_LDC_SB_G1
:
23573 case BFD_RELOC_ARM_LDC_SB_G2
:
23574 gas_assert (!fixP
->fx_done
);
23575 if (!seg
->use_rela_p
)
23578 bfd_vma addend_abs
= abs (value
);
23580 /* Check that the absolute value of the addend is a multiple of
23581 four and, when divided by four, fits in 8 bits. */
23582 if (addend_abs
& 0x3)
23583 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23584 _("bad offset 0x%08lX (must be word-aligned)"),
23585 (unsigned long) addend_abs
);
23587 if ((addend_abs
>> 2) > 0xff)
23588 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23589 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23590 (unsigned long) addend_abs
);
23592 /* Extract the instruction. */
23593 insn
= md_chars_to_number (buf
, INSN_SIZE
);
23595 /* If the addend is negative, clear bit 23 of the instruction.
23596 Otherwise set it. */
23598 insn
&= ~(1 << 23);
23602 /* Place the addend (divided by four) into the first eight
23603 bits of the instruction. */
23604 insn
&= 0xfffffff0;
23605 insn
|= addend_abs
>> 2;
23607 /* Update the instruction. */
23608 md_number_to_chars (buf
, insn
, INSN_SIZE
);
23612 case BFD_RELOC_ARM_V4BX
:
23613 /* This will need to go in the object file. */
23617 case BFD_RELOC_UNUSED
:
23619 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23620 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
23624 /* Translate internal representation of relocation info to BFD target
23628 tc_gen_reloc (asection
*section
, fixS
*fixp
)
23631 bfd_reloc_code_real_type code
;
23633 reloc
= (arelent
*) xmalloc (sizeof (arelent
));
23635 reloc
->sym_ptr_ptr
= (asymbol
**) xmalloc (sizeof (asymbol
*));
23636 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
23637 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
23639 if (fixp
->fx_pcrel
)
23641 if (section
->use_rela_p
)
23642 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
23644 fixp
->fx_offset
= reloc
->address
;
23646 reloc
->addend
= fixp
->fx_offset
;
23648 switch (fixp
->fx_r_type
)
23651 if (fixp
->fx_pcrel
)
23653 code
= BFD_RELOC_8_PCREL
;
23658 if (fixp
->fx_pcrel
)
23660 code
= BFD_RELOC_16_PCREL
;
23665 if (fixp
->fx_pcrel
)
23667 code
= BFD_RELOC_32_PCREL
;
23671 case BFD_RELOC_ARM_MOVW
:
23672 if (fixp
->fx_pcrel
)
23674 code
= BFD_RELOC_ARM_MOVW_PCREL
;
23678 case BFD_RELOC_ARM_MOVT
:
23679 if (fixp
->fx_pcrel
)
23681 code
= BFD_RELOC_ARM_MOVT_PCREL
;
23685 case BFD_RELOC_ARM_THUMB_MOVW
:
23686 if (fixp
->fx_pcrel
)
23688 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
23692 case BFD_RELOC_ARM_THUMB_MOVT
:
23693 if (fixp
->fx_pcrel
)
23695 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
23699 case BFD_RELOC_NONE
:
23700 case BFD_RELOC_ARM_PCREL_BRANCH
:
23701 case BFD_RELOC_ARM_PCREL_BLX
:
23702 case BFD_RELOC_RVA
:
23703 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
23704 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
23705 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
23706 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23707 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23708 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23709 case BFD_RELOC_VTABLE_ENTRY
:
23710 case BFD_RELOC_VTABLE_INHERIT
:
23712 case BFD_RELOC_32_SECREL
:
23714 code
= fixp
->fx_r_type
;
23717 case BFD_RELOC_THUMB_PCREL_BLX
:
23719 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
23720 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23723 code
= BFD_RELOC_THUMB_PCREL_BLX
;
23726 case BFD_RELOC_ARM_LITERAL
:
23727 case BFD_RELOC_ARM_HWLITERAL
:
23728 /* If this is called then the a literal has
23729 been referenced across a section boundary. */
23730 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23731 _("literal referenced across section boundary"));
23735 case BFD_RELOC_ARM_TLS_CALL
:
23736 case BFD_RELOC_ARM_THM_TLS_CALL
:
23737 case BFD_RELOC_ARM_TLS_DESCSEQ
:
23738 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
23739 case BFD_RELOC_ARM_GOT32
:
23740 case BFD_RELOC_ARM_GOTOFF
:
23741 case BFD_RELOC_ARM_GOT_PREL
:
23742 case BFD_RELOC_ARM_PLT32
:
23743 case BFD_RELOC_ARM_TARGET1
:
23744 case BFD_RELOC_ARM_ROSEGREL32
:
23745 case BFD_RELOC_ARM_SBREL32
:
23746 case BFD_RELOC_ARM_PREL31
:
23747 case BFD_RELOC_ARM_TARGET2
:
23748 case BFD_RELOC_ARM_TLS_LDO32
:
23749 case BFD_RELOC_ARM_PCREL_CALL
:
23750 case BFD_RELOC_ARM_PCREL_JUMP
:
23751 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
23752 case BFD_RELOC_ARM_ALU_PC_G0
:
23753 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
23754 case BFD_RELOC_ARM_ALU_PC_G1
:
23755 case BFD_RELOC_ARM_ALU_PC_G2
:
23756 case BFD_RELOC_ARM_LDR_PC_G0
:
23757 case BFD_RELOC_ARM_LDR_PC_G1
:
23758 case BFD_RELOC_ARM_LDR_PC_G2
:
23759 case BFD_RELOC_ARM_LDRS_PC_G0
:
23760 case BFD_RELOC_ARM_LDRS_PC_G1
:
23761 case BFD_RELOC_ARM_LDRS_PC_G2
:
23762 case BFD_RELOC_ARM_LDC_PC_G0
:
23763 case BFD_RELOC_ARM_LDC_PC_G1
:
23764 case BFD_RELOC_ARM_LDC_PC_G2
:
23765 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
23766 case BFD_RELOC_ARM_ALU_SB_G0
:
23767 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
23768 case BFD_RELOC_ARM_ALU_SB_G1
:
23769 case BFD_RELOC_ARM_ALU_SB_G2
:
23770 case BFD_RELOC_ARM_LDR_SB_G0
:
23771 case BFD_RELOC_ARM_LDR_SB_G1
:
23772 case BFD_RELOC_ARM_LDR_SB_G2
:
23773 case BFD_RELOC_ARM_LDRS_SB_G0
:
23774 case BFD_RELOC_ARM_LDRS_SB_G1
:
23775 case BFD_RELOC_ARM_LDRS_SB_G2
:
23776 case BFD_RELOC_ARM_LDC_SB_G0
:
23777 case BFD_RELOC_ARM_LDC_SB_G1
:
23778 case BFD_RELOC_ARM_LDC_SB_G2
:
23779 case BFD_RELOC_ARM_V4BX
:
23780 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
23781 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
23782 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
23783 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
23784 code
= fixp
->fx_r_type
;
23787 case BFD_RELOC_ARM_TLS_GOTDESC
:
23788 case BFD_RELOC_ARM_TLS_GD32
:
23789 case BFD_RELOC_ARM_TLS_LE32
:
23790 case BFD_RELOC_ARM_TLS_IE32
:
23791 case BFD_RELOC_ARM_TLS_LDM32
:
23792 /* BFD will include the symbol's address in the addend.
23793 But we don't want that, so subtract it out again here. */
23794 if (!S_IS_COMMON (fixp
->fx_addsy
))
23795 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
23796 code
= fixp
->fx_r_type
;
23800 case BFD_RELOC_ARM_IMMEDIATE
:
23801 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23802 _("internal relocation (type: IMMEDIATE) not fixed up"));
23805 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
23806 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23807 _("ADRL used for a symbol not defined in the same file"));
23810 case BFD_RELOC_ARM_OFFSET_IMM
:
23811 if (section
->use_rela_p
)
23813 code
= fixp
->fx_r_type
;
23817 if (fixp
->fx_addsy
!= NULL
23818 && !S_IS_DEFINED (fixp
->fx_addsy
)
23819 && S_IS_LOCAL (fixp
->fx_addsy
))
23821 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23822 _("undefined local label `%s'"),
23823 S_GET_NAME (fixp
->fx_addsy
));
23827 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23828 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
23835 switch (fixp
->fx_r_type
)
23837 case BFD_RELOC_NONE
: type
= "NONE"; break;
23838 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
23839 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
23840 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
23841 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
23842 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
23843 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
23844 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
23845 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
23846 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
23847 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
23848 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
23849 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
23850 default: type
= _("<unknown>"); break;
23852 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23853 _("cannot represent %s relocation in this object file format"),
23860 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
23862 && fixp
->fx_addsy
== GOT_symbol
)
23864 code
= BFD_RELOC_ARM_GOTPC
;
23865 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
23869 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
23871 if (reloc
->howto
== NULL
)
23873 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
23874 _("cannot represent %s relocation in this object file format"),
23875 bfd_get_reloc_code_name (code
));
23879 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
23880 vtable entry to be used in the relocation's section offset. */
23881 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
23882 reloc
->address
= fixp
->fx_offset
;
23887 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
23890 cons_fix_new_arm (fragS
* frag
,
23894 bfd_reloc_code_real_type reloc
)
23899 FIXME: @@ Should look at CPU word size. */
23903 reloc
= BFD_RELOC_8
;
23906 reloc
= BFD_RELOC_16
;
23910 reloc
= BFD_RELOC_32
;
23913 reloc
= BFD_RELOC_64
;
23918 if (exp
->X_op
== O_secrel
)
23920 exp
->X_op
= O_symbol
;
23921 reloc
= BFD_RELOC_32_SECREL
;
23925 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
23928 #if defined (OBJ_COFF)
23930 arm_validate_fix (fixS
* fixP
)
23932 /* If the destination of the branch is a defined symbol which does not have
23933 the THUMB_FUNC attribute, then we must be calling a function which has
23934 the (interfacearm) attribute. We look for the Thumb entry point to that
23935 function and change the branch to refer to that function instead. */
23936 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
23937 && fixP
->fx_addsy
!= NULL
23938 && S_IS_DEFINED (fixP
->fx_addsy
)
23939 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
23941 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
23948 arm_force_relocation (struct fix
* fixp
)
23950 #if defined (OBJ_COFF) && defined (TE_PE)
23951 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
23955 /* In case we have a call or a branch to a function in ARM ISA mode from
23956 a thumb function or vice-versa force the relocation. These relocations
23957 are cleared off for some cores that might have blx and simple transformations
23961 switch (fixp
->fx_r_type
)
23963 case BFD_RELOC_ARM_PCREL_JUMP
:
23964 case BFD_RELOC_ARM_PCREL_CALL
:
23965 case BFD_RELOC_THUMB_PCREL_BLX
:
23966 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
23970 case BFD_RELOC_ARM_PCREL_BLX
:
23971 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23972 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23973 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23974 if (ARM_IS_FUNC (fixp
->fx_addsy
))
23983 /* Resolve these relocations even if the symbol is extern or weak.
23984 Technically this is probably wrong due to symbol preemption.
23985 In practice these relocations do not have enough range to be useful
23986 at dynamic link time, and some code (e.g. in the Linux kernel)
23987 expects these references to be resolved. */
23988 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
23989 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
23990 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
23991 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
23992 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23993 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
23994 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
23995 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
23996 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
23997 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
23998 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
23999 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
24000 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
24001 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
24004 /* Always leave these relocations for the linker. */
24005 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24006 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24007 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24010 /* Always generate relocations against function symbols. */
24011 if (fixp
->fx_r_type
== BFD_RELOC_32
24013 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
24016 return generic_force_reloc (fixp
);
24019 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24020 /* Relocations against function names must be left unadjusted,
24021 so that the linker can use this information to generate interworking
24022 stubs. The MIPS version of this function
24023 also prevents relocations that are mips-16 specific, but I do not
24024 know why it does this.
24027 There is one other problem that ought to be addressed here, but
24028 which currently is not: Taking the address of a label (rather
24029 than a function) and then later jumping to that address. Such
24030 addresses also ought to have their bottom bit set (assuming that
24031 they reside in Thumb code), but at the moment they will not. */
24034 arm_fix_adjustable (fixS
* fixP
)
24036 if (fixP
->fx_addsy
== NULL
)
24039 /* Preserve relocations against symbols with function type. */
24040 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
24043 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
24044 && fixP
->fx_subsy
== NULL
)
24047 /* We need the symbol name for the VTABLE entries. */
24048 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
24049 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
24052 /* Don't allow symbols to be discarded on GOT related relocs. */
24053 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
24054 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
24055 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
24056 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
24057 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
24058 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
24059 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
24060 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
24061 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GOTDESC
24062 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_CALL
24063 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_CALL
24064 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_DESCSEQ
24065 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_DESCSEQ
24066 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
24069 /* Similarly for group relocations. */
24070 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24071 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24072 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24075 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
24076 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
24077 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
24078 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
24079 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
24080 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
24081 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
24082 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
24083 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
24086 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24087 offsets, so keep these symbols. */
24088 if (fixP
->fx_r_type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24089 && fixP
->fx_r_type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
24094 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24098 elf32_arm_target_format (void)
24101 return (target_big_endian
24102 ? "elf32-bigarm-symbian"
24103 : "elf32-littlearm-symbian");
24104 #elif defined (TE_VXWORKS)
24105 return (target_big_endian
24106 ? "elf32-bigarm-vxworks"
24107 : "elf32-littlearm-vxworks");
24108 #elif defined (TE_NACL)
24109 return (target_big_endian
24110 ? "elf32-bigarm-nacl"
24111 : "elf32-littlearm-nacl");
24113 if (target_big_endian
)
24114 return "elf32-bigarm";
24116 return "elf32-littlearm";
24121 armelf_frob_symbol (symbolS
* symp
,
24124 elf_frob_symbol (symp
, puntp
);
24128 /* MD interface: Finalization. */
24133 literal_pool
* pool
;
24135 /* Ensure that all the IT blocks are properly closed. */
24136 check_it_blocks_finished ();
24138 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
24140 /* Put it at the end of the relevant section. */
24141 subseg_set (pool
->section
, pool
->sub_section
);
24143 arm_elf_change_section ();
24150 /* Remove any excess mapping symbols generated for alignment frags in
24151 SEC. We may have created a mapping symbol before a zero byte
24152 alignment; remove it if there's a mapping symbol after the
24155 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
24156 void *dummy ATTRIBUTE_UNUSED
)
24158 segment_info_type
*seginfo
= seg_info (sec
);
24161 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
24164 for (fragp
= seginfo
->frchainP
->frch_root
;
24166 fragp
= fragp
->fr_next
)
24168 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
24169 fragS
*next
= fragp
->fr_next
;
24171 /* Variable-sized frags have been converted to fixed size by
24172 this point. But if this was variable-sized to start with,
24173 there will be a fixed-size frag after it. So don't handle
24175 if (sym
== NULL
|| next
== NULL
)
24178 if (S_GET_VALUE (sym
) < next
->fr_address
)
24179 /* Not at the end of this frag. */
24181 know (S_GET_VALUE (sym
) == next
->fr_address
);
24185 if (next
->tc_frag_data
.first_map
!= NULL
)
24187 /* Next frag starts with a mapping symbol. Discard this
24189 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24193 if (next
->fr_next
== NULL
)
24195 /* This mapping symbol is at the end of the section. Discard
24197 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
24198 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24202 /* As long as we have empty frags without any mapping symbols,
24204 /* If the next frag is non-empty and does not start with a
24205 mapping symbol, then this mapping symbol is required. */
24206 if (next
->fr_address
!= next
->fr_next
->fr_address
)
24209 next
= next
->fr_next
;
24211 while (next
!= NULL
);
24216 /* Adjust the symbol table. This marks Thumb symbols as distinct from
24220 arm_adjust_symtab (void)
24225 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
24227 if (ARM_IS_THUMB (sym
))
24229 if (THUMB_IS_FUNC (sym
))
24231 /* Mark the symbol as a Thumb function. */
24232 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
24233 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
24234 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
24236 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
24237 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
24239 as_bad (_("%s: unexpected function type: %d"),
24240 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
24242 else switch (S_GET_STORAGE_CLASS (sym
))
24245 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
24248 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
24251 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
24259 if (ARM_IS_INTERWORK (sym
))
24260 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
24267 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
24269 if (ARM_IS_THUMB (sym
))
24271 elf_symbol_type
* elf_sym
;
24273 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
24274 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
24276 if (! bfd_is_arm_special_symbol_name (elf_sym
->symbol
.name
,
24277 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
24279 /* If it's a .thumb_func, declare it as so,
24280 otherwise tag label as .code 16. */
24281 if (THUMB_IS_FUNC (sym
))
24282 elf_sym
->internal_elf_sym
.st_target_internal
24283 = ST_BRANCH_TO_THUMB
;
24284 else if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
24285 elf_sym
->internal_elf_sym
.st_info
=
24286 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
24291 /* Remove any overlapping mapping symbols generated by alignment frags. */
24292 bfd_map_over_sections (stdoutput
, check_mapping_symbols
, (char *) 0);
24293 /* Now do generic ELF adjustments. */
24294 elf_adjust_symtab ();
24298 /* MD interface: Initialization. */
24301 set_constant_flonums (void)
24305 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
24306 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
24310 /* Auto-select Thumb mode if it's the only available instruction set for the
24311 given architecture. */
24314 autoselect_thumb_from_cpu_variant (void)
24316 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
24317 opcode_select (16);
24326 if ( (arm_ops_hsh
= hash_new ()) == NULL
24327 || (arm_cond_hsh
= hash_new ()) == NULL
24328 || (arm_shift_hsh
= hash_new ()) == NULL
24329 || (arm_psr_hsh
= hash_new ()) == NULL
24330 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
24331 || (arm_reg_hsh
= hash_new ()) == NULL
24332 || (arm_reloc_hsh
= hash_new ()) == NULL
24333 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
24334 as_fatal (_("virtual memory exhausted"));
24336 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
24337 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
24338 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
24339 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
24340 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
24341 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
24342 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
24343 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
24344 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
24345 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
24346 (void *) (v7m_psrs
+ i
));
24347 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
24348 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
24350 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
24352 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
24353 (void *) (barrier_opt_names
+ i
));
24355 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
24357 struct reloc_entry
* entry
= reloc_names
+ i
;
24359 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
24360 /* This makes encode_branch() use the EABI versions of this relocation. */
24361 entry
->reloc
= BFD_RELOC_UNUSED
;
24363 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
24367 set_constant_flonums ();
24369 /* Set the cpu variant based on the command-line options. We prefer
24370 -mcpu= over -march= if both are set (as for GCC); and we prefer
24371 -mfpu= over any other way of setting the floating point unit.
24372 Use of legacy options with new options are faulted. */
24375 if (mcpu_cpu_opt
|| march_cpu_opt
)
24376 as_bad (_("use of old and new-style options to set CPU type"));
24378 mcpu_cpu_opt
= legacy_cpu
;
24380 else if (!mcpu_cpu_opt
)
24381 mcpu_cpu_opt
= march_cpu_opt
;
24386 as_bad (_("use of old and new-style options to set FPU type"));
24388 mfpu_opt
= legacy_fpu
;
24390 else if (!mfpu_opt
)
24392 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
24393 || defined (TE_NetBSD) || defined (TE_VXWORKS))
24394 /* Some environments specify a default FPU. If they don't, infer it
24395 from the processor. */
24397 mfpu_opt
= mcpu_fpu_opt
;
24399 mfpu_opt
= march_fpu_opt
;
24401 mfpu_opt
= &fpu_default
;
24407 if (mcpu_cpu_opt
!= NULL
)
24408 mfpu_opt
= &fpu_default
;
24409 else if (mcpu_fpu_opt
!= NULL
&& ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
24410 mfpu_opt
= &fpu_arch_vfp_v2
;
24412 mfpu_opt
= &fpu_arch_fpa
;
24418 mcpu_cpu_opt
= &cpu_default
;
24419 selected_cpu
= cpu_default
;
24421 else if (no_cpu_selected ())
24422 selected_cpu
= cpu_default
;
24425 selected_cpu
= *mcpu_cpu_opt
;
24427 mcpu_cpu_opt
= &arm_arch_any
;
24430 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
24432 autoselect_thumb_from_cpu_variant ();
24434 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
24436 #if defined OBJ_COFF || defined OBJ_ELF
24438 unsigned int flags
= 0;
24440 #if defined OBJ_ELF
24441 flags
= meabi_flags
;
24443 switch (meabi_flags
)
24445 case EF_ARM_EABI_UNKNOWN
:
24447 /* Set the flags in the private structure. */
24448 if (uses_apcs_26
) flags
|= F_APCS26
;
24449 if (support_interwork
) flags
|= F_INTERWORK
;
24450 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
24451 if (pic_code
) flags
|= F_PIC
;
24452 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
24453 flags
|= F_SOFT_FLOAT
;
24455 switch (mfloat_abi_opt
)
24457 case ARM_FLOAT_ABI_SOFT
:
24458 case ARM_FLOAT_ABI_SOFTFP
:
24459 flags
|= F_SOFT_FLOAT
;
24462 case ARM_FLOAT_ABI_HARD
:
24463 if (flags
& F_SOFT_FLOAT
)
24464 as_bad (_("hard-float conflicts with specified fpu"));
24468 /* Using pure-endian doubles (even if soft-float). */
24469 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
24470 flags
|= F_VFP_FLOAT
;
24472 #if defined OBJ_ELF
24473 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
24474 flags
|= EF_ARM_MAVERICK_FLOAT
;
24477 case EF_ARM_EABI_VER4
:
24478 case EF_ARM_EABI_VER5
:
24479 /* No additional flags to set. */
24486 bfd_set_private_flags (stdoutput
, flags
);
24488 /* We have run out flags in the COFF header to encode the
24489 status of ATPCS support, so instead we create a dummy,
24490 empty, debug section called .arm.atpcs. */
24495 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
24499 bfd_set_section_flags
24500 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
24501 bfd_set_section_size (stdoutput
, sec
, 0);
24502 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
24508 /* Record the CPU type as well. */
24509 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
24510 mach
= bfd_mach_arm_iWMMXt2
;
24511 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
24512 mach
= bfd_mach_arm_iWMMXt
;
24513 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
24514 mach
= bfd_mach_arm_XScale
;
24515 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
24516 mach
= bfd_mach_arm_ep9312
;
24517 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
24518 mach
= bfd_mach_arm_5TE
;
24519 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
24521 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
24522 mach
= bfd_mach_arm_5T
;
24524 mach
= bfd_mach_arm_5
;
24526 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
24528 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
24529 mach
= bfd_mach_arm_4T
;
24531 mach
= bfd_mach_arm_4
;
24533 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
24534 mach
= bfd_mach_arm_3M
;
24535 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
24536 mach
= bfd_mach_arm_3
;
24537 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
24538 mach
= bfd_mach_arm_2a
;
24539 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
24540 mach
= bfd_mach_arm_2
;
24542 mach
= bfd_mach_arm_unknown
;
24544 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
24547 /* Command line processing. */
24550 Invocation line includes a switch not recognized by the base assembler.
24551 See if it's a processor-specific option.
24553 This routine is somewhat complicated by the need for backwards
24554 compatibility (since older releases of gcc can't be changed).
24555 The new options try to make the interface as compatible as
24558 New options (supported) are:
24560 -mcpu=<cpu name> Assemble for selected processor
24561 -march=<architecture name> Assemble for selected architecture
24562 -mfpu=<fpu architecture> Assemble for selected FPU.
24563 -EB/-mbig-endian Big-endian
24564 -EL/-mlittle-endian Little-endian
24565 -k Generate PIC code
24566 -mthumb Start in Thumb mode
24567 -mthumb-interwork Code supports ARM/Thumb interworking
24569 -m[no-]warn-deprecated Warn about deprecated features
24570 -m[no-]warn-syms Warn when symbols match instructions
24572 For now we will also provide support for:
24574 -mapcs-32 32-bit Program counter
24575 -mapcs-26 26-bit Program counter
24576 -macps-float Floats passed in FP registers
24577 -mapcs-reentrant Reentrant code
24579 (sometime these will probably be replaced with -mapcs=<list of options>
24580 and -matpcs=<list of options>)
24582 The remaining options are only supported for back-wards compatibility.
24583 Cpu variants, the arm part is optional:
24584 -m[arm]1 Currently not supported.
24585 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24586 -m[arm]3 Arm 3 processor
24587 -m[arm]6[xx], Arm 6 processors
24588 -m[arm]7[xx][t][[d]m] Arm 7 processors
24589 -m[arm]8[10] Arm 8 processors
24590 -m[arm]9[20][tdmi] Arm 9 processors
24591 -mstrongarm[110[0]] StrongARM processors
24592 -mxscale XScale processors
24593 -m[arm]v[2345[t[e]]] Arm architectures
24594 -mall All (except the ARM1)
24596 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24597 -mfpe-old (No float load/store multiples)
24598 -mvfpxd VFP Single precision
24600 -mno-fpu Disable all floating point instructions
24602 The following CPU names are recognized:
24603 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24604 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
24605 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
24606 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24607 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
24608 arm10t arm10e, arm1020t, arm1020e, arm10200e,
24609 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
24613 const char * md_shortopts
= "m:k";
24615 #ifdef ARM_BI_ENDIAN
24616 #define OPTION_EB (OPTION_MD_BASE + 0)
24617 #define OPTION_EL (OPTION_MD_BASE + 1)
24619 #if TARGET_BYTES_BIG_ENDIAN
24620 #define OPTION_EB (OPTION_MD_BASE + 0)
24622 #define OPTION_EL (OPTION_MD_BASE + 1)
24625 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
24627 struct option md_longopts
[] =
24630 {"EB", no_argument
, NULL
, OPTION_EB
},
24633 {"EL", no_argument
, NULL
, OPTION_EL
},
24635 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
24636 {NULL
, no_argument
, NULL
, 0}
24640 size_t md_longopts_size
= sizeof (md_longopts
);
24642 struct arm_option_table
24644 char *option
; /* Option name to match. */
24645 char *help
; /* Help information. */
24646 int *var
; /* Variable to change. */
24647 int value
; /* What to change it to. */
24648 char *deprecated
; /* If non-null, print this message. */
24651 struct arm_option_table arm_opts
[] =
24653 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
24654 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
24655 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
24656 &support_interwork
, 1, NULL
},
24657 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
24658 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
24659 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
24661 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
24662 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
24663 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
24664 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
24667 /* These are recognized by the assembler, but have no affect on code. */
24668 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
24669 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
24671 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
24672 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
24673 &warn_on_deprecated
, 0, NULL
},
24674 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
24675 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
24676 {NULL
, NULL
, NULL
, 0, NULL
}
24679 struct arm_legacy_option_table
24681 char *option
; /* Option name to match. */
24682 const arm_feature_set
**var
; /* Variable to change. */
24683 const arm_feature_set value
; /* What to change it to. */
24684 char *deprecated
; /* If non-null, print this message. */
24687 const struct arm_legacy_option_table arm_legacy_opts
[] =
24689 /* DON'T add any new processors to this list -- we want the whole list
24690 to go away... Add them to the processors table instead. */
24691 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
24692 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
24693 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
24694 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
24695 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
24696 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
24697 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
24698 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
24699 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
24700 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
24701 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
24702 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
24703 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
24704 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
24705 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
24706 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
24707 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
24708 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
24709 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
24710 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
24711 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
24712 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
24713 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
24714 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
24715 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
24716 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
24717 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
24718 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
24719 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
24720 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
24721 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
24722 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
24723 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
24724 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
24725 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
24726 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
24727 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
24728 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
24729 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
24730 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
24731 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
24732 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
24733 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
24734 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
24735 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
24736 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
24737 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24738 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24739 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24740 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
24741 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
24742 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
24743 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
24744 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
24745 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
24746 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
24747 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
24748 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
24749 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
24750 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
24751 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
24752 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
24753 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
24754 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
24755 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
24756 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
24757 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
24758 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
24759 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
24760 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
24761 N_("use -mcpu=strongarm110")},
24762 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
24763 N_("use -mcpu=strongarm1100")},
24764 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
24765 N_("use -mcpu=strongarm1110")},
24766 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
24767 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
24768 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
24770 /* Architecture variants -- don't add any more to this list either. */
24771 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
24772 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
24773 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
24774 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
24775 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
24776 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
24777 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
24778 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
24779 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
24780 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
24781 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
24782 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
24783 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
24784 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
24785 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
24786 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
24787 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
24788 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
24790 /* Floating point variants -- don't add any more to this list either. */
24791 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
24792 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
24793 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
24794 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
24795 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
24797 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
24800 struct arm_cpu_option_table
24804 const arm_feature_set value
;
24805 /* For some CPUs we assume an FPU unless the user explicitly sets
24807 const arm_feature_set default_fpu
;
24808 /* The canonical name of the CPU, or NULL to use NAME converted to upper
24810 const char *canonical_name
;
24813 /* This list should, at a minimum, contain all the cpu names
24814 recognized by GCC. */
24815 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
24816 static const struct arm_cpu_option_table arm_cpus
[] =
24818 ARM_CPU_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
, NULL
),
24819 ARM_CPU_OPT ("arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
),
24820 ARM_CPU_OPT ("arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
),
24821 ARM_CPU_OPT ("arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
24822 ARM_CPU_OPT ("arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
),
24823 ARM_CPU_OPT ("arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24824 ARM_CPU_OPT ("arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24825 ARM_CPU_OPT ("arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24826 ARM_CPU_OPT ("arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24827 ARM_CPU_OPT ("arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24828 ARM_CPU_OPT ("arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24829 ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
24830 ARM_CPU_OPT ("arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24831 ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
24832 ARM_CPU_OPT ("arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24833 ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
),
24834 ARM_CPU_OPT ("arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24835 ARM_CPU_OPT ("arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24836 ARM_CPU_OPT ("arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24837 ARM_CPU_OPT ("arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24838 ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24839 ARM_CPU_OPT ("arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24840 ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24841 ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24842 ARM_CPU_OPT ("arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24843 ARM_CPU_OPT ("arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24844 ARM_CPU_OPT ("arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24845 ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
),
24846 ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24847 ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24848 ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24849 ARM_CPU_OPT ("arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24850 ARM_CPU_OPT ("arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24851 ARM_CPU_OPT ("strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24852 ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24853 ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24854 ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24855 ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24856 ARM_CPU_OPT ("arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24857 ARM_CPU_OPT ("arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"),
24858 ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24859 ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24860 ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24861 ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
),
24862 ARM_CPU_OPT ("fa526", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24863 ARM_CPU_OPT ("fa626", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
),
24864 /* For V5 or later processors we default to using VFP; but the user
24865 should really set the FPU type explicitly. */
24866 ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
24867 ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24868 ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
24869 ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"),
24870 ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
24871 ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
24872 ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"),
24873 ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24874 ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
),
24875 ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"),
24876 ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24877 ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24878 ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
24879 ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
24880 ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24881 ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"),
24882 ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
),
24883 ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24884 ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24885 ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
,
24887 ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
),
24888 ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24889 ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24890 ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24891 ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24892 ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
),
24893 ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"),
24894 ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
),
24895 ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
,
24897 ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
),
24898 ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, "MPCore"),
24899 ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, "MPCore"),
24900 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
),
24901 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
),
24902 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6KZ
, FPU_NONE
, NULL
),
24903 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6KZ
, FPU_ARCH_VFP_V2
, NULL
),
24904 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC
,
24905 FPU_NONE
, "Cortex-A5"),
24906 ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24908 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC
,
24909 ARM_FEATURE_COPROC (FPU_VFP_V3
24910 | FPU_NEON_EXT_V1
),
24912 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC
,
24913 ARM_FEATURE_COPROC (FPU_VFP_V3
24914 | FPU_NEON_EXT_V1
),
24916 ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24918 ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24920 ARM_CPU_OPT ("cortex-a17", ARM_ARCH_V7VE
, FPU_ARCH_NEON_VFP_V4
,
24922 ARM_CPU_OPT ("cortex-a35", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24924 ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24926 ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24928 ARM_CPU_OPT ("cortex-a72", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24930 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, "Cortex-R4"),
24931 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R
, FPU_ARCH_VFP_V3D16
,
24933 ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV
,
24934 FPU_NONE
, "Cortex-R5"),
24935 ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV
,
24936 FPU_ARCH_VFP_V3D16
,
24938 ARM_CPU_OPT ("cortex-m7", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M7"),
24939 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM
, FPU_NONE
, "Cortex-M4"),
24940 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, "Cortex-M3"),
24941 ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M1"),
24942 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0"),
24943 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM
, FPU_NONE
, "Cortex-M0+"),
24944 ARM_CPU_OPT ("exynos-m1", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24947 ARM_CPU_OPT ("qdf24xx", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24951 /* ??? XSCALE is really an architecture. */
24952 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
24953 /* ??? iwmmxt is not a processor. */
24954 ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
),
24955 ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP_V2
, NULL
),
24956 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
),
24958 ARM_CPU_OPT ("ep9312", ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
24959 FPU_ARCH_MAVERICK
, "ARM920T"),
24960 /* Marvell processors. */
24961 ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE_CORE (ARM_AEXT_V7A
| ARM_EXT_MP
24963 ARM_EXT2_V6T2_V8M
),
24964 FPU_ARCH_VFP_V3D16
, NULL
),
24965 ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A
| ARM_EXT_MP
24967 ARM_EXT2_V6T2_V8M
),
24968 FPU_ARCH_NEON_VFP_V4
, NULL
),
24969 /* APM X-Gene family. */
24970 ARM_CPU_OPT ("xgene1", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24972 ARM_CPU_OPT ("xgene2", ARM_ARCH_V8A
, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
24975 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
24979 struct arm_arch_option_table
24983 const arm_feature_set value
;
24984 const arm_feature_set default_fpu
;
24987 /* This list should, at a minimum, contain all the architecture names
24988 recognized by GCC. */
24989 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
24990 static const struct arm_arch_option_table arm_archs
[] =
24992 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
24993 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
24994 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
24995 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
24996 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
24997 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
24998 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
24999 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
25000 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
25001 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
25002 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
25003 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
25004 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
25005 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
25006 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
),
25007 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
),
25008 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
),
25009 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25010 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
),
25011 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
),
25012 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
),
25013 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
25014 kept to preserve existing behaviour. */
25015 ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25016 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
25017 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
),
25018 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
),
25019 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
),
25020 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
25021 kept to preserve existing behaviour. */
25022 ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25023 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
25024 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
25025 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
25026 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
),
25027 /* The official spelling of the ARMv7 profile variants is the dashed form.
25028 Accept the non-dashed form for compatibility with old toolchains. */
25029 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25030 ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
),
25031 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25032 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25033 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
25034 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
25035 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
25036 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
),
25037 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
25038 ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
),
25039 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
),
25040 ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
),
25041 ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
),
25042 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
25043 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
25044 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP
),
25045 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
25047 #undef ARM_ARCH_OPT
25049 /* ISA extensions in the co-processor and main instruction set space. */
25050 struct arm_option_extension_value_table
25054 const arm_feature_set merge_value
;
25055 const arm_feature_set clear_value
;
25056 const arm_feature_set allowed_archs
;
25059 /* The following table must be in alphabetical order with a NULL last entry.
25061 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, AA }
25062 static const struct arm_option_extension_value_table arm_extensions
[] =
25064 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25065 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25066 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
25067 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
25068 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25069 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
25070 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25071 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25072 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
25073 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
| ARM_EXT_V7R
)),
25074 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
25075 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ANY
),
25076 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
25077 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ANY
),
25078 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
25079 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ANY
),
25080 ARM_EXT_OPT ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25081 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
25082 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
| ARM_EXT_V7R
)),
25083 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
25084 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
25085 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25086 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25087 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
25088 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
25089 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
25090 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
25091 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25092 ARM_EXT_OPT ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25093 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25094 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
| ARM_EXT_V7A
)),
25095 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
25097 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
25098 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
25099 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8
,
25100 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
25101 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
25102 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
25103 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ANY
),
25104 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
25108 /* ISA floating-point and Advanced SIMD extensions. */
25109 struct arm_option_fpu_value_table
25112 const arm_feature_set value
;
25115 /* This list should, at a minimum, contain all the fpu names
25116 recognized by GCC. */
25117 static const struct arm_option_fpu_value_table arm_fpus
[] =
25119 {"softfpa", FPU_NONE
},
25120 {"fpe", FPU_ARCH_FPE
},
25121 {"fpe2", FPU_ARCH_FPE
},
25122 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
25123 {"fpa", FPU_ARCH_FPA
},
25124 {"fpa10", FPU_ARCH_FPA
},
25125 {"fpa11", FPU_ARCH_FPA
},
25126 {"arm7500fe", FPU_ARCH_FPA
},
25127 {"softvfp", FPU_ARCH_VFP
},
25128 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
25129 {"vfp", FPU_ARCH_VFP_V2
},
25130 {"vfp9", FPU_ARCH_VFP_V2
},
25131 {"vfp3", FPU_ARCH_VFP_V3
}, /* For backwards compatbility. */
25132 {"vfp10", FPU_ARCH_VFP_V2
},
25133 {"vfp10-r0", FPU_ARCH_VFP_V1
},
25134 {"vfpxd", FPU_ARCH_VFP_V1xD
},
25135 {"vfpv2", FPU_ARCH_VFP_V2
},
25136 {"vfpv3", FPU_ARCH_VFP_V3
},
25137 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
25138 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
25139 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
25140 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
25141 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
25142 {"arm1020t", FPU_ARCH_VFP_V1
},
25143 {"arm1020e", FPU_ARCH_VFP_V2
},
25144 {"arm1136jfs", FPU_ARCH_VFP_V2
},
25145 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
25146 {"maverick", FPU_ARCH_MAVERICK
},
25147 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
25148 {"neon-fp16", FPU_ARCH_NEON_FP16
},
25149 {"vfpv4", FPU_ARCH_VFP_V4
},
25150 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
25151 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
25152 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
25153 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
25154 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
25155 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
25156 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
25157 {"crypto-neon-fp-armv8",
25158 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
25159 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
25160 {"crypto-neon-fp-armv8.1",
25161 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
25162 {NULL
, ARM_ARCH_NONE
}
25165 struct arm_option_value_table
25171 static const struct arm_option_value_table arm_float_abis
[] =
25173 {"hard", ARM_FLOAT_ABI_HARD
},
25174 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
25175 {"soft", ARM_FLOAT_ABI_SOFT
},
25180 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
25181 static const struct arm_option_value_table arm_eabis
[] =
25183 {"gnu", EF_ARM_EABI_UNKNOWN
},
25184 {"4", EF_ARM_EABI_VER4
},
25185 {"5", EF_ARM_EABI_VER5
},
25190 struct arm_long_option_table
25192 char * option
; /* Substring to match. */
25193 char * help
; /* Help information. */
25194 int (* func
) (char * subopt
); /* Function to decode sub-option. */
25195 char * deprecated
; /* If non-null, print this message. */
25199 arm_parse_extension (char *str
, const arm_feature_set
**opt_p
)
25201 arm_feature_set
*ext_set
= (arm_feature_set
*)
25202 xmalloc (sizeof (arm_feature_set
));
25204 /* We insist on extensions being specified in alphabetical order, and with
25205 extensions being added before being removed. We achieve this by having
25206 the global ARM_EXTENSIONS table in alphabetical order, and using the
25207 ADDING_VALUE variable to indicate whether we are adding an extension (1)
25208 or removing it (0) and only allowing it to change in the order
25210 const struct arm_option_extension_value_table
* opt
= NULL
;
25211 int adding_value
= -1;
25213 /* Copy the feature set, so that we can modify it. */
25214 *ext_set
= **opt_p
;
25217 while (str
!= NULL
&& *str
!= 0)
25224 as_bad (_("invalid architectural extension"));
25229 ext
= strchr (str
, '+');
25234 len
= strlen (str
);
25236 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
25238 if (adding_value
!= 0)
25241 opt
= arm_extensions
;
25249 if (adding_value
== -1)
25252 opt
= arm_extensions
;
25254 else if (adding_value
!= 1)
25256 as_bad (_("must specify extensions to add before specifying "
25257 "those to remove"));
25264 as_bad (_("missing architectural extension"));
25268 gas_assert (adding_value
!= -1);
25269 gas_assert (opt
!= NULL
);
25271 /* Scan over the options table trying to find an exact match. */
25272 for (; opt
->name
!= NULL
; opt
++)
25273 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25275 /* Check we can apply the extension to this architecture. */
25276 if (!ARM_CPU_HAS_FEATURE (*ext_set
, opt
->allowed_archs
))
25278 as_bad (_("extension does not apply to the base architecture"));
25282 /* Add or remove the extension. */
25284 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
25286 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
25291 if (opt
->name
== NULL
)
25293 /* Did we fail to find an extension because it wasn't specified in
25294 alphabetical order, or because it does not exist? */
25296 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
25297 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25300 if (opt
->name
== NULL
)
25301 as_bad (_("unknown architectural extension `%s'"), str
);
25303 as_bad (_("architectural extensions must be specified in "
25304 "alphabetical order"));
25310 /* We should skip the extension we've just matched the next time
25322 arm_parse_cpu (char *str
)
25324 const struct arm_cpu_option_table
*opt
;
25325 char *ext
= strchr (str
, '+');
25331 len
= strlen (str
);
25335 as_bad (_("missing cpu name `%s'"), str
);
25339 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
25340 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25342 mcpu_cpu_opt
= &opt
->value
;
25343 mcpu_fpu_opt
= &opt
->default_fpu
;
25344 if (opt
->canonical_name
)
25346 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
25347 strcpy (selected_cpu_name
, opt
->canonical_name
);
25353 if (len
>= sizeof selected_cpu_name
)
25354 len
= (sizeof selected_cpu_name
) - 1;
25356 for (i
= 0; i
< len
; i
++)
25357 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
25358 selected_cpu_name
[i
] = 0;
25362 return arm_parse_extension (ext
, &mcpu_cpu_opt
);
25367 as_bad (_("unknown cpu `%s'"), str
);
25372 arm_parse_arch (char *str
)
25374 const struct arm_arch_option_table
*opt
;
25375 char *ext
= strchr (str
, '+');
25381 len
= strlen (str
);
25385 as_bad (_("missing architecture name `%s'"), str
);
25389 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
25390 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
25392 march_cpu_opt
= &opt
->value
;
25393 march_fpu_opt
= &opt
->default_fpu
;
25394 strcpy (selected_cpu_name
, opt
->name
);
25397 return arm_parse_extension (ext
, &march_cpu_opt
);
25402 as_bad (_("unknown architecture `%s'\n"), str
);
25407 arm_parse_fpu (char * str
)
25409 const struct arm_option_fpu_value_table
* opt
;
25411 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
25412 if (streq (opt
->name
, str
))
25414 mfpu_opt
= &opt
->value
;
25418 as_bad (_("unknown floating point format `%s'\n"), str
);
25423 arm_parse_float_abi (char * str
)
25425 const struct arm_option_value_table
* opt
;
25427 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
25428 if (streq (opt
->name
, str
))
25430 mfloat_abi_opt
= opt
->value
;
25434 as_bad (_("unknown floating point abi `%s'\n"), str
);
25440 arm_parse_eabi (char * str
)
25442 const struct arm_option_value_table
*opt
;
25444 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
25445 if (streq (opt
->name
, str
))
25447 meabi_flags
= opt
->value
;
25450 as_bad (_("unknown EABI `%s'\n"), str
);
25456 arm_parse_it_mode (char * str
)
25458 bfd_boolean ret
= TRUE
;
25460 if (streq ("arm", str
))
25461 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
25462 else if (streq ("thumb", str
))
25463 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
25464 else if (streq ("always", str
))
25465 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
25466 else if (streq ("never", str
))
25467 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
25470 as_bad (_("unknown implicit IT mode `%s', should be "\
25471 "arm, thumb, always, or never."), str
);
25479 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED
)
25481 codecomposer_syntax
= TRUE
;
25482 arm_comment_chars
[0] = ';';
25483 arm_line_separator_chars
[0] = 0;
25487 struct arm_long_option_table arm_long_opts
[] =
25489 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
25490 arm_parse_cpu
, NULL
},
25491 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
25492 arm_parse_arch
, NULL
},
25493 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
25494 arm_parse_fpu
, NULL
},
25495 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
25496 arm_parse_float_abi
, NULL
},
25498 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
25499 arm_parse_eabi
, NULL
},
25501 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
25502 arm_parse_it_mode
, NULL
},
25503 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
25504 arm_ccs_mode
, NULL
},
25505 {NULL
, NULL
, 0, NULL
}
25509 md_parse_option (int c
, char * arg
)
25511 struct arm_option_table
*opt
;
25512 const struct arm_legacy_option_table
*fopt
;
25513 struct arm_long_option_table
*lopt
;
25519 target_big_endian
= 1;
25525 target_big_endian
= 0;
25529 case OPTION_FIX_V4BX
:
25534 /* Listing option. Just ignore these, we don't support additional
25539 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
25541 if (c
== opt
->option
[0]
25542 && ((arg
== NULL
&& opt
->option
[1] == 0)
25543 || streq (arg
, opt
->option
+ 1)))
25545 /* If the option is deprecated, tell the user. */
25546 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
25547 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
25548 arg
? arg
: "", _(opt
->deprecated
));
25550 if (opt
->var
!= NULL
)
25551 *opt
->var
= opt
->value
;
25557 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
25559 if (c
== fopt
->option
[0]
25560 && ((arg
== NULL
&& fopt
->option
[1] == 0)
25561 || streq (arg
, fopt
->option
+ 1)))
25563 /* If the option is deprecated, tell the user. */
25564 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
25565 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
25566 arg
? arg
: "", _(fopt
->deprecated
));
25568 if (fopt
->var
!= NULL
)
25569 *fopt
->var
= &fopt
->value
;
25575 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
25577 /* These options are expected to have an argument. */
25578 if (c
== lopt
->option
[0]
25580 && strncmp (arg
, lopt
->option
+ 1,
25581 strlen (lopt
->option
+ 1)) == 0)
25583 /* If the option is deprecated, tell the user. */
25584 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
25585 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
25586 _(lopt
->deprecated
));
25588 /* Call the sup-option parser. */
25589 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
25600 md_show_usage (FILE * fp
)
25602 struct arm_option_table
*opt
;
25603 struct arm_long_option_table
*lopt
;
25605 fprintf (fp
, _(" ARM-specific assembler options:\n"));
25607 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
25608 if (opt
->help
!= NULL
)
25609 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
25611 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
25612 if (lopt
->help
!= NULL
)
25613 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
25617 -EB assemble code for a big-endian cpu\n"));
25622 -EL assemble code for a little-endian cpu\n"));
25626 --fix-v4bx Allow BX in ARMv4 code\n"));
25634 arm_feature_set flags
;
25635 } cpu_arch_ver_table
;
25637 /* Mapping from CPU features to EABI CPU arch values. As a general rule, table
25638 must be sorted least features first but some reordering is needed, eg. for
25639 Thumb-2 instructions to be detected as coming from ARMv6T2. */
25640 static const cpu_arch_ver_table cpu_arch_ver
[] =
25646 {4, ARM_ARCH_V5TE
},
25647 {5, ARM_ARCH_V5TEJ
},
25651 {11, ARM_ARCH_V6M
},
25652 {12, ARM_ARCH_V6SM
},
25653 {8, ARM_ARCH_V6T2
},
25654 {10, ARM_ARCH_V7VE
},
25655 {10, ARM_ARCH_V7R
},
25656 {10, ARM_ARCH_V7M
},
25657 {14, ARM_ARCH_V8A
},
25658 {16, ARM_ARCH_V8M_BASE
},
25659 {17, ARM_ARCH_V8M_MAIN
},
25663 /* Set an attribute if it has not already been set by the user. */
25665 aeabi_set_attribute_int (int tag
, int value
)
25668 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
25669 || !attributes_set_explicitly
[tag
])
25670 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
25674 aeabi_set_attribute_string (int tag
, const char *value
)
25677 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
25678 || !attributes_set_explicitly
[tag
])
25679 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
25682 /* Set the public EABI object attributes. */
25684 aeabi_set_public_attributes (void)
25689 int fp16_optional
= 0;
25690 arm_feature_set flags
;
25691 arm_feature_set tmp
;
25692 arm_feature_set arm_arch_v8m_base
= ARM_ARCH_V8M_BASE
;
25693 const cpu_arch_ver_table
*p
;
25695 /* Choose the architecture based on the capabilities of the requested cpu
25696 (if any) and/or the instructions actually used. */
25697 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
25698 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
25699 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_cpu
);
25701 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
25702 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
25704 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
25705 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
25707 selected_cpu
= flags
;
25709 /* Allow the user to override the reported architecture. */
25712 ARM_CLEAR_FEATURE (flags
, flags
, arm_arch_any
);
25713 ARM_MERGE_FEATURE_SETS (flags
, flags
, *object_arch
);
25716 /* We need to make sure that the attributes do not identify us as v6S-M
25717 when the only v6S-M feature in use is the Operating System Extensions. */
25718 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_os
))
25719 if (!ARM_CPU_HAS_FEATURE (flags
, arm_arch_v6m_only
))
25720 ARM_CLEAR_FEATURE (flags
, flags
, arm_ext_os
);
25724 for (p
= cpu_arch_ver
; p
->val
; p
++)
25726 if (ARM_CPU_HAS_FEATURE (tmp
, p
->flags
))
25729 ARM_CLEAR_FEATURE (tmp
, tmp
, p
->flags
);
25733 /* The table lookup above finds the last architecture to contribute
25734 a new feature. Unfortunately, Tag13 is a subset of the union of
25735 v6T2 and v7-M, so it is never seen as contributing a new feature.
25736 We can not search for the last entry which is entirely used,
25737 because if no CPU is specified we build up only those flags
25738 actually used. Perhaps we should separate out the specified
25739 and implicit cases. Avoid taking this path for -march=all by
25740 checking for contradictory v7-A / v7-M features. */
25741 if (arch
== TAG_CPU_ARCH_V7
25742 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
25743 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7m
)
25744 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v6_dsp
))
25745 arch
= TAG_CPU_ARCH_V7E_M
;
25747 ARM_CLEAR_FEATURE (tmp
, flags
, arm_arch_v8m_base
);
25748 if (arch
== TAG_CPU_ARCH_V8M_BASE
&& ARM_CPU_HAS_FEATURE (tmp
, arm_arch_any
))
25749 arch
= TAG_CPU_ARCH_V8M_MAIN
;
25751 /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
25752 coming from ARMv8-A. However, since ARMv8-A has more instructions than
25753 ARMv8-M, -march=all must be detected as ARMv8-A. */
25754 if (arch
== TAG_CPU_ARCH_V8M_MAIN
25755 && ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
25756 arch
= TAG_CPU_ARCH_V8
;
25758 /* Tag_CPU_name. */
25759 if (selected_cpu_name
[0])
25763 q
= selected_cpu_name
;
25764 if (strncmp (q
, "armv", 4) == 0)
25769 for (i
= 0; q
[i
]; i
++)
25770 q
[i
] = TOUPPER (q
[i
]);
25772 aeabi_set_attribute_string (Tag_CPU_name
, q
);
25775 /* Tag_CPU_arch. */
25776 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
25778 /* Tag_CPU_arch_profile. */
25779 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7a
)
25780 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
25781 || (ARM_CPU_HAS_FEATURE (flags
, arm_ext_atomics
)
25782 && !ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
)))
25784 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v7r
))
25786 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_m
))
25791 if (profile
!= '\0')
25792 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
25794 /* Tag_ARM_ISA_use. */
25795 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
25797 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
25799 /* Tag_THUMB_ISA_use. */
25800 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
25805 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
25806 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
25808 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
25812 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
25815 /* Tag_VFP_arch. */
25816 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
25817 aeabi_set_attribute_int (Tag_VFP_arch
,
25818 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
25820 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
25821 aeabi_set_attribute_int (Tag_VFP_arch
,
25822 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
25824 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
25827 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
25829 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
25831 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
25834 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
25835 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
25836 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
25837 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
25838 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
25840 /* Tag_ABI_HardFP_use. */
25841 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
25842 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
25843 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
25845 /* Tag_WMMX_arch. */
25846 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
25847 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
25848 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
25849 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
25851 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
25852 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
25853 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
25854 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
25856 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
25858 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
25862 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
25867 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
25868 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
25869 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
25873 We set Tag_DIV_use to two when integer divide instructions have been used
25874 in ARM state, or when Thumb integer divide instructions have been used,
25875 but we have no architecture profile set, nor have we any ARM instructions.
25877 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
25878 by the base architecture.
25880 For new architectures we will have to check these tests. */
25881 gas_assert (arch
<= TAG_CPU_ARCH_V8
25882 || (arch
>= TAG_CPU_ARCH_V8M_BASE
25883 && arch
<= TAG_CPU_ARCH_V8M_MAIN
));
25884 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
25885 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
25886 aeabi_set_attribute_int (Tag_DIV_use
, 0);
25887 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
25888 || (profile
== '\0'
25889 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
25890 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
25891 aeabi_set_attribute_int (Tag_DIV_use
, 2);
25893 /* Tag_MP_extension_use. */
25894 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
25895 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
25897 /* Tag Virtualization_use. */
25898 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
25900 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
25903 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
25906 /* Add the default contents for the .ARM.attributes section. */
25910 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
25913 aeabi_set_public_attributes ();
25915 #endif /* OBJ_ELF */
25918 /* Parse a .cpu directive. */
25921 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
25923 const struct arm_cpu_option_table
*opt
;
25927 name
= input_line_pointer
;
25928 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
25929 input_line_pointer
++;
25930 saved_char
= *input_line_pointer
;
25931 *input_line_pointer
= 0;
25933 /* Skip the first "all" entry. */
25934 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
25935 if (streq (opt
->name
, name
))
25937 mcpu_cpu_opt
= &opt
->value
;
25938 selected_cpu
= opt
->value
;
25939 if (opt
->canonical_name
)
25940 strcpy (selected_cpu_name
, opt
->canonical_name
);
25944 for (i
= 0; opt
->name
[i
]; i
++)
25945 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
25947 selected_cpu_name
[i
] = 0;
25949 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
25950 *input_line_pointer
= saved_char
;
25951 demand_empty_rest_of_line ();
25954 as_bad (_("unknown cpu `%s'"), name
);
25955 *input_line_pointer
= saved_char
;
25956 ignore_rest_of_line ();
25960 /* Parse a .arch directive. */
25963 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
25965 const struct arm_arch_option_table
*opt
;
25969 name
= input_line_pointer
;
25970 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
25971 input_line_pointer
++;
25972 saved_char
= *input_line_pointer
;
25973 *input_line_pointer
= 0;
25975 /* Skip the first "all" entry. */
25976 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
25977 if (streq (opt
->name
, name
))
25979 mcpu_cpu_opt
= &opt
->value
;
25980 selected_cpu
= opt
->value
;
25981 strcpy (selected_cpu_name
, opt
->name
);
25982 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
25983 *input_line_pointer
= saved_char
;
25984 demand_empty_rest_of_line ();
25988 as_bad (_("unknown architecture `%s'\n"), name
);
25989 *input_line_pointer
= saved_char
;
25990 ignore_rest_of_line ();
25994 /* Parse a .object_arch directive. */
25997 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
25999 const struct arm_arch_option_table
*opt
;
26003 name
= input_line_pointer
;
26004 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26005 input_line_pointer
++;
26006 saved_char
= *input_line_pointer
;
26007 *input_line_pointer
= 0;
26009 /* Skip the first "all" entry. */
26010 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
26011 if (streq (opt
->name
, name
))
26013 object_arch
= &opt
->value
;
26014 *input_line_pointer
= saved_char
;
26015 demand_empty_rest_of_line ();
26019 as_bad (_("unknown architecture `%s'\n"), name
);
26020 *input_line_pointer
= saved_char
;
26021 ignore_rest_of_line ();
26024 /* Parse a .arch_extension directive. */
26027 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
26029 const struct arm_option_extension_value_table
*opt
;
26032 int adding_value
= 1;
26034 name
= input_line_pointer
;
26035 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26036 input_line_pointer
++;
26037 saved_char
= *input_line_pointer
;
26038 *input_line_pointer
= 0;
26040 if (strlen (name
) >= 2
26041 && strncmp (name
, "no", 2) == 0)
26047 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
26048 if (streq (opt
->name
, name
))
26050 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt
, opt
->allowed_archs
))
26052 as_bad (_("architectural extension `%s' is not allowed for the "
26053 "current base architecture"), name
);
26058 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_cpu
,
26061 ARM_CLEAR_FEATURE (selected_cpu
, selected_cpu
, opt
->clear_value
);
26063 mcpu_cpu_opt
= &selected_cpu
;
26064 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26065 *input_line_pointer
= saved_char
;
26066 demand_empty_rest_of_line ();
26070 if (opt
->name
== NULL
)
26071 as_bad (_("unknown architecture extension `%s'\n"), name
);
26073 *input_line_pointer
= saved_char
;
26074 ignore_rest_of_line ();
26077 /* Parse a .fpu directive. */
26080 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
26082 const struct arm_option_fpu_value_table
*opt
;
26086 name
= input_line_pointer
;
26087 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
26088 input_line_pointer
++;
26089 saved_char
= *input_line_pointer
;
26090 *input_line_pointer
= 0;
26092 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
26093 if (streq (opt
->name
, name
))
26095 mfpu_opt
= &opt
->value
;
26096 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
26097 *input_line_pointer
= saved_char
;
26098 demand_empty_rest_of_line ();
26102 as_bad (_("unknown floating point format `%s'\n"), name
);
26103 *input_line_pointer
= saved_char
;
26104 ignore_rest_of_line ();
26107 /* Copy symbol information. */
26110 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
26112 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
26116 /* Given a symbolic attribute NAME, return the proper integer value.
26117 Returns -1 if the attribute is not known. */
26120 arm_convert_symbolic_attribute (const char *name
)
26122 static const struct
26127 attribute_table
[] =
26129 /* When you modify this table you should
26130 also modify the list in doc/c-arm.texi. */
26131 #define T(tag) {#tag, tag}
26132 T (Tag_CPU_raw_name
),
26135 T (Tag_CPU_arch_profile
),
26136 T (Tag_ARM_ISA_use
),
26137 T (Tag_THUMB_ISA_use
),
26141 T (Tag_Advanced_SIMD_arch
),
26142 T (Tag_PCS_config
),
26143 T (Tag_ABI_PCS_R9_use
),
26144 T (Tag_ABI_PCS_RW_data
),
26145 T (Tag_ABI_PCS_RO_data
),
26146 T (Tag_ABI_PCS_GOT_use
),
26147 T (Tag_ABI_PCS_wchar_t
),
26148 T (Tag_ABI_FP_rounding
),
26149 T (Tag_ABI_FP_denormal
),
26150 T (Tag_ABI_FP_exceptions
),
26151 T (Tag_ABI_FP_user_exceptions
),
26152 T (Tag_ABI_FP_number_model
),
26153 T (Tag_ABI_align_needed
),
26154 T (Tag_ABI_align8_needed
),
26155 T (Tag_ABI_align_preserved
),
26156 T (Tag_ABI_align8_preserved
),
26157 T (Tag_ABI_enum_size
),
26158 T (Tag_ABI_HardFP_use
),
26159 T (Tag_ABI_VFP_args
),
26160 T (Tag_ABI_WMMX_args
),
26161 T (Tag_ABI_optimization_goals
),
26162 T (Tag_ABI_FP_optimization_goals
),
26163 T (Tag_compatibility
),
26164 T (Tag_CPU_unaligned_access
),
26165 T (Tag_FP_HP_extension
),
26166 T (Tag_VFP_HP_extension
),
26167 T (Tag_ABI_FP_16bit_format
),
26168 T (Tag_MPextension_use
),
26170 T (Tag_nodefaults
),
26171 T (Tag_also_compatible_with
),
26172 T (Tag_conformance
),
26174 T (Tag_Virtualization_use
),
26175 /* We deliberately do not include Tag_MPextension_use_legacy. */
26183 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
26184 if (streq (name
, attribute_table
[i
].name
))
26185 return attribute_table
[i
].tag
;
26191 /* Apply sym value for relocations only in the case that they are for
26192 local symbols in the same segment as the fixup and you have the
26193 respective architectural feature for blx and simple switches. */
26195 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
26198 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
26199 /* PR 17444: If the local symbol is in a different section then a reloc
26200 will always be generated for it, so applying the symbol value now
26201 will result in a double offset being stored in the relocation. */
26202 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
26203 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
26205 switch (fixP
->fx_r_type
)
26207 case BFD_RELOC_ARM_PCREL_BLX
:
26208 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26209 if (ARM_IS_FUNC (fixP
->fx_addsy
))
26213 case BFD_RELOC_ARM_PCREL_CALL
:
26214 case BFD_RELOC_THUMB_PCREL_BLX
:
26215 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
26226 #endif /* OBJ_ELF */