1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
78 /* Whether --fdpic was given. */
83 /* Results from operand parsing worker functions. */
87 PARSE_OPERAND_SUCCESS
,
89 PARSE_OPERAND_FAIL_NO_BACKTRACK
90 } parse_operand_result
;
99 /* Types of processor to assemble for. */
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
105 If you have a target that requires a default CPU option then the you
106 should define CPU_DEFAULT here. */
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
125 #endif /* ifndef FPU_DEFAULT */
127 /* True iff the NUL-terminated strings A and B compare equal.
   Arguments are parenthesized so expression arguments expand safely
   (standard macro-pitfall hygiene).  */
#define streq(a, b) (strcmp ((a), (b)) == 0)
129 /* Current set of feature bits available (CPU+FPU). Different from
130 selected_cpu + selected_fpu in case of autodetection since the CPU
131 feature bits are then all set. */
132 static arm_feature_set cpu_variant
;
133 /* Feature bits used in each execution state. Used to set build attribute
134 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
135 static arm_feature_set arm_arch_used
;
136 static arm_feature_set thumb_arch_used
;
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26
= FALSE
;
140 static int atpcs
= FALSE
;
141 static int support_interwork
= FALSE
;
142 static int uses_apcs_float
= FALSE
;
143 static int pic_code
= FALSE
;
144 static int fix_v4bx
= FALSE
;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated
= TRUE
;
148 /* Understand CodeComposer Studio assembly syntax. */
149 bfd_boolean codecomposer_syntax
= FALSE
;
151 /* Variables that we set while parsing command-line options. Once all
152 options have been read we re-process these values to set the real
155 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
156 instead of -mcpu=arm1). */
157 static const arm_feature_set
*legacy_cpu
= NULL
;
158 static const arm_feature_set
*legacy_fpu
= NULL
;
160 /* CPU, extension and FPU feature bits selected by -mcpu. */
161 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
162 static arm_feature_set
*mcpu_ext_opt
= NULL
;
163 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
165 /* CPU, extension and FPU feature bits selected by -march. */
166 static const arm_feature_set
*march_cpu_opt
= NULL
;
167 static arm_feature_set
*march_ext_opt
= NULL
;
168 static const arm_feature_set
*march_fpu_opt
= NULL
;
170 /* Feature bits selected by -mfpu. */
171 static const arm_feature_set
*mfpu_opt
= NULL
;
173 /* Constants for known architecture features. */
174 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
175 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V1
;
176 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
177 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V3
;
178 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_NEON_V1
;
179 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
180 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
182 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
184 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
187 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
190 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
191 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2
);
192 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
193 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
194 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
195 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
196 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
197 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
198 static const arm_feature_set arm_ext_v4t_5
=
199 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
200 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
201 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
202 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
203 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
204 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
205 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
206 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
207 /* Only for compatibility of hint instructions. */
208 static const arm_feature_set arm_ext_v6k_v6t2
=
209 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
| ARM_EXT_V6T2
);
210 static const arm_feature_set arm_ext_v6_notm
=
211 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
212 static const arm_feature_set arm_ext_v6_dsp
=
213 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
214 static const arm_feature_set arm_ext_barrier
=
215 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
216 static const arm_feature_set arm_ext_msr
=
217 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
218 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
219 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
220 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
221 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
223 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
225 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
226 static const arm_feature_set arm_ext_m
=
227 ARM_FEATURE_CORE (ARM_EXT_V6M
| ARM_EXT_V7M
,
228 ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
229 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
230 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
231 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
232 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
233 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
234 static const arm_feature_set arm_ext_pan
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
);
235 static const arm_feature_set arm_ext_v8m
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
);
236 static const arm_feature_set arm_ext_v8m_main
=
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN
);
238 static const arm_feature_set arm_ext_v8_1m_main
=
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN
);
240 /* Instructions in ARMv8-M only found in M profile architectures. */
241 static const arm_feature_set arm_ext_v8m_m_only
=
242 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
243 static const arm_feature_set arm_ext_v6t2_v8m
=
244 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M
);
245 /* Instructions shared between ARMv8-A and ARMv8-M. */
246 static const arm_feature_set arm_ext_atomics
=
247 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS
);
249 /* DSP instructions Tag_DSP_extension refers to. */
250 static const arm_feature_set arm_ext_dsp
=
251 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
| ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
);
253 static const arm_feature_set arm_ext_ras
=
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
);
255 /* FP16 instructions. */
256 static const arm_feature_set arm_ext_fp16
=
257 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
);
258 static const arm_feature_set arm_ext_fp16_fml
=
259 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML
);
260 static const arm_feature_set arm_ext_v8_2
=
261 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A
);
262 static const arm_feature_set arm_ext_v8_3
=
263 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A
);
264 static const arm_feature_set arm_ext_sb
=
265 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
);
266 static const arm_feature_set arm_ext_predres
=
267 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
);
269 static const arm_feature_set arm_arch_any
= ARM_ANY
;
271 static const arm_feature_set fpu_any
= FPU_ANY
;
273 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED
= ARM_FEATURE (-1, -1, -1);
274 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
275 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
277 static const arm_feature_set arm_cext_iwmmxt2
=
278 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
279 static const arm_feature_set arm_cext_iwmmxt
=
280 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
281 static const arm_feature_set arm_cext_xscale
=
282 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
283 static const arm_feature_set arm_cext_maverick
=
284 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
285 static const arm_feature_set fpu_fpa_ext_v1
=
286 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
287 static const arm_feature_set fpu_fpa_ext_v2
=
288 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
289 static const arm_feature_set fpu_vfp_ext_v1xd
=
290 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
291 static const arm_feature_set fpu_vfp_ext_v1
=
292 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
293 static const arm_feature_set fpu_vfp_ext_v2
=
294 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
295 static const arm_feature_set fpu_vfp_ext_v3xd
=
296 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
297 static const arm_feature_set fpu_vfp_ext_v3
=
298 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
299 static const arm_feature_set fpu_vfp_ext_d32
=
300 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
301 static const arm_feature_set fpu_neon_ext_v1
=
302 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
303 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
304 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
305 static const arm_feature_set mve_ext
=
306 ARM_FEATURE_COPROC (FPU_MVE
);
307 static const arm_feature_set mve_fp_ext
=
308 ARM_FEATURE_COPROC (FPU_MVE_FP
);
310 static const arm_feature_set fpu_vfp_fp16
=
311 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
312 static const arm_feature_set fpu_neon_ext_fma
=
313 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
315 static const arm_feature_set fpu_vfp_ext_fma
=
316 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
317 static const arm_feature_set fpu_vfp_ext_armv8
=
318 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
319 static const arm_feature_set fpu_vfp_ext_armv8xd
=
320 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
321 static const arm_feature_set fpu_neon_ext_armv8
=
322 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
323 static const arm_feature_set fpu_crypto_ext_armv8
=
324 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
325 static const arm_feature_set crc_ext_armv8
=
326 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
);
327 static const arm_feature_set fpu_neon_ext_v8_1
=
328 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA
);
329 static const arm_feature_set fpu_neon_ext_dotprod
=
330 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
);
332 static int mfloat_abi_opt
= -1;
333 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
335 static arm_feature_set selected_arch
= ARM_ARCH_NONE
;
336 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
338 static arm_feature_set selected_ext
= ARM_ARCH_NONE
;
339 /* Feature bits selected by the last -mcpu/-march or by the combination of the
340 last .cpu/.arch directive .arch_extension directives since that
342 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
343 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
344 static arm_feature_set selected_fpu
= FPU_NONE
;
345 /* Feature bits selected by the last .object_arch directive. */
346 static arm_feature_set selected_object_arch
= ARM_ARCH_NONE
;
347 /* Must be long enough to hold any of the names in arm_cpus. */
348 static char selected_cpu_name
[20];
350 extern FLONUM_TYPE generic_floating_point_number
;
352 /* Return if no cpu was selected on command-line. */
354 no_cpu_selected (void)
356 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
361 static int meabi_flags
= EABI_DEFAULT
;
363 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
366 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
371 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
376 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
377 symbolS
* GOT_symbol
;
380 /* 0: assemble for ARM,
381 1: assemble for Thumb,
382 2: assemble for Thumb even though target CPU does not support thumb
384 static int thumb_mode
= 0;
385 /* A value distinct from the possible values for thumb_mode that we
386 can use to record whether thumb_mode has been copied into the
387 tc_frag_data field of a frag. */
388 #define MODE_RECORDED (1 << 4)
390 /* Specifies the intrinsic IT insn behavior mode. */
391 enum implicit_it_mode
393 IMPLICIT_IT_MODE_NEVER
= 0x00,
394 IMPLICIT_IT_MODE_ARM
= 0x01,
395 IMPLICIT_IT_MODE_THUMB
= 0x02,
396 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
398 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
400 /* If unified_syntax is true, we are processing the new unified
401 ARM/Thumb syntax. Important differences from the old ARM mode:
403 - Immediate operands do not require a # prefix.
404 - Conditional affixes always appear at the end of the
405 instruction. (For backward compatibility, those instructions
406 that formerly had them in the middle, continue to accept them
408 - The IT instruction may appear, and if it does is validated
409 against subsequent conditional affixes. It does not generate
412 Important differences from the old Thumb mode:
414 - Immediate operands do not require a # prefix.
415 - Most of the V6T2 instructions are only available in unified mode.
416 - The .N and .W suffixes are recognized and honored (it is an error
417 if they cannot be honored).
418 - All instructions set the flags if and only if they have an 's' affix.
419 - Conditional affixes may be used. They are validated against
420 preceding IT instructions. Unlike ARM mode, you cannot use a
421 conditional affix except in the scope of an IT instruction. */
423 static bfd_boolean unified_syntax
= FALSE
;
425 /* An immediate operand can start with #, and ld*, st*, pld operands
426 can contain [ and ]. We need to tell APP not to elide whitespace
427 before a [, which can appear as the first operand for pld.
428 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
429 const char arm_symbol_chars
[] = "#[]{}";
444 enum neon_el_type type
;
448 #define NEON_MAX_TYPE_ELS 4
452 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
456 enum pred_instruction_type
462 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
463 if inside, should be the last one. */
464 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
465 i.e. BKPT and NOP. */
466 IT_INSN
, /* The IT insn has been parsed. */
467 VPT_INSN
, /* The VPT/VPST insn has been parsed. */
468 MVE_OUTSIDE_PRED_INSN
/* Instruction to indicate a MVE instruction without
469 a predication code. */
472 /* The maximum number of operands we need. */
473 #define ARM_IT_MAX_OPERANDS 6
474 #define ARM_IT_MAX_RELOCS 3
479 unsigned long instruction
;
483 /* "uncond_value" is set to the value in place of the conditional field in
484 unconditional versions of the instruction, or -1 if nothing is
487 struct neon_type vectype
;
488 /* This does not indicate an actual NEON instruction, only that
489 the mnemonic accepts neon-style type suffixes. */
491 /* Set to the opcode if the instruction needs relaxation.
492 Zero if the instruction is not relaxed. */
496 bfd_reloc_code_real_type type
;
499 } relocs
[ARM_IT_MAX_RELOCS
];
501 enum pred_instruction_type pred_insn_type
;
507 struct neon_type_el vectype
;
508 unsigned present
: 1; /* Operand present. */
509 unsigned isreg
: 1; /* Operand was a register. */
510 unsigned immisreg
: 1; /* .imm field is a second register. */
511 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
512 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
513 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
514 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
515 instructions. This allows us to disambiguate ARM <-> vector insns. */
516 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
517 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
518 unsigned isquad
: 1; /* Operand is SIMD quad register. */
519 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
520 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
521 unsigned writeback
: 1; /* Operand has trailing ! */
522 unsigned preind
: 1; /* Preindexed address. */
523 unsigned postind
: 1; /* Postindexed address. */
524 unsigned negative
: 1; /* Index register was negated. */
525 unsigned shifted
: 1; /* Shift applied to operation. */
526 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
527 } operands
[ARM_IT_MAX_OPERANDS
];
530 static struct arm_it inst
;
532 #define NUM_FLOAT_VALS 8
534 const char * fp_const
[] =
536 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
539 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
549 #define CP_T_X 0x00008000
550 #define CP_T_Y 0x00400000
552 #define CONDS_BIT 0x00100000
553 #define LOAD_BIT 0x00100000
555 #define DOUBLE_LOAD_FLAG 0x00000001
559 const char * template_name
;
563 #define COND_ALWAYS 0xE
567 const char * template_name
;
571 struct asm_barrier_opt
573 const char * template_name
;
575 const arm_feature_set arch
;
578 /* The bit that distinguishes CPSR and SPSR. */
579 #define SPSR_BIT (1 << 22)
581 /* The individual PSR flag bits. */
582 #define PSR_c (1 << 16)
583 #define PSR_x (1 << 17)
584 #define PSR_s (1 << 18)
585 #define PSR_f (1 << 19)
590 bfd_reloc_code_real_type reloc
;
595 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
596 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
601 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
604 /* Bits for DEFINED field in neon_typed_alias. */
605 #define NTA_HASTYPE 1
606 #define NTA_HASINDEX 2
608 struct neon_typed_alias
610 unsigned char defined
;
612 struct neon_type_el eltype
;
615 /* ARM register categories. This includes coprocessor numbers and various
616 architecture extensions' registers. Each entry should have an error message
617 in reg_expected_msgs below. */
646 /* Structure for a hash table entry for a register.
647 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
648 information which states whether a vector type or index is specified (for a
649 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
655 unsigned char builtin
;
656 struct neon_typed_alias
* neon
;
659 /* Diagnostics used when we don't get a register of the expected type. */
660 const char * const reg_expected_msgs
[] =
662 [REG_TYPE_RN
] = N_("ARM register expected"),
663 [REG_TYPE_CP
] = N_("bad or missing co-processor number"),
664 [REG_TYPE_CN
] = N_("co-processor register expected"),
665 [REG_TYPE_FN
] = N_("FPA register expected"),
666 [REG_TYPE_VFS
] = N_("VFP single precision register expected"),
667 [REG_TYPE_VFD
] = N_("VFP/Neon double precision register expected"),
668 [REG_TYPE_NQ
] = N_("Neon quad precision register expected"),
669 [REG_TYPE_VFSD
] = N_("VFP single or double precision register expected"),
670 [REG_TYPE_NDQ
] = N_("Neon double or quad precision register expected"),
671 [REG_TYPE_NSD
] = N_("Neon single or double precision register expected"),
672 [REG_TYPE_NSDQ
] = N_("VFP single, double or Neon quad precision register"
674 [REG_TYPE_VFC
] = N_("VFP system register expected"),
675 [REG_TYPE_MVF
] = N_("Maverick MVF register expected"),
676 [REG_TYPE_MVD
] = N_("Maverick MVD register expected"),
677 [REG_TYPE_MVFX
] = N_("Maverick MVFX register expected"),
678 [REG_TYPE_MVDX
] = N_("Maverick MVDX register expected"),
679 [REG_TYPE_MVAX
] = N_("Maverick MVAX register expected"),
680 [REG_TYPE_DSPSC
] = N_("Maverick DSPSC register expected"),
681 [REG_TYPE_MMXWR
] = N_("iWMMXt data register expected"),
682 [REG_TYPE_MMXWC
] = N_("iWMMXt control register expected"),
683 [REG_TYPE_MMXWCG
] = N_("iWMMXt scalar register expected"),
684 [REG_TYPE_XSCALE
] = N_("XScale accumulator register expected"),
685 [REG_TYPE_MQ
] = N_("MVE vector register expected"),
686 [REG_TYPE_RNB
] = N_("")
689 /* Some well known registers that we refer to directly elsewhere. */
695 /* ARM instructions take 4bytes in the object file, Thumb instructions
701 /* Basic string to match. */
702 const char * template_name
;
704 /* Parameters to instruction. */
705 unsigned int operands
[8];
707 /* Conditional tag - see opcode_lookup. */
708 unsigned int tag
: 4;
710 /* Basic instruction code. */
713 /* Thumb-format instruction code. */
716 /* Which architecture variant provides this instruction. */
717 const arm_feature_set
* avariant
;
718 const arm_feature_set
* tvariant
;
720 /* Function to call to encode instruction in ARM format. */
721 void (* aencode
) (void);
723 /* Function to call to encode instruction in Thumb format. */
724 void (* tencode
) (void);
726 /* Indicates whether this instruction may be vector predicated. */
727 unsigned int mayBeVecPred
: 1;
730 /* Defines for various bits that we will want to toggle.  */
731 #define INST_IMMEDIATE 0x02000000 /* bit 25 */
732 #define OFFSET_REG 0x02000000 /* bit 25 -- same bit as INST_IMMEDIATE, used by load/store forms */
733 #define HWOFFSET_IMM 0x00400000 /* bit 22 */
734 #define SHIFT_BY_REG 0x00000010 /* bit 4 */
735 #define PRE_INDEX 0x01000000 /* bit 24 */
736 #define INDEX_UP 0x00800000 /* bit 23 */
737 #define WRITE_BACK 0x00200000 /* bit 21 */
738 #define LDM_TYPE_2_OR_3 0x00400000 /* bit 22 */
739 #define CPSI_MMOD 0x00020000 /* bit 17 */
/* Masks below: bit positions follow directly from the hex values.  */
741 #define LITERAL_MASK 0xf000f000
742 #define OPCODE_MASK 0xfe1fffff
743 #define V4_STR_BIT 0x00000020 /* bit 5 */
744 #define VLDR_VMOV_SAME 0x0040f000
746 #define T2_SUBS_PC_LR 0xf3de8f00
748 #define DATA_OP_SHIFT 21 /* opcode field of data-processing insns starts at bit 21 */
749 #define SBIT_SHIFT 20 /* bit 20 */
751 #define T2_OPCODE_MASK 0xfe1fffff
752 #define T2_DATA_OP_SHIFT 21
753 #define T2_SBIT_SHIFT 20
755 #define A_COND_MASK 0xf0000000
756 #define A_PUSH_POP_OP_MASK 0x0fff0000
758 /* Opcodes for pushing/popping registers to/from the stack. */
759 #define A1_OPCODE_PUSH 0x092d0000
760 #define A2_OPCODE_PUSH 0x052d0004
761 #define A2_OPCODE_POP 0x049d0004
763 /* Codes to distinguish the arithmetic instructions. */
774 #define OPCODE_CMP 10
775 #define OPCODE_CMN 11
776 #define OPCODE_ORR 12
777 #define OPCODE_MOV 13
778 #define OPCODE_BIC 14
779 #define OPCODE_MVN 15
781 #define T2_OPCODE_AND 0
782 #define T2_OPCODE_BIC 1
783 #define T2_OPCODE_ORR 2
784 #define T2_OPCODE_ORN 3
785 #define T2_OPCODE_EOR 4
786 #define T2_OPCODE_ADD 8
787 #define T2_OPCODE_ADC 10
788 #define T2_OPCODE_SBC 11
789 #define T2_OPCODE_SUB 13
790 #define T2_OPCODE_RSB 14
792 #define T_OPCODE_MUL 0x4340
793 #define T_OPCODE_TST 0x4200
794 #define T_OPCODE_CMN 0x42c0
795 #define T_OPCODE_NEG 0x4240
796 #define T_OPCODE_MVN 0x43c0
798 #define T_OPCODE_ADD_R3 0x1800
799 #define T_OPCODE_SUB_R3 0x1a00
800 #define T_OPCODE_ADD_HI 0x4400
801 #define T_OPCODE_ADD_ST 0xb000
802 #define T_OPCODE_SUB_ST 0xb080
803 #define T_OPCODE_ADD_SP 0xa800
804 #define T_OPCODE_ADD_PC 0xa000
805 #define T_OPCODE_ADD_I8 0x3000
806 #define T_OPCODE_SUB_I8 0x3800
807 #define T_OPCODE_ADD_I3 0x1c00
808 #define T_OPCODE_SUB_I3 0x1e00
810 #define T_OPCODE_ASR_R 0x4100
811 #define T_OPCODE_LSL_R 0x4080
812 #define T_OPCODE_LSR_R 0x40c0
813 #define T_OPCODE_ROR_R 0x41c0
814 #define T_OPCODE_ASR_I 0x1000
815 #define T_OPCODE_LSL_I 0x0000
816 #define T_OPCODE_LSR_I 0x0800
818 #define T_OPCODE_MOV_I8 0x2000
819 #define T_OPCODE_CMP_I8 0x2800
820 #define T_OPCODE_CMP_LR 0x4280
821 #define T_OPCODE_MOV_HR 0x4600
822 #define T_OPCODE_CMP_HR 0x4500
824 #define T_OPCODE_LDR_PC 0x4800
825 #define T_OPCODE_LDR_SP 0x9800
826 #define T_OPCODE_STR_SP 0x9000
827 #define T_OPCODE_LDR_IW 0x6800
828 #define T_OPCODE_STR_IW 0x6000
829 #define T_OPCODE_LDR_IH 0x8800
830 #define T_OPCODE_STR_IH 0x8000
831 #define T_OPCODE_LDR_IB 0x7800
832 #define T_OPCODE_STR_IB 0x7000
833 #define T_OPCODE_LDR_RW 0x5800
834 #define T_OPCODE_STR_RW 0x5000
835 #define T_OPCODE_LDR_RH 0x5a00
836 #define T_OPCODE_STR_RH 0x5200
837 #define T_OPCODE_LDR_RB 0x5c00
838 #define T_OPCODE_STR_RB 0x5400
840 #define T_OPCODE_PUSH 0xb400
841 #define T_OPCODE_POP 0xbc00
843 #define T_OPCODE_BRANCH 0xe000
845 #define THUMB_SIZE 2 /* Size of thumb instruction. */
846 #define THUMB_PP_PC_LR 0x0100
847 #define THUMB_LOAD_BIT 0x0800
848 #define THUMB2_LOAD_BIT 0x00100000
850 #define BAD_SYNTAX _("syntax error")
851 #define BAD_ARGS _("bad arguments to instruction")
852 #define BAD_SP _("r13 not allowed here")
853 #define BAD_PC _("r15 not allowed here")
854 #define BAD_ODD _("Odd register not allowed here")
855 #define BAD_EVEN _("Even register not allowed here")
856 #define BAD_COND _("instruction cannot be conditional")
857 #define BAD_OVERLAP _("registers may not be the same")
858 #define BAD_HIREG _("lo register required")
859 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
860 /* Drop the stray trailing semicolon: every sibling BAD_* diagnostic macro
   expands to a bare _() expression, and the semicolon would break uses such
   as "if (...) inst.error = BAD_ADDR_MODE; else ...".  */
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
861 #define BAD_BRANCH _("branch must be last instruction in IT block")
862 #define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
863 #define BAD_NOT_IT _("instruction not allowed in IT block")
864 #define BAD_NOT_VPT _("instruction missing MVE vector predication code")
865 #define BAD_FPU _("selected FPU does not support instruction")
866 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
867 #define BAD_OUT_VPT \
868 _("vector predicated instruction should be in VPT/VPST block")
869 #define BAD_IT_COND _("incorrect condition in IT block")
870 #define BAD_VPT_COND _("incorrect condition in VPT/VPST block")
871 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
872 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
873 #define BAD_PC_ADDRESSING \
874 _("cannot use register index with PC-relative addressing")
875 #define BAD_PC_WRITEBACK \
876 _("cannot use writeback with PC-relative addressing")
877 #define BAD_RANGE _("branch out of range")
878 #define BAD_FP16 _("selected processor does not support fp16 instruction")
879 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
880 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
881 #define MVE_NOT_IT _("Warning: instruction is UNPREDICTABLE in an IT " \
883 #define MVE_NOT_VPT _("Warning: instruction is UNPREDICTABLE in a VPT " \
885 #define MVE_BAD_PC _("Warning: instruction is UNPREDICTABLE with PC" \
887 #define MVE_BAD_SP _("Warning: instruction is UNPREDICTABLE with SP" \
889 #define BAD_SIMD_TYPE _("bad type in SIMD instruction")
891 static struct hash_control
* arm_ops_hsh
;
892 static struct hash_control
* arm_cond_hsh
;
893 static struct hash_control
* arm_vcond_hsh
;
894 static struct hash_control
* arm_shift_hsh
;
895 static struct hash_control
* arm_psr_hsh
;
896 static struct hash_control
* arm_v7m_psr_hsh
;
897 static struct hash_control
* arm_reg_hsh
;
898 static struct hash_control
* arm_reloc_hsh
;
899 static struct hash_control
* arm_barrier_opt_hsh
;
901 /* Stuff needed to resolve the label ambiguity
910 symbolS
* last_label_seen
;
911 static int label_is_thumb_function_name
= FALSE
;
913 /* Literal pool structure. Held on a per-section
914 and per-sub-section basis. */
916 #define MAX_LITERAL_POOL_SIZE 1024
917 typedef struct literal_pool
919 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
920 unsigned int next_free_entry
;
926 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
928 struct literal_pool
* next
;
929 unsigned int alignment
;
932 /* Pointer to a linked list of literal pools. */
933 literal_pool
* list_of_pools
= NULL
;
935 typedef enum asmfunc_states
938 WAITING_ASMFUNC_NAME
,
942 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
945 # define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
947 static struct current_pred now_pred
;
951 now_pred_compatible (int cond
)
953 return (cond
& ~1) == (now_pred
.cc
& ~1);
957 conditional_insn (void)
959 return inst
.cond
!= COND_ALWAYS
;
962 static int in_pred_block (void);
964 static int handle_pred_state (void);
966 static void force_automatic_it_block_close (void);
968 static void it_fsm_post_encode (void);
970 #define set_pred_insn_type(type) \
973 inst.pred_insn_type = type; \
974 if (handle_pred_state () == FAIL) \
979 #define set_pred_insn_type_nonvoid(type, failret) \
982 inst.pred_insn_type = type; \
983 if (handle_pred_state () == FAIL) \
988 #define set_pred_insn_type_last() \
991 if (inst.cond == COND_ALWAYS) \
992 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN); \
994 set_pred_insn_type (INSIDE_IT_LAST_INSN); \
1000 /* This array holds the chars that always start a comment. If the
1001 pre-processor is disabled, these aren't very useful. */
1002 char arm_comment_chars
[] = "@";
1004 /* This array holds the chars that only start a comment at the beginning of
1005 a line. If the line seems to have the form '# 123 filename'
1006 .line and .file directives will appear in the pre-processed output. */
1007 /* Note that input_file.c hand checks for '#' at the beginning of the
1008 first line of the input file. This is because the compiler outputs
1009 #NO_APP at the beginning of its output. */
1010 /* Also note that comments like this one will always work. */
1011 const char line_comment_chars
[] = "#";
1013 char arm_line_separator_chars
[] = ";";
1015 /* Chars that can be used to separate mant
1016 from exp in floating point numbers. */
1017 const char EXP_CHARS
[] = "eE";
1019 /* Chars that mean this number is a floating point constant. */
1020 /* As in 0f12.456 */
1021 /* or 0d1.2345e12 */
1023 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
1025 /* Prefix characters that indicate the start of an immediate
1027 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
1029 /* Separator character handling. */
1031 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
1034 skip_past_char (char ** str
, char c
)
1036 /* PR gas/14987: Allow for whitespace before the expected character. */
1037 skip_whitespace (*str
);
1048 #define skip_past_comma(str) skip_past_char (str, ',')
1050 /* Arithmetic expressions (possibly involving symbols). */
1052 /* Return TRUE if anything in the expression is a bignum. */
1055 walk_no_bignums (symbolS
* sp
)
1057 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
1060 if (symbol_get_value_expression (sp
)->X_add_symbol
)
1062 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
1063 || (symbol_get_value_expression (sp
)->X_op_symbol
1064 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
1070 static bfd_boolean in_my_get_expression
= FALSE
;
1072 /* Third argument to my_get_expression. */
1073 #define GE_NO_PREFIX 0
1074 #define GE_IMM_PREFIX 1
1075 #define GE_OPT_PREFIX 2
1076 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1077 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1078 #define GE_OPT_PREFIX_BIG 3
1081 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
1085 /* In unified syntax, all prefixes are optional. */
1087 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
1090 switch (prefix_mode
)
1092 case GE_NO_PREFIX
: break;
1094 if (!is_immediate_prefix (**str
))
1096 inst
.error
= _("immediate expression requires a # prefix");
1102 case GE_OPT_PREFIX_BIG
:
1103 if (is_immediate_prefix (**str
))
1110 memset (ep
, 0, sizeof (expressionS
));
1112 save_in
= input_line_pointer
;
1113 input_line_pointer
= *str
;
1114 in_my_get_expression
= TRUE
;
1116 in_my_get_expression
= FALSE
;
1118 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1120 /* We found a bad or missing expression in md_operand(). */
1121 *str
= input_line_pointer
;
1122 input_line_pointer
= save_in
;
1123 if (inst
.error
== NULL
)
1124 inst
.error
= (ep
->X_op
== O_absent
1125 ? _("missing expression") :_("bad expression"));
1129 /* Get rid of any bignums now, so that we don't generate an error for which
1130 we can't establish a line number later on. Big numbers are never valid
1131 in instructions, which is where this routine is always called. */
1132 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1133 && (ep
->X_op
== O_big
1134 || (ep
->X_add_symbol
1135 && (walk_no_bignums (ep
->X_add_symbol
)
1137 && walk_no_bignums (ep
->X_op_symbol
))))))
1139 inst
.error
= _("invalid constant");
1140 *str
= input_line_pointer
;
1141 input_line_pointer
= save_in
;
1145 *str
= input_line_pointer
;
1146 input_line_pointer
= save_in
;
1150 /* Turn a string in input_line_pointer into a floating point constant
1151 of type TYPE, and store the appropriate bytes in *LITP. The number
1152 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1153 returned, or NULL on OK.
1155 Note that fp constants aren't represent in the normal way on the ARM.
1156 In big endian mode, things are as expected. However, in little endian
1157 mode fp constants are big-endian word-wise, and little-endian byte-wise
1158 within the words. For example, (double) 1.1 in big endian mode is
1159 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1160 the byte sequence 99 99 f1 3f 9a 99 99 99.
1162 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1165 md_atof (int type
, char * litP
, int * sizeP
)
1168 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1200 return _("Unrecognized or unsupported floating point constant");
1203 t
= atof_ieee (input_line_pointer
, type
, words
);
1205 input_line_pointer
= t
;
1206 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1208 if (target_big_endian
)
1210 for (i
= 0; i
< prec
; i
++)
1212 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1213 litP
+= sizeof (LITTLENUM_TYPE
);
1218 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1219 for (i
= prec
- 1; i
>= 0; i
--)
1221 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1222 litP
+= sizeof (LITTLENUM_TYPE
);
1225 /* For a 4 byte float the order of elements in `words' is 1 0.
1226 For an 8 byte float the order is 1 0 3 2. */
1227 for (i
= 0; i
< prec
; i
+= 2)
1229 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1230 sizeof (LITTLENUM_TYPE
));
1231 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1232 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1233 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1240 /* We handle all bad expressions here, so that we can report the faulty
1241 instruction in the error message. */
1244 md_operand (expressionS
* exp
)
1246 if (in_my_get_expression
)
1247 exp
->X_op
= O_illegal
;
1250 /* Immediate values. */
1253 /* Generic immediate-value read function for use in directives.
1254 Accepts anything that 'expression' can fold to a constant.
1255 *val receives the number. */
1258 immediate_for_directive (int *val
)
1261 exp
.X_op
= O_illegal
;
1263 if (is_immediate_prefix (*input_line_pointer
))
1265 input_line_pointer
++;
1269 if (exp
.X_op
!= O_constant
)
1271 as_bad (_("expected #constant"));
1272 ignore_rest_of_line ();
1275 *val
= exp
.X_add_number
;
1280 /* Register parsing. */
1282 /* Generic register parser. CCP points to what should be the
1283 beginning of a register name. If it is indeed a valid register
1284 name, advance CCP over it and return the reg_entry structure;
1285 otherwise return NULL. Does not issue diagnostics. */
1287 static struct reg_entry
*
1288 arm_reg_parse_multi (char **ccp
)
1292 struct reg_entry
*reg
;
1294 skip_whitespace (start
);
1296 #ifdef REGISTER_PREFIX
1297 if (*start
!= REGISTER_PREFIX
)
1301 #ifdef OPTIONAL_REGISTER_PREFIX
1302 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1307 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1312 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1314 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1324 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1325 enum arm_reg_type type
)
1327 /* Alternative syntaxes are accepted for a few register classes. */
1334 /* Generic coprocessor register names are allowed for these. */
1335 if (reg
&& reg
->type
== REG_TYPE_CN
)
1340 /* For backward compatibility, a bare number is valid here. */
1342 unsigned long processor
= strtoul (start
, ccp
, 10);
1343 if (*ccp
!= start
&& processor
<= 15)
1348 case REG_TYPE_MMXWC
:
1349 /* WC includes WCG. ??? I'm not sure this is true for all
1350 instructions that take WC registers. */
1351 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1362 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1363 return value is the register number or FAIL. */
1366 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1369 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1372 /* Do not allow a scalar (reg+index) to parse as a register. */
1373 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1376 if (reg
&& reg
->type
== type
)
1379 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1386 /* Parse a Neon type specifier. *STR should point at the leading '.'
1387 character. Does no verification at this stage that the type fits the opcode
1394 Can all be legally parsed by this function.
1396 Fills in neon_type struct pointer with parsed information, and updates STR
1397 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1398 type, FAIL if not. */
1401 parse_neon_type (struct neon_type
*type
, char **str
)
1408 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1410 enum neon_el_type thistype
= NT_untyped
;
1411 unsigned thissize
= -1u;
1418 /* Just a size without an explicit type. */
1422 switch (TOLOWER (*ptr
))
1424 case 'i': thistype
= NT_integer
; break;
1425 case 'f': thistype
= NT_float
; break;
1426 case 'p': thistype
= NT_poly
; break;
1427 case 's': thistype
= NT_signed
; break;
1428 case 'u': thistype
= NT_unsigned
; break;
1430 thistype
= NT_float
;
1435 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1441 /* .f is an abbreviation for .f32. */
1442 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1447 thissize
= strtoul (ptr
, &ptr
, 10);
1449 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1452 as_bad (_("bad size %d in type specifier"), thissize
);
1460 type
->el
[type
->elems
].type
= thistype
;
1461 type
->el
[type
->elems
].size
= thissize
;
1466 /* Empty/missing type is not a successful parse. */
1467 if (type
->elems
== 0)
1475 /* Errors may be set multiple times during parsing or bit encoding
1476 (particularly in the Neon bits), but usually the earliest error which is set
1477 will be the most meaningful. Avoid overwriting it with later (cascading)
1478 errors by calling this function. */
1481 first_error (const char *err
)
1487 /* Parse a single type, e.g. ".s32", leading period included. */
1489 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1492 struct neon_type optype
;
1496 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1498 if (optype
.elems
== 1)
1499 *vectype
= optype
.el
[0];
1502 first_error (_("only one type should be specified for operand"));
1508 first_error (_("vector type expected"));
1520 /* Special meanings for indices (which have a range of 0-7), which will fit into
1523 #define NEON_ALL_LANES 15
1524 #define NEON_INTERLEAVE_LANES 14
1526 /* Record a use of the given feature. */
1528 record_feature_use (const arm_feature_set
*feature
)
1531 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
1533 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
1536 /* If the given feature available in the selected CPU, mark it as used.
1537 Returns TRUE iff feature is available. */
1539 mark_feature_used (const arm_feature_set
*feature
)
1541 /* Ensure the option is valid on the current architecture. */
1542 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
1545 /* Add the appropriate architecture feature for the barrier option used.
1547 record_feature_use (feature
);
1552 /* Parse either a register or a scalar, with an optional type. Return the
1553 register number, and optionally fill in the actual type of the register
1554 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1555 type/index information in *TYPEINFO. */
1558 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1559 enum arm_reg_type
*rtype
,
1560 struct neon_typed_alias
*typeinfo
)
1563 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1564 struct neon_typed_alias atype
;
1565 struct neon_type_el parsetype
;
1569 atype
.eltype
.type
= NT_invtype
;
1570 atype
.eltype
.size
= -1;
1572 /* Try alternate syntax for some types of register. Note these are mutually
1573 exclusive with the Neon syntax extensions. */
1576 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1584 /* Undo polymorphism when a set of register types may be accepted. */
1585 if ((type
== REG_TYPE_NDQ
1586 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1587 || (type
== REG_TYPE_VFSD
1588 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1589 || (type
== REG_TYPE_NSDQ
1590 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1591 || reg
->type
== REG_TYPE_NQ
))
1592 || (type
== REG_TYPE_NSD
1593 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1594 || (type
== REG_TYPE_MMXWC
1595 && (reg
->type
== REG_TYPE_MMXWCG
)))
1596 type
= (enum arm_reg_type
) reg
->type
;
1598 if (type
== REG_TYPE_MQ
)
1600 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
1603 if (!reg
|| reg
->type
!= REG_TYPE_NQ
)
1606 if (reg
->number
> 14 && !mark_feature_used (&fpu_vfp_ext_d32
))
1608 first_error (_("expected MVE register [q0..q7]"));
1613 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
1614 && (type
== REG_TYPE_NQ
))
1618 if (type
!= reg
->type
)
1624 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1626 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1628 first_error (_("can't redefine type for operand"));
1631 atype
.defined
|= NTA_HASTYPE
;
1632 atype
.eltype
= parsetype
;
1635 if (skip_past_char (&str
, '[') == SUCCESS
)
1637 if (type
!= REG_TYPE_VFD
1638 && !(type
== REG_TYPE_VFS
1639 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8_2
)))
1641 first_error (_("only D registers may be indexed"));
1645 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1647 first_error (_("can't change index for operand"));
1651 atype
.defined
|= NTA_HASINDEX
;
1653 if (skip_past_char (&str
, ']') == SUCCESS
)
1654 atype
.index
= NEON_ALL_LANES
;
1659 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1661 if (exp
.X_op
!= O_constant
)
1663 first_error (_("constant expression required"));
1667 if (skip_past_char (&str
, ']') == FAIL
)
1670 atype
.index
= exp
.X_add_number
;
1685 /* Like arm_reg_parse, but also allow the following extra features:
1686 - If RTYPE is non-zero, return the (possibly restricted) type of the
1687 register (e.g. Neon double or quad reg when either has been requested).
1688 - If this is a Neon vector type with additional type information, fill
1689 in the struct pointed to by VECTYPE (if non-NULL).
1690 This function will fault on encountering a scalar. */
1693 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1694 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1696 struct neon_typed_alias atype
;
1698 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1703 /* Do not allow regname(... to parse as a register. */
1707 /* Do not allow a scalar (reg+index) to parse as a register. */
1708 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1710 first_error (_("register operand expected, but got scalar"));
1715 *vectype
= atype
.eltype
;
1722 #define NEON_SCALAR_REG(X) ((X) >> 4)
1723 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1725 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1726 have enough information to be able to do a good job bounds-checking. So, we
1727 just do easy checks here, and do further checks later. */
1730 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1734 struct neon_typed_alias atype
;
1735 enum arm_reg_type reg_type
= REG_TYPE_VFD
;
1738 reg_type
= REG_TYPE_VFS
;
1740 reg
= parse_typed_reg_or_scalar (&str
, reg_type
, NULL
, &atype
);
1742 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1745 if (atype
.index
== NEON_ALL_LANES
)
1747 first_error (_("scalar must have an index"));
1750 else if (atype
.index
>= 64 / elsize
)
1752 first_error (_("scalar index out of range"));
1757 *type
= atype
.eltype
;
1761 return reg
* 16 + atype
.index
;
1764 /* Types of registers in a list. */
1777 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1780 parse_reg_list (char ** strp
, enum reg_list_els etype
)
1786 gas_assert (etype
== REGLIST_RN
|| etype
== REGLIST_CLRM
);
1788 /* We come back here if we get ranges concatenated by '+' or '|'. */
1791 skip_whitespace (str
);
1804 const char apsr_str
[] = "apsr";
1805 int apsr_str_len
= strlen (apsr_str
);
1807 reg
= arm_reg_parse (&str
, REGLIST_RN
);
1808 if (etype
== REGLIST_CLRM
)
1810 if (reg
== REG_SP
|| reg
== REG_PC
)
1812 else if (reg
== FAIL
1813 && !strncasecmp (str
, apsr_str
, apsr_str_len
)
1814 && !ISALPHA (*(str
+ apsr_str_len
)))
1817 str
+= apsr_str_len
;
1822 first_error (_("r0-r12, lr or APSR expected"));
1826 else /* etype == REGLIST_RN. */
1830 first_error (_(reg_expected_msgs
[REGLIST_RN
]));
1841 first_error (_("bad range in register list"));
1845 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1847 if (range
& (1 << i
))
1849 (_("Warning: duplicated register (r%d) in register list"),
1857 if (range
& (1 << reg
))
1858 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1860 else if (reg
<= cur_reg
)
1861 as_tsktsk (_("Warning: register range not in ascending order"));
1866 while (skip_past_comma (&str
) != FAIL
1867 || (in_range
= 1, *str
++ == '-'));
1870 if (skip_past_char (&str
, '}') == FAIL
)
1872 first_error (_("missing `}'"));
1876 else if (etype
== REGLIST_RN
)
1880 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1883 if (exp
.X_op
== O_constant
)
1885 if (exp
.X_add_number
1886 != (exp
.X_add_number
& 0x0000ffff))
1888 inst
.error
= _("invalid register mask");
1892 if ((range
& exp
.X_add_number
) != 0)
1894 int regno
= range
& exp
.X_add_number
;
1897 regno
= (1 << regno
) - 1;
1899 (_("Warning: duplicated register (r%d) in register list"),
1903 range
|= exp
.X_add_number
;
1907 if (inst
.relocs
[0].type
!= 0)
1909 inst
.error
= _("expression too complex");
1913 memcpy (&inst
.relocs
[0].exp
, &exp
, sizeof (expressionS
));
1914 inst
.relocs
[0].type
= BFD_RELOC_ARM_MULTI
;
1915 inst
.relocs
[0].pc_rel
= 0;
1919 if (*str
== '|' || *str
== '+')
1925 while (another_range
);
1931 /* Parse a VFP register list. If the string is invalid return FAIL.
1932 Otherwise return the number of registers, and set PBASE to the first
1933 register. Parses registers of type ETYPE.
1934 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1935 - Q registers can be used to specify pairs of D registers
1936 - { } can be omitted from around a singleton register list
1937 FIXME: This is not implemented, as it would require backtracking in
1940 This could be done (the meaning isn't really ambiguous), but doesn't
1941 fit in well with the current parsing framework.
1942 - 32 D registers may be used (also true for VFPv3).
1943 FIXME: Types are ignored in these register lists, which is probably a
1947 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
,
1948 bfd_boolean
*partial_match
)
1953 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1957 unsigned long mask
= 0;
1959 bfd_boolean vpr_seen
= FALSE
;
1960 bfd_boolean expect_vpr
=
1961 (etype
== REGLIST_VFP_S_VPR
) || (etype
== REGLIST_VFP_D_VPR
);
1963 if (skip_past_char (&str
, '{') == FAIL
)
1965 inst
.error
= _("expecting {");
1972 case REGLIST_VFP_S_VPR
:
1973 regtype
= REG_TYPE_VFS
;
1978 case REGLIST_VFP_D_VPR
:
1979 regtype
= REG_TYPE_VFD
;
1982 case REGLIST_NEON_D
:
1983 regtype
= REG_TYPE_NDQ
;
1990 if (etype
!= REGLIST_VFP_S
&& etype
!= REGLIST_VFP_S_VPR
)
1992 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1993 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1997 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
2000 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
2007 base_reg
= max_regs
;
2008 *partial_match
= FALSE
;
2012 int setmask
= 1, addregs
= 1;
2013 const char vpr_str
[] = "vpr";
2014 int vpr_str_len
= strlen (vpr_str
);
2016 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
2020 if (new_base
== FAIL
2021 && !strncasecmp (str
, vpr_str
, vpr_str_len
)
2022 && !ISALPHA (*(str
+ vpr_str_len
))
2028 base_reg
= 0; /* Canonicalize VPR only on d0 with 0 regs. */
2032 first_error (_("VPR expected last"));
2035 else if (new_base
== FAIL
)
2037 if (regtype
== REG_TYPE_VFS
)
2038 first_error (_("VFP single precision register or VPR "
2040 else /* regtype == REG_TYPE_VFD. */
2041 first_error (_("VFP/Neon double precision register or VPR "
2046 else if (new_base
== FAIL
)
2048 first_error (_(reg_expected_msgs
[regtype
]));
2052 *partial_match
= TRUE
;
2056 if (new_base
>= max_regs
)
2058 first_error (_("register out of range in list"));
2062 /* Note: a value of 2 * n is returned for the register Q<n>. */
2063 if (regtype
== REG_TYPE_NQ
)
2069 if (new_base
< base_reg
)
2070 base_reg
= new_base
;
2072 if (mask
& (setmask
<< new_base
))
2074 first_error (_("invalid register list"));
2078 if ((mask
>> new_base
) != 0 && ! warned
&& !vpr_seen
)
2080 as_tsktsk (_("register list not in ascending order"));
2084 mask
|= setmask
<< new_base
;
2087 if (*str
== '-') /* We have the start of a range expression */
2093 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
2096 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
2100 if (high_range
>= max_regs
)
2102 first_error (_("register out of range in list"));
2106 if (regtype
== REG_TYPE_NQ
)
2107 high_range
= high_range
+ 1;
2109 if (high_range
<= new_base
)
2111 inst
.error
= _("register range not in ascending order");
2115 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
2117 if (mask
& (setmask
<< new_base
))
2119 inst
.error
= _("invalid register list");
2123 mask
|= setmask
<< new_base
;
2128 while (skip_past_comma (&str
) != FAIL
);
2132 /* Sanity check -- should have raised a parse error above. */
2133 if ((!vpr_seen
&& count
== 0) || count
> max_regs
)
2138 if (expect_vpr
&& !vpr_seen
)
2140 first_error (_("VPR expected last"));
2144 /* Final test -- the registers must be consecutive. */
2146 for (i
= 0; i
< count
; i
++)
2148 if ((mask
& (1u << i
)) == 0)
2150 inst
.error
= _("non-contiguous register range");
2160 /* True if two alias types are the same. */
2163 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
2171 if (a
->defined
!= b
->defined
)
2174 if ((a
->defined
& NTA_HASTYPE
) != 0
2175 && (a
->eltype
.type
!= b
->eltype
.type
2176 || a
->eltype
.size
!= b
->eltype
.size
))
2179 if ((a
->defined
& NTA_HASINDEX
) != 0
2180 && (a
->index
!= b
->index
))
2186 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2187 The base register is put in *PBASE.
2188 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2190 The register stride (minus one) is put in bit 4 of the return value.
2191 Bits [6:5] encode the list length (minus one).
2192 The type of the list elements is put in *ELTYPE, if non-NULL. */
2194 #define NEON_LANE(X) ((X) & 0xf)
2195 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2196 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2199 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
2200 struct neon_type_el
*eltype
)
2207 int leading_brace
= 0;
2208 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
2209 const char *const incr_error
= _("register stride must be 1 or 2");
2210 const char *const type_error
= _("mismatched element/structure types in list");
2211 struct neon_typed_alias firsttype
;
2212 firsttype
.defined
= 0;
2213 firsttype
.eltype
.type
= NT_invtype
;
2214 firsttype
.eltype
.size
= -1;
2215 firsttype
.index
= -1;
2217 if (skip_past_char (&ptr
, '{') == SUCCESS
)
2222 struct neon_typed_alias atype
;
2223 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2227 first_error (_(reg_expected_msgs
[rtype
]));
2234 if (rtype
== REG_TYPE_NQ
)
2240 else if (reg_incr
== -1)
2242 reg_incr
= getreg
- base_reg
;
2243 if (reg_incr
< 1 || reg_incr
> 2)
2245 first_error (_(incr_error
));
2249 else if (getreg
!= base_reg
+ reg_incr
* count
)
2251 first_error (_(incr_error
));
2255 if (! neon_alias_types_same (&atype
, &firsttype
))
2257 first_error (_(type_error
));
2261 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2265 struct neon_typed_alias htype
;
2266 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2268 lane
= NEON_INTERLEAVE_LANES
;
2269 else if (lane
!= NEON_INTERLEAVE_LANES
)
2271 first_error (_(type_error
));
2276 else if (reg_incr
!= 1)
2278 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2282 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2285 first_error (_(reg_expected_msgs
[rtype
]));
2288 if (! neon_alias_types_same (&htype
, &firsttype
))
2290 first_error (_(type_error
));
2293 count
+= hireg
+ dregs
- getreg
;
2297 /* If we're using Q registers, we can't use [] or [n] syntax. */
2298 if (rtype
== REG_TYPE_NQ
)
2304 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2308 else if (lane
!= atype
.index
)
2310 first_error (_(type_error
));
2314 else if (lane
== -1)
2315 lane
= NEON_INTERLEAVE_LANES
;
2316 else if (lane
!= NEON_INTERLEAVE_LANES
)
2318 first_error (_(type_error
));
2323 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2325 /* No lane set by [x]. We must be interleaving structures. */
2327 lane
= NEON_INTERLEAVE_LANES
;
2330 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2331 || (count
> 1 && reg_incr
== -1))
2333 first_error (_("error parsing element/structure list"));
2337 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2339 first_error (_("expected }"));
2347 *eltype
= firsttype
.eltype
;
2352 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2355 /* Parse an explicit relocation suffix on an expression. This is
2356 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2357 arm_reloc_hsh contains no entries, so this function can only
2358 succeed if there is no () after the word. Returns -1 on error,
2359 BFD_RELOC_UNUSED if there wasn't any suffix. */
2362 parse_reloc (char **str
)
2364 struct reloc_entry
*r
;
2368 return BFD_RELOC_UNUSED
;
2373 while (*q
&& *q
!= ')' && *q
!= ',')
2378 if ((r
= (struct reloc_entry
*)
2379 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2386 /* Directives: register aliases. */
2388 static struct reg_entry
*
2389 insert_reg_alias (char *str
, unsigned number
, int type
)
2391 struct reg_entry
*new_reg
;
2394 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2396 if (new_reg
->builtin
)
2397 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2399 /* Only warn about a redefinition if it's not defined as the
2401 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2402 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2407 name
= xstrdup (str
);
2408 new_reg
= XNEW (struct reg_entry
);
2410 new_reg
->name
= name
;
2411 new_reg
->number
= number
;
2412 new_reg
->type
= type
;
2413 new_reg
->builtin
= FALSE
;
2414 new_reg
->neon
= NULL
;
2416 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2423 insert_neon_reg_alias (char *str
, int number
, int type
,
2424 struct neon_typed_alias
*atype
)
2426 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2430 first_error (_("attempt to redefine typed alias"));
2436 reg
->neon
= XNEW (struct neon_typed_alias
);
2437 *reg
->neon
= *atype
;
2441 /* Look for the .req directive. This is of the form:
2443 new_register_name .req existing_register_name
2445 If we find one, or if it looks sufficiently like one that we want to
2446 handle any error here, return TRUE. Otherwise return FALSE. */
2449 create_register_alias (char * newname
, char *p
)
2451 struct reg_entry
*old
;
2452 char *oldname
, *nbuf
;
2455 /* The input scrubber ensures that whitespace after the mnemonic is
2456 collapsed to single spaces. */
2458 if (strncmp (oldname
, " .req ", 6) != 0)
2462 if (*oldname
== '\0')
2465 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2468 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2472 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2473 the desired alias name, and p points to its end. If not, then
2474 the desired alias name is in the global original_case_string. */
2475 #ifdef TC_CASE_SENSITIVE
2478 newname
= original_case_string
;
2479 nlen
= strlen (newname
);
2482 nbuf
= xmemdup0 (newname
, nlen
);
2484 /* Create aliases under the new name as stated; an all-lowercase
2485 version of the new name; and an all-uppercase version of the new
2487 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2489 for (p
= nbuf
; *p
; p
++)
2492 if (strncmp (nbuf
, newname
, nlen
))
2494 /* If this attempt to create an additional alias fails, do not bother
2495 trying to create the all-lower case alias. We will fail and issue
2496 a second, duplicate error message. This situation arises when the
2497 programmer does something like:
2500 The second .req creates the "Foo" alias but then fails to create
2501 the artificial FOO alias because it has already been created by the
2503 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2510 for (p
= nbuf
; *p
; p
++)
2513 if (strncmp (nbuf
, newname
, nlen
))
2514 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2521 /* Create a Neon typed/indexed register alias using directives, e.g.:
2526 These typed registers can be used instead of the types specified after the
2527 Neon mnemonic, so long as all operands given have types. Types can also be
2528 specified directly, e.g.:
2529 vadd d0.s32, d1.s32, d2.s32 */
2532 create_neon_reg_alias (char *newname
, char *p
)
2534 enum arm_reg_type basetype
;
2535 struct reg_entry
*basereg
;
2536 struct reg_entry mybasereg
;
2537 struct neon_type ntype
;
2538 struct neon_typed_alias typeinfo
;
2539 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2542 typeinfo
.defined
= 0;
2543 typeinfo
.eltype
.type
= NT_invtype
;
2544 typeinfo
.eltype
.size
= -1;
2545 typeinfo
.index
= -1;
2549 if (strncmp (p
, " .dn ", 5) == 0)
2550 basetype
= REG_TYPE_VFD
;
2551 else if (strncmp (p
, " .qn ", 5) == 0)
2552 basetype
= REG_TYPE_NQ
;
2561 basereg
= arm_reg_parse_multi (&p
);
2563 if (basereg
&& basereg
->type
!= basetype
)
2565 as_bad (_("bad type for register"));
2569 if (basereg
== NULL
)
2572 /* Try parsing as an integer. */
2573 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2574 if (exp
.X_op
!= O_constant
)
2576 as_bad (_("expression must be constant"));
2579 basereg
= &mybasereg
;
2580 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2586 typeinfo
= *basereg
->neon
;
2588 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2590 /* We got a type. */
2591 if (typeinfo
.defined
& NTA_HASTYPE
)
2593 as_bad (_("can't redefine the type of a register alias"));
2597 typeinfo
.defined
|= NTA_HASTYPE
;
2598 if (ntype
.elems
!= 1)
2600 as_bad (_("you must specify a single type only"));
2603 typeinfo
.eltype
= ntype
.el
[0];
2606 if (skip_past_char (&p
, '[') == SUCCESS
)
2609 /* We got a scalar index. */
2611 if (typeinfo
.defined
& NTA_HASINDEX
)
2613 as_bad (_("can't redefine the index of a scalar alias"));
2617 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2619 if (exp
.X_op
!= O_constant
)
2621 as_bad (_("scalar index must be constant"));
2625 typeinfo
.defined
|= NTA_HASINDEX
;
2626 typeinfo
.index
= exp
.X_add_number
;
2628 if (skip_past_char (&p
, ']') == FAIL
)
2630 as_bad (_("expecting ]"));
2635 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2636 the desired alias name, and p points to its end. If not, then
2637 the desired alias name is in the global original_case_string. */
2638 #ifdef TC_CASE_SENSITIVE
2639 namelen
= nameend
- newname
;
2641 newname
= original_case_string
;
2642 namelen
= strlen (newname
);
2645 namebuf
= xmemdup0 (newname
, namelen
);
2647 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2648 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2650 /* Insert name in all uppercase. */
2651 for (p
= namebuf
; *p
; p
++)
2654 if (strncmp (namebuf
, newname
, namelen
))
2655 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2656 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2658 /* Insert name in all lowercase. */
2659 for (p
= namebuf
; *p
; p
++)
2662 if (strncmp (namebuf
, newname
, namelen
))
2663 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2664 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2670 /* Should never be called, as .req goes between the alias and the
2671 register name, not at the beginning of the line. */
2674 s_req (int a ATTRIBUTE_UNUSED
)
2676 as_bad (_("invalid syntax for .req directive"));
2680 s_dn (int a ATTRIBUTE_UNUSED
)
2682 as_bad (_("invalid syntax for .dn directive"));
2686 s_qn (int a ATTRIBUTE_UNUSED
)
2688 as_bad (_("invalid syntax for .qn directive"));
2691 /* The .unreq directive deletes an alias which was previously defined
2692 by .req. For example:
2698 s_unreq (int a ATTRIBUTE_UNUSED
)
2703 name
= input_line_pointer
;
2705 while (*input_line_pointer
!= 0
2706 && *input_line_pointer
!= ' '
2707 && *input_line_pointer
!= '\n')
2708 ++input_line_pointer
;
2710 saved_char
= *input_line_pointer
;
2711 *input_line_pointer
= 0;
2714 as_bad (_("invalid syntax for .unreq directive"));
2717 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2721 as_bad (_("unknown register alias '%s'"), name
);
2722 else if (reg
->builtin
)
2723 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2730 hash_delete (arm_reg_hsh
, name
, FALSE
);
2731 free ((char *) reg
->name
);
2736 /* Also locate the all upper case and all lower case versions.
2737 Do not complain if we cannot find one or the other as it
2738 was probably deleted above. */
2740 nbuf
= strdup (name
);
2741 for (p
= nbuf
; *p
; p
++)
2743 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2746 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2747 free ((char *) reg
->name
);
2753 for (p
= nbuf
; *p
; p
++)
2755 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2758 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2759 free ((char *) reg
->name
);
2769 *input_line_pointer
= saved_char
;
2770 demand_empty_rest_of_line ();
2773 /* Directives: Instruction set selection. */
2776 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2777 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2778 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2779 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2781 /* Create a new mapping symbol for the transition to STATE. */
2784 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2787 const char * symname
;
2794 type
= BSF_NO_FLAGS
;
2798 type
= BSF_NO_FLAGS
;
2802 type
= BSF_NO_FLAGS
;
2808 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2809 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2814 THUMB_SET_FUNC (symbolP
, 0);
2815 ARM_SET_THUMB (symbolP
, 0);
2816 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2820 THUMB_SET_FUNC (symbolP
, 1);
2821 ARM_SET_THUMB (symbolP
, 1);
2822 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2830 /* Save the mapping symbols for future reference. Also check that
2831 we do not place two mapping symbols at the same offset within a
2832 frag. We'll handle overlap between frags in
2833 check_mapping_symbols.
2835 If .fill or other data filling directive generates zero sized data,
2836 the mapping symbol for the following code will have the same value
2837 as the one generated for the data filling directive. In this case,
2838 we replace the old symbol with the new one at the same address. */
2841 if (frag
->tc_frag_data
.first_map
!= NULL
)
2843 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2844 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2846 frag
->tc_frag_data
.first_map
= symbolP
;
2848 if (frag
->tc_frag_data
.last_map
!= NULL
)
2850 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2851 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2852 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2854 frag
->tc_frag_data
.last_map
= symbolP
;
2857 /* We must sometimes convert a region marked as code to data during
2858 code alignment, if an odd number of bytes have to be padded. The
2859 code mapping symbol is pushed to an aligned address. */
2862 insert_data_mapping_symbol (enum mstate state
,
2863 valueT value
, fragS
*frag
, offsetT bytes
)
2865 /* If there was already a mapping symbol, remove it. */
2866 if (frag
->tc_frag_data
.last_map
!= NULL
2867 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2869 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2873 know (frag
->tc_frag_data
.first_map
== symp
);
2874 frag
->tc_frag_data
.first_map
= NULL
;
2876 frag
->tc_frag_data
.last_map
= NULL
;
2877 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2880 make_mapping_symbol (MAP_DATA
, value
, frag
);
2881 make_mapping_symbol (state
, value
+ bytes
, frag
);
2884 static void mapping_state_2 (enum mstate state
, int max_chars
);
2886 /* Set the mapping state to STATE. Only call this when about to
2887 emit some STATE bytes to the file. */
2889 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2891 mapping_state (enum mstate state
)
2893 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2895 if (mapstate
== state
)
2896 /* The mapping symbol has already been emitted.
2897 There is nothing else to do. */
2900 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2902 All ARM instructions require 4-byte alignment.
2903 (Almost) all Thumb instructions require 2-byte alignment.
2905 When emitting instructions into any section, mark the section
2908 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2909 but themselves require 2-byte alignment; this applies to some
2910 PC- relative forms. However, these cases will involve implicit
2911 literal pool generation or an explicit .align >=2, both of
2912 which will cause the section to me marked with sufficient
2913 alignment. Thus, we don't handle those cases here. */
2914 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2916 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2917 /* This case will be evaluated later. */
2920 mapping_state_2 (state
, 0);
2923 /* Same as mapping_state, but MAX_CHARS bytes have already been
2924 allocated. Put the mapping symbol that far back. */
2927 mapping_state_2 (enum mstate state
, int max_chars
)
2929 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2931 if (!SEG_NORMAL (now_seg
))
2934 if (mapstate
== state
)
2935 /* The mapping symbol has already been emitted.
2936 There is nothing else to do. */
2939 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2940 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2942 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2943 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2946 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2949 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2950 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2954 #define mapping_state(x) ((void)0)
2955 #define mapping_state_2(x, y) ((void)0)
2958 /* Find the real, Thumb encoded start of a Thumb function. */
2962 find_real_start (symbolS
* symbolP
)
2965 const char * name
= S_GET_NAME (symbolP
);
2966 symbolS
* new_target
;
2968 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2969 #define STUB_NAME ".real_start_of"
2974 /* The compiler may generate BL instructions to local labels because
2975 it needs to perform a branch to a far away location. These labels
2976 do not have a corresponding ".real_start_of" label. We check
2977 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2978 the ".real_start_of" convention for nonlocal branches. */
2979 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2982 real_start
= concat (STUB_NAME
, name
, NULL
);
2983 new_target
= symbol_find (real_start
);
2986 if (new_target
== NULL
)
2988 as_warn (_("Failed to find real start of function: %s\n"), name
);
2989 new_target
= symbolP
;
2997 opcode_select (int width
)
3004 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
3005 as_bad (_("selected processor does not support THUMB opcodes"));
3008 /* No need to force the alignment, since we will have been
3009 coming from ARM mode, which is word-aligned. */
3010 record_alignment (now_seg
, 1);
3017 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
3018 as_bad (_("selected processor does not support ARM opcodes"));
3023 frag_align (2, 0, 0);
3025 record_alignment (now_seg
, 1);
3030 as_bad (_("invalid instruction size selected (%d)"), width
);
3035 s_arm (int ignore ATTRIBUTE_UNUSED
)
3038 demand_empty_rest_of_line ();
3042 s_thumb (int ignore ATTRIBUTE_UNUSED
)
3045 demand_empty_rest_of_line ();
3049 s_code (int unused ATTRIBUTE_UNUSED
)
3053 temp
= get_absolute_expression ();
3058 opcode_select (temp
);
3062 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
3067 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
3069 /* If we are not already in thumb mode go into it, EVEN if
3070 the target processor does not support thumb instructions.
3071 This is used by gcc/config/arm/lib1funcs.asm for example
3072 to compile interworking support functions even if the
3073 target processor should not support interworking. */
3077 record_alignment (now_seg
, 1);
3080 demand_empty_rest_of_line ();
3084 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
3088 /* The following label is the name/address of the start of a Thumb function.
3089 We need to know this for the interworking support. */
3090 label_is_thumb_function_name
= TRUE
;
3093 /* Perform a .set directive, but also mark the alias as
3094 being a thumb function. */
3097 s_thumb_set (int equiv
)
3099 /* XXX the following is a duplicate of the code for s_set() in read.c
3100 We cannot just call that code as we need to get at the symbol that
3107 /* Especial apologies for the random logic:
3108 This just grew, and could be parsed much more simply!
3110 delim
= get_symbol_name (& name
);
3111 end_name
= input_line_pointer
;
3112 (void) restore_line_pointer (delim
);
3114 if (*input_line_pointer
!= ',')
3117 as_bad (_("expected comma after name \"%s\""), name
);
3119 ignore_rest_of_line ();
3123 input_line_pointer
++;
3126 if (name
[0] == '.' && name
[1] == '\0')
3128 /* XXX - this should not happen to .thumb_set. */
3132 if ((symbolP
= symbol_find (name
)) == NULL
3133 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
3136 /* When doing symbol listings, play games with dummy fragments living
3137 outside the normal fragment chain to record the file and line info
3139 if (listing
& LISTING_SYMBOLS
)
3141 extern struct list_info_struct
* listing_tail
;
3142 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
3144 memset (dummy_frag
, 0, sizeof (fragS
));
3145 dummy_frag
->fr_type
= rs_fill
;
3146 dummy_frag
->line
= listing_tail
;
3147 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
3148 dummy_frag
->fr_symbol
= symbolP
;
3152 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
3155 /* "set" symbols are local unless otherwise specified. */
3156 SF_SET_LOCAL (symbolP
);
3157 #endif /* OBJ_COFF */
3158 } /* Make a new symbol. */
3160 symbol_table_insert (symbolP
);
3165 && S_IS_DEFINED (symbolP
)
3166 && S_GET_SEGMENT (symbolP
) != reg_section
)
3167 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
3169 pseudo_set (symbolP
);
3171 demand_empty_rest_of_line ();
3173 /* XXX Now we come to the Thumb specific bit of code. */
3175 THUMB_SET_FUNC (symbolP
, 1);
3176 ARM_SET_THUMB (symbolP
, 1);
3177 #if defined OBJ_ELF || defined OBJ_COFF
3178 ARM_SET_INTERWORK (symbolP
, support_interwork
);
3182 /* Directives: Mode selection. */
3184 /* .syntax [unified|divided] - choose the new unified syntax
3185 (same for Arm and Thumb encoding, modulo slight differences in what
3186 can be represented) or the old divergent syntax for each mode. */
3188 s_syntax (int unused ATTRIBUTE_UNUSED
)
3192 delim
= get_symbol_name (& name
);
3194 if (!strcasecmp (name
, "unified"))
3195 unified_syntax
= TRUE
;
3196 else if (!strcasecmp (name
, "divided"))
3197 unified_syntax
= FALSE
;
3200 as_bad (_("unrecognized syntax mode \"%s\""), name
);
3203 (void) restore_line_pointer (delim
);
3204 demand_empty_rest_of_line ();
3207 /* Directives: sectioning and alignment. */
3210 s_bss (int ignore ATTRIBUTE_UNUSED
)
3212 /* We don't support putting frags in the BSS segment, we fake it by
3213 marking in_bss, then looking at s_skip for clues. */
3214 subseg_set (bss_section
, 0);
3215 demand_empty_rest_of_line ();
3217 #ifdef md_elf_section_change_hook
3218 md_elf_section_change_hook ();
3223 s_even (int ignore ATTRIBUTE_UNUSED
)
3225 /* Never make frag if expect extra pass. */
3227 frag_align (1, 0, 0);
3229 record_alignment (now_seg
, 1);
3231 demand_empty_rest_of_line ();
3234 /* Directives: CodeComposer Studio. */
3236 /* .ref (for CodeComposer Studio syntax only). */
3238 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3240 if (codecomposer_syntax
)
3241 ignore_rest_of_line ();
3243 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3246 /* If name is not NULL, then it is used for marking the beginning of a
3247 function, whereas if it is NULL then it means the function end. */
3249 asmfunc_debug (const char * name
)
3251 static const char * last_name
= NULL
;
3255 gas_assert (last_name
== NULL
);
3258 if (debug_type
== DEBUG_STABS
)
3259 stabs_generate_asm_func (name
, name
);
3263 gas_assert (last_name
!= NULL
);
3265 if (debug_type
== DEBUG_STABS
)
3266 stabs_generate_asm_endfunc (last_name
, last_name
);
3273 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3275 if (codecomposer_syntax
)
3277 switch (asmfunc_state
)
3279 case OUTSIDE_ASMFUNC
:
3280 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3283 case WAITING_ASMFUNC_NAME
:
3284 as_bad (_(".asmfunc repeated."));
3287 case WAITING_ENDASMFUNC
:
3288 as_bad (_(".asmfunc without function."));
3291 demand_empty_rest_of_line ();
3294 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3298 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3300 if (codecomposer_syntax
)
3302 switch (asmfunc_state
)
3304 case OUTSIDE_ASMFUNC
:
3305 as_bad (_(".endasmfunc without a .asmfunc."));
3308 case WAITING_ASMFUNC_NAME
:
3309 as_bad (_(".endasmfunc without function."));
3312 case WAITING_ENDASMFUNC
:
3313 asmfunc_state
= OUTSIDE_ASMFUNC
;
3314 asmfunc_debug (NULL
);
3317 demand_empty_rest_of_line ();
3320 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3324 s_ccs_def (int name
)
3326 if (codecomposer_syntax
)
3329 as_bad (_(".def pseudo-op only available with -mccs flag."));
3332 /* Directives: Literal pools. */
3334 static literal_pool
*
3335 find_literal_pool (void)
3337 literal_pool
* pool
;
3339 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3341 if (pool
->section
== now_seg
3342 && pool
->sub_section
== now_subseg
)
3349 static literal_pool
*
3350 find_or_make_literal_pool (void)
3352 /* Next literal pool ID number. */
3353 static unsigned int latest_pool_num
= 1;
3354 literal_pool
* pool
;
3356 pool
= find_literal_pool ();
3360 /* Create a new pool. */
3361 pool
= XNEW (literal_pool
);
3365 pool
->next_free_entry
= 0;
3366 pool
->section
= now_seg
;
3367 pool
->sub_section
= now_subseg
;
3368 pool
->next
= list_of_pools
;
3369 pool
->symbol
= NULL
;
3370 pool
->alignment
= 2;
3372 /* Add it to the list. */
3373 list_of_pools
= pool
;
3376 /* New pools, and emptied pools, will have a NULL symbol. */
3377 if (pool
->symbol
== NULL
)
3379 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3380 (valueT
) 0, &zero_address_frag
);
3381 pool
->id
= latest_pool_num
++;
3388 /* Add the literal in the global 'inst'
3389 structure to the relevant literal pool. */
3392 add_to_lit_pool (unsigned int nbytes
)
3394 #define PADDING_SLOT 0x1
3395 #define LIT_ENTRY_SIZE_MASK 0xFF
3396 literal_pool
* pool
;
3397 unsigned int entry
, pool_size
= 0;
3398 bfd_boolean padding_slot_p
= FALSE
;
3404 imm1
= inst
.operands
[1].imm
;
3405 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3406 : inst
.relocs
[0].exp
.X_unsigned
? 0
3407 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3408 if (target_big_endian
)
3411 imm2
= inst
.operands
[1].imm
;
3415 pool
= find_or_make_literal_pool ();
3417 /* Check if this literal value is already in the pool. */
3418 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3422 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3423 && (inst
.relocs
[0].exp
.X_op
== O_constant
)
3424 && (pool
->literals
[entry
].X_add_number
3425 == inst
.relocs
[0].exp
.X_add_number
)
3426 && (pool
->literals
[entry
].X_md
== nbytes
)
3427 && (pool
->literals
[entry
].X_unsigned
3428 == inst
.relocs
[0].exp
.X_unsigned
))
3431 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3432 && (inst
.relocs
[0].exp
.X_op
== O_symbol
)
3433 && (pool
->literals
[entry
].X_add_number
3434 == inst
.relocs
[0].exp
.X_add_number
)
3435 && (pool
->literals
[entry
].X_add_symbol
3436 == inst
.relocs
[0].exp
.X_add_symbol
)
3437 && (pool
->literals
[entry
].X_op_symbol
3438 == inst
.relocs
[0].exp
.X_op_symbol
)
3439 && (pool
->literals
[entry
].X_md
== nbytes
))
3442 else if ((nbytes
== 8)
3443 && !(pool_size
& 0x7)
3444 && ((entry
+ 1) != pool
->next_free_entry
)
3445 && (pool
->literals
[entry
].X_op
== O_constant
)
3446 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3447 && (pool
->literals
[entry
].X_unsigned
3448 == inst
.relocs
[0].exp
.X_unsigned
)
3449 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3450 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3451 && (pool
->literals
[entry
+ 1].X_unsigned
3452 == inst
.relocs
[0].exp
.X_unsigned
))
3455 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3456 if (padding_slot_p
&& (nbytes
== 4))
3462 /* Do we need to create a new entry? */
3463 if (entry
== pool
->next_free_entry
)
3465 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3467 inst
.error
= _("literal pool overflow");
3473 /* For 8-byte entries, we align to an 8-byte boundary,
3474 and split it into two 4-byte entries, because on 32-bit
3475 host, 8-byte constants are treated as big num, thus
3476 saved in "generic_bignum" which will be overwritten
3477 by later assignments.
3479 We also need to make sure there is enough space for
3482 We also check to make sure the literal operand is a
3484 if (!(inst
.relocs
[0].exp
.X_op
== O_constant
3485 || inst
.relocs
[0].exp
.X_op
== O_big
))
3487 inst
.error
= _("invalid type for literal pool");
3490 else if (pool_size
& 0x7)
3492 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3494 inst
.error
= _("literal pool overflow");
3498 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3499 pool
->literals
[entry
].X_op
= O_constant
;
3500 pool
->literals
[entry
].X_add_number
= 0;
3501 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3502 pool
->next_free_entry
+= 1;
3505 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3507 inst
.error
= _("literal pool overflow");
3511 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3512 pool
->literals
[entry
].X_op
= O_constant
;
3513 pool
->literals
[entry
].X_add_number
= imm1
;
3514 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3515 pool
->literals
[entry
++].X_md
= 4;
3516 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3517 pool
->literals
[entry
].X_op
= O_constant
;
3518 pool
->literals
[entry
].X_add_number
= imm2
;
3519 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3520 pool
->literals
[entry
].X_md
= 4;
3521 pool
->alignment
= 3;
3522 pool
->next_free_entry
+= 1;
3526 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3527 pool
->literals
[entry
].X_md
= 4;
3531 /* PR ld/12974: Record the location of the first source line to reference
3532 this entry in the literal pool. If it turns out during linking that the
3533 symbol does not exist we will be able to give an accurate line number for
3534 the (first use of the) missing reference. */
3535 if (debug_type
== DEBUG_DWARF2
)
3536 dwarf2_where (pool
->locs
+ entry
);
3538 pool
->next_free_entry
+= 1;
3540 else if (padding_slot_p
)
3542 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3543 pool
->literals
[entry
].X_md
= nbytes
;
3546 inst
.relocs
[0].exp
.X_op
= O_symbol
;
3547 inst
.relocs
[0].exp
.X_add_number
= pool_size
;
3548 inst
.relocs
[0].exp
.X_add_symbol
= pool
->symbol
;
3554 tc_start_label_without_colon (void)
3556 bfd_boolean ret
= TRUE
;
3558 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3560 const char *label
= input_line_pointer
;
3562 while (!is_end_of_line
[(int) label
[-1]])
3567 as_bad (_("Invalid label '%s'"), label
);
3571 asmfunc_debug (label
);
3573 asmfunc_state
= WAITING_ENDASMFUNC
;
3579 /* Can't use symbol_new here, so have to create a symbol and then at
3580 a later date assign it a value. That's what these functions do. */
3583 symbol_locate (symbolS
* symbolP
,
3584 const char * name
, /* It is copied, the caller can modify. */
3585 segT segment
, /* Segment identifier (SEG_<something>). */
3586 valueT valu
, /* Symbol value. */
3587 fragS
* frag
) /* Associated fragment. */
3590 char * preserved_copy_of_name
;
3592 name_length
= strlen (name
) + 1; /* +1 for \0. */
3593 obstack_grow (¬es
, name
, name_length
);
3594 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3596 #ifdef tc_canonicalize_symbol_name
3597 preserved_copy_of_name
=
3598 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3601 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3603 S_SET_SEGMENT (symbolP
, segment
);
3604 S_SET_VALUE (symbolP
, valu
);
3605 symbol_clear_list_pointers (symbolP
);
3607 symbol_set_frag (symbolP
, frag
);
3609 /* Link to end of symbol chain. */
3611 extern int symbol_table_frozen
;
3613 if (symbol_table_frozen
)
3617 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3619 obj_symbol_new_hook (symbolP
);
3621 #ifdef tc_symbol_new_hook
3622 tc_symbol_new_hook (symbolP
);
3626 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3627 #endif /* DEBUG_SYMS */
3631 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3634 literal_pool
* pool
;
3637 pool
= find_literal_pool ();
3639 || pool
->symbol
== NULL
3640 || pool
->next_free_entry
== 0)
3643 /* Align pool as you have word accesses.
3644 Only make a frag if we have to. */
3646 frag_align (pool
->alignment
, 0, 0);
3648 record_alignment (now_seg
, 2);
3651 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3652 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3654 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3656 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3657 (valueT
) frag_now_fix (), frag_now
);
3658 symbol_table_insert (pool
->symbol
);
3660 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3662 #if defined OBJ_COFF || defined OBJ_ELF
3663 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3666 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3669 if (debug_type
== DEBUG_DWARF2
)
3670 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3672 /* First output the expression in the instruction to the pool. */
3673 emit_expr (&(pool
->literals
[entry
]),
3674 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3677 /* Mark the pool as empty. */
3678 pool
->next_free_entry
= 0;
3679 pool
->symbol
= NULL
;
3683 /* Forward declarations for functions below, in the MD interface
3685 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3686 static valueT
create_unwind_entry (int);
3687 static void start_unwind_section (const segT
, int);
3688 static void add_unwind_opcode (valueT
, int);
3689 static void flush_pending_unwind (void);
3691 /* Directives: Data. */
3694 s_arm_elf_cons (int nbytes
)
3698 #ifdef md_flush_pending_output
3699 md_flush_pending_output ();
3702 if (is_it_end_of_statement ())
3704 demand_empty_rest_of_line ();
3708 #ifdef md_cons_align
3709 md_cons_align (nbytes
);
3712 mapping_state (MAP_DATA
);
3716 char *base
= input_line_pointer
;
3720 if (exp
.X_op
!= O_symbol
)
3721 emit_expr (&exp
, (unsigned int) nbytes
);
3724 char *before_reloc
= input_line_pointer
;
3725 reloc
= parse_reloc (&input_line_pointer
);
3728 as_bad (_("unrecognized relocation suffix"));
3729 ignore_rest_of_line ();
3732 else if (reloc
== BFD_RELOC_UNUSED
)
3733 emit_expr (&exp
, (unsigned int) nbytes
);
3736 reloc_howto_type
*howto
= (reloc_howto_type
*)
3737 bfd_reloc_type_lookup (stdoutput
,
3738 (bfd_reloc_code_real_type
) reloc
);
3739 int size
= bfd_get_reloc_size (howto
);
3741 if (reloc
== BFD_RELOC_ARM_PLT32
)
3743 as_bad (_("(plt) is only valid on branch targets"));
3744 reloc
= BFD_RELOC_UNUSED
;
3749 as_bad (ngettext ("%s relocations do not fit in %d byte",
3750 "%s relocations do not fit in %d bytes",
3752 howto
->name
, nbytes
);
3755 /* We've parsed an expression stopping at O_symbol.
3756 But there may be more expression left now that we
3757 have parsed the relocation marker. Parse it again.
3758 XXX Surely there is a cleaner way to do this. */
3759 char *p
= input_line_pointer
;
3761 char *save_buf
= XNEWVEC (char, input_line_pointer
- base
);
3763 memcpy (save_buf
, base
, input_line_pointer
- base
);
3764 memmove (base
+ (input_line_pointer
- before_reloc
),
3765 base
, before_reloc
- base
);
3767 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3769 memcpy (base
, save_buf
, p
- base
);
3771 offset
= nbytes
- size
;
3772 p
= frag_more (nbytes
);
3773 memset (p
, 0, nbytes
);
3774 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3775 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3781 while (*input_line_pointer
++ == ',');
3783 /* Put terminator back into stream. */
3784 input_line_pointer
--;
3785 demand_empty_rest_of_line ();
3788 /* Emit an expression containing a 32-bit thumb instruction.
3789 Implementation based on put_thumb32_insn. */
3792 emit_thumb32_expr (expressionS
* exp
)
3794 expressionS exp_high
= *exp
;
3796 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3797 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3798 exp
->X_add_number
&= 0xffff;
3799 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
/* Guess the instruction size based on the opcode.
   Returns 2 for a 16-bit Thumb instruction (first halfword below
   0xe800), 4 for a 32-bit Thumb-2 instruction (value with a
   0xe8000000-or-greater top halfword), and 0 when the size cannot be
   determined from the value alone.  */

static int
thumb_insn_size (int opcode)
{
  if ((unsigned int) opcode < 0xe800u)
    return 2;
  else if ((unsigned int) opcode >= 0xe8000000u)
    return 4;
  else
    return 0;
}
*exp
, int nbytes
)
3820 if (exp
->X_op
== O_constant
)
3825 size
= thumb_insn_size (exp
->X_add_number
);
3829 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3831 as_bad (_(".inst.n operand too big. "\
3832 "Use .inst.w instead"));
3837 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
)
3838 set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN
, 0);
3840 set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3842 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3843 emit_thumb32_expr (exp
);
3845 emit_expr (exp
, (unsigned int) size
);
3847 it_fsm_post_encode ();
3851 as_bad (_("cannot determine Thumb instruction size. " \
3852 "Use .inst.n/.inst.w instead"));
3855 as_bad (_("constant expression required"));
3860 /* Like s_arm_elf_cons but do not use md_cons_align and
3861 set the mapping state to MAP_ARM/MAP_THUMB. */
3864 s_arm_elf_inst (int nbytes
)
3866 if (is_it_end_of_statement ())
3868 demand_empty_rest_of_line ();
3872 /* Calling mapping_state () here will not change ARM/THUMB,
3873 but will ensure not to be in DATA state. */
3876 mapping_state (MAP_THUMB
);
3881 as_bad (_("width suffixes are invalid in ARM mode"));
3882 ignore_rest_of_line ();
3888 mapping_state (MAP_ARM
);
3897 if (! emit_insn (& exp
, nbytes
))
3899 ignore_rest_of_line ();
3903 while (*input_line_pointer
++ == ',');
3905 /* Put terminator back into stream. */
3906 input_line_pointer
--;
3907 demand_empty_rest_of_line ();
3910 /* Parse a .rel31 directive. */
3913 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3920 if (*input_line_pointer
== '1')
3921 highbit
= 0x80000000;
3922 else if (*input_line_pointer
!= '0')
3923 as_bad (_("expected 0 or 1"));
3925 input_line_pointer
++;
3926 if (*input_line_pointer
!= ',')
3927 as_bad (_("missing comma"));
3928 input_line_pointer
++;
3930 #ifdef md_flush_pending_output
3931 md_flush_pending_output ();
3934 #ifdef md_cons_align
3938 mapping_state (MAP_DATA
);
3943 md_number_to_chars (p
, highbit
, 4);
3944 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3945 BFD_RELOC_ARM_PREL31
);
3947 demand_empty_rest_of_line ();
3950 /* Directives: AEABI stack-unwind tables. */
3952 /* Parse an unwind_fnstart directive. Simply records the current location. */
3955 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3957 demand_empty_rest_of_line ();
3958 if (unwind
.proc_start
)
3960 as_bad (_("duplicate .fnstart directive"));
3964 /* Mark the start of the function. */
3965 unwind
.proc_start
= expr_build_dot ();
3967 /* Reset the rest of the unwind info. */
3968 unwind
.opcode_count
= 0;
3969 unwind
.table_entry
= NULL
;
3970 unwind
.personality_routine
= NULL
;
3971 unwind
.personality_index
= -1;
3972 unwind
.frame_size
= 0;
3973 unwind
.fp_offset
= 0;
3974 unwind
.fp_reg
= REG_SP
;
3976 unwind
.sp_restored
= 0;
3980 /* Parse a handlerdata directive. Creates the exception handling table entry
3981 for the function. */
3984 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3986 demand_empty_rest_of_line ();
3987 if (!unwind
.proc_start
)
3988 as_bad (MISSING_FNSTART
);
3990 if (unwind
.table_entry
)
3991 as_bad (_("duplicate .handlerdata directive"));
3993 create_unwind_entry (1);
3996 /* Parse an unwind_fnend directive. Generates the index table entry. */
3999 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
4004 unsigned int marked_pr_dependency
;
4006 demand_empty_rest_of_line ();
4008 if (!unwind
.proc_start
)
4010 as_bad (_(".fnend directive without .fnstart"));
4014 /* Add eh table entry. */
4015 if (unwind
.table_entry
== NULL
)
4016 val
= create_unwind_entry (0);
4020 /* Add index table entry. This is two words. */
4021 start_unwind_section (unwind
.saved_seg
, 1);
4022 frag_align (2, 0, 0);
4023 record_alignment (now_seg
, 2);
4025 ptr
= frag_more (8);
4027 where
= frag_now_fix () - 8;
4029 /* Self relative offset of the function start. */
4030 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
4031 BFD_RELOC_ARM_PREL31
);
4033 /* Indicate dependency on EHABI-defined personality routines to the
4034 linker, if it hasn't been done already. */
4035 marked_pr_dependency
4036 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
4037 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
4038 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
4040 static const char *const name
[] =
4042 "__aeabi_unwind_cpp_pr0",
4043 "__aeabi_unwind_cpp_pr1",
4044 "__aeabi_unwind_cpp_pr2"
4046 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
4047 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
4048 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
4049 |= 1 << unwind
.personality_index
;
4053 /* Inline exception table entry. */
4054 md_number_to_chars (ptr
+ 4, val
, 4);
4056 /* Self relative offset of the table entry. */
4057 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
4058 BFD_RELOC_ARM_PREL31
);
4060 /* Restore the original section. */
4061 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
4063 unwind
.proc_start
= NULL
;
4067 /* Parse an unwind_cantunwind directive. */
4070 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
4072 demand_empty_rest_of_line ();
4073 if (!unwind
.proc_start
)
4074 as_bad (MISSING_FNSTART
);
4076 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4077 as_bad (_("personality routine specified for cantunwind frame"));
4079 unwind
.personality_index
= -2;
4083 /* Parse a personalityindex directive. */
4086 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
4090 if (!unwind
.proc_start
)
4091 as_bad (MISSING_FNSTART
);
4093 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4094 as_bad (_("duplicate .personalityindex directive"));
4098 if (exp
.X_op
!= O_constant
4099 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
4101 as_bad (_("bad personality routine number"));
4102 ignore_rest_of_line ();
4106 unwind
.personality_index
= exp
.X_add_number
;
4108 demand_empty_rest_of_line ();
4112 /* Parse a personality directive. */
4115 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
4119 if (!unwind
.proc_start
)
4120 as_bad (MISSING_FNSTART
);
4122 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4123 as_bad (_("duplicate .personality directive"));
4125 c
= get_symbol_name (& name
);
4126 p
= input_line_pointer
;
4128 ++ input_line_pointer
;
4129 unwind
.personality_routine
= symbol_find_or_make (name
);
4131 demand_empty_rest_of_line ();
4135 /* Parse a directive saving core registers. */
4138 s_arm_unwind_save_core (void)
4144 range
= parse_reg_list (&input_line_pointer
, REGLIST_RN
);
4147 as_bad (_("expected register list"));
4148 ignore_rest_of_line ();
4152 demand_empty_rest_of_line ();
4154 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
4155 into .unwind_save {..., sp...}. We aren't bothered about the value of
4156 ip because it is clobbered by calls. */
4157 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
4158 && (range
& 0x3000) == 0x1000)
4160 unwind
.opcode_count
--;
4161 unwind
.sp_restored
= 0;
4162 range
= (range
| 0x2000) & ~0x1000;
4163 unwind
.pending_offset
= 0;
4169 /* See if we can use the short opcodes. These pop a block of up to 8
4170 registers starting with r4, plus maybe r14. */
4171 for (n
= 0; n
< 8; n
++)
4173 /* Break at the first non-saved register. */
4174 if ((range
& (1 << (n
+ 4))) == 0)
4177 /* See if there are any other bits set. */
4178 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
4180 /* Use the long form. */
4181 op
= 0x8000 | ((range
>> 4) & 0xfff);
4182 add_unwind_opcode (op
, 2);
4186 /* Use the short form. */
4188 op
= 0xa8; /* Pop r14. */
4190 op
= 0xa0; /* Do not pop r14. */
4192 add_unwind_opcode (op
, 1);
4199 op
= 0xb100 | (range
& 0xf);
4200 add_unwind_opcode (op
, 2);
4203 /* Record the number of bytes pushed. */
4204 for (n
= 0; n
< 16; n
++)
4206 if (range
& (1 << n
))
4207 unwind
.frame_size
+= 4;
4212 /* Parse a directive saving FPA registers. */
4215 s_arm_unwind_save_fpa (int reg
)
4221 /* Get Number of registers to transfer. */
4222 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4225 exp
.X_op
= O_illegal
;
4227 if (exp
.X_op
!= O_constant
)
4229 as_bad (_("expected , <constant>"));
4230 ignore_rest_of_line ();
4234 num_regs
= exp
.X_add_number
;
4236 if (num_regs
< 1 || num_regs
> 4)
4238 as_bad (_("number of registers must be in the range [1:4]"));
4239 ignore_rest_of_line ();
4243 demand_empty_rest_of_line ();
4248 op
= 0xb4 | (num_regs
- 1);
4249 add_unwind_opcode (op
, 1);
4254 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4255 add_unwind_opcode (op
, 2);
4257 unwind
.frame_size
+= num_regs
* 12;
4261 /* Parse a directive saving VFP registers for ARMv6 and above. */
4264 s_arm_unwind_save_vfp_armv6 (void)
4269 int num_vfpv3_regs
= 0;
4270 int num_regs_below_16
;
4271 bfd_boolean partial_match
;
4273 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
,
4277 as_bad (_("expected register list"));
4278 ignore_rest_of_line ();
4282 demand_empty_rest_of_line ();
4284 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4285 than FSTMX/FLDMX-style ones). */
4287 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4289 num_vfpv3_regs
= count
;
4290 else if (start
+ count
> 16)
4291 num_vfpv3_regs
= start
+ count
- 16;
4293 if (num_vfpv3_regs
> 0)
4295 int start_offset
= start
> 16 ? start
- 16 : 0;
4296 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4297 add_unwind_opcode (op
, 2);
4300 /* Generate opcode for registers numbered in the range 0 .. 15. */
4301 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4302 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4303 if (num_regs_below_16
> 0)
4305 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4306 add_unwind_opcode (op
, 2);
4309 unwind
.frame_size
+= count
* 8;
4313 /* Parse a directive saving VFP registers for pre-ARMv6. */
4316 s_arm_unwind_save_vfp (void)
4321 bfd_boolean partial_match
;
4323 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
,
4327 as_bad (_("expected register list"));
4328 ignore_rest_of_line ();
4332 demand_empty_rest_of_line ();
4337 op
= 0xb8 | (count
- 1);
4338 add_unwind_opcode (op
, 1);
4343 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4344 add_unwind_opcode (op
, 2);
4346 unwind
.frame_size
+= count
* 8 + 4;
/* NOTE(review): this region was damaged during extraction — logical
   source lines are split across physical lines, fused with their
   original line numbers, and some lines (braces, declarations, parts
   of comments) are missing entirely.  The text below is preserved
   byte-for-byte; restore it from a pristine copy before editing.
   Purpose (from the visible fragments): parse an iWMMXt data-register
   list for .unwind_save, record the frame-size growth (8 bytes per
   register), try to merge with the previous unwind opcode(s), then
   emit 0xc0../0xc600.. opcodes for contiguous register blocks in
   descending order.  */
4350 /* Parse a directive saving iWMMXt data registers. */
4353 s_arm_unwind_save_mmxwr (void)
4361 if (*input_line_pointer
== '{')
4362 input_line_pointer
++;
4366 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4370 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4375 as_tsktsk (_("register list not in ascending order"));
4378 if (*input_line_pointer
== '-')
4380 input_line_pointer
++;
4381 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4384 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4387 else if (reg
>= hi_reg
)
4389 as_bad (_("bad register range"));
4392 for (; reg
< hi_reg
; reg
++)
4396 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4398 skip_past_char (&input_line_pointer
, '}');
4400 demand_empty_rest_of_line ();
4402 /* Generate any deferred opcodes because we're going to be looking at
4404 flush_pending_unwind ();
4406 for (i
= 0; i
< 16; i
++)
4408 if (mask
& (1 << i
))
4409 unwind
.frame_size
+= 8;
4412 /* Attempt to combine with a previous opcode. We do this because gcc
4413 likes to output separate unwind directives for a single block of
4415 if (unwind
.opcode_count
> 0)
4417 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4418 if ((i
& 0xf8) == 0xc0)
4421 /* Only merge if the blocks are contiguous. */
4424 if ((mask
& 0xfe00) == (1 << 9))
4426 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4427 unwind
.opcode_count
--;
4430 else if (i
== 6 && unwind
.opcode_count
>= 2)
4432 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4436 op
= 0xffff << (reg
- 1);
4438 && ((mask
& op
) == (1u << (reg
- 1))))
4440 op
= (1 << (reg
+ i
+ 1)) - 1;
4441 op
&= ~((1 << reg
) - 1);
4443 unwind
.opcode_count
-= 2;
4450 /* We want to generate opcodes in the order the registers have been
4451 saved, ie. descending order. */
4452 for (reg
= 15; reg
>= -1; reg
--)
4454 /* Save registers in blocks. */
4456 || !(mask
& (1 << reg
)))
4458 /* We found an unsaved reg. Generate opcodes to save the
4465 op
= 0xc0 | (hi_reg
- 10);
4466 add_unwind_opcode (op
, 1);
4471 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4472 add_unwind_opcode (op
, 2);
4481 ignore_rest_of_line ();
/* NOTE(review): damaged extraction — preserved byte-for-byte; see the
   matching note on s_arm_unwind_save_mmxwr.  Purpose (from visible
   fragments): parse an iWMMXt control-register list for .unwind_save,
   add 4 bytes of frame size per saved register, and emit one 2-byte
   unwind opcode for the whole mask.  The register renumbering lines
   are among those dropped, so do not reconstruct without a pristine
   copy.  */
4485 s_arm_unwind_save_mmxwcg (void)
4492 if (*input_line_pointer
== '{')
4493 input_line_pointer
++;
4495 skip_whitespace (input_line_pointer
);
4499 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4503 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4509 as_tsktsk (_("register list not in ascending order"));
4512 if (*input_line_pointer
== '-')
4514 input_line_pointer
++;
4515 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4518 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4521 else if (reg
>= hi_reg
)
4523 as_bad (_("bad register range"));
4526 for (; reg
< hi_reg
; reg
++)
4530 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4532 skip_past_char (&input_line_pointer
, '}');
4534 demand_empty_rest_of_line ();
4536 /* Generate any deferred opcodes because we're going to be looking at
4538 flush_pending_unwind ();
4540 for (reg
= 0; reg
< 16; reg
++)
4542 if (mask
& (1 << reg
))
4543 unwind
.frame_size
+= 4;
4546 add_unwind_opcode (op
, 2);
4549 ignore_rest_of_line ();
4553 /* Parse an unwind_save directive.
4554 If the argument is non-zero, this is a .vsave directive. */
4557 s_arm_unwind_save (int arch_v6
)
4560 struct reg_entry
*reg
;
4561 bfd_boolean had_brace
= FALSE
;
4563 if (!unwind
.proc_start
)
4564 as_bad (MISSING_FNSTART
);
4566 /* Figure out what sort of save we have. */
4567 peek
= input_line_pointer
;
4575 reg
= arm_reg_parse_multi (&peek
);
4579 as_bad (_("register expected"));
4580 ignore_rest_of_line ();
4589 as_bad (_("FPA .unwind_save does not take a register list"));
4590 ignore_rest_of_line ();
4593 input_line_pointer
= peek
;
4594 s_arm_unwind_save_fpa (reg
->number
);
4598 s_arm_unwind_save_core ();
4603 s_arm_unwind_save_vfp_armv6 ();
4605 s_arm_unwind_save_vfp ();
4608 case REG_TYPE_MMXWR
:
4609 s_arm_unwind_save_mmxwr ();
4612 case REG_TYPE_MMXWCG
:
4613 s_arm_unwind_save_mmxwcg ();
4617 as_bad (_(".unwind_save does not support this kind of register"));
4618 ignore_rest_of_line ();
4623 /* Parse an unwind_movsp directive. */
4626 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4632 if (!unwind
.proc_start
)
4633 as_bad (MISSING_FNSTART
);
4635 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4638 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4639 ignore_rest_of_line ();
4643 /* Optional constant. */
4644 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4646 if (immediate_for_directive (&offset
) == FAIL
)
4652 demand_empty_rest_of_line ();
4654 if (reg
== REG_SP
|| reg
== REG_PC
)
4656 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4660 if (unwind
.fp_reg
!= REG_SP
)
4661 as_bad (_("unexpected .unwind_movsp directive"));
4663 /* Generate opcode to restore the value. */
4665 add_unwind_opcode (op
, 1);
4667 /* Record the information for later. */
4668 unwind
.fp_reg
= reg
;
4669 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4670 unwind
.sp_restored
= 1;
4673 /* Parse an unwind_pad directive. */
4676 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4680 if (!unwind
.proc_start
)
4681 as_bad (MISSING_FNSTART
);
4683 if (immediate_for_directive (&offset
) == FAIL
)
4688 as_bad (_("stack increment must be multiple of 4"));
4689 ignore_rest_of_line ();
4693 /* Don't generate any opcodes, just record the details for later. */
4694 unwind
.frame_size
+= offset
;
4695 unwind
.pending_offset
+= offset
;
4697 demand_empty_rest_of_line ();
4700 /* Parse an unwind_setfp directive. */
4703 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4709 if (!unwind
.proc_start
)
4710 as_bad (MISSING_FNSTART
);
4712 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4713 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4716 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4718 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4720 as_bad (_("expected <reg>, <reg>"));
4721 ignore_rest_of_line ();
4725 /* Optional constant. */
4726 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4728 if (immediate_for_directive (&offset
) == FAIL
)
4734 demand_empty_rest_of_line ();
4736 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4738 as_bad (_("register must be either sp or set by a previous"
4739 "unwind_movsp directive"));
4743 /* Don't generate any opcodes, just record the information for later. */
4744 unwind
.fp_reg
= fp_reg
;
4746 if (sp_reg
== REG_SP
)
4747 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4749 unwind
.fp_offset
-= offset
;
4752 /* Parse an unwind_raw directive. */
4755 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4758 /* This is an arbitrary limit. */
4759 unsigned char op
[16];
4762 if (!unwind
.proc_start
)
4763 as_bad (MISSING_FNSTART
);
4766 if (exp
.X_op
== O_constant
4767 && skip_past_comma (&input_line_pointer
) != FAIL
)
4769 unwind
.frame_size
+= exp
.X_add_number
;
4773 exp
.X_op
= O_illegal
;
4775 if (exp
.X_op
!= O_constant
)
4777 as_bad (_("expected <offset>, <opcode>"));
4778 ignore_rest_of_line ();
4784 /* Parse the opcode. */
4789 as_bad (_("unwind opcode too long"));
4790 ignore_rest_of_line ();
4792 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4794 as_bad (_("invalid unwind opcode"));
4795 ignore_rest_of_line ();
4798 op
[count
++] = exp
.X_add_number
;
4800 /* Parse the next byte. */
4801 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4807 /* Add the opcode bytes in reverse order. */
4809 add_unwind_opcode (op
[count
], 1);
4811 demand_empty_rest_of_line ();
4815 /* Parse a .eabi_attribute directive. */
4818 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4820 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4822 if (tag
>= 0 && tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4823 attributes_set_explicitly
[tag
] = 1;
/* NOTE(review): damaged extraction — preserved byte-for-byte.  Purpose
   (from visible fragments): emit a BFD_RELOC_ARM_(THM_)TLS_DESCSEQ fix
   at the current output position for a tlsdescseq annotation; the
   surrounding #ifdef structure is partially missing, so do not
   reconstruct without a pristine copy.  */
4826 /* Emit a tls fix for the symbol. */
4829 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4833 #ifdef md_flush_pending_output
4834 md_flush_pending_output ();
4837 #ifdef md_cons_align
4841 /* Since we're just labelling the code, there's no need to define a
4844 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4845 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4846 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4847 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4849 #endif /* OBJ_ELF */
4851 static void s_arm_arch (int);
4852 static void s_arm_object_arch (int);
4853 static void s_arm_cpu (int);
4854 static void s_arm_fpu (int);
4855 static void s_arm_arch_extension (int);
4860 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4867 if (exp
.X_op
== O_symbol
)
4868 exp
.X_op
= O_secrel
;
4870 emit_expr (&exp
, 4);
4872 while (*input_line_pointer
++ == ',');
4874 input_line_pointer
--;
4875 demand_empty_rest_of_line ();
/* NOTE(review): damaged extraction — preserved byte-for-byte.  This is
   the machine-specific pseudo-op dispatch table; several entries and
   the #ifdef lines that guard the ELF/PE/CCS groups were dropped by
   the extraction, so restore from a pristine copy before editing.  */
4879 /* This table describes all the machine specific pseudo-ops the assembler
4880 has to support. The fields are:
4881 pseudo-op name without dot
4882 function to call to execute this pseudo-op
4883 Integer arg to pass to the function. */
4885 const pseudo_typeS md_pseudo_table
[] =
4887 /* Never called because '.req' does not start a line. */
4888 { "req", s_req
, 0 },
4889 /* Following two are likewise never called. */
4892 { "unreq", s_unreq
, 0 },
4893 { "bss", s_bss
, 0 },
4894 { "align", s_align_ptwo
, 2 },
4895 { "arm", s_arm
, 0 },
4896 { "thumb", s_thumb
, 0 },
4897 { "code", s_code
, 0 },
4898 { "force_thumb", s_force_thumb
, 0 },
4899 { "thumb_func", s_thumb_func
, 0 },
4900 { "thumb_set", s_thumb_set
, 0 },
4901 { "even", s_even
, 0 },
4902 { "ltorg", s_ltorg
, 0 },
4903 { "pool", s_ltorg
, 0 },
4904 { "syntax", s_syntax
, 0 },
4905 { "cpu", s_arm_cpu
, 0 },
4906 { "arch", s_arm_arch
, 0 },
4907 { "object_arch", s_arm_object_arch
, 0 },
4908 { "fpu", s_arm_fpu
, 0 },
4909 { "arch_extension", s_arm_arch_extension
, 0 },
4911 { "word", s_arm_elf_cons
, 4 },
4912 { "long", s_arm_elf_cons
, 4 },
4913 { "inst.n", s_arm_elf_inst
, 2 },
4914 { "inst.w", s_arm_elf_inst
, 4 },
4915 { "inst", s_arm_elf_inst
, 0 },
4916 { "rel31", s_arm_rel31
, 0 },
4917 { "fnstart", s_arm_unwind_fnstart
, 0 },
4918 { "fnend", s_arm_unwind_fnend
, 0 },
4919 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4920 { "personality", s_arm_unwind_personality
, 0 },
4921 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4922 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4923 { "save", s_arm_unwind_save
, 0 },
4924 { "vsave", s_arm_unwind_save
, 1 },
4925 { "movsp", s_arm_unwind_movsp
, 0 },
4926 { "pad", s_arm_unwind_pad
, 0 },
4927 { "setfp", s_arm_unwind_setfp
, 0 },
4928 { "unwind_raw", s_arm_unwind_raw
, 0 },
4929 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4930 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4934 /* These are used for dwarf. */
4938 /* These are used for dwarf2. */
4939 { "file", dwarf2_directive_file
, 0 },
4940 { "loc", dwarf2_directive_loc
, 0 },
4941 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4943 { "extend", float_cons
, 'x' },
4944 { "ldouble", float_cons
, 'x' },
4945 { "packed", float_cons
, 'p' },
4947 {"secrel32", pe_directive_secrel
, 0},
4950 /* These are for compatibility with CodeComposer Studio. */
4951 {"ref", s_ccs_ref
, 0},
4952 {"def", s_ccs_def
, 0},
4953 {"asmfunc", s_ccs_asmfunc
, 0},
4954 {"endasmfunc", s_ccs_endasmfunc
, 0},
4959 /* Parser functions used exclusively in instruction operands. */
4961 /* Generic immediate-value read function for use in insn parsing.
4962 STR points to the beginning of the immediate (the leading #);
4963 VAL receives the value; if the value is outside [MIN, MAX]
4964 issue an error. PREFIX_OPT is true if the immediate prefix is
4968 parse_immediate (char **str
, int *val
, int min
, int max
,
4969 bfd_boolean prefix_opt
)
4973 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4974 if (exp
.X_op
!= O_constant
)
4976 inst
.error
= _("constant expression required");
4980 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4982 inst
.error
= _("immediate value out of range");
4986 *val
= exp
.X_add_number
;
4990 /* Less-generic immediate-value read function with the possibility of loading a
4991 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4992 instructions. Puts the result directly in inst.operands[i]. */
4995 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
4996 bfd_boolean allow_symbol_p
)
4999 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
5002 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
5004 if (exp_p
->X_op
== O_constant
)
5006 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
5007 /* If we're on a 64-bit host, then a 64-bit number can be returned using
5008 O_constant. We have to be careful not to break compilation for
5009 32-bit X_add_number, though. */
5010 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
5012 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
5013 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
5015 inst
.operands
[i
].regisimm
= 1;
5018 else if (exp_p
->X_op
== O_big
5019 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
5021 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
5023 /* Bignums have their least significant bits in
5024 generic_bignum[0]. Make sure we put 32 bits in imm and
5025 32 bits in reg, in a (hopefully) portable way. */
5026 gas_assert (parts
!= 0);
5028 /* Make sure that the number is not too big.
5029 PR 11972: Bignums can now be sign-extended to the
5030 size of a .octa so check that the out of range bits
5031 are all zero or all one. */
5032 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
5034 LITTLENUM_TYPE m
= -1;
5036 if (generic_bignum
[parts
* 2] != 0
5037 && generic_bignum
[parts
* 2] != m
)
5040 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
5041 if (generic_bignum
[j
] != generic_bignum
[j
-1])
5045 inst
.operands
[i
].imm
= 0;
5046 for (j
= 0; j
< parts
; j
++, idx
++)
5047 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
5048 << (LITTLENUM_NUMBER_OF_BITS
* j
);
5049 inst
.operands
[i
].reg
= 0;
5050 for (j
= 0; j
< parts
; j
++, idx
++)
5051 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
5052 << (LITTLENUM_NUMBER_OF_BITS
* j
);
5053 inst
.operands
[i
].regisimm
= 1;
5055 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
5063 /* Returns the pseudo-register number of an FPA immediate constant,
5064 or FAIL if there isn't a valid constant here. */
5067 parse_fpa_immediate (char ** str
)
5069 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5075 /* First try and match exact strings, this is to guarantee
5076 that some formats will work even for cross assembly. */
5078 for (i
= 0; fp_const
[i
]; i
++)
5080 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
5084 *str
+= strlen (fp_const
[i
]);
5085 if (is_end_of_line
[(unsigned char) **str
])
5091 /* Just because we didn't get a match doesn't mean that the constant
5092 isn't valid, just that it is in a format that we don't
5093 automatically recognize. Try parsing it with the standard
5094 expression routines. */
5096 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
5098 /* Look for a raw floating point number. */
5099 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
5100 && is_end_of_line
[(unsigned char) *save_in
])
5102 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5104 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5106 if (words
[j
] != fp_values
[i
][j
])
5110 if (j
== MAX_LITTLENUMS
)
5118 /* Try and parse a more complex expression, this will probably fail
5119 unless the code uses a floating point prefix (eg "0f"). */
5120 save_in
= input_line_pointer
;
5121 input_line_pointer
= *str
;
5122 if (expression (&exp
) == absolute_section
5123 && exp
.X_op
== O_big
5124 && exp
.X_add_number
< 0)
5126 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
5128 #define X_PRECISION 5
5129 #define E_PRECISION 15L
5130 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
5132 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5134 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5136 if (words
[j
] != fp_values
[i
][j
])
5140 if (j
== MAX_LITTLENUMS
)
5142 *str
= input_line_pointer
;
5143 input_line_pointer
= save_in
;
5150 *str
= input_line_pointer
;
5151 input_line_pointer
= save_in
;
5152 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  /* The five b bits must all equal the complement of B (bit 30), so
     the expected exponent-top pattern depends on bit 29.  */
  unsigned expected_top = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  return (imm & 0x7ffff) == 0
	 && ((imm & 0x7e000000) ^ expected_top) == 0;
}
5167 /* Detect the presence of a floating point or integer zero constant,
5171 parse_ifimm_zero (char **in
)
5175 if (!is_immediate_prefix (**in
))
5177 /* In unified syntax, all prefixes are optional. */
5178 if (!unified_syntax
)
5184 /* Accept #0x0 as a synonym for #0. */
5185 if (strncmp (*in
, "0x", 2) == 0)
5188 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
5193 error_code
= atof_generic (in
, ".", EXP_CHARS
,
5194 &generic_floating_point_number
);
5197 && generic_floating_point_number
.sign
== '+'
5198 && (generic_floating_point_number
.low
5199 > generic_floating_point_number
.leader
))
5205 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5206 0baBbbbbbc defgh000 00000000 00000000.
5207 The zero and minus-zero cases need special handling, since they can't be
5208 encoded in the "quarter-precision" float format, but can nonetheless be
5209 loaded as integer constants. */
5212 parse_qfloat_immediate (char **ccp
, int *immed
)
5216 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5217 int found_fpchar
= 0;
5219 skip_past_char (&str
, '#');
5221 /* We must not accidentally parse an integer as a floating-point number. Make
5222 sure that the value we parse is not an integer by checking for special
5223 characters '.' or 'e'.
5224 FIXME: This is a horrible hack, but doing better is tricky because type
5225 information isn't in a very usable state at parse time. */
5227 skip_whitespace (fpnum
);
5229 if (strncmp (fpnum
, "0x", 2) == 0)
5233 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
5234 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5244 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5246 unsigned fpword
= 0;
5249 /* Our FP word must be 32 bits (single-precision FP). */
5250 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5252 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5256 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
/* NOTE(review): damaged extraction — preserved byte-for-byte.  These
   fragments are the shift-operand type declarations: enum shift_kind,
   struct asm_shift_name (its name field line is missing), and enum
   parse_shift_mode used as the third argument to parse_shift.  */
5269 /* Shift operands. */
5272 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5275 struct asm_shift_name
5278 enum shift_kind kind
;
5281 /* Third argument to parse_shift. */
5282 enum parse_shift_mode
5284 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5285 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5286 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5287 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5288 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5291 /* Parse a <shift> specifier on an ARM data processing instruction.
5292 This has three forms:
5294 (LSL|LSR|ASL|ASR|ROR) Rs
5295 (LSL|LSR|ASL|ASR|ROR) #imm
5298 Note that ASL is assimilated to LSL in the instruction encoding, and
5299 RRX to ROR #0 (which cannot be written as such). */
5302 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5304 const struct asm_shift_name
*shift_name
;
5305 enum shift_kind shift
;
5310 for (p
= *str
; ISALPHA (*p
); p
++)
5315 inst
.error
= _("shift expression expected");
5319 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5322 if (shift_name
== NULL
)
5324 inst
.error
= _("shift expression expected");
5328 shift
= shift_name
->kind
;
5332 case NO_SHIFT_RESTRICT
:
5333 case SHIFT_IMMEDIATE
: break;
5335 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5336 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5338 inst
.error
= _("'LSL' or 'ASR' required");
5343 case SHIFT_LSL_IMMEDIATE
:
5344 if (shift
!= SHIFT_LSL
)
5346 inst
.error
= _("'LSL' required");
5351 case SHIFT_ASR_IMMEDIATE
:
5352 if (shift
!= SHIFT_ASR
)
5354 inst
.error
= _("'ASR' required");
5362 if (shift
!= SHIFT_RRX
)
5364 /* Whitespace can appear here if the next thing is a bare digit. */
5365 skip_whitespace (p
);
5367 if (mode
== NO_SHIFT_RESTRICT
5368 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5370 inst
.operands
[i
].imm
= reg
;
5371 inst
.operands
[i
].immisreg
= 1;
5373 else if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5376 inst
.operands
[i
].shift_kind
= shift
;
5377 inst
.operands
[i
].shifted
= 1;
5382 /* Parse a <shifter_operand> for an ARM data processing instruction:
5385 #<immediate>, <rotate>
5389 where <shift> is defined by parse_shift above, and <rotate> is a
5390 multiple of 2 between 0 and 30. Validation of immediate operands
5391 is deferred to md_apply_fix. */
5394 parse_shifter_operand (char **str
, int i
)
5399 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5401 inst
.operands
[i
].reg
= value
;
5402 inst
.operands
[i
].isreg
= 1;
5404 /* parse_shift will override this if appropriate */
5405 inst
.relocs
[0].exp
.X_op
= O_constant
;
5406 inst
.relocs
[0].exp
.X_add_number
= 0;
5408 if (skip_past_comma (str
) == FAIL
)
5411 /* Shift operation on register. */
5412 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5415 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_IMM_PREFIX
))
5418 if (skip_past_comma (str
) == SUCCESS
)
5420 /* #x, y -- ie explicit rotation by Y. */
5421 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5424 if (exp
.X_op
!= O_constant
|| inst
.relocs
[0].exp
.X_op
!= O_constant
)
5426 inst
.error
= _("constant expression expected");
5430 value
= exp
.X_add_number
;
5431 if (value
< 0 || value
> 30 || value
% 2 != 0)
5433 inst
.error
= _("invalid rotation");
5436 if (inst
.relocs
[0].exp
.X_add_number
< 0
5437 || inst
.relocs
[0].exp
.X_add_number
> 255)
5439 inst
.error
= _("invalid constant");
5443 /* Encode as specified. */
5444 inst
.operands
[i
].imm
= inst
.relocs
[0].exp
.X_add_number
| value
<< 7;
5448 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
5449 inst
.relocs
[0].pc_rel
= 0;
/* NOTE(review): damaged extraction — preserved byte-for-byte.  This is
   the group-relocation lookup table (struct group_reloc_table_entry
   plus its initializer); the textual relocation-name fields and most
   structural lines were dropped, so restore from a pristine copy
   before editing.  */
5453 /* Group relocation information. Each entry in the table contains the
5454 textual name of the relocation as may appear in assembler source
5455 and must end with a colon.
5456 Along with this textual name are the relocation codes to be used if
5457 the corresponding instruction is an ALU instruction (ADD or SUB only),
5458 an LDR, an LDRS, or an LDC. */
5460 struct group_reloc_table_entry
5471 /* Varieties of non-ALU group relocation. */
5478 static struct group_reloc_table_entry group_reloc_table
[] =
5479 { /* Program counter relative: */
5481 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5486 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5487 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5488 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5489 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5491 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5496 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5497 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5498 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5499 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5501 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5502 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5503 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5504 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5505 /* Section base relative */
5507 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5512 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5513 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5514 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5515 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5517 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5522 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5523 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5524 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5525 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5527 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5528 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5529 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5530 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5531 /* Absolute thumb alu relocations. */
5533 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5538 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5543 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5548 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5553 /* Given the address of a pointer pointing to the textual name of a group
5554 relocation as may appear in assembler source, attempt to find its details
5555 in group_reloc_table. The pointer will be updated to the character after
5556 the trailing colon. On failure, FAIL will be returned; SUCCESS
5557 otherwise. On success, *entry will be updated to point at the relevant
5558 group_reloc_table entry. */
5561 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5564 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5566 int length
= strlen (group_reloc_table
[i
].name
);
5568 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5569 && (*str
)[length
] == ':')
5571 *out
= &group_reloc_table
[i
];
5572 *str
+= (length
+ 1);
5580 /* Parse a <shifter_operand> for an ARM data processing instruction
5581 (as for parse_shifter_operand) where group relocations are allowed:
5584 #<immediate>, <rotate>
5585 #:<group_reloc>:<expression>
5589 where <group_reloc> is one of the strings defined in group_reloc_table.
5590 The hashes are optional.
5592 Everything else is as for parse_shifter_operand. */
5594 static parse_operand_result
5595 parse_shifter_operand_group_reloc (char **str
, int i
)
5597 /* Determine if we have the sequence of characters #: or just :
5598 coming next. If we do, then we check for a group relocation.
5599 If we don't, punt the whole lot to parse_shifter_operand. */
5601 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5602 || (*str
)[0] == ':')
5604 struct group_reloc_table_entry
*entry
;
5606 if ((*str
)[0] == '#')
5611 /* Try to parse a group relocation. Anything else is an error. */
5612 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5614 inst
.error
= _("unknown group relocation");
5615 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5618 /* We now have the group relocation table entry corresponding to
5619 the name in the assembler source. Next, we parse the expression. */
5620 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_NO_PREFIX
))
5621 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5623 /* Record the relocation type (always the ALU variant here). */
5624 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5625 gas_assert (inst
.relocs
[0].type
!= 0);
5627 return PARSE_OPERAND_SUCCESS
;
5630 return parse_shifter_operand (str
, i
) == SUCCESS
5631 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5633 /* Never reached. */
5636 /* Parse a Neon alignment expression. Information is written to
5637 inst.operands[i]. We assume the initial ':' has been skipped.
5639 align .imm = align << 8, .immisalign=1, .preind=0 */
5640 static parse_operand_result
5641 parse_neon_alignment (char **str
, int i
)
5646 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5648 if (exp
.X_op
!= O_constant
)
5650 inst
.error
= _("alignment must be constant");
5651 return PARSE_OPERAND_FAIL
;
5654 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5655 inst
.operands
[i
].immisalign
= 1;
5656 /* Alignments are not pre-indexes. */
5657 inst
.operands
[i
].preind
= 0;
5660 return PARSE_OPERAND_SUCCESS
;
/* Parse all forms of an ARM address expression.  Information is written
   to inst.operands[i] and/or inst.relocs[0].

   Preindexed addressing (.preind=1):

   [Rn, #offset]       .reg=Rn .relocs[0].exp=offset
   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .relocs[0].exp=shift_imm

   These three may have a trailing ! which causes .writeback to be set also.

   Postindexed addressing (.postind=1, .writeback=1):

   [Rn], #offset       .reg=Rn .relocs[0].exp=offset
   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .relocs[0].exp=shift_imm

   Unindexed addressing (.preind=0, .postind=0):

   [Rn], {option}      .reg=Rn .imm=option .immisreg=0

   Other:

   [Rn]{!}	       shorthand for [Rn,#0]{!}
   =immediate	       .isreg=0 .relocs[0].exp=immediate
   label	       .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label

   It is the caller's responsibility to check for addressing modes not
   supported by the instruction, and to set inst.relocs[0].type.  */

static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      /* No '[': either "=immediate" or a bare label.  */
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.relocs[0].pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* The base register is mandatory inside '[...]'.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* [Rn, ...  — some form of pre-indexed addressing.  */
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* Register offset, optionally shifted.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here.  This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Not a register offset: undo any '-' consumed above so the
	     expression parser sees the sign itself.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		case GROUP_LDR:
		  inst.relocs[0].type
		      = (bfd_reloc_code_real_type) entry->ldr_code;
		  break;

		case GROUP_LDRS:
		  inst.relocs[0].type
		      = (bfd_reloc_code_real_type) entry->ldrs_code;
		  break;

		case GROUP_LDC:
		  inst.relocs[0].type
		      = (bfd_reloc_code_real_type) entry->ldc_code;
		  break;

		default:
		  gas_assert (0);
		}

	      /* A zero code means this group relocation has no variant
		 for the current instruction class.  */
	      if (inst.relocs[0].type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;

	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here.  This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* [Rn], #offset or [Rn], +/-Rm — post-indexed addressing.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;

	      /* Undo a consumed '-' so the expression parser sees it.  */
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5953 parse_address (char **str
, int i
)
5955 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5959 static parse_operand_result
5960 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5962 return parse_address_main (str
, i
, 1, type
);
/* Parse an operand for a MOVW or MOVT instruction.  Accepts an optional
   '#' prefix and an optional ":lower16:" / ":upper16:" relocation
   specifier before the expression.  Sets inst.relocs[0] and returns
   SUCCESS or FAIL (with inst.error set).  */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.relocs[0].type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.relocs[0].type = BFD_RELOC_ARM_MOVT;

  /* A reloc specifier was recognised: step over it.  */
  if (inst.relocs[0].type != BFD_RELOC_UNUSED)
    {
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
    return FAIL;

  /* Without a :lower16:/:upper16: specifier only a plain 16-bit
     constant is acceptable.  */
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      if (inst.relocs[0].exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.relocs[0].exp.X_add_number < 0
	  || inst.relocs[0].exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
6005 /* Miscellaneous. */
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when the operand is the destination of an MSR (written),
   FALSE when it is the source of an MRS (read).  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: look the whole name up in the v7m special-register
	 table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  p += 4;
check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  Each bit
	     may appear at most once; a repeat sets the 0x20 "error" bit
	     which is rejected below.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated bits, partial nzcvq sets, and duplicated
	     'g'.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* CPSR/SPSR: look the suffix up in the PSR-fields table.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;		/* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6205 parse_sys_vldr_vstr (char **str
)
6214 {"FPSCR", 0x1, 0x0},
6215 {"FPSCR_nzcvqc", 0x2, 0x0},
6218 {"FPCXTNS", 0x6, 0x1},
6219 {"FPCXTS", 0x7, 0x1}
6221 char *op_end
= strchr (*str
, ',');
6222 size_t op_strlen
= op_end
- *str
;
6224 for (i
= 0; i
< sizeof (sysregs
) / sizeof (sysregs
[0]); i
++)
6226 if (!strncmp (*str
, sysregs
[i
].name
, op_strlen
))
6228 val
= sysregs
[i
].regl
| (sysregs
[i
].regh
<< 3);
/* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
   value suitable for splatting into the AIF field of the instruction.
   At least one of a/i/f must be present.  */
static int
parse_cps_flags (char **str)
{
  int val = 0;
  int saw_a_flag = 0;
  char *s = *str;

  for (;;)
    switch (*s++)
      {
      case '\0': case ',':
	goto done;

      /* a = imprecise-abort mask, i = IRQ mask, f = FIQ mask.  */
      case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
      case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
      case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;

      default:
	inst.error = _("unrecognized CPS flag");
	return FAIL;
      }

 done:
  if (saw_a_flag == 0)
    {
      inst.error = _("missing CPS flags");
      return FAIL;
    }

  /* Step back over the terminator consumed by the switch.  */
  *str = s - 1;
  return val;
}
6273 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6274 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6277 parse_endian_specifier (char **str
)
6282 if (strncasecmp (s
, "BE", 2))
6284 else if (strncasecmp (s
, "LE", 2))
6288 inst
.error
= _("valid endian specifiers are be or le");
6292 if (ISALNUM (s
[2]) || s
[2] == '_')
6294 inst
.error
= _("valid endian specifiers are be or le");
6299 return little_endian
;
/* Parse a rotation specifier: ROR #0, #8, #16, #24.  Returns a value
   suitable for poking into the rotate field of an sxt or sxta
   instruction, or FAIL on error (with inst.error set).  */
static int
parse_ror (char **str)
{
  int rot;
  char *s = *str;

  if (strncasecmp (s, "ROR", 3) == 0)
    s += 3;
  else
    {
      inst.error = _("missing rotation field after comma");
      return FAIL;
    }

  if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
    return FAIL;

  /* Map the byte rotation onto the 2-bit encoding.  */
  switch (rot)
    {
    case 0:  *str = s; return 0x0;
    case 8:  *str = s; return 0x1;
    case 16: *str = s; return 0x2;
    case 24: *str = s; return 0x3;

    default:
      inst.error = _("rotation can only be 0, 8, 16, or 24");
      return FAIL;
    }
}
/* Parse a conditional code (from conds[] below).  The value returned is in the
   range 0 .. 14, or FAIL.  */
static int
parse_cond (char **str)
{
  char *q;
  const struct asm_cond *c;
  int n;
  /* Condition codes are always 2 characters, so matching up to
     3 characters is sufficient.  */
  char cond[3];

  q = *str;
  n = 0;
  while (ISALPHA (*q) && n < 3)
    {
      /* Lowercase so the hash lookup is case-insensitive.  */
      cond[n] = TOLOWER (*q);
      q++;
      n++;
    }

  c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
  if (!c)
    {
      inst.error = _("condition required");
      return FAIL;
    }

  *str = q;
  return c->value;
}
/* Parse an option for a barrier instruction.  Returns the encoding for the
   option, or FAIL.  */
static int
parse_barrier (char **str)
{
  char *p, *q;
  const struct asm_barrier_opt *o;

  /* Scan the alphabetic option name.  */
  p = q = *str;
  while (ISALPHA (*q))
    q++;

  o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
						    q - p);
  if (!o)
    return FAIL;

  /* The option may require an architecture extension.  */
  if (!mark_feature_used (&o->arch))
    return FAIL;

  *str = q;
  return o->value;
}
/* Parse the operands of a table branch instruction.  Similar to a memory
   operand.  Accepts [Rn, Rm] or [Rn, Rm, LSL #1]; fills in
   inst.operands[0] and returns SUCCESS or FAIL.  */
static int
parse_tb (char **str)
{
  char * p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      inst.error = _("'[' expected");
      return FAIL;
    }

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  inst.operands[0].reg = reg;

  if (skip_past_comma (&p) == FAIL)
    {
      inst.error = _("',' expected");
      return FAIL;
    }

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  inst.operands[0].imm = reg;

  if (skip_past_comma (&p) == SUCCESS)
    {
      /* Only "LSL #1" is acceptable (TBH halfword scaling).  */
      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
	return FAIL;
      if (inst.relocs[0].exp.X_add_number != 1)
	{
	  inst.error = _("invalid shift");
	  return FAIL;
	}
      inst.operands[0].shifted = 1;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return FAIL;
    }
  *str = p;
  return SUCCESS;
}
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  */
static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* Case 5: a second ARM register follows.  */
	      i++;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two trailing ARM registers.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7, 16: first operand is an ARM core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second single-precision register follows.  */
	      i++;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else
	{
	  first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	  return FAIL;
	}
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	   != FAIL)
    {
      /* Case 13: single-precision destination.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isvec = 1;
      inst.operands[i].issingle = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i].present = 1;
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM code goes in the low
   half-word, the Thumb code in the high half-word.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNDMQ,	/* Neon double precision (0..31) or MVE vector register.  */
  OP_RNDMQR,	/* Neon double precision (0..31), MVE vector or ARM register.
		 */
  OP_RNQ,	/* Neon quad precision register */
  OP_RNQMQ,	/* Neon quad or MVE vector register.  */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNSD,	/* Neon single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNDQMQ,	/* Neon double, quad or MVE vector register.  */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_RNSDQMQ,	/* Neon single, double or quad register or MVE vector register
		 */
  OP_RNSDQMQR,	/* Neon single, double or quad register, MVE vector register or
		   ARM register.  */
  OP_RMQ,	/* MVE vector register.  */

  /* New operands for Armv8.1-M Mainline.  */
  OP_LR,	/* ARM LR register */
  OP_RRe,	/* ARM register, only even numbered.  */
  OP_RRo,	/* ARM register, only odd numbered, not r13 or r15.  */
  OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */

  OP_REGLST,	/* ARM register list */
  OP_CLRMLST,	/* CLRM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */
  OP_VRSDVLST,	/* VFP single or double-precision register list and VPR */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig, /* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
  OP_VLDR,	/* VLDR operand.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_EXPs,	/* same, with optional non-first operand relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,	 /*				1 .. 32 */
  OP_oI32z,	 /*				0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oLR,	 /* ARM LR register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,	 /* Optional Neon double precision register */
  OP_oRNQ,	 /* Optional Neon quad precision register */
  OP_oRNDQMQ,	 /* Optional Neon double, quad or MVE vector register.  */
  OP_oRNDQ,	 /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oRNSDQMQ,	 /* Optional single, double or quad register or MVE vector
		    register.  */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* All codes from here on are optional; parse_operands uses this to
     decide where backtracking may begin.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6835 /* Generic instruction operand parser. This does no encoding and no
6836 semantic validation; it merely squirrels values away in the inst
6837 structure. Returns SUCCESS or FAIL depending on whether the
6838 specified grammar matched. */
6840 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6842 unsigned const int *upat
= pattern
;
6843 char *backtrack_pos
= 0;
6844 const char *backtrack_error
= 0;
6845 int i
, val
= 0, backtrack_index
= 0;
6846 enum arm_reg_type rtype
;
6847 parse_operand_result result
;
6848 unsigned int op_parse_code
;
6849 bfd_boolean partial_match
;
6851 #define po_char_or_fail(chr) \
6854 if (skip_past_char (&str, chr) == FAIL) \
6859 #define po_reg_or_fail(regtype) \
6862 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6863 & inst.operands[i].vectype); \
6866 first_error (_(reg_expected_msgs[regtype])); \
6869 inst.operands[i].reg = val; \
6870 inst.operands[i].isreg = 1; \
6871 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6872 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6873 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6874 || rtype == REG_TYPE_VFD \
6875 || rtype == REG_TYPE_NQ); \
6879 #define po_reg_or_goto(regtype, label) \
6882 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6883 & inst.operands[i].vectype); \
6887 inst.operands[i].reg = val; \
6888 inst.operands[i].isreg = 1; \
6889 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6890 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6891 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6892 || rtype == REG_TYPE_VFD \
6893 || rtype == REG_TYPE_NQ); \
6897 #define po_imm_or_fail(min, max, popt) \
6900 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6902 inst.operands[i].imm = val; \
6906 #define po_scalar_or_goto(elsz, label) \
6909 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6912 inst.operands[i].reg = val; \
6913 inst.operands[i].isscalar = 1; \
6917 #define po_misc_or_fail(expr) \
6925 #define po_misc_or_fail_no_backtrack(expr) \
6929 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6930 backtrack_pos = 0; \
6931 if (result != PARSE_OPERAND_SUCCESS) \
6936 #define po_barrier_or_imm(str) \
6939 val = parse_barrier (&str); \
6940 if (val == FAIL && ! ISALPHA (*str)) \
6943 /* ISB can only take SY as an option. */ \
6944 || ((inst.instruction & 0xf0) == 0x60 \
6947 inst.error = _("invalid barrier type"); \
6948 backtrack_pos = 0; \
6954 skip_whitespace (str
);
6956 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6958 op_parse_code
= upat
[i
];
6959 if (op_parse_code
>= 1<<16)
6960 op_parse_code
= thumb
? (op_parse_code
>> 16)
6961 : (op_parse_code
& ((1<<16)-1));
6963 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6965 /* Remember where we are in case we need to backtrack. */
6966 gas_assert (!backtrack_pos
);
6967 backtrack_pos
= str
;
6968 backtrack_error
= inst
.error
;
6969 backtrack_index
= i
;
6972 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6973 po_char_or_fail (',');
6975 switch (op_parse_code
)
6987 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6988 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6989 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6990 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6991 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6992 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6995 po_reg_or_goto (REG_TYPE_RN
, try_rndmq
);
6999 po_reg_or_goto (REG_TYPE_MQ
, try_rnd
);
7002 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
7004 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
7006 /* Also accept generic coprocessor regs for unknown registers. */
7008 po_reg_or_fail (REG_TYPE_CN
);
7010 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
7011 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
7012 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
7013 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
7014 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
7015 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
7016 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
7017 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
7018 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
7019 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
7022 po_reg_or_goto (REG_TYPE_MQ
, try_nq
);
7025 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
7026 case OP_RNSD
: po_reg_or_fail (REG_TYPE_NSD
); break;
7029 po_reg_or_goto (REG_TYPE_MQ
, try_rndq
);
7033 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
7034 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
7036 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
7038 po_reg_or_goto (REG_TYPE_RN
, try_mq
);
7043 po_reg_or_goto (REG_TYPE_MQ
, try_nsdq2
);
7046 po_reg_or_fail (REG_TYPE_NSDQ
);
7050 po_reg_or_fail (REG_TYPE_MQ
);
7052 /* Neon scalar. Using an element size of 8 means that some invalid
7053 scalars are accepted here, so deal with those in later code. */
7054 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
7058 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
7061 po_imm_or_fail (0, 0, TRUE
);
7066 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
7071 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
7074 if (parse_ifimm_zero (&str
))
7075 inst
.operands
[i
].imm
= 0;
7079 = _("only floating point zero is allowed as immediate value");
7087 po_scalar_or_goto (8, try_rr
);
7090 po_reg_or_fail (REG_TYPE_RN
);
7096 po_scalar_or_goto (8, try_nsdq
);
7099 po_reg_or_fail (REG_TYPE_NSDQ
);
7105 po_scalar_or_goto (8, try_s_scalar
);
7108 po_scalar_or_goto (4, try_nsd
);
7111 po_reg_or_fail (REG_TYPE_NSD
);
7117 po_scalar_or_goto (8, try_ndq
);
7120 po_reg_or_fail (REG_TYPE_NDQ
);
7126 po_scalar_or_goto (8, try_vfd
);
7129 po_reg_or_fail (REG_TYPE_VFD
);
7134 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
7135 not careful then bad things might happen. */
7136 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
7141 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
7144 /* There's a possibility of getting a 64-bit immediate here, so
7145 we need special handling. */
7146 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
7149 inst
.error
= _("immediate value is out of range");
7157 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
7160 po_imm_or_fail (0, 63, TRUE
);
7165 po_char_or_fail ('[');
7166 po_reg_or_fail (REG_TYPE_RN
);
7167 po_char_or_fail (']');
7173 po_reg_or_fail (REG_TYPE_RN
);
7174 if (skip_past_char (&str
, '!') == SUCCESS
)
7175 inst
.operands
[i
].writeback
= 1;
7179 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
7180 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
7181 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
7182 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
7183 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
7184 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
7185 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
7186 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
7187 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
7188 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
7189 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
7190 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
7192 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
7194 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
7195 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
7197 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
7198 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
7199 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
7200 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
7202 /* Immediate variants */
7204 po_char_or_fail ('{');
7205 po_imm_or_fail (0, 255, TRUE
);
7206 po_char_or_fail ('}');
7210 /* The expression parser chokes on a trailing !, so we have
7211 to find it first and zap it. */
7214 while (*s
&& *s
!= ',')
7219 inst
.operands
[i
].writeback
= 1;
7221 po_imm_or_fail (0, 31, TRUE
);
7229 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7234 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7239 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7241 if (inst
.relocs
[0].exp
.X_op
== O_symbol
)
7243 val
= parse_reloc (&str
);
7246 inst
.error
= _("unrecognized relocation suffix");
7249 else if (val
!= BFD_RELOC_UNUSED
)
7251 inst
.operands
[i
].imm
= val
;
7252 inst
.operands
[i
].hasreloc
= 1;
7258 po_misc_or_fail (my_get_expression (&inst
.relocs
[i
].exp
, &str
,
7260 if (inst
.relocs
[i
].exp
.X_op
== O_symbol
)
7262 inst
.operands
[i
].hasreloc
= 1;
7264 else if (inst
.relocs
[i
].exp
.X_op
== O_constant
)
7266 inst
.operands
[i
].imm
= inst
.relocs
[i
].exp
.X_add_number
;
7267 inst
.operands
[i
].hasreloc
= 0;
7271 /* Operand for MOVW or MOVT. */
7273 po_misc_or_fail (parse_half (&str
));
7276 /* Register or expression. */
7277 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
7278 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
7280 /* Register or immediate. */
7281 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
7282 I0
: po_imm_or_fail (0, 0, FALSE
); break;
7284 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
7286 if (!is_immediate_prefix (*str
))
7289 val
= parse_fpa_immediate (&str
);
7292 /* FPA immediates are encoded as registers 8-15.
7293 parse_fpa_immediate has already applied the offset. */
7294 inst
.operands
[i
].reg
= val
;
7295 inst
.operands
[i
].isreg
= 1;
7298 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
7299 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
7301 /* Two kinds of register. */
7304 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7306 || (rege
->type
!= REG_TYPE_MMXWR
7307 && rege
->type
!= REG_TYPE_MMXWC
7308 && rege
->type
!= REG_TYPE_MMXWCG
))
7310 inst
.error
= _("iWMMXt data or control register expected");
7313 inst
.operands
[i
].reg
= rege
->number
;
7314 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
7320 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7322 || (rege
->type
!= REG_TYPE_MMXWC
7323 && rege
->type
!= REG_TYPE_MMXWCG
))
7325 inst
.error
= _("iWMMXt control register expected");
7328 inst
.operands
[i
].reg
= rege
->number
;
7329 inst
.operands
[i
].isreg
= 1;
7334 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
7335 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
7336 case OP_oROR
: val
= parse_ror (&str
); break;
7337 case OP_COND
: val
= parse_cond (&str
); break;
7338 case OP_oBARRIER_I15
:
7339 po_barrier_or_imm (str
); break;
7341 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
7347 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
7348 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
7350 inst
.error
= _("Banked registers are not available with this "
7356 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7360 po_reg_or_goto (REG_TYPE_VFSD
, try_sysreg
);
7363 val
= parse_sys_vldr_vstr (&str
);
7367 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7370 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7372 if (strncasecmp (str
, "APSR_", 5) == 0)
7379 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7380 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7381 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7382 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7383 default: found
= 16;
7387 inst
.operands
[i
].isvec
= 1;
7388 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7389 inst
.operands
[i
].reg
= REG_PC
;
7396 po_misc_or_fail (parse_tb (&str
));
7399 /* Register lists. */
7401 val
= parse_reg_list (&str
, REGLIST_RN
);
7404 inst
.operands
[i
].writeback
= 1;
7410 val
= parse_reg_list (&str
, REGLIST_CLRM
);
7414 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
,
7419 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
,
7424 /* Allow Q registers too. */
7425 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7426 REGLIST_NEON_D
, &partial_match
);
7430 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7431 REGLIST_VFP_S
, &partial_match
);
7432 inst
.operands
[i
].issingle
= 1;
7437 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7438 REGLIST_VFP_D_VPR
, &partial_match
);
7439 if (val
== FAIL
&& !partial_match
)
7442 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7443 REGLIST_VFP_S_VPR
, &partial_match
);
7444 inst
.operands
[i
].issingle
= 1;
7449 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7450 REGLIST_NEON_D
, &partial_match
);
7454 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7455 &inst
.operands
[i
].vectype
);
7458 /* Addressing modes */
7460 po_misc_or_fail (parse_address (&str
, i
));
7464 po_misc_or_fail_no_backtrack (
7465 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7469 po_misc_or_fail_no_backtrack (
7470 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7474 po_misc_or_fail_no_backtrack (
7475 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7479 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7483 po_misc_or_fail_no_backtrack (
7484 parse_shifter_operand_group_reloc (&str
, i
));
7488 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7492 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7496 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7500 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7503 /* Various value-based sanity checks and shared operations. We
7504 do not signal immediate failures for the register constraints;
7505 this allows a syntax error to take precedence. */
7506 switch (op_parse_code
)
7514 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7515 inst
.error
= BAD_PC
;
7520 if (inst
.operands
[i
].isreg
)
7522 if (inst
.operands
[i
].reg
== REG_PC
)
7523 inst
.error
= BAD_PC
;
7524 else if (inst
.operands
[i
].reg
== REG_SP
7525 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7526 relaxed since ARMv8-A. */
7527 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
7530 inst
.error
= BAD_SP
;
7536 if (inst
.operands
[i
].isreg
7537 && inst
.operands
[i
].reg
== REG_PC
7538 && (inst
.operands
[i
].writeback
|| thumb
))
7539 inst
.error
= BAD_PC
;
7543 if (inst
.operands
[i
].isreg
)
7552 case OP_oBARRIER_I15
:
7563 inst
.operands
[i
].imm
= val
;
7568 if (inst
.operands
[i
].reg
!= REG_LR
)
7569 inst
.error
= _("operand must be LR register");
7573 if (inst
.operands
[i
].isreg
7574 && (inst
.operands
[i
].reg
& 0x00000001) != 0)
7575 inst
.error
= BAD_ODD
;
7579 if (inst
.operands
[i
].isreg
)
7581 if ((inst
.operands
[i
].reg
& 0x00000001) != 1)
7582 inst
.error
= BAD_EVEN
;
7583 else if (inst
.operands
[i
].reg
== REG_SP
)
7584 as_tsktsk (MVE_BAD_SP
);
7585 else if (inst
.operands
[i
].reg
== REG_PC
)
7586 inst
.error
= BAD_PC
;
7594 /* If we get here, this operand was successfully parsed. */
7595 inst
.operands
[i
].present
= 1;
7599 inst
.error
= BAD_ARGS
;
7604 /* The parse routine should already have set inst.error, but set a
7605 default here just in case. */
7607 inst
.error
= BAD_SYNTAX
;
7611 /* Do not backtrack over a trailing optional argument that
7612 absorbed some text. We will only fail again, with the
7613 'garbage following instruction' error message, which is
7614 probably less helpful than the current one. */
7615 if (backtrack_index
== i
&& backtrack_pos
!= str
7616 && upat
[i
+1] == OP_stop
)
7619 inst
.error
= BAD_SYNTAX
;
7623 /* Try again, skipping the optional argument at backtrack_pos. */
7624 str
= backtrack_pos
;
7625 inst
.error
= backtrack_error
;
7626 inst
.operands
[backtrack_index
].present
= 0;
7627 i
= backtrack_index
;
7631 /* Check that we have parsed all the arguments. */
7632 if (*str
!= '\0' && !inst
.error
)
7633 inst
.error
= _("garbage following instruction");
7635 return inst
.error
? FAIL
: SUCCESS
;
7638 #undef po_char_or_fail
7639 #undef po_reg_or_fail
7640 #undef po_reg_or_goto
7641 #undef po_imm_or_fail
7642 #undef po_scalar_or_fail
7643 #undef po_barrier_or_imm
7645 /* Shorthand macro for instruction encoding functions issuing errors. */
7646 #define constraint(expr, err) \
7657 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7658 instructions are unpredictable if these registers are used. This
7659 is the BadReg predicate in ARM's Thumb-2 documentation.
7661 Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
7662 places, while the restriction on REG_SP was relaxed since ARMv8-A. */
7663 #define reject_bad_reg(reg) \
7665 if (reg == REG_PC) \
7667 inst.error = BAD_PC; \
7670 else if (reg == REG_SP \
7671 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
7673 inst.error = BAD_SP; \
7678 /* If REG is R13 (the stack pointer), warn that its use is
7680 #define warn_deprecated_sp(reg) \
7682 if (warn_on_deprecated && reg == REG_SP) \
7683 as_tsktsk (_("use of r13 is deprecated")); \
7686 /* Functions for operand encoding. ARM, then Thumb. */
7688 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7690 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7692 The only binary encoding difference is the Coprocessor number. Coprocessor
7693 9 is used for half-precision calculations or conversions. The format of the
7694 instruction is the same as the equivalent Coprocessor 10 instruction that
7695 exists for Single-Precision operation. */
7698 do_scalar_fp16_v82_encode (void)
7700 if (inst
.cond
< COND_ALWAYS
)
7701 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7702 " the behaviour is UNPREDICTABLE"));
7703 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
7706 inst
.instruction
= (inst
.instruction
& 0xfffff0ff) | 0x900;
7707 mark_feature_used (&arm_ext_fp16
);
7710 /* If VAL can be encoded in the immediate field of an ARM instruction,
7711 return the encoded form. Otherwise, return FAIL. */
7714 encode_arm_immediate (unsigned int val
)
7721 for (i
= 2; i
< 32; i
+= 2)
7722 if ((a
= rotate_left (val
, i
)) <= 0xff)
7723 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7728 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7729 return the encoded form. Otherwise, return FAIL. */
7731 encode_thumb32_immediate (unsigned int val
)
7738 for (i
= 1; i
<= 24; i
++)
7741 if ((val
& ~(0xff << i
)) == 0)
7742 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7746 if (val
== ((a
<< 16) | a
))
7748 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7752 if (val
== ((a
<< 16) | a
))
7753 return 0x200 | (a
>> 8);
7757 /* Encode a VFP SP or DP register number into inst.instruction. */
7760 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7762 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7765 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7768 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7771 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7776 first_error (_("D register out of range for selected VFP version"));
7784 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7788 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7792 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7796 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7800 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7804 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7812 /* Encode a <shift> in an ARM-format instruction. The immediate,
7813 if any, is handled by md_apply_fix. */
7815 encode_arm_shift (int i
)
7817 /* register-shifted register. */
7818 if (inst
.operands
[i
].immisreg
)
7821 for (op_index
= 0; op_index
<= i
; ++op_index
)
7823 /* Check the operand only when it's presented. In pre-UAL syntax,
7824 if the destination register is the same as the first operand, two
7825 register form of the instruction can be used. */
7826 if (inst
.operands
[op_index
].present
&& inst
.operands
[op_index
].isreg
7827 && inst
.operands
[op_index
].reg
== REG_PC
)
7828 as_warn (UNPRED_REG ("r15"));
7831 if (inst
.operands
[i
].imm
== REG_PC
)
7832 as_warn (UNPRED_REG ("r15"));
7835 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7836 inst
.instruction
|= SHIFT_ROR
<< 5;
7839 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7840 if (inst
.operands
[i
].immisreg
)
7842 inst
.instruction
|= SHIFT_BY_REG
;
7843 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7846 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
7851 encode_arm_shifter_operand (int i
)
7853 if (inst
.operands
[i
].isreg
)
7855 inst
.instruction
|= inst
.operands
[i
].reg
;
7856 encode_arm_shift (i
);
7860 inst
.instruction
|= INST_IMMEDIATE
;
7861 if (inst
.relocs
[0].type
!= BFD_RELOC_ARM_IMMEDIATE
)
7862 inst
.instruction
|= inst
.operands
[i
].imm
;
7866 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7868 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7871 Generate an error if the operand is not a register. */
7872 constraint (!inst
.operands
[i
].isreg
,
7873 _("Instruction does not support =N addresses"));
7875 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7877 if (inst
.operands
[i
].preind
)
7881 inst
.error
= _("instruction does not accept preindexed addressing");
7884 inst
.instruction
|= PRE_INDEX
;
7885 if (inst
.operands
[i
].writeback
)
7886 inst
.instruction
|= WRITE_BACK
;
7889 else if (inst
.operands
[i
].postind
)
7891 gas_assert (inst
.operands
[i
].writeback
);
7893 inst
.instruction
|= WRITE_BACK
;
7895 else /* unindexed - only for coprocessor */
7897 inst
.error
= _("instruction does not accept unindexed addressing");
7901 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7902 && (((inst
.instruction
& 0x000f0000) >> 16)
7903 == ((inst
.instruction
& 0x0000f000) >> 12)))
7904 as_warn ((inst
.instruction
& LOAD_BIT
)
7905 ? _("destination register same as write-back base")
7906 : _("source register same as write-back base"));
7909 /* inst.operands[i] was set up by parse_address. Encode it into an
7910 ARM-format mode 2 load or store instruction. If is_t is true,
7911 reject forms that cannot be used with a T instruction (i.e. not
7914 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7916 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7918 encode_arm_addr_mode_common (i
, is_t
);
7920 if (inst
.operands
[i
].immisreg
)
7922 constraint ((inst
.operands
[i
].imm
== REG_PC
7923 || (is_pc
&& inst
.operands
[i
].writeback
)),
7925 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7926 inst
.instruction
|= inst
.operands
[i
].imm
;
7927 if (!inst
.operands
[i
].negative
)
7928 inst
.instruction
|= INDEX_UP
;
7929 if (inst
.operands
[i
].shifted
)
7931 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7932 inst
.instruction
|= SHIFT_ROR
<< 5;
7935 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7936 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
7940 else /* immediate offset in inst.relocs[0] */
7942 if (is_pc
&& !inst
.relocs
[0].pc_rel
)
7944 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7946 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7947 cannot use PC in addressing.
7948 PC cannot be used in writeback addressing, either. */
7949 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7952 /* Use of PC in str is deprecated for ARMv7. */
7953 if (warn_on_deprecated
7955 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7956 as_tsktsk (_("use of PC in this instruction is deprecated"));
7959 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
7961 /* Prefer + for zero encoded value. */
7962 if (!inst
.operands
[i
].negative
)
7963 inst
.instruction
|= INDEX_UP
;
7964 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM
;
7969 /* inst.operands[i] was set up by parse_address. Encode it into an
7970 ARM-format mode 3 load or store instruction. Reject forms that
7971 cannot be used with such instructions. If is_t is true, reject
7972 forms that cannot be used with a T instruction (i.e. not
7975 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7977 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7979 inst
.error
= _("instruction does not accept scaled register index");
7983 encode_arm_addr_mode_common (i
, is_t
);
7985 if (inst
.operands
[i
].immisreg
)
7987 constraint ((inst
.operands
[i
].imm
== REG_PC
7988 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
7990 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
7992 inst
.instruction
|= inst
.operands
[i
].imm
;
7993 if (!inst
.operands
[i
].negative
)
7994 inst
.instruction
|= INDEX_UP
;
7996 else /* immediate offset in inst.relocs[0] */
7998 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.relocs
[0].pc_rel
7999 && inst
.operands
[i
].writeback
),
8001 inst
.instruction
|= HWOFFSET_IMM
;
8002 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
8004 /* Prefer + for zero encoded value. */
8005 if (!inst
.operands
[i
].negative
)
8006 inst
.instruction
|= INDEX_UP
;
8008 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM8
;
8013 /* Write immediate bits [7:0] to the following locations:
8015 |28/24|23 19|18 16|15 4|3 0|
8016 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
8018 This function is used by VMOV/VMVN/VORR/VBIC. */
8021 neon_write_immbits (unsigned immbits
)
8023 inst
.instruction
|= immbits
& 0xf;
8024 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
8025 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
8028 /* Invert low-order SIZE bits of XHI:XLO. */
static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  /* Complement only the low SIZE bits; the high word participates only
     for 64-bit elements.  */
  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      break;
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
8065 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
static int
neon_bits_same_in_bytes (unsigned imm)
{
  /* True when every byte of IMM is homogeneous, i.e. each byte is
     either 0x00 or 0xff.  */
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}
8077 /* For immediate of above form, return 0bABCD. */
static unsigned
neon_squash_bits (unsigned imm)
{
  /* Collapse bit 0 of each byte into a 4-bit value 0bABCD
     (byte 3 -> A ... byte 0 -> D).  */
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
	 | ((imm & 0x01000000) >> 21);
}
8086 /* Compress quarter-float representation to 0b...000 abcdefgh. */
static unsigned
neon_qfloat_bits (unsigned imm)
{
  /* Keep the sign bit plus the 7 bits below the (redundant) top
     exponent bits of a single-precision quarter-float.  */
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
}
8094 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
8095 the instruction. *OP is passed as the initial value of the op field, and
8096 may be set to a different value depending on the constant (i.e.
8097 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
8098 MVN). If the immediate looks like a repeated pattern then also
8099 try smaller element sizes. */
8102 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
8103 unsigned *immbits
, int *op
, int size
,
8104 enum neon_el_type type
)
8106 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
8108 if (type
== NT_float
&& !float_p
)
8111 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
8113 if (size
!= 32 || *op
== 1)
8115 *immbits
= neon_qfloat_bits (immlo
);
8121 if (neon_bits_same_in_bytes (immhi
)
8122 && neon_bits_same_in_bytes (immlo
))
8126 *immbits
= (neon_squash_bits (immhi
) << 4)
8127 | neon_squash_bits (immlo
);
8138 if (immlo
== (immlo
& 0x000000ff))
8143 else if (immlo
== (immlo
& 0x0000ff00))
8145 *immbits
= immlo
>> 8;
8148 else if (immlo
== (immlo
& 0x00ff0000))
8150 *immbits
= immlo
>> 16;
8153 else if (immlo
== (immlo
& 0xff000000))
8155 *immbits
= immlo
>> 24;
8158 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
8160 *immbits
= (immlo
>> 8) & 0xff;
8163 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
8165 *immbits
= (immlo
>> 16) & 0xff;
8169 if ((immlo
& 0xffff) != (immlo
>> 16))
8176 if (immlo
== (immlo
& 0x000000ff))
8181 else if (immlo
== (immlo
& 0x0000ff00))
8183 *immbits
= immlo
>> 8;
8187 if ((immlo
& 0xff) != (immlo
>> 8))
8192 if (immlo
== (immlo
& 0x000000ff))
8194 /* Don't allow MVN with 8-bit immediate. */
8204 #if defined BFD_HOST_64_BIT
8205 /* Returns TRUE if double precision value V may be cast
8206 to single precision without loss of accuracy. */
8209 is_double_a_single (bfd_int64_t v
)
8211 int exp
= (int)((v
>> 52) & 0x7FF);
8212 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
8214 return (exp
== 0 || exp
== 0x7FF
8215 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
8216 && (mantissa
& 0x1FFFFFFFl
) == 0;
8219 /* Returns a double precision value casted to single precision
8220 (ignoring the least significant bits in exponent and mantissa). */
/* NOTE(review): the body below is an elided fragment -- original lines
   8228-8232, 8234-8241 and 8243-8247 (the exponent special-case branches
   and the mantissa truncation) are missing from this view.  Do not treat
   what follows as the complete function; only the sign/exponent/mantissa
   field extraction, the exponent re-bias and the final repack are
   visible.  */
8223 double_to_single (bfd_int64_t v
)
/* Extract sign (bit 63), biased exponent (bits 62:52) and the 52-bit
   mantissa from the IEEE-754 binary64 bit pattern.  */
8225 int sign
= (int) ((v
>> 63) & 1l);
8226 int exp
= (int) ((v
>> 52) & 0x7FF);
8227 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
/* Re-bias the exponent from binary64 (1023) to binary32 (127).
   NOTE(review): the guarding branch for exp == 0x7FF and the
   overflow/underflow handling around this line are elided here.  */
8233 exp
= exp
- 1023 + 127;
8242 /* No denormalized numbers. */
/* Repack as a binary32 bit pattern.  NOTE(review): the >>29 mantissa
   narrowing that must precede this return is in the elided lines.  */
8248 return (sign
<< 31) | (exp
<< 23) | mantissa
;
8250 #endif /* BFD_HOST_64_BIT */
8259 static void do_vfp_nsyn_opcode (const char *);
8261 /* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8262 Determine whether it can be performed with a move instruction; if
8263 it can, convert inst.instruction to that move instruction and
8264 return TRUE; if it can't, convert inst.instruction to a literal-pool
8265 load and return FALSE. If this is not a valid thing to do in the
8266 current context, set inst.error and return TRUE.
8268 inst.operands[i] describes the destination register. */
8271 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
8274 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
8275 bfd_boolean arm_p
= (t
== CONST_ARM
);
8278 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
8282 if ((inst
.instruction
& tbit
) == 0)
8284 inst
.error
= _("invalid pseudo operation");
8288 if (inst
.relocs
[0].exp
.X_op
!= O_constant
8289 && inst
.relocs
[0].exp
.X_op
!= O_symbol
8290 && inst
.relocs
[0].exp
.X_op
!= O_big
)
8292 inst
.error
= _("constant expression expected");
8296 if (inst
.relocs
[0].exp
.X_op
== O_constant
8297 || inst
.relocs
[0].exp
.X_op
== O_big
)
8299 #if defined BFD_HOST_64_BIT
8304 if (inst
.relocs
[0].exp
.X_op
== O_big
)
8306 LITTLENUM_TYPE w
[X_PRECISION
];
8309 if (inst
.relocs
[0].exp
.X_add_number
== -1)
8311 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
8313 /* FIXME: Should we check words w[2..5] ? */
8318 #if defined BFD_HOST_64_BIT
8320 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
8321 << LITTLENUM_NUMBER_OF_BITS
)
8322 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
8323 << LITTLENUM_NUMBER_OF_BITS
)
8324 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
8325 << LITTLENUM_NUMBER_OF_BITS
)
8326 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
8328 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
8329 | (l
[0] & LITTLENUM_MASK
);
8333 v
= inst
.relocs
[0].exp
.X_add_number
;
8335 if (!inst
.operands
[i
].issingle
)
8339 /* LDR should not use lead in a flag-setting instruction being
8340 chosen so we do not check whether movs can be used. */
8342 if ((ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
8343 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8344 && inst
.operands
[i
].reg
!= 13
8345 && inst
.operands
[i
].reg
!= 15)
8347 /* Check if on thumb2 it can be done with a mov.w, mvn or
8348 movw instruction. */
8349 unsigned int newimm
;
8350 bfd_boolean isNegated
;
8352 newimm
= encode_thumb32_immediate (v
);
8353 if (newimm
!= (unsigned int) FAIL
)
8357 newimm
= encode_thumb32_immediate (~v
);
8358 if (newimm
!= (unsigned int) FAIL
)
8362 /* The number can be loaded with a mov.w or mvn
8364 if (newimm
!= (unsigned int) FAIL
8365 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
8367 inst
.instruction
= (0xf04f0000 /* MOV.W. */
8368 | (inst
.operands
[i
].reg
<< 8));
8369 /* Change to MOVN. */
8370 inst
.instruction
|= (isNegated
? 0x200000 : 0);
8371 inst
.instruction
|= (newimm
& 0x800) << 15;
8372 inst
.instruction
|= (newimm
& 0x700) << 4;
8373 inst
.instruction
|= (newimm
& 0x0ff);
8376 /* The number can be loaded with a movw instruction. */
8377 else if ((v
& ~0xFFFF) == 0
8378 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8380 int imm
= v
& 0xFFFF;
8382 inst
.instruction
= 0xf2400000; /* MOVW. */
8383 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
8384 inst
.instruction
|= (imm
& 0xf000) << 4;
8385 inst
.instruction
|= (imm
& 0x0800) << 15;
8386 inst
.instruction
|= (imm
& 0x0700) << 4;
8387 inst
.instruction
|= (imm
& 0x00ff);
8394 int value
= encode_arm_immediate (v
);
8398 /* This can be done with a mov instruction. */
8399 inst
.instruction
&= LITERAL_MASK
;
8400 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
8401 inst
.instruction
|= value
& 0xfff;
8405 value
= encode_arm_immediate (~ v
);
8408 /* This can be done with a mvn instruction. */
8409 inst
.instruction
&= LITERAL_MASK
;
8410 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
8411 inst
.instruction
|= value
& 0xfff;
8415 else if (t
== CONST_VEC
&& ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
8418 unsigned immbits
= 0;
8419 unsigned immlo
= inst
.operands
[1].imm
;
8420 unsigned immhi
= inst
.operands
[1].regisimm
8421 ? inst
.operands
[1].reg
8422 : inst
.relocs
[0].exp
.X_unsigned
8424 : ((bfd_int64_t
)((int) immlo
)) >> 32;
8425 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8426 &op
, 64, NT_invtype
);
8430 neon_invert_size (&immlo
, &immhi
, 64);
8432 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8433 &op
, 64, NT_invtype
);
8438 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
8444 /* Fill other bits in vmov encoding for both thumb and arm. */
8446 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
8448 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
8449 neon_write_immbits (immbits
);
8457 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8458 if (inst
.operands
[i
].issingle
8459 && is_quarter_float (inst
.operands
[1].imm
)
8460 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8462 inst
.operands
[1].imm
=
8463 neon_qfloat_bits (v
);
8464 do_vfp_nsyn_opcode ("fconsts");
8468 /* If our host does not support a 64-bit type then we cannot perform
8469 the following optimization. This mean that there will be a
8470 discrepancy between the output produced by an assembler built for
8471 a 32-bit-only host and the output produced from a 64-bit host, but
8472 this cannot be helped. */
8473 #if defined BFD_HOST_64_BIT
8474 else if (!inst
.operands
[1].issingle
8475 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8477 if (is_double_a_single (v
)
8478 && is_quarter_float (double_to_single (v
)))
8480 inst
.operands
[1].imm
=
8481 neon_qfloat_bits (double_to_single (v
));
8482 do_vfp_nsyn_opcode ("fconstd");
8490 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8491 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8494 inst
.operands
[1].reg
= REG_PC
;
8495 inst
.operands
[1].isreg
= 1;
8496 inst
.operands
[1].preind
= 1;
8497 inst
.relocs
[0].pc_rel
= 1;
8498 inst
.relocs
[0].type
= (thumb_p
8499 ? BFD_RELOC_ARM_THUMB_OFFSET
8501 ? BFD_RELOC_ARM_HWLITERAL
8502 : BFD_RELOC_ARM_LITERAL
));
8506 /* inst.operands[i] was set up by parse_address. Encode it into an
8507 ARM-format instruction. Reject all forms which cannot be encoded
8508 into a coprocessor load/store instruction. If wb_ok is false,
8509 reject use of writeback; if unind_ok is false, reject use of
8510 unindexed addressing. If reloc_override is not 0, use it instead
8511 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8512 (in which case it is preserved). */
8515 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8517 if (!inst
.operands
[i
].isreg
)
8520 if (! inst
.operands
[0].isvec
)
8522 inst
.error
= _("invalid co-processor operand");
8525 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8529 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8531 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8533 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8535 gas_assert (!inst
.operands
[i
].writeback
);
8538 inst
.error
= _("instruction does not support unindexed addressing");
8541 inst
.instruction
|= inst
.operands
[i
].imm
;
8542 inst
.instruction
|= INDEX_UP
;
8546 if (inst
.operands
[i
].preind
)
8547 inst
.instruction
|= PRE_INDEX
;
8549 if (inst
.operands
[i
].writeback
)
8551 if (inst
.operands
[i
].reg
== REG_PC
)
8553 inst
.error
= _("pc may not be used with write-back");
8558 inst
.error
= _("instruction does not support writeback");
8561 inst
.instruction
|= WRITE_BACK
;
8565 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) reloc_override
;
8566 else if ((inst
.relocs
[0].type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8567 || inst
.relocs
[0].type
> BFD_RELOC_ARM_LDC_SB_G2
)
8568 && inst
.relocs
[0].type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8571 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8573 inst
.relocs
[0].type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8576 /* Prefer + for zero encoded value. */
8577 if (!inst
.operands
[i
].negative
)
8578 inst
.instruction
|= INDEX_UP
;
8583 /* Functions for instruction encoding, sorted by sub-architecture.
8584 First some generics; their names are taken from the conventional
8585 bit positions for register arguments in ARM format instructions. */
8595 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8601 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8607 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8608 inst
.instruction
|= inst
.operands
[1].reg
;
8614 inst
.instruction
|= inst
.operands
[0].reg
;
8615 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8621 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8622 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8628 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8629 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8635 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8636 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8640 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8642 if (ARM_CPU_IS_ANY (cpu_variant
))
8644 as_tsktsk ("%s", msg
);
8647 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8659 unsigned Rn
= inst
.operands
[2].reg
;
8660 /* Enforce restrictions on SWP instruction. */
8661 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8663 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8664 _("Rn must not overlap other operands"));
8666 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8668 if (!check_obsolete (&arm_ext_v8
,
8669 _("swp{b} use is obsoleted for ARMv8 and later"))
8670 && warn_on_deprecated
8671 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8672 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8675 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8676 inst
.instruction
|= inst
.operands
[1].reg
;
8677 inst
.instruction
|= Rn
<< 16;
8683 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8684 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8685 inst
.instruction
|= inst
.operands
[2].reg
;
8691 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8692 constraint (((inst
.relocs
[0].exp
.X_op
!= O_constant
8693 && inst
.relocs
[0].exp
.X_op
!= O_illegal
)
8694 || inst
.relocs
[0].exp
.X_add_number
!= 0),
8696 inst
.instruction
|= inst
.operands
[0].reg
;
8697 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8698 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8704 inst
.instruction
|= inst
.operands
[0].imm
;
8710 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8711 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8714 /* ARM instructions, in alphabetical order by function name (except
8715 that wrapper functions appear immediately after the function they
8718 /* This is a pseudo-op of the form "adr rd, label" to be converted
8719 into a relative address of the form "add rd, pc, #label-.-8". */
8724 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8726 /* Frag hacking will turn this into a sub instruction if the offset turns
8727 out to be negative. */
8728 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
8729 inst
.relocs
[0].pc_rel
= 1;
8730 inst
.relocs
[0].exp
.X_add_number
-= 8;
8732 if (support_interwork
8733 && inst
.relocs
[0].exp
.X_op
== O_symbol
8734 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8735 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8736 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8737 inst
.relocs
[0].exp
.X_add_number
|= 1;
8740 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8741 into a relative address of the form:
8742 add rd, pc, #low(label-.-8)"
8743 add rd, rd, #high(label-.-8)" */
8748 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8750 /* Frag hacking will turn this into a sub instruction if the offset turns
8751 out to be negative. */
8752 inst
.relocs
[0].type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8753 inst
.relocs
[0].pc_rel
= 1;
8754 inst
.size
= INSN_SIZE
* 2;
8755 inst
.relocs
[0].exp
.X_add_number
-= 8;
8757 if (support_interwork
8758 && inst
.relocs
[0].exp
.X_op
== O_symbol
8759 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8760 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8761 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8762 inst
.relocs
[0].exp
.X_add_number
|= 1;
8768 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8769 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
8771 if (!inst
.operands
[1].present
)
8772 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8773 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8774 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8775 encode_arm_shifter_operand (2);
8781 if (inst
.operands
[0].present
)
8782 inst
.instruction
|= inst
.operands
[0].imm
;
8784 inst
.instruction
|= 0xf;
8790 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8791 constraint (msb
> 32, _("bit-field extends past end of register"));
8792 /* The instruction encoding stores the LSB and MSB,
8793 not the LSB and width. */
8794 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8795 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8796 inst
.instruction
|= (msb
- 1) << 16;
8804 /* #0 in second position is alternative syntax for bfc, which is
8805 the same instruction but with REG_PC in the Rm field. */
8806 if (!inst
.operands
[1].isreg
)
8807 inst
.operands
[1].reg
= REG_PC
;
8809 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8810 constraint (msb
> 32, _("bit-field extends past end of register"));
8811 /* The instruction encoding stores the LSB and MSB,
8812 not the LSB and width. */
8813 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8814 inst
.instruction
|= inst
.operands
[1].reg
;
8815 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8816 inst
.instruction
|= (msb
- 1) << 16;
8822 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8823 _("bit-field extends past end of register"));
8824 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8825 inst
.instruction
|= inst
.operands
[1].reg
;
8826 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8827 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8830 /* ARM V5 breakpoint instruction (argument parse)
8831 BKPT <16 bit unsigned immediate>
8832 Instruction is not conditional.
8833 The bit pattern given in insns[] has the COND_ALWAYS condition,
8834 and it is an error if the caller tried to override that. */
8839 /* Top 12 of 16 bits to bits 19:8. */
8840 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8842 /* Bottom 4 of 16 bits to bits 3:0. */
8843 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8847 encode_branch (int default_reloc
)
8849 if (inst
.operands
[0].hasreloc
)
8851 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8852 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8853 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8854 inst
.relocs
[0].type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8855 ? BFD_RELOC_ARM_PLT32
8856 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8859 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) default_reloc
;
8860 inst
.relocs
[0].pc_rel
= 1;
8867 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8868 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8871 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8878 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8880 if (inst
.cond
== COND_ALWAYS
)
8881 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8883 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8887 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8890 /* ARM V5 branch-link-exchange instruction (argument parse)
8891 BLX <target_addr> ie BLX(1)
8892 BLX{<condition>} <Rm> ie BLX(2)
8893 Unfortunately, there are two different opcodes for this mnemonic.
8894 So, the insns[].value is not used, and the code here zaps values
8895 into inst.instruction.
8896 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8901 if (inst
.operands
[0].isreg
)
8903 /* Arg is a register; the opcode provided by insns[] is correct.
8904 It is not illegal to do "blx pc", just useless. */
8905 if (inst
.operands
[0].reg
== REG_PC
)
8906 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8908 inst
.instruction
|= inst
.operands
[0].reg
;
8912 /* Arg is an address; this instruction cannot be executed
8913 conditionally, and the opcode must be adjusted.
8914 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8915 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8916 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8917 inst
.instruction
= 0xfa000000;
8918 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8925 bfd_boolean want_reloc
;
8927 if (inst
.operands
[0].reg
== REG_PC
)
8928 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8930 inst
.instruction
|= inst
.operands
[0].reg
;
8931 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8932 it is for ARMv4t or earlier. */
8933 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8934 if (!ARM_FEATURE_ZERO (selected_object_arch
)
8935 && !ARM_CPU_HAS_FEATURE (selected_object_arch
, arm_ext_v5
))
8939 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8944 inst
.relocs
[0].type
= BFD_RELOC_ARM_V4BX
;
8948 /* ARM v5TEJ. Jump to Jazelle code. */
8953 if (inst
.operands
[0].reg
== REG_PC
)
8954 as_tsktsk (_("use of r15 in bxj is not really useful"));
8956 inst
.instruction
|= inst
.operands
[0].reg
;
8959 /* Co-processor data operation:
8960 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8961 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8965 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8966 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
8967 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8968 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8969 inst
.instruction
|= inst
.operands
[4].reg
;
8970 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8976 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8977 encode_arm_shifter_operand (1);
8980 /* Transfer between coprocessor and ARM registers.
8981 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8986 No special properties. */
8988 struct deprecated_coproc_regs_s
8995 arm_feature_set deprecated
;
8996 arm_feature_set obsoleted
;
8997 const char *dep_msg
;
8998 const char *obs_msg
;
9001 #define DEPR_ACCESS_V8 \
9002 N_("This coprocessor register access is deprecated in ARMv8")
9004 /* Table of all deprecated coprocessor registers. */
9005 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
9007 {15, 0, 7, 10, 5, /* CP15DMB. */
9008 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9009 DEPR_ACCESS_V8
, NULL
},
9010 {15, 0, 7, 10, 4, /* CP15DSB. */
9011 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9012 DEPR_ACCESS_V8
, NULL
},
9013 {15, 0, 7, 5, 4, /* CP15ISB. */
9014 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9015 DEPR_ACCESS_V8
, NULL
},
9016 {14, 6, 1, 0, 0, /* TEEHBR. */
9017 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9018 DEPR_ACCESS_V8
, NULL
},
9019 {14, 6, 0, 0, 0, /* TEECR. */
9020 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9021 DEPR_ACCESS_V8
, NULL
},
9024 #undef DEPR_ACCESS_V8
9026 static const size_t deprecated_coproc_reg_count
=
9027 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
9035 Rd
= inst
.operands
[2].reg
;
9038 if (inst
.instruction
== 0xee000010
9039 || inst
.instruction
== 0xfe000010)
9041 reject_bad_reg (Rd
);
9042 else if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9044 constraint (Rd
== REG_SP
, BAD_SP
);
9049 if (inst
.instruction
== 0xe000010)
9050 constraint (Rd
== REG_PC
, BAD_PC
);
9053 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
9055 const struct deprecated_coproc_regs_s
*r
=
9056 deprecated_coproc_regs
+ i
;
9058 if (inst
.operands
[0].reg
== r
->cp
9059 && inst
.operands
[1].imm
== r
->opc1
9060 && inst
.operands
[3].reg
== r
->crn
9061 && inst
.operands
[4].reg
== r
->crm
9062 && inst
.operands
[5].imm
== r
->opc2
)
9064 if (! ARM_CPU_IS_ANY (cpu_variant
)
9065 && warn_on_deprecated
9066 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
9067 as_tsktsk ("%s", r
->dep_msg
);
9071 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9072 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
9073 inst
.instruction
|= Rd
<< 12;
9074 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9075 inst
.instruction
|= inst
.operands
[4].reg
;
9076 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
9079 /* Transfer between coprocessor register and pair of ARM registers.
9080 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
9085 Two XScale instructions are special cases of these:
9087 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
9088 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
9090 Result unpredictable if Rd or Rn is R15. */
9097 Rd
= inst
.operands
[2].reg
;
9098 Rn
= inst
.operands
[3].reg
;
9102 reject_bad_reg (Rd
);
9103 reject_bad_reg (Rn
);
9107 constraint (Rd
== REG_PC
, BAD_PC
);
9108 constraint (Rn
== REG_PC
, BAD_PC
);
9111 /* Only check the MRRC{2} variants. */
9112 if ((inst
.instruction
& 0x0FF00000) == 0x0C500000)
9114 /* If Rd == Rn, error that the operation is
9115 unpredictable (example MRRC p3,#1,r1,r1,c4). */
9116 constraint (Rd
== Rn
, BAD_OVERLAP
);
9119 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9120 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
9121 inst
.instruction
|= Rd
<< 12;
9122 inst
.instruction
|= Rn
<< 16;
9123 inst
.instruction
|= inst
.operands
[4].reg
;
9129 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
9130 if (inst
.operands
[1].present
)
9132 inst
.instruction
|= CPSI_MMOD
;
9133 inst
.instruction
|= inst
.operands
[1].imm
;
9140 inst
.instruction
|= inst
.operands
[0].imm
;
9146 unsigned Rd
, Rn
, Rm
;
9148 Rd
= inst
.operands
[0].reg
;
9149 Rn
= (inst
.operands
[1].present
9150 ? inst
.operands
[1].reg
: Rd
);
9151 Rm
= inst
.operands
[2].reg
;
9153 constraint ((Rd
== REG_PC
), BAD_PC
);
9154 constraint ((Rn
== REG_PC
), BAD_PC
);
9155 constraint ((Rm
== REG_PC
), BAD_PC
);
9157 inst
.instruction
|= Rd
<< 16;
9158 inst
.instruction
|= Rn
<< 0;
9159 inst
.instruction
|= Rm
<< 8;
9165 /* There is no IT instruction in ARM mode. We
9166 process it to do the validation as if in
9167 thumb mode, just in case the code gets
9168 assembled for thumb using the unified syntax. */
9173 set_pred_insn_type (IT_INSN
);
9174 now_pred
.mask
= (inst
.instruction
& 0xf) | 0x10;
9175 now_pred
.cc
= inst
.operands
[0].imm
;
9179 /* If there is only one register in the register list,
9180 then return its register number. Otherwise return -1. */
9182 only_one_reg_in_list (int range
)
9184 int i
= ffs (range
) - 1;
9185 return (i
> 15 || range
!= (1 << i
)) ? -1 : i
;
9189 encode_ldmstm(int from_push_pop_mnem
)
9191 int base_reg
= inst
.operands
[0].reg
;
9192 int range
= inst
.operands
[1].imm
;
9195 inst
.instruction
|= base_reg
<< 16;
9196 inst
.instruction
|= range
;
9198 if (inst
.operands
[1].writeback
)
9199 inst
.instruction
|= LDM_TYPE_2_OR_3
;
9201 if (inst
.operands
[0].writeback
)
9203 inst
.instruction
|= WRITE_BACK
;
9204 /* Check for unpredictable uses of writeback. */
9205 if (inst
.instruction
& LOAD_BIT
)
9207 /* Not allowed in LDM type 2. */
9208 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
9209 && ((range
& (1 << REG_PC
)) == 0))
9210 as_warn (_("writeback of base register is UNPREDICTABLE"));
9211 /* Only allowed if base reg not in list for other types. */
9212 else if (range
& (1 << base_reg
))
9213 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
9217 /* Not allowed for type 2. */
9218 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
9219 as_warn (_("writeback of base register is UNPREDICTABLE"));
9220 /* Only allowed if base reg not in list, or first in list. */
9221 else if ((range
& (1 << base_reg
))
9222 && (range
& ((1 << base_reg
) - 1)))
9223 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
9227 /* If PUSH/POP has only one register, then use the A2 encoding. */
9228 one_reg
= only_one_reg_in_list (range
);
9229 if (from_push_pop_mnem
&& one_reg
>= 0)
9231 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
9233 if (is_push
&& one_reg
== 13 /* SP */)
9234 /* PR 22483: The A2 encoding cannot be used when
9235 pushing the stack pointer as this is UNPREDICTABLE. */
9238 inst
.instruction
&= A_COND_MASK
;
9239 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
9240 inst
.instruction
|= one_reg
<< 12;
9247 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
9250 /* ARMv5TE load-consecutive (argument parse)
9259 constraint (inst
.operands
[0].reg
% 2 != 0,
9260 _("first transfer register must be even"));
9261 constraint (inst
.operands
[1].present
9262 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9263 _("can only transfer two consecutive registers"));
9264 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9265 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
9267 if (!inst
.operands
[1].present
)
9268 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9270 /* encode_arm_addr_mode_3 will diagnose overlap between the base
9271 register and the first register written; we have to diagnose
9272 overlap between the base and the second register written here. */
9274 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
9275 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
9276 as_warn (_("base register written back, and overlaps "
9277 "second transfer register"));
9279 if (!(inst
.instruction
& V4_STR_BIT
))
9281 /* For an index-register load, the index register must not overlap the
9282 destination (even if not write-back). */
9283 if (inst
.operands
[2].immisreg
9284 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
9285 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
9286 as_warn (_("index register overlaps transfer register"));
9288 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9289 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
9295 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
9296 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
9297 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
9298 || inst
.operands
[1].negative
9299 /* This can arise if the programmer has written
9301 or if they have mistakenly used a register name as the last
9304 It is very difficult to distinguish between these two cases
9305 because "rX" might actually be a label. ie the register
9306 name has been occluded by a symbol of the same name. So we
9307 just generate a general 'bad addressing mode' type error
9308 message and leave it up to the programmer to discover the
9309 true cause and fix their mistake. */
9310 || (inst
.operands
[1].reg
== REG_PC
),
9313 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9314 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9315 _("offset must be zero in ARM encoding"));
9317 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
9319 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9320 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9321 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
9327 constraint (inst
.operands
[0].reg
% 2 != 0,
9328 _("even register required"));
9329 constraint (inst
.operands
[1].present
9330 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9331 _("can only load two consecutive registers"));
9332 /* If op 1 were present and equal to PC, this function wouldn't
9333 have been called in the first place. */
9334 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9336 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9337 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9340 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9341 which is not a multiple of four is UNPREDICTABLE. */
9343 check_ldr_r15_aligned (void)
9345 constraint (!(inst
.operands
[1].immisreg
)
9346 && (inst
.operands
[0].reg
== REG_PC
9347 && inst
.operands
[1].reg
== REG_PC
9348 && (inst
.relocs
[0].exp
.X_add_number
& 0x3)),
9349 _("ldr to register 15 must be 4-byte aligned"));
9355 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9356 if (!inst
.operands
[1].isreg
)
9357 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
9359 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
9360 check_ldr_r15_aligned ();
9366 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9368 if (inst
.operands
[1].preind
)
9370 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9371 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9372 _("this instruction requires a post-indexed address"));
9374 inst
.operands
[1].preind
= 0;
9375 inst
.operands
[1].postind
= 1;
9376 inst
.operands
[1].writeback
= 1;
9378 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9379 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
9382 /* Halfword and signed-byte load/store operations. */
9387 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9388 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9389 if (!inst
.operands
[1].isreg
)
9390 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
9392 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
9398 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9400 if (inst
.operands
[1].preind
)
9402 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9403 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9404 _("this instruction requires a post-indexed address"));
9406 inst
.operands
[1].preind
= 0;
9407 inst
.operands
[1].postind
= 1;
9408 inst
.operands
[1].writeback
= 1;
9410 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9411 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
9414 /* Co-processor register load/store.
9415 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9419 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9420 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9421 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9427 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9428 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9429 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
9430 && !(inst
.instruction
& 0x00400000))
9431 as_tsktsk (_("Rd and Rm should be different in mla"));
9433 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9434 inst
.instruction
|= inst
.operands
[1].reg
;
9435 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9436 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9442 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9443 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
9445 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9446 encode_arm_shifter_operand (1);
9449 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9456 top
= (inst
.instruction
& 0x00400000) != 0;
9457 constraint (top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
,
9458 _(":lower16: not allowed in this instruction"));
9459 constraint (!top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
,
9460 _(":upper16: not allowed in this instruction"));
9461 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9462 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
9464 imm
= inst
.relocs
[0].exp
.X_add_number
;
9465 /* The value is in two pieces: 0:11, 16:19. */
9466 inst
.instruction
|= (imm
& 0x00000fff);
9467 inst
.instruction
|= (imm
& 0x0000f000) << 4;
9472 do_vfp_nsyn_mrs (void)
9474 if (inst
.operands
[0].isvec
)
9476 if (inst
.operands
[1].reg
!= 1)
9477 first_error (_("operand 1 must be FPSCR"));
9478 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
9479 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
9480 do_vfp_nsyn_opcode ("fmstat");
9482 else if (inst
.operands
[1].isvec
)
9483 do_vfp_nsyn_opcode ("fmrx");
9491 do_vfp_nsyn_msr (void)
9493 if (inst
.operands
[0].isvec
)
9494 do_vfp_nsyn_opcode ("fmxr");
9504 unsigned Rt
= inst
.operands
[0].reg
;
9506 if (thumb_mode
&& Rt
== REG_SP
)
9508 inst
.error
= BAD_SP
;
9512 /* MVFR2 is only valid at ARMv8-A. */
9513 if (inst
.operands
[1].reg
== 5)
9514 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9517 /* APSR_ sets isvec. All other refs to PC are illegal. */
9518 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9520 inst
.error
= BAD_PC
;
9524 /* If we get through parsing the register name, we just insert the number
9525 generated into the instruction without further validation. */
9526 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9527 inst
.instruction
|= (Rt
<< 12);
9533 unsigned Rt
= inst
.operands
[1].reg
;
9536 reject_bad_reg (Rt
);
9537 else if (Rt
== REG_PC
)
9539 inst
.error
= BAD_PC
;
9543 /* MVFR2 is only valid for ARMv8-A. */
9544 if (inst
.operands
[0].reg
== 5)
9545 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9548 /* If we get through parsing the register name, we just insert the number
9549 generated into the instruction without further validation. */
9550 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9551 inst
.instruction
|= (Rt
<< 12);
9559 if (do_vfp_nsyn_mrs () == SUCCESS
)
9562 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9563 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9565 if (inst
.operands
[1].isreg
)
9567 br
= inst
.operands
[1].reg
;
9568 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf0000))
9569 as_bad (_("bad register for mrs"));
9573 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9574 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9576 _("'APSR', 'CPSR' or 'SPSR' expected"));
9577 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9580 inst
.instruction
|= br
;
9583 /* Two possible forms:
9584 "{C|S}PSR_<field>, Rm",
9585 "{C|S}PSR_f, #expression". */
9590 if (do_vfp_nsyn_msr () == SUCCESS
)
9593 inst
.instruction
|= inst
.operands
[0].imm
;
9594 if (inst
.operands
[1].isreg
)
9595 inst
.instruction
|= inst
.operands
[1].reg
;
9598 inst
.instruction
|= INST_IMMEDIATE
;
9599 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
9600 inst
.relocs
[0].pc_rel
= 0;
9607 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9609 if (!inst
.operands
[2].present
)
9610 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9611 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9612 inst
.instruction
|= inst
.operands
[1].reg
;
9613 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9615 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9616 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9617 as_tsktsk (_("Rd and Rm should be different in mul"));
9620 /* Long Multiply Parser
9621 UMULL RdLo, RdHi, Rm, Rs
9622 SMULL RdLo, RdHi, Rm, Rs
9623 UMLAL RdLo, RdHi, Rm, Rs
9624 SMLAL RdLo, RdHi, Rm, Rs. */
9629 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9630 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9631 inst
.instruction
|= inst
.operands
[2].reg
;
9632 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9634 /* rdhi and rdlo must be different. */
9635 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9636 as_tsktsk (_("rdhi and rdlo must be different"));
9638 /* rdhi, rdlo and rm must all be different before armv6. */
9639 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9640 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9641 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9642 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9648 if (inst
.operands
[0].present
9649 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9651 /* Architectural NOP hints are CPSR sets with no bits selected. */
9652 inst
.instruction
&= 0xf0000000;
9653 inst
.instruction
|= 0x0320f000;
9654 if (inst
.operands
[0].present
)
9655 inst
.instruction
|= inst
.operands
[0].imm
;
9659 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9660 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9661 Condition defaults to COND_ALWAYS.
9662 Error if Rd, Rn or Rm are R15. */
9667 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9668 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9669 inst
.instruction
|= inst
.operands
[2].reg
;
9670 if (inst
.operands
[3].present
)
9671 encode_arm_shift (3);
9674 /* ARM V6 PKHTB (Argument Parse). */
9679 if (!inst
.operands
[3].present
)
9681 /* If the shift specifier is omitted, turn the instruction
9682 into pkhbt rd, rm, rn. */
9683 inst
.instruction
&= 0xfff00010;
9684 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9685 inst
.instruction
|= inst
.operands
[1].reg
;
9686 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9690 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9691 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9692 inst
.instruction
|= inst
.operands
[2].reg
;
9693 encode_arm_shift (3);
9697 /* ARMv5TE: Preload-Cache
9698 MP Extensions: Preload for write
9702 Syntactically, like LDR with B=1, W=0, L=1. */
9707 constraint (!inst
.operands
[0].isreg
,
9708 _("'[' expected after PLD mnemonic"));
9709 constraint (inst
.operands
[0].postind
,
9710 _("post-indexed expression used in preload instruction"));
9711 constraint (inst
.operands
[0].writeback
,
9712 _("writeback used in preload instruction"));
9713 constraint (!inst
.operands
[0].preind
,
9714 _("unindexed addressing used in preload instruction"));
9715 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9718 /* ARMv7: PLI <addr_mode> */
9722 constraint (!inst
.operands
[0].isreg
,
9723 _("'[' expected after PLI mnemonic"));
9724 constraint (inst
.operands
[0].postind
,
9725 _("post-indexed expression used in preload instruction"));
9726 constraint (inst
.operands
[0].writeback
,
9727 _("writeback used in preload instruction"));
9728 constraint (!inst
.operands
[0].preind
,
9729 _("unindexed addressing used in preload instruction"));
9730 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9731 inst
.instruction
&= ~PRE_INDEX
;
9737 constraint (inst
.operands
[0].writeback
,
9738 _("push/pop do not support {reglist}^"));
9739 inst
.operands
[1] = inst
.operands
[0];
9740 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9741 inst
.operands
[0].isreg
= 1;
9742 inst
.operands
[0].writeback
= 1;
9743 inst
.operands
[0].reg
= REG_SP
;
9744 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9747 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9748 word at the specified address and the following word
9750 Unconditionally executed.
9751 Error if Rn is R15. */
9756 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9757 if (inst
.operands
[0].writeback
)
9758 inst
.instruction
|= WRITE_BACK
;
9761 /* ARM V6 ssat (argument parse). */
9766 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9767 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9768 inst
.instruction
|= inst
.operands
[2].reg
;
9770 if (inst
.operands
[3].present
)
9771 encode_arm_shift (3);
9774 /* ARM V6 usat (argument parse). */
9779 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9780 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9781 inst
.instruction
|= inst
.operands
[2].reg
;
9783 if (inst
.operands
[3].present
)
9784 encode_arm_shift (3);
9787 /* ARM V6 ssat16 (argument parse). */
9792 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9793 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9794 inst
.instruction
|= inst
.operands
[2].reg
;
9800 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9801 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9802 inst
.instruction
|= inst
.operands
[2].reg
;
9805 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9806 preserving the other bits.
9808 setend <endian_specifier>, where <endian_specifier> is either
9814 if (warn_on_deprecated
9815 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9816 as_tsktsk (_("setend use is deprecated for ARMv8"));
9818 if (inst
.operands
[0].imm
)
9819 inst
.instruction
|= 0x200;
9825 unsigned int Rm
= (inst
.operands
[1].present
9826 ? inst
.operands
[1].reg
9827 : inst
.operands
[0].reg
);
9829 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9830 inst
.instruction
|= Rm
;
9831 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9833 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9834 inst
.instruction
|= SHIFT_BY_REG
;
9835 /* PR 12854: Error on extraneous shifts. */
9836 constraint (inst
.operands
[2].shifted
,
9837 _("extraneous shift as part of operand to shift insn"));
9840 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
9846 inst
.relocs
[0].type
= BFD_RELOC_ARM_SMC
;
9847 inst
.relocs
[0].pc_rel
= 0;
9853 inst
.relocs
[0].type
= BFD_RELOC_ARM_HVC
;
9854 inst
.relocs
[0].pc_rel
= 0;
9860 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
9861 inst
.relocs
[0].pc_rel
= 0;
9867 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9868 _("selected processor does not support SETPAN instruction"));
9870 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9876 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9877 _("selected processor does not support SETPAN instruction"));
9879 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9882 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9883 SMLAxy{cond} Rd,Rm,Rs,Rn
9884 SMLAWy{cond} Rd,Rm,Rs,Rn
9885 Error if any register is R15. */
9890 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9891 inst
.instruction
|= inst
.operands
[1].reg
;
9892 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9893 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9896 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9897 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9898 Error if any register is R15.
9899 Warning if Rdlo == Rdhi. */
9904 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9905 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9906 inst
.instruction
|= inst
.operands
[2].reg
;
9907 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9909 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9910 as_tsktsk (_("rdhi and rdlo must be different"));
9913 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9914 SMULxy{cond} Rd,Rm,Rs
9915 Error if any register is R15. */
9920 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9921 inst
.instruction
|= inst
.operands
[1].reg
;
9922 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9925 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9926 the same for both ARM and Thumb-2. */
9933 if (inst
.operands
[0].present
)
9935 reg
= inst
.operands
[0].reg
;
9936 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9941 inst
.instruction
|= reg
<< 16;
9942 inst
.instruction
|= inst
.operands
[1].imm
;
9943 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9944 inst
.instruction
|= WRITE_BACK
;
9947 /* ARM V6 strex (argument parse). */
9952 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9953 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9954 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9955 || inst
.operands
[2].negative
9956 /* See comment in do_ldrex(). */
9957 || (inst
.operands
[2].reg
== REG_PC
),
9960 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9961 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9963 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9964 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9965 _("offset must be zero in ARM encoding"));
9967 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9968 inst
.instruction
|= inst
.operands
[1].reg
;
9969 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9970 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
9976 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9977 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9978 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9979 || inst
.operands
[2].negative
,
9982 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9983 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9991 constraint (inst
.operands
[1].reg
% 2 != 0,
9992 _("even register required"));
9993 constraint (inst
.operands
[2].present
9994 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
9995 _("can only store two consecutive registers"));
9996 /* If op 2 were present and equal to PC, this function wouldn't
9997 have been called in the first place. */
9998 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
10000 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10001 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
10002 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
10005 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10006 inst
.instruction
|= inst
.operands
[1].reg
;
10007 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
10014 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10015 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10023 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10024 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10029 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
10030 extends it to 32-bits, and adds the result to a value in another
10031 register. You can specify a rotation by 0, 8, 16, or 24 bits
10032 before extracting the 16-bit value.
10033 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
10034 Condition defaults to COND_ALWAYS.
10035 Error if any register uses R15. */
10040 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10041 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10042 inst
.instruction
|= inst
.operands
[2].reg
;
10043 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
10048 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
10049 Condition defaults to COND_ALWAYS.
10050 Error if any register uses R15. */
10055 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10056 inst
.instruction
|= inst
.operands
[1].reg
;
10057 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
10060 /* VFP instructions. In a logical order: SP variant first, monad
10061 before dyad, arithmetic then move then load/store. */
10064 do_vfp_sp_monadic (void)
10066 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10067 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
10071 do_vfp_sp_dyadic (void)
10073 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10074 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
10075 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
10079 do_vfp_sp_compare_z (void)
10081 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10085 do_vfp_dp_sp_cvt (void)
10087 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10088 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
10092 do_vfp_sp_dp_cvt (void)
10094 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10095 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
10099 do_vfp_reg_from_sp (void)
10101 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10102 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
10106 do_vfp_reg2_from_sp2 (void)
10108 constraint (inst
.operands
[2].imm
!= 2,
10109 _("only two consecutive VFP SP registers allowed here"));
10110 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10111 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10112 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
10116 do_vfp_sp_from_reg (void)
10118 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
10119 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10123 do_vfp_sp2_from_reg2 (void)
10125 constraint (inst
.operands
[0].imm
!= 2,
10126 _("only two consecutive VFP SP registers allowed here"));
10127 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
10128 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10129 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10133 do_vfp_sp_ldst (void)
10135 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10136 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
10140 do_vfp_dp_ldst (void)
10142 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10143 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
10148 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
10150 if (inst
.operands
[0].writeback
)
10151 inst
.instruction
|= WRITE_BACK
;
10153 constraint (ldstm_type
!= VFP_LDSTMIA
,
10154 _("this addressing mode requires base-register writeback"));
10155 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10156 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
10157 inst
.instruction
|= inst
.operands
[1].imm
;
10161 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
10165 if (inst
.operands
[0].writeback
)
10166 inst
.instruction
|= WRITE_BACK
;
10168 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
10169 _("this addressing mode requires base-register writeback"));
10171 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10172 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10174 count
= inst
.operands
[1].imm
<< 1;
10175 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
10178 inst
.instruction
|= count
;
10182 do_vfp_sp_ldstmia (void)
10184 vfp_sp_ldstm (VFP_LDSTMIA
);
10188 do_vfp_sp_ldstmdb (void)
10190 vfp_sp_ldstm (VFP_LDSTMDB
);
10194 do_vfp_dp_ldstmia (void)
10196 vfp_dp_ldstm (VFP_LDSTMIA
);
10200 do_vfp_dp_ldstmdb (void)
10202 vfp_dp_ldstm (VFP_LDSTMDB
);
10206 do_vfp_xp_ldstmia (void)
10208 vfp_dp_ldstm (VFP_LDSTMIAX
);
10212 do_vfp_xp_ldstmdb (void)
10214 vfp_dp_ldstm (VFP_LDSTMDBX
);
10218 do_vfp_dp_rd_rm (void)
10220 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10221 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
10225 do_vfp_dp_rn_rd (void)
10227 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
10228 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10232 do_vfp_dp_rd_rn (void)
10234 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10235 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
10239 do_vfp_dp_rd_rn_rm (void)
10241 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10242 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
10243 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
10247 do_vfp_dp_rd (void)
10249 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10253 do_vfp_dp_rm_rd_rn (void)
10255 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
10256 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10257 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
10260 /* VFPv3 instructions. */
10262 do_vfp_sp_const (void)
10264 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10265 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
10266 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
10270 do_vfp_dp_const (void)
10272 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10273 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
10274 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
10278 vfp_conv (int srcsize
)
10280 int immbits
= srcsize
- inst
.operands
[1].imm
;
10282 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
10284 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
10285 i.e. immbits must be in range 0 - 16. */
10286 inst
.error
= _("immediate value out of range, expected range [0, 16]");
10289 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
10291 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
10292 i.e. immbits must be in range 0 - 31. */
10293 inst
.error
= _("immediate value out of range, expected range [1, 32]");
10297 inst
.instruction
|= (immbits
& 1) << 5;
10298 inst
.instruction
|= (immbits
>> 1);
10302 do_vfp_sp_conv_16 (void)
10304 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10309 do_vfp_dp_conv_16 (void)
10311 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10316 do_vfp_sp_conv_32 (void)
10318 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10323 do_vfp_dp_conv_32 (void)
10325 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10329 /* FPA instructions. Also in a logical order. */
10334 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10335 inst
.instruction
|= inst
.operands
[1].reg
;
10339 do_fpa_ldmstm (void)
10341 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10342 switch (inst
.operands
[1].imm
)
10344 case 1: inst
.instruction
|= CP_T_X
; break;
10345 case 2: inst
.instruction
|= CP_T_Y
; break;
10346 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
10351 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
10353 /* The instruction specified "ea" or "fd", so we can only accept
10354 [Rn]{!}. The instruction does not really support stacking or
10355 unstacking, so we have to emulate these by setting appropriate
10356 bits and offsets. */
10357 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
10358 || inst
.relocs
[0].exp
.X_add_number
!= 0,
10359 _("this instruction does not support indexing"));
10361 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
10362 inst
.relocs
[0].exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
10364 if (!(inst
.instruction
& INDEX_UP
))
10365 inst
.relocs
[0].exp
.X_add_number
= -inst
.relocs
[0].exp
.X_add_number
;
10367 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
10369 inst
.operands
[2].preind
= 0;
10370 inst
.operands
[2].postind
= 1;
10374 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
10377 /* iWMMXt instructions: strictly in alphabetical order. */
10380 do_iwmmxt_tandorc (void)
10382 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
10386 do_iwmmxt_textrc (void)
10388 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10389 inst
.instruction
|= inst
.operands
[1].imm
;
10393 do_iwmmxt_textrm (void)
10395 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10396 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10397 inst
.instruction
|= inst
.operands
[2].imm
;
10401 do_iwmmxt_tinsr (void)
10403 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10404 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10405 inst
.instruction
|= inst
.operands
[2].imm
;
10409 do_iwmmxt_tmia (void)
10411 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10412 inst
.instruction
|= inst
.operands
[1].reg
;
10413 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10417 do_iwmmxt_waligni (void)
10419 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10420 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10421 inst
.instruction
|= inst
.operands
[2].reg
;
10422 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
10426 do_iwmmxt_wmerge (void)
10428 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10429 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10430 inst
.instruction
|= inst
.operands
[2].reg
;
10431 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
10435 do_iwmmxt_wmov (void)
10437 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10438 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10439 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10440 inst
.instruction
|= inst
.operands
[1].reg
;
10444 do_iwmmxt_wldstbh (void)
10447 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10449 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
10451 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
10452 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
10456 do_iwmmxt_wldstw (void)
10458 /* RIWR_RIWC clears .isreg for a control register. */
10459 if (!inst
.operands
[0].isreg
)
10461 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
10462 inst
.instruction
|= 0xf0000000;
10465 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10466 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
10470 do_iwmmxt_wldstd (void)
10472 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10473 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
10474 && inst
.operands
[1].immisreg
)
10476 inst
.instruction
&= ~0x1a000ff;
10477 inst
.instruction
|= (0xfU
<< 28);
10478 if (inst
.operands
[1].preind
)
10479 inst
.instruction
|= PRE_INDEX
;
10480 if (!inst
.operands
[1].negative
)
10481 inst
.instruction
|= INDEX_UP
;
10482 if (inst
.operands
[1].writeback
)
10483 inst
.instruction
|= WRITE_BACK
;
10484 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10485 inst
.instruction
|= inst
.relocs
[0].exp
.X_add_number
<< 4;
10486 inst
.instruction
|= inst
.operands
[1].imm
;
10489 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
10493 do_iwmmxt_wshufh (void)
10495 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10496 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10497 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
10498 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
10502 do_iwmmxt_wzero (void)
10504 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10505 inst
.instruction
|= inst
.operands
[0].reg
;
10506 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10507 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10511 do_iwmmxt_wrwrwr_or_imm5 (void)
10513 if (inst
.operands
[2].isreg
)
10516 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
10517 _("immediate operand requires iWMMXt2"));
10519 if (inst
.operands
[2].imm
== 0)
10521 switch ((inst
.instruction
>> 20) & 0xf)
10527 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10528 inst
.operands
[2].imm
= 16;
10529 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
10535 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10536 inst
.operands
[2].imm
= 32;
10537 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
10544 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10546 wrn
= (inst
.instruction
>> 16) & 0xf;
10547 inst
.instruction
&= 0xff0fff0f;
10548 inst
.instruction
|= wrn
;
10549 /* Bail out here; the instruction is now assembled. */
10554 /* Map 32 -> 0, etc. */
10555 inst
.operands
[2].imm
&= 0x1f;
10556 inst
.instruction
|= (0xfU
<< 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
10560 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10561 operations first, then control, shift, and load/store. */
10563 /* Insns like "foo X,Y,Z". */
10566 do_mav_triple (void)
10568 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10569 inst
.instruction
|= inst
.operands
[1].reg
;
10570 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10573 /* Insns like "foo W,X,Y,Z".
10574 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10579 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10580 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10581 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10582 inst
.instruction
|= inst
.operands
[3].reg
;
10585 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10587 do_mav_dspsc (void)
10589 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10592 /* Maverick shift immediate instructions.
10593 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10594 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10597 do_mav_shift (void)
10599 int imm
= inst
.operands
[2].imm
;
10601 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10602 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10604 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10605 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10606 Bit 4 should be 0. */
10607 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
10609 inst
.instruction
|= imm
;
10612 /* XScale instructions. Also sorted arithmetic before move. */
10614 /* Xscale multiply-accumulate (argument parse)
10617 MIAxycc acc0,Rm,Rs. */
10622 inst
.instruction
|= inst
.operands
[1].reg
;
10623 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10626 /* Xscale move-accumulator-register (argument parse)
10628 MARcc acc0,RdLo,RdHi. */
10633 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10634 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10637 /* Xscale move-register-accumulator (argument parse)
10639 MRAcc RdLo,RdHi,acc0. */
10644 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10645 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10646 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10649 /* Encoding functions relevant only to Thumb. */
10651 /* inst.operands[i] is a shifted-register operand; encode
10652 it into inst.instruction in the format used by Thumb32. */
10655 encode_thumb32_shifted_operand (int i
)
10657 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
10658 unsigned int shift
= inst
.operands
[i
].shift_kind
;
10660 constraint (inst
.operands
[i
].immisreg
,
10661 _("shift by register not allowed in thumb mode"));
10662 inst
.instruction
|= inst
.operands
[i
].reg
;
10663 if (shift
== SHIFT_RRX
)
10664 inst
.instruction
|= SHIFT_ROR
<< 4;
10667 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10668 _("expression too complex"));
10670 constraint (value
> 32
10671 || (value
== 32 && (shift
== SHIFT_LSL
10672 || shift
== SHIFT_ROR
)),
10673 _("shift expression is too large"));
10677 else if (value
== 32)
10680 inst
.instruction
|= shift
<< 4;
10681 inst
.instruction
|= (value
& 0x1c) << 10;
10682 inst
.instruction
|= (value
& 0x03) << 6;
10687 /* inst.operands[i] was set up by parse_address. Encode it into a
10688 Thumb32 format load or store instruction. Reject forms that cannot
10689 be used with such instructions. If is_t is true, reject forms that
10690 cannot be used with a T instruction; if is_d is true, reject forms
10691 that cannot be used with a D instruction. If it is a store insn,
10692 reject PC in Rn. */
10695 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
10697 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
10699 constraint (!inst
.operands
[i
].isreg
,
10700 _("Instruction does not support =N addresses"));
10702 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
10703 if (inst
.operands
[i
].immisreg
)
10705 constraint (is_pc
, BAD_PC_ADDRESSING
);
10706 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
10707 constraint (inst
.operands
[i
].negative
,
10708 _("Thumb does not support negative register indexing"));
10709 constraint (inst
.operands
[i
].postind
,
10710 _("Thumb does not support register post-indexing"));
10711 constraint (inst
.operands
[i
].writeback
,
10712 _("Thumb does not support register indexing with writeback"));
10713 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
10714 _("Thumb supports only LSL in shifted register indexing"));
10716 inst
.instruction
|= inst
.operands
[i
].imm
;
10717 if (inst
.operands
[i
].shifted
)
10719 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10720 _("expression too complex"));
10721 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
10722 || inst
.relocs
[0].exp
.X_add_number
> 3,
10723 _("shift out of range"));
10724 inst
.instruction
|= inst
.relocs
[0].exp
.X_add_number
<< 4;
10726 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
10728 else if (inst
.operands
[i
].preind
)
10730 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
10731 constraint (is_t
&& inst
.operands
[i
].writeback
,
10732 _("cannot use writeback with this instruction"));
10733 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
10734 BAD_PC_ADDRESSING
);
10738 inst
.instruction
|= 0x01000000;
10739 if (inst
.operands
[i
].writeback
)
10740 inst
.instruction
|= 0x00200000;
10744 inst
.instruction
|= 0x00000c00;
10745 if (inst
.operands
[i
].writeback
)
10746 inst
.instruction
|= 0x00000100;
10748 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10750 else if (inst
.operands
[i
].postind
)
10752 gas_assert (inst
.operands
[i
].writeback
);
10753 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
10754 constraint (is_t
, _("cannot use post-indexing with this instruction"));
10757 inst
.instruction
|= 0x00200000;
10759 inst
.instruction
|= 0x00000900;
10760 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10762 else /* unindexed - only for coprocessor */
10763 inst
.error
= _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
10880 /* Thumb instruction encoders, in alphabetical order. */
10882 /* ADDW or SUBW. */
10885 do_t_add_sub_w (void)
10889 Rd
= inst
.operands
[0].reg
;
10890 Rn
= inst
.operands
[1].reg
;
10892 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10893 is the SP-{plus,minus}-immediate form of the instruction. */
10895 constraint (Rd
== REG_PC
, BAD_PC
);
10897 reject_bad_reg (Rd
);
10899 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
10900 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
10903 /* Parse an add or subtract instruction. We get here with inst.instruction
10904 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
10907 do_t_add_sub (void)
10911 Rd
= inst
.operands
[0].reg
;
10912 Rs
= (inst
.operands
[1].present
10913 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10914 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10917 set_pred_insn_type_last ();
10919 if (unified_syntax
)
10922 bfd_boolean narrow
;
10925 flags
= (inst
.instruction
== T_MNEM_adds
10926 || inst
.instruction
== T_MNEM_subs
);
10928 narrow
= !in_pred_block ();
10930 narrow
= in_pred_block ();
10931 if (!inst
.operands
[2].isreg
)
10935 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
10936 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10938 add
= (inst
.instruction
== T_MNEM_add
10939 || inst
.instruction
== T_MNEM_adds
);
10941 if (inst
.size_req
!= 4)
10943 /* Attempt to use a narrow opcode, with relaxation if
10945 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
10946 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
10947 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
10948 opcode
= T_MNEM_add_sp
;
10949 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
10950 opcode
= T_MNEM_add_pc
;
10951 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
10954 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
10956 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
10960 inst
.instruction
= THUMB_OP16(opcode
);
10961 inst
.instruction
|= (Rd
<< 4) | Rs
;
10962 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10963 || (inst
.relocs
[0].type
10964 > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
))
10966 if (inst
.size_req
== 2)
10967 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
10969 inst
.relax
= opcode
;
10973 constraint (inst
.size_req
== 2, BAD_HIREG
);
10975 if (inst
.size_req
== 4
10976 || (inst
.size_req
!= 2 && !opcode
))
10978 constraint ((inst
.relocs
[0].type
10979 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
10980 && (inst
.relocs
[0].type
10981 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
10982 THUMB1_RELOC_ONLY
);
10985 constraint (add
, BAD_PC
);
10986 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
10987 _("only SUBS PC, LR, #const allowed"));
10988 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10989 _("expression too complex"));
10990 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
10991 || inst
.relocs
[0].exp
.X_add_number
> 0xff,
10992 _("immediate value out of range"));
10993 inst
.instruction
= T2_SUBS_PC_LR
10994 | inst
.relocs
[0].exp
.X_add_number
;
10995 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
10998 else if (Rs
== REG_PC
)
11000 /* Always use addw/subw. */
11001 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
11002 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
11006 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11007 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
11010 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11012 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_IMM
;
11014 inst
.instruction
|= Rd
<< 8;
11015 inst
.instruction
|= Rs
<< 16;
11020 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
11021 unsigned int shift
= inst
.operands
[2].shift_kind
;
11023 Rn
= inst
.operands
[2].reg
;
11024 /* See if we can do this with a 16-bit instruction. */
11025 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
11027 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
11032 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
11033 || inst
.instruction
== T_MNEM_add
)
11035 : T_OPCODE_SUB_R3
);
11036 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
11040 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
11042 /* Thumb-1 cores (except v6-M) require at least one high
11043 register in a narrow non flag setting add. */
11044 if (Rd
> 7 || Rn
> 7
11045 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
11046 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
11053 inst
.instruction
= T_OPCODE_ADD_HI
;
11054 inst
.instruction
|= (Rd
& 8) << 4;
11055 inst
.instruction
|= (Rd
& 7);
11056 inst
.instruction
|= Rn
<< 3;
11062 constraint (Rd
== REG_PC
, BAD_PC
);
11063 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
11064 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
11065 constraint (Rs
== REG_PC
, BAD_PC
);
11066 reject_bad_reg (Rn
);
11068 /* If we get here, it can't be done in 16 bits. */
11069 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
11070 _("shift must be constant"));
11071 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11072 inst
.instruction
|= Rd
<< 8;
11073 inst
.instruction
|= Rs
<< 16;
11074 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
11075 _("shift value over 3 not allowed in thumb mode"));
11076 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
11077 _("only LSL shift allowed in thumb mode"));
11078 encode_thumb32_shifted_operand (2);
11083 constraint (inst
.instruction
== T_MNEM_adds
11084 || inst
.instruction
== T_MNEM_subs
,
11087 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
11089 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
11090 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
11093 inst
.instruction
= (inst
.instruction
== T_MNEM_add
11094 ? 0x0000 : 0x8000);
11095 inst
.instruction
|= (Rd
<< 4) | Rs
;
11096 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11100 Rn
= inst
.operands
[2].reg
;
11101 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
11103 /* We now have Rd, Rs, and Rn set to registers. */
11104 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
11106 /* Can't do this for SUB. */
11107 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
11108 inst
.instruction
= T_OPCODE_ADD_HI
;
11109 inst
.instruction
|= (Rd
& 8) << 4;
11110 inst
.instruction
|= (Rd
& 7);
11112 inst
.instruction
|= Rn
<< 3;
11114 inst
.instruction
|= Rs
<< 3;
11116 constraint (1, _("dest must overlap one source register"));
11120 inst
.instruction
= (inst
.instruction
== T_MNEM_add
11121 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
11122 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
11132 Rd
= inst
.operands
[0].reg
;
11133 reject_bad_reg (Rd
);
11135 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
11137 /* Defer to section relaxation. */
11138 inst
.relax
= inst
.instruction
;
11139 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11140 inst
.instruction
|= Rd
<< 4;
11142 else if (unified_syntax
&& inst
.size_req
!= 2)
11144 /* Generate a 32-bit opcode. */
11145 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11146 inst
.instruction
|= Rd
<< 8;
11147 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_PC12
;
11148 inst
.relocs
[0].pc_rel
= 1;
11152 /* Generate a 16-bit opcode. */
11153 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11154 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11155 inst
.relocs
[0].exp
.X_add_number
-= 4; /* PC relative adjust. */
11156 inst
.relocs
[0].pc_rel
= 1;
11157 inst
.instruction
|= Rd
<< 4;
11160 if (inst
.relocs
[0].exp
.X_op
== O_symbol
11161 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11162 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11163 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11164 inst
.relocs
[0].exp
.X_add_number
+= 1;
11167 /* Arithmetic instructions for which there is just one 16-bit
11168 instruction encoding, and it allows only two low registers.
11169 For maximal compatibility with ARM syntax, we allow three register
11170 operands even when Thumb-32 instructions are not available, as long
11171 as the first two are identical. For instance, both "sbc r0,r1" and
11172 "sbc r0,r0,r1" are allowed. */
11178 Rd
= inst
.operands
[0].reg
;
11179 Rs
= (inst
.operands
[1].present
11180 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11181 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11182 Rn
= inst
.operands
[2].reg
;
11184 reject_bad_reg (Rd
);
11185 reject_bad_reg (Rs
);
11186 if (inst
.operands
[2].isreg
)
11187 reject_bad_reg (Rn
);
11189 if (unified_syntax
)
11191 if (!inst
.operands
[2].isreg
)
11193 /* For an immediate, we always generate a 32-bit opcode;
11194 section relaxation will shrink it later if possible. */
11195 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11196 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11197 inst
.instruction
|= Rd
<< 8;
11198 inst
.instruction
|= Rs
<< 16;
11199 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11203 bfd_boolean narrow
;
11205 /* See if we can do this with a 16-bit instruction. */
11206 if (THUMB_SETS_FLAGS (inst
.instruction
))
11207 narrow
= !in_pred_block ();
11209 narrow
= in_pred_block ();
11211 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11213 if (inst
.operands
[2].shifted
)
11215 if (inst
.size_req
== 4)
11221 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11222 inst
.instruction
|= Rd
;
11223 inst
.instruction
|= Rn
<< 3;
11227 /* If we get here, it can't be done in 16 bits. */
11228 constraint (inst
.operands
[2].shifted
11229 && inst
.operands
[2].immisreg
,
11230 _("shift must be constant"));
11231 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11232 inst
.instruction
|= Rd
<< 8;
11233 inst
.instruction
|= Rs
<< 16;
11234 encode_thumb32_shifted_operand (2);
11239 /* On its face this is a lie - the instruction does set the
11240 flags. However, the only supported mnemonic in this mode
11241 says it doesn't. */
11242 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11244 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11245 _("unshifted register required"));
11246 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11247 constraint (Rd
!= Rs
,
11248 _("dest and source1 must be the same register"));
11250 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11251 inst
.instruction
|= Rd
;
11252 inst
.instruction
|= Rn
<< 3;
11256 /* Similarly, but for instructions where the arithmetic operation is
11257 commutative, so we can allow either of them to be different from
11258 the destination operand in a 16-bit instruction. For instance, all
11259 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11266 Rd
= inst
.operands
[0].reg
;
11267 Rs
= (inst
.operands
[1].present
11268 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11269 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11270 Rn
= inst
.operands
[2].reg
;
11272 reject_bad_reg (Rd
);
11273 reject_bad_reg (Rs
);
11274 if (inst
.operands
[2].isreg
)
11275 reject_bad_reg (Rn
);
11277 if (unified_syntax
)
11279 if (!inst
.operands
[2].isreg
)
11281 /* For an immediate, we always generate a 32-bit opcode;
11282 section relaxation will shrink it later if possible. */
11283 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11284 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11285 inst
.instruction
|= Rd
<< 8;
11286 inst
.instruction
|= Rs
<< 16;
11287 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11291 bfd_boolean narrow
;
11293 /* See if we can do this with a 16-bit instruction. */
11294 if (THUMB_SETS_FLAGS (inst
.instruction
))
11295 narrow
= !in_pred_block ();
11297 narrow
= in_pred_block ();
11299 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11301 if (inst
.operands
[2].shifted
)
11303 if (inst
.size_req
== 4)
11310 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11311 inst
.instruction
|= Rd
;
11312 inst
.instruction
|= Rn
<< 3;
11317 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11318 inst
.instruction
|= Rd
;
11319 inst
.instruction
|= Rs
<< 3;
11324 /* If we get here, it can't be done in 16 bits. */
11325 constraint (inst
.operands
[2].shifted
11326 && inst
.operands
[2].immisreg
,
11327 _("shift must be constant"));
11328 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11329 inst
.instruction
|= Rd
<< 8;
11330 inst
.instruction
|= Rs
<< 16;
11331 encode_thumb32_shifted_operand (2);
11336 /* On its face this is a lie - the instruction does set the
11337 flags. However, the only supported mnemonic in this mode
11338 says it doesn't. */
11339 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11341 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11342 _("unshifted register required"));
11343 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11345 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11346 inst
.instruction
|= Rd
;
11349 inst
.instruction
|= Rn
<< 3;
11351 inst
.instruction
|= Rs
<< 3;
11353 constraint (1, _("dest must overlap one source register"));
11361 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
11362 constraint (msb
> 32, _("bit-field extends past end of register"));
11363 /* The instruction encoding stores the LSB and MSB,
11364 not the LSB and width. */
11365 Rd
= inst
.operands
[0].reg
;
11366 reject_bad_reg (Rd
);
11367 inst
.instruction
|= Rd
<< 8;
11368 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
11369 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
11370 inst
.instruction
|= msb
- 1;
11379 Rd
= inst
.operands
[0].reg
;
11380 reject_bad_reg (Rd
);
11382 /* #0 in second position is alternative syntax for bfc, which is
11383 the same instruction but with REG_PC in the Rm field. */
11384 if (!inst
.operands
[1].isreg
)
11388 Rn
= inst
.operands
[1].reg
;
11389 reject_bad_reg (Rn
);
11392 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
11393 constraint (msb
> 32, _("bit-field extends past end of register"));
11394 /* The instruction encoding stores the LSB and MSB,
11395 not the LSB and width. */
11396 inst
.instruction
|= Rd
<< 8;
11397 inst
.instruction
|= Rn
<< 16;
11398 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11399 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11400 inst
.instruction
|= msb
- 1;
11408 Rd
= inst
.operands
[0].reg
;
11409 Rn
= inst
.operands
[1].reg
;
11411 reject_bad_reg (Rd
);
11412 reject_bad_reg (Rn
);
11414 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
11415 _("bit-field extends past end of register"));
11416 inst
.instruction
|= Rd
<< 8;
11417 inst
.instruction
|= Rn
<< 16;
11418 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11419 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11420 inst
.instruction
|= inst
.operands
[3].imm
- 1;
11423 /* ARM V5 Thumb BLX (argument parse)
11424 BLX <target_addr> which is BLX(1)
11425 BLX <Rm> which is BLX(2)
11426 Unfortunately, there are two different opcodes for this mnemonic.
11427 So, the insns[].value is not used, and the code here zaps values
11428 into inst.instruction.
11430 ??? How to take advantage of the additional two bits of displacement
11431 available in Thumb32 mode? Need new relocation? */
11436 set_pred_insn_type_last ();
11438 if (inst
.operands
[0].isreg
)
11440 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
11441 /* We have a register, so this is BLX(2). */
11442 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11446 /* No register. This must be BLX(1). */
11447 inst
.instruction
= 0xf000e800;
11448 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
11457 bfd_reloc_code_real_type reloc
;
11460 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN
);
11462 if (in_pred_block ())
11464 /* Conditional branches inside IT blocks are encoded as unconditional
11466 cond
= COND_ALWAYS
;
11471 if (cond
!= COND_ALWAYS
)
11472 opcode
= T_MNEM_bcond
;
11474 opcode
= inst
.instruction
;
11477 && (inst
.size_req
== 4
11478 || (inst
.size_req
!= 2
11479 && (inst
.operands
[0].hasreloc
11480 || inst
.relocs
[0].exp
.X_op
== O_constant
))))
11482 inst
.instruction
= THUMB_OP32(opcode
);
11483 if (cond
== COND_ALWAYS
)
11484 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
11487 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
11488 _("selected architecture does not support "
11489 "wide conditional branch instruction"));
11491 gas_assert (cond
!= 0xF);
11492 inst
.instruction
|= cond
<< 22;
11493 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
11498 inst
.instruction
= THUMB_OP16(opcode
);
11499 if (cond
== COND_ALWAYS
)
11500 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
11503 inst
.instruction
|= cond
<< 8;
11504 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
11506 /* Allow section relaxation. */
11507 if (unified_syntax
&& inst
.size_req
!= 2)
11508 inst
.relax
= opcode
;
11510 inst
.relocs
[0].type
= reloc
;
11511 inst
.relocs
[0].pc_rel
= 1;
11514 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11515 between the two is the maximum immediate allowed - which is passed in
11518 do_t_bkpt_hlt1 (int range
)
11520 constraint (inst
.cond
!= COND_ALWAYS
,
11521 _("instruction is always unconditional"));
11522 if (inst
.operands
[0].present
)
11524 constraint (inst
.operands
[0].imm
> range
,
11525 _("immediate value out of range"));
11526 inst
.instruction
|= inst
.operands
[0].imm
;
11529 set_pred_insn_type (NEUTRAL_IT_INSN
);
11535 do_t_bkpt_hlt1 (63);
11541 do_t_bkpt_hlt1 (255);
11545 do_t_branch23 (void)
11547 set_pred_insn_type_last ();
11548 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11550 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11551 this file. We used to simply ignore the PLT reloc type here --
11552 the branch encoding is now needed to deal with TLSCALL relocs.
11553 So if we see a PLT reloc now, put it back to how it used to be to
11554 keep the preexisting behaviour. */
11555 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_PLT32
)
11556 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11558 #if defined(OBJ_COFF)
11559 /* If the destination of the branch is a defined symbol which does not have
11560 the THUMB_FUNC attribute, then we must be calling a function which has
11561 the (interfacearm) attribute. We look for the Thumb entry point to that
11562 function and change the branch to refer to that function instead. */
11563 if ( inst
.relocs
[0].exp
.X_op
== O_symbol
11564 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11565 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11566 && ! THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11567 inst
.relocs
[0].exp
.X_add_symbol
11568 = find_real_start (inst
.relocs
[0].exp
.X_add_symbol
);
11575 set_pred_insn_type_last ();
11576 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11577 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11578 should cause the alignment to be checked once it is known. This is
11579 because BX PC only works if the instruction is word aligned. */
11587 set_pred_insn_type_last ();
11588 Rm
= inst
.operands
[0].reg
;
11589 reject_bad_reg (Rm
);
11590 inst
.instruction
|= Rm
<< 16;
11599 Rd
= inst
.operands
[0].reg
;
11600 Rm
= inst
.operands
[1].reg
;
11602 reject_bad_reg (Rd
);
11603 reject_bad_reg (Rm
);
11605 inst
.instruction
|= Rd
<< 8;
11606 inst
.instruction
|= Rm
<< 16;
11607 inst
.instruction
|= Rm
;
11613 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11619 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11620 inst
.instruction
|= inst
.operands
[0].imm
;
11626 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11628 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11629 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11631 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11632 inst
.instruction
= 0xf3af8000;
11633 inst
.instruction
|= imod
<< 9;
11634 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11635 if (inst
.operands
[1].present
)
11636 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11640 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11641 && (inst
.operands
[0].imm
& 4),
11642 _("selected processor does not support 'A' form "
11643 "of this instruction"));
11644 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11645 _("Thumb does not support the 2-argument "
11646 "form of this instruction"));
11647 inst
.instruction
|= inst
.operands
[0].imm
;
11651 /* THUMB CPY instruction (argument parse). */
11656 if (inst
.size_req
== 4)
11658 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11659 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11660 inst
.instruction
|= inst
.operands
[1].reg
;
11664 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11665 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11666 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11673 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11674 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11675 inst
.instruction
|= inst
.operands
[0].reg
;
11676 inst
.relocs
[0].pc_rel
= 1;
11677 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11683 inst
.instruction
|= inst
.operands
[0].imm
;
11689 unsigned Rd
, Rn
, Rm
;
11691 Rd
= inst
.operands
[0].reg
;
11692 Rn
= (inst
.operands
[1].present
11693 ? inst
.operands
[1].reg
: Rd
);
11694 Rm
= inst
.operands
[2].reg
;
11696 reject_bad_reg (Rd
);
11697 reject_bad_reg (Rn
);
11698 reject_bad_reg (Rm
);
11700 inst
.instruction
|= Rd
<< 8;
11701 inst
.instruction
|= Rn
<< 16;
11702 inst
.instruction
|= Rm
;
11708 if (unified_syntax
&& inst
.size_req
== 4)
11709 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11711 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11717 unsigned int cond
= inst
.operands
[0].imm
;
11719 set_pred_insn_type (IT_INSN
);
11720 now_pred
.mask
= (inst
.instruction
& 0xf) | 0x10;
11721 now_pred
.cc
= cond
;
11722 now_pred
.warn_deprecated
= FALSE
;
11723 now_pred
.type
= SCALAR_PRED
;
11725 /* If the condition is a negative condition, invert the mask. */
11726 if ((cond
& 0x1) == 0x0)
11728 unsigned int mask
= inst
.instruction
& 0x000f;
11730 if ((mask
& 0x7) == 0)
11732 /* No conversion needed. */
11733 now_pred
.block_length
= 1;
11735 else if ((mask
& 0x3) == 0)
11738 now_pred
.block_length
= 2;
11740 else if ((mask
& 0x1) == 0)
11743 now_pred
.block_length
= 3;
11748 now_pred
.block_length
= 4;
11751 inst
.instruction
&= 0xfff0;
11752 inst
.instruction
|= mask
;
11755 inst
.instruction
|= cond
<< 4;
11761 /* We are dealing with a vector predicated block. */
11762 set_pred_insn_type (VPT_INSN
);
11764 now_pred
.mask
= ((inst
.instruction
& 0x00400000) >> 19)
11765 | ((inst
.instruction
& 0xe000) >> 13);
11766 now_pred
.warn_deprecated
= FALSE
;
11767 now_pred
.type
= VECTOR_PRED
;
11770 /* Helper function used for both push/pop and ldm/stm. */
11772 encode_thumb2_multi (bfd_boolean do_io
, int base
, unsigned mask
,
11773 bfd_boolean writeback
)
11775 bfd_boolean load
, store
;
11777 gas_assert (base
!= -1 || !do_io
);
11778 load
= do_io
&& ((inst
.instruction
& (1 << 20)) != 0);
11779 store
= do_io
&& !load
;
11781 if (mask
& (1 << 13))
11782 inst
.error
= _("SP not allowed in register list");
11784 if (do_io
&& (mask
& (1 << base
)) != 0
11786 inst
.error
= _("having the base register in the register list when "
11787 "using write back is UNPREDICTABLE");
11791 if (mask
& (1 << 15))
11793 if (mask
& (1 << 14))
11794 inst
.error
= _("LR and PC should not both be in register list");
11796 set_pred_insn_type_last ();
11801 if (mask
& (1 << 15))
11802 inst
.error
= _("PC not allowed in register list");
11805 if (do_io
&& ((mask
& (mask
- 1)) == 0))
11807 /* Single register transfers implemented as str/ldr. */
11810 if (inst
.instruction
& (1 << 23))
11811 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11813 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11817 if (inst
.instruction
& (1 << 23))
11818 inst
.instruction
= 0x00800000; /* ia -> [base] */
11820 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11823 inst
.instruction
|= 0xf8400000;
11825 inst
.instruction
|= 0x00100000;
11827 mask
= ffs (mask
) - 1;
11830 else if (writeback
)
11831 inst
.instruction
|= WRITE_BACK
;
11833 inst
.instruction
|= mask
;
11835 inst
.instruction
|= base
<< 16;
11841 /* This really doesn't seem worth it. */
11842 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
11843 _("expression too complex"));
11844 constraint (inst
.operands
[1].writeback
,
11845 _("Thumb load/store multiple does not support {reglist}^"));
11847 if (unified_syntax
)
11849 bfd_boolean narrow
;
11853 /* See if we can use a 16-bit instruction. */
11854 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11855 && inst
.size_req
!= 4
11856 && !(inst
.operands
[1].imm
& ~0xff))
11858 mask
= 1 << inst
.operands
[0].reg
;
11860 if (inst
.operands
[0].reg
<= 7)
11862 if (inst
.instruction
== T_MNEM_stmia
11863 ? inst
.operands
[0].writeback
11864 : (inst
.operands
[0].writeback
11865 == !(inst
.operands
[1].imm
& mask
)))
11867 if (inst
.instruction
== T_MNEM_stmia
11868 && (inst
.operands
[1].imm
& mask
)
11869 && (inst
.operands
[1].imm
& (mask
- 1)))
11870 as_warn (_("value stored for r%d is UNKNOWN"),
11871 inst
.operands
[0].reg
);
11873 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11874 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11875 inst
.instruction
|= inst
.operands
[1].imm
;
11878 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11880 /* This means 1 register in reg list one of 3 situations:
11881 1. Instruction is stmia, but without writeback.
11882 2. lmdia without writeback, but with Rn not in
11884 3. ldmia with writeback, but with Rn in reglist.
11885 Case 3 is UNPREDICTABLE behaviour, so we handle
11886 case 1 and 2 which can be converted into a 16-bit
11887 str or ldr. The SP cases are handled below. */
11888 unsigned long opcode
;
11889 /* First, record an error for Case 3. */
11890 if (inst
.operands
[1].imm
& mask
11891 && inst
.operands
[0].writeback
)
11893 _("having the base register in the register list when "
11894 "using write back is UNPREDICTABLE");
11896 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11898 inst
.instruction
= THUMB_OP16 (opcode
);
11899 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11900 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11904 else if (inst
.operands
[0] .reg
== REG_SP
)
11906 if (inst
.operands
[0].writeback
)
11909 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11910 ? T_MNEM_push
: T_MNEM_pop
);
11911 inst
.instruction
|= inst
.operands
[1].imm
;
11914 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11917 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11918 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11919 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11927 if (inst
.instruction
< 0xffff)
11928 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11930 encode_thumb2_multi (TRUE
/* do_io */, inst
.operands
[0].reg
,
11931 inst
.operands
[1].imm
,
11932 inst
.operands
[0].writeback
);
11937 constraint (inst
.operands
[0].reg
> 7
11938 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11939 constraint (inst
.instruction
!= T_MNEM_ldmia
11940 && inst
.instruction
!= T_MNEM_stmia
,
11941 _("Thumb-2 instruction only valid in unified syntax"));
11942 if (inst
.instruction
== T_MNEM_stmia
)
11944 if (!inst
.operands
[0].writeback
)
11945 as_warn (_("this instruction will write back the base register"));
11946 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11947 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11948 as_warn (_("value stored for r%d is UNKNOWN"),
11949 inst
.operands
[0].reg
);
11953 if (!inst
.operands
[0].writeback
11954 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11955 as_warn (_("this instruction will write back the base register"));
11956 else if (inst
.operands
[0].writeback
11957 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11958 as_warn (_("this instruction will not write back the base register"));
11961 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11962 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11963 inst
.instruction
|= inst
.operands
[1].imm
;
11970 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11971 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11972 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
11973 || inst
.operands
[1].negative
,
11976 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
11978 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11979 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11980 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11986 if (!inst
.operands
[1].present
)
11988 constraint (inst
.operands
[0].reg
== REG_LR
,
11989 _("r14 not allowed as first register "
11990 "when second register is omitted"));
11991 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11993 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11996 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11997 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11998 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12004 unsigned long opcode
;
12007 if (inst
.operands
[0].isreg
12008 && !inst
.operands
[0].preind
12009 && inst
.operands
[0].reg
== REG_PC
)
12010 set_pred_insn_type_last ();
12012 opcode
= inst
.instruction
;
12013 if (unified_syntax
)
12015 if (!inst
.operands
[1].isreg
)
12017 if (opcode
<= 0xffff)
12018 inst
.instruction
= THUMB_OP32 (opcode
);
12019 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
12022 if (inst
.operands
[1].isreg
12023 && !inst
.operands
[1].writeback
12024 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
12025 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
12026 && opcode
<= 0xffff
12027 && inst
.size_req
!= 4)
12029 /* Insn may have a 16-bit form. */
12030 Rn
= inst
.operands
[1].reg
;
12031 if (inst
.operands
[1].immisreg
)
12033 inst
.instruction
= THUMB_OP16 (opcode
);
12035 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
12037 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
12038 reject_bad_reg (inst
.operands
[1].imm
);
12040 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
12041 && opcode
!= T_MNEM_ldrsb
)
12042 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
12043 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
12050 if (inst
.relocs
[0].pc_rel
)
12051 opcode
= T_MNEM_ldr_pc2
;
12053 opcode
= T_MNEM_ldr_pc
;
12057 if (opcode
== T_MNEM_ldr
)
12058 opcode
= T_MNEM_ldr_sp
;
12060 opcode
= T_MNEM_str_sp
;
12062 inst
.instruction
= inst
.operands
[0].reg
<< 8;
12066 inst
.instruction
= inst
.operands
[0].reg
;
12067 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12069 inst
.instruction
|= THUMB_OP16 (opcode
);
12070 if (inst
.size_req
== 2)
12071 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12073 inst
.relax
= opcode
;
12077 /* Definitely a 32-bit variant. */
12079 /* Warning for Erratum 752419. */
12080 if (opcode
== T_MNEM_ldr
12081 && inst
.operands
[0].reg
== REG_SP
12082 && inst
.operands
[1].writeback
== 1
12083 && !inst
.operands
[1].immisreg
)
12085 if (no_cpu_selected ()
12086 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
12087 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
12088 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
12089 as_warn (_("This instruction may be unpredictable "
12090 "if executed on M-profile cores "
12091 "with interrupts enabled."));
12094 /* Do some validations regarding addressing modes. */
12095 if (inst
.operands
[1].immisreg
)
12096 reject_bad_reg (inst
.operands
[1].imm
);
12098 constraint (inst
.operands
[1].writeback
== 1
12099 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
12102 inst
.instruction
= THUMB_OP32 (opcode
);
12103 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12104 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12105 check_ldr_r15_aligned ();
12109 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
12111 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
12113 /* Only [Rn,Rm] is acceptable. */
12114 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
12115 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
12116 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
12117 || inst
.operands
[1].negative
,
12118 _("Thumb does not support this addressing mode"));
12119 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12123 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12124 if (!inst
.operands
[1].isreg
)
12125 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
12128 constraint (!inst
.operands
[1].preind
12129 || inst
.operands
[1].shifted
12130 || inst
.operands
[1].writeback
,
12131 _("Thumb does not support this addressing mode"));
12132 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
12134 constraint (inst
.instruction
& 0x0600,
12135 _("byte or halfword not valid for base register"));
12136 constraint (inst
.operands
[1].reg
== REG_PC
12137 && !(inst
.instruction
& THUMB_LOAD_BIT
),
12138 _("r15 based store not allowed"));
12139 constraint (inst
.operands
[1].immisreg
,
12140 _("invalid base register for register offset"));
12142 if (inst
.operands
[1].reg
== REG_PC
)
12143 inst
.instruction
= T_OPCODE_LDR_PC
;
12144 else if (inst
.instruction
& THUMB_LOAD_BIT
)
12145 inst
.instruction
= T_OPCODE_LDR_SP
;
12147 inst
.instruction
= T_OPCODE_STR_SP
;
12149 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12150 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12154 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
12155 if (!inst
.operands
[1].immisreg
)
12157 /* Immediate offset. */
12158 inst
.instruction
|= inst
.operands
[0].reg
;
12159 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12160 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12164 /* Register offset. */
12165 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
12166 constraint (inst
.operands
[1].negative
,
12167 _("Thumb does not support this addressing mode"));
12170 switch (inst
.instruction
)
12172 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
12173 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
12174 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
12175 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
12176 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
12177 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
12178 case 0x5600 /* ldrsb */:
12179 case 0x5e00 /* ldrsh */: break;
12183 inst
.instruction
|= inst
.operands
[0].reg
;
12184 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12185 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
12191 if (!inst
.operands
[1].present
)
12193 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
12194 constraint (inst
.operands
[0].reg
== REG_LR
,
12195 _("r14 not allowed here"));
12196 constraint (inst
.operands
[0].reg
== REG_R12
,
12197 _("r12 not allowed here"));
12200 if (inst
.operands
[2].writeback
12201 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
12202 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
12203 as_warn (_("base register written back, and overlaps "
12204 "one of transfer registers"));
12206 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12207 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
12208 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
12214 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12215 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
12221 unsigned Rd
, Rn
, Rm
, Ra
;
12223 Rd
= inst
.operands
[0].reg
;
12224 Rn
= inst
.operands
[1].reg
;
12225 Rm
= inst
.operands
[2].reg
;
12226 Ra
= inst
.operands
[3].reg
;
12228 reject_bad_reg (Rd
);
12229 reject_bad_reg (Rn
);
12230 reject_bad_reg (Rm
);
12231 reject_bad_reg (Ra
);
12233 inst
.instruction
|= Rd
<< 8;
12234 inst
.instruction
|= Rn
<< 16;
12235 inst
.instruction
|= Rm
;
12236 inst
.instruction
|= Ra
<< 12;
12242 unsigned RdLo
, RdHi
, Rn
, Rm
;
12244 RdLo
= inst
.operands
[0].reg
;
12245 RdHi
= inst
.operands
[1].reg
;
12246 Rn
= inst
.operands
[2].reg
;
12247 Rm
= inst
.operands
[3].reg
;
12249 reject_bad_reg (RdLo
);
12250 reject_bad_reg (RdHi
);
12251 reject_bad_reg (Rn
);
12252 reject_bad_reg (Rm
);
12254 inst
.instruction
|= RdLo
<< 12;
12255 inst
.instruction
|= RdHi
<< 8;
12256 inst
.instruction
|= Rn
<< 16;
12257 inst
.instruction
|= Rm
;
12261 do_t_mov_cmp (void)
12265 Rn
= inst
.operands
[0].reg
;
12266 Rm
= inst
.operands
[1].reg
;
12269 set_pred_insn_type_last ();
12271 if (unified_syntax
)
12273 int r0off
= (inst
.instruction
== T_MNEM_mov
12274 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
12275 unsigned long opcode
;
12276 bfd_boolean narrow
;
12277 bfd_boolean low_regs
;
12279 low_regs
= (Rn
<= 7 && Rm
<= 7);
12280 opcode
= inst
.instruction
;
12281 if (in_pred_block ())
12282 narrow
= opcode
!= T_MNEM_movs
;
12284 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
12285 if (inst
.size_req
== 4
12286 || inst
.operands
[1].shifted
)
12289 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
12290 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
12291 && !inst
.operands
[1].shifted
12295 inst
.instruction
= T2_SUBS_PC_LR
;
12299 if (opcode
== T_MNEM_cmp
)
12301 constraint (Rn
== REG_PC
, BAD_PC
);
12304 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
12306 warn_deprecated_sp (Rm
);
12307 /* R15 was documented as a valid choice for Rm in ARMv6,
12308 but as UNPREDICTABLE in ARMv7. ARM's proprietary
12309 tools reject R15, so we do too. */
12310 constraint (Rm
== REG_PC
, BAD_PC
);
12313 reject_bad_reg (Rm
);
12315 else if (opcode
== T_MNEM_mov
12316 || opcode
== T_MNEM_movs
)
12318 if (inst
.operands
[1].isreg
)
12320 if (opcode
== T_MNEM_movs
)
12322 reject_bad_reg (Rn
);
12323 reject_bad_reg (Rm
);
12327 /* This is mov.n. */
12328 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
12329 && (Rm
== REG_SP
|| Rm
== REG_PC
))
12331 as_tsktsk (_("Use of r%u as a source register is "
12332 "deprecated when r%u is the destination "
12333 "register."), Rm
, Rn
);
12338 /* This is mov.w. */
12339 constraint (Rn
== REG_PC
, BAD_PC
);
12340 constraint (Rm
== REG_PC
, BAD_PC
);
12341 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12342 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
12346 reject_bad_reg (Rn
);
12349 if (!inst
.operands
[1].isreg
)
12351 /* Immediate operand. */
12352 if (!in_pred_block () && opcode
== T_MNEM_mov
)
12354 if (low_regs
&& narrow
)
12356 inst
.instruction
= THUMB_OP16 (opcode
);
12357 inst
.instruction
|= Rn
<< 8;
12358 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
12359 || inst
.relocs
[0].type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
12361 if (inst
.size_req
== 2)
12362 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12364 inst
.relax
= opcode
;
12369 constraint ((inst
.relocs
[0].type
12370 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
12371 && (inst
.relocs
[0].type
12372 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
12373 THUMB1_RELOC_ONLY
);
12375 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12376 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12377 inst
.instruction
|= Rn
<< r0off
;
12378 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12381 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
12382 && (inst
.instruction
== T_MNEM_mov
12383 || inst
.instruction
== T_MNEM_movs
))
12385 /* Register shifts are encoded as separate shift instructions. */
12386 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
12388 if (in_pred_block ())
12393 if (inst
.size_req
== 4)
12396 if (!low_regs
|| inst
.operands
[1].imm
> 7)
12402 switch (inst
.operands
[1].shift_kind
)
12405 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
12408 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
12411 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
12414 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
12420 inst
.instruction
= opcode
;
12423 inst
.instruction
|= Rn
;
12424 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
12429 inst
.instruction
|= CONDS_BIT
;
12431 inst
.instruction
|= Rn
<< 8;
12432 inst
.instruction
|= Rm
<< 16;
12433 inst
.instruction
|= inst
.operands
[1].imm
;
12438 /* Some mov with immediate shift have narrow variants.
12439 Register shifts are handled above. */
12440 if (low_regs
&& inst
.operands
[1].shifted
12441 && (inst
.instruction
== T_MNEM_mov
12442 || inst
.instruction
== T_MNEM_movs
))
12444 if (in_pred_block ())
12445 narrow
= (inst
.instruction
== T_MNEM_mov
);
12447 narrow
= (inst
.instruction
== T_MNEM_movs
);
12452 switch (inst
.operands
[1].shift_kind
)
12454 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12455 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12456 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12457 default: narrow
= FALSE
; break;
12463 inst
.instruction
|= Rn
;
12464 inst
.instruction
|= Rm
<< 3;
12465 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12469 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12470 inst
.instruction
|= Rn
<< r0off
;
12471 encode_thumb32_shifted_operand (1);
12475 switch (inst
.instruction
)
12478 /* In v4t or v5t a move of two lowregs produces unpredictable
12479 results. Don't allow this. */
12482 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
12483 "MOV Rd, Rs with two low registers is not "
12484 "permitted on this architecture");
12485 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12489 inst
.instruction
= T_OPCODE_MOV_HR
;
12490 inst
.instruction
|= (Rn
& 0x8) << 4;
12491 inst
.instruction
|= (Rn
& 0x7);
12492 inst
.instruction
|= Rm
<< 3;
12496 /* We know we have low registers at this point.
12497 Generate LSLS Rd, Rs, #0. */
12498 inst
.instruction
= T_OPCODE_LSL_I
;
12499 inst
.instruction
|= Rn
;
12500 inst
.instruction
|= Rm
<< 3;
12506 inst
.instruction
= T_OPCODE_CMP_LR
;
12507 inst
.instruction
|= Rn
;
12508 inst
.instruction
|= Rm
<< 3;
12512 inst
.instruction
= T_OPCODE_CMP_HR
;
12513 inst
.instruction
|= (Rn
& 0x8) << 4;
12514 inst
.instruction
|= (Rn
& 0x7);
12515 inst
.instruction
|= Rm
<< 3;
12522 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12524 /* PR 10443: Do not silently ignore shifted operands. */
12525 constraint (inst
.operands
[1].shifted
,
12526 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12528 if (inst
.operands
[1].isreg
)
12530 if (Rn
< 8 && Rm
< 8)
12532 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12533 since a MOV instruction produces unpredictable results. */
12534 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12535 inst
.instruction
= T_OPCODE_ADD_I3
;
12537 inst
.instruction
= T_OPCODE_CMP_LR
;
12539 inst
.instruction
|= Rn
;
12540 inst
.instruction
|= Rm
<< 3;
12544 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12545 inst
.instruction
= T_OPCODE_MOV_HR
;
12547 inst
.instruction
= T_OPCODE_CMP_HR
;
12553 constraint (Rn
> 7,
12554 _("only lo regs allowed with immediate"));
12555 inst
.instruction
|= Rn
<< 8;
12556 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12567 top
= (inst
.instruction
& 0x00800000) != 0;
12568 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
)
12570 constraint (top
, _(":lower16: not allowed in this instruction"));
12571 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVW
;
12573 else if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
)
12575 constraint (!top
, _(":upper16: not allowed in this instruction"));
12576 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVT
;
12579 Rd
= inst
.operands
[0].reg
;
12580 reject_bad_reg (Rd
);
12582 inst
.instruction
|= Rd
<< 8;
12583 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
12585 imm
= inst
.relocs
[0].exp
.X_add_number
;
12586 inst
.instruction
|= (imm
& 0xf000) << 4;
12587 inst
.instruction
|= (imm
& 0x0800) << 15;
12588 inst
.instruction
|= (imm
& 0x0700) << 4;
12589 inst
.instruction
|= (imm
& 0x00ff);
12594 do_t_mvn_tst (void)
12598 Rn
= inst
.operands
[0].reg
;
12599 Rm
= inst
.operands
[1].reg
;
12601 if (inst
.instruction
== T_MNEM_cmp
12602 || inst
.instruction
== T_MNEM_cmn
)
12603 constraint (Rn
== REG_PC
, BAD_PC
);
12605 reject_bad_reg (Rn
);
12606 reject_bad_reg (Rm
);
12608 if (unified_syntax
)
12610 int r0off
= (inst
.instruction
== T_MNEM_mvn
12611 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12612 bfd_boolean narrow
;
12614 if (inst
.size_req
== 4
12615 || inst
.instruction
> 0xffff
12616 || inst
.operands
[1].shifted
12617 || Rn
> 7 || Rm
> 7)
12619 else if (inst
.instruction
== T_MNEM_cmn
12620 || inst
.instruction
== T_MNEM_tst
)
12622 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12623 narrow
= !in_pred_block ();
12625 narrow
= in_pred_block ();
12627 if (!inst
.operands
[1].isreg
)
12629 /* For an immediate, we always generate a 32-bit opcode;
12630 section relaxation will shrink it later if possible. */
12631 if (inst
.instruction
< 0xffff)
12632 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12633 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12634 inst
.instruction
|= Rn
<< r0off
;
12635 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12639 /* See if we can do this with a 16-bit instruction. */
12642 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12643 inst
.instruction
|= Rn
;
12644 inst
.instruction
|= Rm
<< 3;
12648 constraint (inst
.operands
[1].shifted
12649 && inst
.operands
[1].immisreg
,
12650 _("shift must be constant"));
12651 if (inst
.instruction
< 0xffff)
12652 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12653 inst
.instruction
|= Rn
<< r0off
;
12654 encode_thumb32_shifted_operand (1);
12660 constraint (inst
.instruction
> 0xffff
12661 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12662 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12663 _("unshifted register required"));
12664 constraint (Rn
> 7 || Rm
> 7,
12667 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12668 inst
.instruction
|= Rn
;
12669 inst
.instruction
|= Rm
<< 3;
12678 if (do_vfp_nsyn_mrs () == SUCCESS
)
12681 Rd
= inst
.operands
[0].reg
;
12682 reject_bad_reg (Rd
);
12683 inst
.instruction
|= Rd
<< 8;
12685 if (inst
.operands
[1].isreg
)
12687 unsigned br
= inst
.operands
[1].reg
;
12688 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12689 as_bad (_("bad register for mrs"));
12691 inst
.instruction
|= br
& (0xf << 16);
12692 inst
.instruction
|= (br
& 0x300) >> 4;
12693 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12697 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12699 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12701 /* PR gas/12698: The constraint is only applied for m_profile.
12702 If the user has specified -march=all, we want to ignore it as
12703 we are building for any CPU type, including non-m variants. */
12704 bfd_boolean m_profile
=
12705 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12706 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12707 "not support requested special purpose register"));
12710 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12712 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12713 _("'APSR', 'CPSR' or 'SPSR' expected"));
12715 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12716 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12717 inst
.instruction
|= 0xf0000;
12727 if (do_vfp_nsyn_msr () == SUCCESS
)
12730 constraint (!inst
.operands
[1].isreg
,
12731 _("Thumb encoding does not support an immediate here"));
12733 if (inst
.operands
[0].isreg
)
12734 flags
= (int)(inst
.operands
[0].reg
);
12736 flags
= inst
.operands
[0].imm
;
12738 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12740 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12742 /* PR gas/12698: The constraint is only applied for m_profile.
12743 If the user has specified -march=all, we want to ignore it as
12744 we are building for any CPU type, including non-m variants. */
12745 bfd_boolean m_profile
=
12746 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12747 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12748 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12749 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12750 && bits
!= PSR_f
)) && m_profile
,
12751 _("selected processor does not support requested special "
12752 "purpose register"));
12755 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12756 "requested special purpose register"));
12758 Rn
= inst
.operands
[1].reg
;
12759 reject_bad_reg (Rn
);
12761 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12762 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12763 inst
.instruction
|= (flags
& 0x300) >> 4;
12764 inst
.instruction
|= (flags
& 0xff);
12765 inst
.instruction
|= Rn
<< 16;
12771 bfd_boolean narrow
;
12772 unsigned Rd
, Rn
, Rm
;
12774 if (!inst
.operands
[2].present
)
12775 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12777 Rd
= inst
.operands
[0].reg
;
12778 Rn
= inst
.operands
[1].reg
;
12779 Rm
= inst
.operands
[2].reg
;
12781 if (unified_syntax
)
12783 if (inst
.size_req
== 4
12789 else if (inst
.instruction
== T_MNEM_muls
)
12790 narrow
= !in_pred_block ();
12792 narrow
= in_pred_block ();
12796 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12797 constraint (Rn
> 7 || Rm
> 7,
12804 /* 16-bit MULS/Conditional MUL. */
12805 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12806 inst
.instruction
|= Rd
;
12809 inst
.instruction
|= Rm
<< 3;
12811 inst
.instruction
|= Rn
<< 3;
12813 constraint (1, _("dest must overlap one source register"));
12817 constraint (inst
.instruction
!= T_MNEM_mul
,
12818 _("Thumb-2 MUL must not set flags"));
12820 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12821 inst
.instruction
|= Rd
<< 8;
12822 inst
.instruction
|= Rn
<< 16;
12823 inst
.instruction
|= Rm
<< 0;
12825 reject_bad_reg (Rd
);
12826 reject_bad_reg (Rn
);
12827 reject_bad_reg (Rm
);
12834 unsigned RdLo
, RdHi
, Rn
, Rm
;
12836 RdLo
= inst
.operands
[0].reg
;
12837 RdHi
= inst
.operands
[1].reg
;
12838 Rn
= inst
.operands
[2].reg
;
12839 Rm
= inst
.operands
[3].reg
;
12841 reject_bad_reg (RdLo
);
12842 reject_bad_reg (RdHi
);
12843 reject_bad_reg (Rn
);
12844 reject_bad_reg (Rm
);
12846 inst
.instruction
|= RdLo
<< 12;
12847 inst
.instruction
|= RdHi
<< 8;
12848 inst
.instruction
|= Rn
<< 16;
12849 inst
.instruction
|= Rm
;
12852 as_tsktsk (_("rdhi and rdlo must be different"));
12858 set_pred_insn_type (NEUTRAL_IT_INSN
);
12860 if (unified_syntax
)
12862 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12864 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12865 inst
.instruction
|= inst
.operands
[0].imm
;
12869 /* PR9722: Check for Thumb2 availability before
12870 generating a thumb2 nop instruction. */
12871 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12873 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12874 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12877 inst
.instruction
= 0x46c0;
12882 constraint (inst
.operands
[0].present
,
12883 _("Thumb does not support NOP with hints"));
12884 inst
.instruction
= 0x46c0;
12891 if (unified_syntax
)
12893 bfd_boolean narrow
;
12895 if (THUMB_SETS_FLAGS (inst
.instruction
))
12896 narrow
= !in_pred_block ();
12898 narrow
= in_pred_block ();
12899 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12901 if (inst
.size_req
== 4)
12906 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12907 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12908 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12912 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12913 inst
.instruction
|= inst
.operands
[0].reg
;
12914 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12919 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12921 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12923 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12924 inst
.instruction
|= inst
.operands
[0].reg
;
12925 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12934 Rd
= inst
.operands
[0].reg
;
12935 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12937 reject_bad_reg (Rd
);
12938 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12939 reject_bad_reg (Rn
);
12941 inst
.instruction
|= Rd
<< 8;
12942 inst
.instruction
|= Rn
<< 16;
12944 if (!inst
.operands
[2].isreg
)
12946 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12947 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12953 Rm
= inst
.operands
[2].reg
;
12954 reject_bad_reg (Rm
);
12956 constraint (inst
.operands
[2].shifted
12957 && inst
.operands
[2].immisreg
,
12958 _("shift must be constant"));
12959 encode_thumb32_shifted_operand (2);
12966 unsigned Rd
, Rn
, Rm
;
12968 Rd
= inst
.operands
[0].reg
;
12969 Rn
= inst
.operands
[1].reg
;
12970 Rm
= inst
.operands
[2].reg
;
12972 reject_bad_reg (Rd
);
12973 reject_bad_reg (Rn
);
12974 reject_bad_reg (Rm
);
12976 inst
.instruction
|= Rd
<< 8;
12977 inst
.instruction
|= Rn
<< 16;
12978 inst
.instruction
|= Rm
;
12979 if (inst
.operands
[3].present
)
12981 unsigned int val
= inst
.relocs
[0].exp
.X_add_number
;
12982 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
12983 _("expression too complex"));
12984 inst
.instruction
|= (val
& 0x1c) << 10;
12985 inst
.instruction
|= (val
& 0x03) << 6;
12992 if (!inst
.operands
[3].present
)
12996 inst
.instruction
&= ~0x00000020;
12998 /* PR 10168. Swap the Rm and Rn registers. */
12999 Rtmp
= inst
.operands
[1].reg
;
13000 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
13001 inst
.operands
[2].reg
= Rtmp
;
13009 if (inst
.operands
[0].immisreg
)
13010 reject_bad_reg (inst
.operands
[0].imm
);
13012 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
13016 do_t_push_pop (void)
13020 constraint (inst
.operands
[0].writeback
,
13021 _("push/pop do not support {reglist}^"));
13022 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
13023 _("expression too complex"));
13025 mask
= inst
.operands
[0].imm
;
13026 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
13027 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
13028 else if (inst
.size_req
!= 4
13029 && (mask
& ~0xff) == (1U << (inst
.instruction
== T_MNEM_push
13030 ? REG_LR
: REG_PC
)))
13032 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13033 inst
.instruction
|= THUMB_PP_PC_LR
;
13034 inst
.instruction
|= mask
& 0xff;
13036 else if (unified_syntax
)
13038 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13039 encode_thumb2_multi (TRUE
/* do_io */, 13, mask
, TRUE
);
13043 inst
.error
= _("invalid register list to push/pop instruction");
13051 if (unified_syntax
)
13052 encode_thumb2_multi (FALSE
/* do_io */, -1, inst
.operands
[0].imm
, FALSE
);
13055 inst
.error
= _("invalid register list to push/pop instruction");
13061 do_t_vscclrm (void)
13063 if (inst
.operands
[0].issingle
)
13065 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1) << 22;
13066 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1e) << 11;
13067 inst
.instruction
|= inst
.operands
[0].imm
;
13071 inst
.instruction
|= (inst
.operands
[0].reg
& 0x10) << 18;
13072 inst
.instruction
|= (inst
.operands
[0].reg
& 0xf) << 12;
13073 inst
.instruction
|= 1 << 8;
13074 inst
.instruction
|= inst
.operands
[0].imm
<< 1;
13083 Rd
= inst
.operands
[0].reg
;
13084 Rm
= inst
.operands
[1].reg
;
13086 reject_bad_reg (Rd
);
13087 reject_bad_reg (Rm
);
13089 inst
.instruction
|= Rd
<< 8;
13090 inst
.instruction
|= Rm
<< 16;
13091 inst
.instruction
|= Rm
;
13099 Rd
= inst
.operands
[0].reg
;
13100 Rm
= inst
.operands
[1].reg
;
13102 reject_bad_reg (Rd
);
13103 reject_bad_reg (Rm
);
13105 if (Rd
<= 7 && Rm
<= 7
13106 && inst
.size_req
!= 4)
13108 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13109 inst
.instruction
|= Rd
;
13110 inst
.instruction
|= Rm
<< 3;
13112 else if (unified_syntax
)
13114 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13115 inst
.instruction
|= Rd
<< 8;
13116 inst
.instruction
|= Rm
<< 16;
13117 inst
.instruction
|= Rm
;
13120 inst
.error
= BAD_HIREG
;
13128 Rd
= inst
.operands
[0].reg
;
13129 Rm
= inst
.operands
[1].reg
;
13131 reject_bad_reg (Rd
);
13132 reject_bad_reg (Rm
);
13134 inst
.instruction
|= Rd
<< 8;
13135 inst
.instruction
|= Rm
;
13143 Rd
= inst
.operands
[0].reg
;
13144 Rs
= (inst
.operands
[1].present
13145 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
13146 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
13148 reject_bad_reg (Rd
);
13149 reject_bad_reg (Rs
);
13150 if (inst
.operands
[2].isreg
)
13151 reject_bad_reg (inst
.operands
[2].reg
);
13153 inst
.instruction
|= Rd
<< 8;
13154 inst
.instruction
|= Rs
<< 16;
13155 if (!inst
.operands
[2].isreg
)
13157 bfd_boolean narrow
;
13159 if ((inst
.instruction
& 0x00100000) != 0)
13160 narrow
= !in_pred_block ();
13162 narrow
= in_pred_block ();
13164 if (Rd
> 7 || Rs
> 7)
13167 if (inst
.size_req
== 4 || !unified_syntax
)
13170 if (inst
.relocs
[0].exp
.X_op
!= O_constant
13171 || inst
.relocs
[0].exp
.X_add_number
!= 0)
13174 /* Turn rsb #0 into 16-bit neg. We should probably do this via
13175 relaxation, but it doesn't seem worth the hassle. */
13178 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13179 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
13180 inst
.instruction
|= Rs
<< 3;
13181 inst
.instruction
|= Rd
;
13185 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
13186 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
13190 encode_thumb32_shifted_operand (2);
13196 if (warn_on_deprecated
13197 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13198 as_tsktsk (_("setend use is deprecated for ARMv8"));
13200 set_pred_insn_type (OUTSIDE_PRED_INSN
);
13201 if (inst
.operands
[0].imm
)
13202 inst
.instruction
|= 0x8;
13208 if (!inst
.operands
[1].present
)
13209 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
13211 if (unified_syntax
)
13213 bfd_boolean narrow
;
13216 switch (inst
.instruction
)
13219 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
13221 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
13223 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
13225 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
13229 if (THUMB_SETS_FLAGS (inst
.instruction
))
13230 narrow
= !in_pred_block ();
13232 narrow
= in_pred_block ();
13233 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
13235 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
13237 if (inst
.operands
[2].isreg
13238 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
13239 || inst
.operands
[2].reg
> 7))
13241 if (inst
.size_req
== 4)
13244 reject_bad_reg (inst
.operands
[0].reg
);
13245 reject_bad_reg (inst
.operands
[1].reg
);
13249 if (inst
.operands
[2].isreg
)
13251 reject_bad_reg (inst
.operands
[2].reg
);
13252 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13253 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13254 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13255 inst
.instruction
|= inst
.operands
[2].reg
;
13257 /* PR 12854: Error on extraneous shifts. */
13258 constraint (inst
.operands
[2].shifted
,
13259 _("extraneous shift as part of operand to shift insn"));
13263 inst
.operands
[1].shifted
= 1;
13264 inst
.operands
[1].shift_kind
= shift_kind
;
13265 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
13266 ? T_MNEM_movs
: T_MNEM_mov
);
13267 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13268 encode_thumb32_shifted_operand (1);
13269 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
13270 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13275 if (inst
.operands
[2].isreg
)
13277 switch (shift_kind
)
13279 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13280 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13281 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13282 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13286 inst
.instruction
|= inst
.operands
[0].reg
;
13287 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13289 /* PR 12854: Error on extraneous shifts. */
13290 constraint (inst
.operands
[2].shifted
,
13291 _("extraneous shift as part of operand to shift insn"));
13295 switch (shift_kind
)
13297 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13298 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13299 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13302 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13303 inst
.instruction
|= inst
.operands
[0].reg
;
13304 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13310 constraint (inst
.operands
[0].reg
> 7
13311 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
13312 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
13314 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
13316 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
13317 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
13318 _("source1 and dest must be same register"));
13320 switch (inst
.instruction
)
13322 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13323 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13324 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13325 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13329 inst
.instruction
|= inst
.operands
[0].reg
;
13330 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13332 /* PR 12854: Error on extraneous shifts. */
13333 constraint (inst
.operands
[2].shifted
,
13334 _("extraneous shift as part of operand to shift insn"));
13338 switch (inst
.instruction
)
13340 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13341 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13342 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13343 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
13346 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13347 inst
.instruction
|= inst
.operands
[0].reg
;
13348 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13356 unsigned Rd
, Rn
, Rm
;
13358 Rd
= inst
.operands
[0].reg
;
13359 Rn
= inst
.operands
[1].reg
;
13360 Rm
= inst
.operands
[2].reg
;
13362 reject_bad_reg (Rd
);
13363 reject_bad_reg (Rn
);
13364 reject_bad_reg (Rm
);
13366 inst
.instruction
|= Rd
<< 8;
13367 inst
.instruction
|= Rn
<< 16;
13368 inst
.instruction
|= Rm
;
13374 unsigned Rd
, Rn
, Rm
;
13376 Rd
= inst
.operands
[0].reg
;
13377 Rm
= inst
.operands
[1].reg
;
13378 Rn
= inst
.operands
[2].reg
;
13380 reject_bad_reg (Rd
);
13381 reject_bad_reg (Rn
);
13382 reject_bad_reg (Rm
);
13384 inst
.instruction
|= Rd
<< 8;
13385 inst
.instruction
|= Rn
<< 16;
13386 inst
.instruction
|= Rm
;
13392 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13393 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
13394 _("SMC is not permitted on this architecture"));
13395 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13396 _("expression too complex"));
13397 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13398 inst
.instruction
|= (value
& 0xf000) >> 12;
13399 inst
.instruction
|= (value
& 0x0ff0);
13400 inst
.instruction
|= (value
& 0x000f) << 16;
13401 /* PR gas/15623: SMC instructions must be last in an IT block. */
13402 set_pred_insn_type_last ();
13408 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13410 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13411 inst
.instruction
|= (value
& 0x0fff);
13412 inst
.instruction
|= (value
& 0xf000) << 4;
13416 do_t_ssat_usat (int bias
)
13420 Rd
= inst
.operands
[0].reg
;
13421 Rn
= inst
.operands
[2].reg
;
13423 reject_bad_reg (Rd
);
13424 reject_bad_reg (Rn
);
13426 inst
.instruction
|= Rd
<< 8;
13427 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
13428 inst
.instruction
|= Rn
<< 16;
13430 if (inst
.operands
[3].present
)
13432 offsetT shift_amount
= inst
.relocs
[0].exp
.X_add_number
;
13434 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13436 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13437 _("expression too complex"));
13439 if (shift_amount
!= 0)
13441 constraint (shift_amount
> 31,
13442 _("shift expression is too large"));
13444 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
13445 inst
.instruction
|= 0x00200000; /* sh bit. */
13447 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
13448 inst
.instruction
|= (shift_amount
& 0x03) << 6;
13456 do_t_ssat_usat (1);
13464 Rd
= inst
.operands
[0].reg
;
13465 Rn
= inst
.operands
[2].reg
;
13467 reject_bad_reg (Rd
);
13468 reject_bad_reg (Rn
);
13470 inst
.instruction
|= Rd
<< 8;
13471 inst
.instruction
|= inst
.operands
[1].imm
- 1;
13472 inst
.instruction
|= Rn
<< 16;
13478 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
13479 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
13480 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
13481 || inst
.operands
[2].negative
,
13484 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
13486 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13487 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13488 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
13489 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
13495 if (!inst
.operands
[2].present
)
13496 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
13498 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
13499 || inst
.operands
[0].reg
== inst
.operands
[2].reg
13500 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
13503 inst
.instruction
|= inst
.operands
[0].reg
;
13504 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13505 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
13506 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
13512 unsigned Rd
, Rn
, Rm
;
13514 Rd
= inst
.operands
[0].reg
;
13515 Rn
= inst
.operands
[1].reg
;
13516 Rm
= inst
.operands
[2].reg
;
13518 reject_bad_reg (Rd
);
13519 reject_bad_reg (Rn
);
13520 reject_bad_reg (Rm
);
13522 inst
.instruction
|= Rd
<< 8;
13523 inst
.instruction
|= Rn
<< 16;
13524 inst
.instruction
|= Rm
;
13525 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
13533 Rd
= inst
.operands
[0].reg
;
13534 Rm
= inst
.operands
[1].reg
;
13536 reject_bad_reg (Rd
);
13537 reject_bad_reg (Rm
);
13539 if (inst
.instruction
<= 0xffff
13540 && inst
.size_req
!= 4
13541 && Rd
<= 7 && Rm
<= 7
13542 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
13544 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13545 inst
.instruction
|= Rd
;
13546 inst
.instruction
|= Rm
<< 3;
13548 else if (unified_syntax
)
13550 if (inst
.instruction
<= 0xffff)
13551 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13552 inst
.instruction
|= Rd
<< 8;
13553 inst
.instruction
|= Rm
;
13554 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
13558 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
13559 _("Thumb encoding does not support rotation"));
13560 constraint (1, BAD_HIREG
);
13567 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
13576 half
= (inst
.instruction
& 0x10) != 0;
13577 set_pred_insn_type_last ();
13578 constraint (inst
.operands
[0].immisreg
,
13579 _("instruction requires register index"));
13581 Rn
= inst
.operands
[0].reg
;
13582 Rm
= inst
.operands
[0].imm
;
13584 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13585 constraint (Rn
== REG_SP
, BAD_SP
);
13586 reject_bad_reg (Rm
);
13588 constraint (!half
&& inst
.operands
[0].shifted
,
13589 _("instruction does not allow shifted index"));
13590 inst
.instruction
|= (Rn
<< 16) | Rm
;
13596 if (!inst
.operands
[0].present
)
13597 inst
.operands
[0].imm
= 0;
13599 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13601 constraint (inst
.size_req
== 2,
13602 _("immediate value out of range"));
13603 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13604 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13605 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13609 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13610 inst
.instruction
|= inst
.operands
[0].imm
;
13613 set_pred_insn_type (NEUTRAL_IT_INSN
);
13620 do_t_ssat_usat (0);
13628 Rd
= inst
.operands
[0].reg
;
13629 Rn
= inst
.operands
[2].reg
;
13631 reject_bad_reg (Rd
);
13632 reject_bad_reg (Rn
);
13634 inst
.instruction
|= Rd
<< 8;
13635 inst
.instruction
|= inst
.operands
[1].imm
;
13636 inst
.instruction
|= Rn
<< 16;
13639 /* Checking the range of the branch offset (VAL) with NBITS bits
13640 and IS_SIGNED signedness. Also checks the LSB to be 0. */
13642 v8_1_branch_value_check (int val
, int nbits
, int is_signed
)
13644 gas_assert (nbits
> 0 && nbits
<= 32);
13647 int cmp
= (1 << (nbits
- 1));
13648 if ((val
< -cmp
) || (val
>= cmp
) || (val
& 0x01))
13653 if ((val
<= 0) || (val
>= (1 << nbits
)) || (val
& 0x1))
13659 /* For branches in Armv8.1-M Mainline. */
13661 do_t_branch_future (void)
13663 unsigned long insn
= inst
.instruction
;
13665 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13666 if (inst
.operands
[0].hasreloc
== 0)
13668 if (v8_1_branch_value_check (inst
.operands
[0].imm
, 5, FALSE
) == FAIL
)
13669 as_bad (BAD_BRANCH_OFF
);
13671 inst
.instruction
|= ((inst
.operands
[0].imm
& 0x1f) >> 1) << 23;
13675 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH5
;
13676 inst
.relocs
[0].pc_rel
= 1;
13682 if (inst
.operands
[1].hasreloc
== 0)
13684 int val
= inst
.operands
[1].imm
;
13685 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 17, TRUE
) == FAIL
)
13686 as_bad (BAD_BRANCH_OFF
);
13688 int immA
= (val
& 0x0001f000) >> 12;
13689 int immB
= (val
& 0x00000ffc) >> 2;
13690 int immC
= (val
& 0x00000002) >> 1;
13691 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13695 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF17
;
13696 inst
.relocs
[1].pc_rel
= 1;
13701 if (inst
.operands
[1].hasreloc
== 0)
13703 int val
= inst
.operands
[1].imm
;
13704 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 19, TRUE
) == FAIL
)
13705 as_bad (BAD_BRANCH_OFF
);
13707 int immA
= (val
& 0x0007f000) >> 12;
13708 int immB
= (val
& 0x00000ffc) >> 2;
13709 int immC
= (val
& 0x00000002) >> 1;
13710 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13714 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF19
;
13715 inst
.relocs
[1].pc_rel
= 1;
13719 case T_MNEM_bfcsel
:
13721 if (inst
.operands
[1].hasreloc
== 0)
13723 int val
= inst
.operands
[1].imm
;
13724 int immA
= (val
& 0x00001000) >> 12;
13725 int immB
= (val
& 0x00000ffc) >> 2;
13726 int immC
= (val
& 0x00000002) >> 1;
13727 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13731 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF13
;
13732 inst
.relocs
[1].pc_rel
= 1;
13736 if (inst
.operands
[2].hasreloc
== 0)
13738 constraint ((inst
.operands
[0].hasreloc
!= 0), BAD_ARGS
);
13739 int val2
= inst
.operands
[2].imm
;
13740 int val0
= inst
.operands
[0].imm
& 0x1f;
13741 int diff
= val2
- val0
;
13743 inst
.instruction
|= 1 << 17; /* T bit. */
13744 else if (diff
!= 2)
13745 as_bad (_("out of range label-relative fixup value"));
13749 constraint ((inst
.operands
[0].hasreloc
== 0), BAD_ARGS
);
13750 inst
.relocs
[2].type
= BFD_RELOC_THUMB_PCREL_BFCSEL
;
13751 inst
.relocs
[2].pc_rel
= 1;
13755 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
13756 inst
.instruction
|= (inst
.operands
[3].imm
& 0xf) << 18;
13761 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13768 /* Helper function for do_t_loloop to handle relocations. */
13770 v8_1_loop_reloc (int is_le
)
13772 if (inst
.relocs
[0].exp
.X_op
== O_constant
)
13774 int value
= inst
.relocs
[0].exp
.X_add_number
;
13775 value
= (is_le
) ? -value
: value
;
13777 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
13778 as_bad (BAD_BRANCH_OFF
);
13782 immh
= (value
& 0x00000ffc) >> 2;
13783 imml
= (value
& 0x00000002) >> 1;
13785 inst
.instruction
|= (imml
<< 11) | (immh
<< 1);
13789 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_LOOP12
;
13790 inst
.relocs
[0].pc_rel
= 1;
13794 /* To handle the Scalar Low Overhead Loop instructions
13795 in Armv8.1-M Mainline. */
13799 unsigned long insn
= inst
.instruction
;
13801 set_pred_insn_type (OUTSIDE_PRED_INSN
);
13802 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13808 if (!inst
.operands
[0].present
)
13809 inst
.instruction
|= 1 << 21;
13811 v8_1_loop_reloc (TRUE
);
13815 v8_1_loop_reloc (FALSE
);
13816 /* Fall through. */
13818 constraint (inst
.operands
[1].isreg
!= 1, BAD_ARGS
);
13819 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
13826 /* MVE instruction encoder helpers. */
13827 #define M_MNEM_vabav 0xee800f01
13828 #define M_MNEM_vmladav 0xeef00e00
13829 #define M_MNEM_vmladava 0xeef00e20
13830 #define M_MNEM_vmladavx 0xeef01e00
13831 #define M_MNEM_vmladavax 0xeef01e20
13832 #define M_MNEM_vmlsdav 0xeef00e01
13833 #define M_MNEM_vmlsdava 0xeef00e21
13834 #define M_MNEM_vmlsdavx 0xeef01e01
13835 #define M_MNEM_vmlsdavax 0xeef01e21
13837 /* Neon instruction encoder helpers. */
13839 /* Encodings for the different types for various Neon opcodes. */
13841 /* An "invalid" code for the following tables. */
13844 struct neon_tab_entry
13847 unsigned float_or_poly
;
13848 unsigned scalar_or_imm
;
13851 /* Map overloaded Neon opcodes to their respective encodings. */
13852 #define NEON_ENC_TAB \
13853 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13854 X(vabdl, 0x0800700, N_INV, N_INV), \
13855 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13856 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13857 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13858 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13859 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13860 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13861 X(vaddl, 0x0800000, N_INV, N_INV), \
13862 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13863 X(vsubl, 0x0800200, N_INV, N_INV), \
13864 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13865 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13866 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13867 /* Register variants of the following two instructions are encoded as
13868 vcge / vcgt with the operands reversed. */ \
13869 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13870 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13871 X(vfma, N_INV, 0x0000c10, N_INV), \
13872 X(vfms, N_INV, 0x0200c10, N_INV), \
13873 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13874 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13875 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13876 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13877 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13878 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13879 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13880 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13881 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13882 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13883 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13884 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13885 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13886 X(vshl, 0x0000400, N_INV, 0x0800510), \
13887 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13888 X(vand, 0x0000110, N_INV, 0x0800030), \
13889 X(vbic, 0x0100110, N_INV, 0x0800030), \
13890 X(veor, 0x1000110, N_INV, N_INV), \
13891 X(vorn, 0x0300110, N_INV, 0x0800010), \
13892 X(vorr, 0x0200110, N_INV, 0x0800010), \
13893 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13894 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13895 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13896 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13897 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13898 X(vst1, 0x0000000, 0x0800000, N_INV), \
13899 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13900 X(vst2, 0x0000100, 0x0800100, N_INV), \
13901 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13902 X(vst3, 0x0000200, 0x0800200, N_INV), \
13903 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13904 X(vst4, 0x0000300, 0x0800300, N_INV), \
13905 X(vmovn, 0x1b20200, N_INV, N_INV), \
13906 X(vtrn, 0x1b20080, N_INV, N_INV), \
13907 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13908 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13909 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13910 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13911 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13912 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13913 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13914 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13915 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13916 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13917 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13918 X(vseleq, 0xe000a00, N_INV, N_INV), \
13919 X(vselvs, 0xe100a00, N_INV, N_INV), \
13920 X(vselge, 0xe200a00, N_INV, N_INV), \
13921 X(vselgt, 0xe300a00, N_INV, N_INV), \
13922 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13923 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13924 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13925 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13926 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13927 X(aes, 0x3b00300, N_INV, N_INV), \
13928 X(sha3op, 0x2000c00, N_INV, N_INV), \
13929 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13930 X(sha2op, 0x3ba0380, N_INV, N_INV)
13934 #define X(OPC,I,F,S) N_MNEM_##OPC
13939 static const struct neon_tab_entry neon_enc_tab
[] =
13941 #define X(OPC,I,F,S) { (I), (F), (S) }
13946 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
13947 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13948 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13949 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13950 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13951 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13952 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13953 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13954 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13955 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13956 #define NEON_ENC_SINGLE_(X) \
13957 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13958 #define NEON_ENC_DOUBLE_(X) \
13959 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13960 #define NEON_ENC_FPV8_(X) \
13961 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13963 #define NEON_ENCODE(type, inst) \
13966 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13967 inst.is_neon = 1; \
13971 #define check_neon_suffixes \
13974 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13976 as_bad (_("invalid neon suffix for non neon instruction")); \
13982 /* Define shapes for instruction operands. The following mnemonic characters
13983 are used in this table:
13985 F - VFP S<n> register
13986 D - Neon D<n> register
13987 Q - Neon Q<n> register
13991 L - D<n> register list
13993 This table is used to generate various data:
13994 - enumerations of the form NS_DDR to be used as arguments to
13996 - a table classifying shapes into single, double, quad, mixed.
13997 - a table used to drive neon_select_shape. */
13999 #define NEON_SHAPE_DEF \
14000 X(3, (R, Q, Q), QUAD), \
14001 X(3, (D, D, D), DOUBLE), \
14002 X(3, (Q, Q, Q), QUAD), \
14003 X(3, (D, D, I), DOUBLE), \
14004 X(3, (Q, Q, I), QUAD), \
14005 X(3, (D, D, S), DOUBLE), \
14006 X(3, (Q, Q, S), QUAD), \
14007 X(3, (Q, Q, R), QUAD), \
14008 X(2, (D, D), DOUBLE), \
14009 X(2, (Q, Q), QUAD), \
14010 X(2, (D, S), DOUBLE), \
14011 X(2, (Q, S), QUAD), \
14012 X(2, (D, R), DOUBLE), \
14013 X(2, (Q, R), QUAD), \
14014 X(2, (D, I), DOUBLE), \
14015 X(2, (Q, I), QUAD), \
14016 X(3, (D, L, D), DOUBLE), \
14017 X(2, (D, Q), MIXED), \
14018 X(2, (Q, D), MIXED), \
14019 X(3, (D, Q, I), MIXED), \
14020 X(3, (Q, D, I), MIXED), \
14021 X(3, (Q, D, D), MIXED), \
14022 X(3, (D, Q, Q), MIXED), \
14023 X(3, (Q, Q, D), MIXED), \
14024 X(3, (Q, D, S), MIXED), \
14025 X(3, (D, Q, S), MIXED), \
14026 X(4, (D, D, D, I), DOUBLE), \
14027 X(4, (Q, Q, Q, I), QUAD), \
14028 X(4, (D, D, S, I), DOUBLE), \
14029 X(4, (Q, Q, S, I), QUAD), \
14030 X(2, (F, F), SINGLE), \
14031 X(3, (F, F, F), SINGLE), \
14032 X(2, (F, I), SINGLE), \
14033 X(2, (F, D), MIXED), \
14034 X(2, (D, F), MIXED), \
14035 X(3, (F, F, I), MIXED), \
14036 X(4, (R, R, F, F), SINGLE), \
14037 X(4, (F, F, R, R), SINGLE), \
14038 X(3, (D, R, R), DOUBLE), \
14039 X(3, (R, R, D), DOUBLE), \
14040 X(2, (S, R), SINGLE), \
14041 X(2, (R, S), SINGLE), \
14042 X(2, (F, R), SINGLE), \
14043 X(2, (R, F), SINGLE), \
14044 /* Half float shape supported so far. */\
14045 X (2, (H, D), MIXED), \
14046 X (2, (D, H), MIXED), \
14047 X (2, (H, F), MIXED), \
14048 X (2, (F, H), MIXED), \
14049 X (2, (H, H), HALF), \
14050 X (2, (H, R), HALF), \
14051 X (2, (R, H), HALF), \
14052 X (2, (H, I), HALF), \
14053 X (3, (H, H, H), HALF), \
14054 X (3, (H, F, I), MIXED), \
14055 X (3, (F, H, I), MIXED), \
14056 X (3, (D, H, H), MIXED), \
14057 X (3, (D, H, S), MIXED)
14059 #define S2(A,B) NS_##A##B
14060 #define S3(A,B,C) NS_##A##B##C
14061 #define S4(A,B,C,D) NS_##A##B##C##D
14063 #define X(N, L, C) S##N L
14076 enum neon_shape_class
14085 #define X(N, L, C) SC_##C
14087 static enum neon_shape_class neon_shape_class
[] =
14106 /* Register widths of above. */
14107 static unsigned neon_shape_el_size
[] =
14119 struct neon_shape_info
14122 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
14125 #define S2(A,B) { SE_##A, SE_##B }
14126 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
14127 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
14129 #define X(N, L, C) { N, S##N L }
14131 static struct neon_shape_info neon_shape_tab
[] =
14141 /* Bit masks used in type checking given instructions.
14142 'N_EQK' means the type must be the same as (or based on in some way) the key
14143 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
14144 set, various other bits can be set as well in order to modify the meaning of
14145 the type constraint. */
14147 enum neon_type_mask
14171 N_KEY
= 0x1000000, /* Key element (main type specifier). */
14172 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
14173 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
14174 N_UNT
= 0x8000000, /* Must be explicitly untyped. */
14175 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
14176 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
14177 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
14178 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
14179 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
14180 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
14181 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
14183 N_MAX_NONSPECIAL
= N_P64
14186 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
14188 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
14189 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
14190 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
14191 #define N_S_32 (N_S8 | N_S16 | N_S32)
14192 #define N_F_16_32 (N_F16 | N_F32)
14193 #define N_SUF_32 (N_SU_32 | N_F_16_32)
14194 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
14195 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
14196 #define N_F_ALL (N_F16 | N_F32 | N_F64)
14197 #define N_I_MVE (N_I8 | N_I16 | N_I32)
14198 #define N_F_MVE (N_F16 | N_F32)
14199 #define N_SU_MVE (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
14201 /* Pass this as the first type argument to neon_check_type to ignore types
14203 #define N_IGNORE_TYPE (N_KEY | N_EQK)
14205 /* Select a "shape" for the current instruction (describing register types or
14206 sizes) from a list of alternatives. Return NS_NULL if the current instruction
14207 doesn't fit. For non-polymorphic shapes, checking is usually done as a
14208 function of operand parsing, so this function doesn't need to be called.
14209 Shapes should be listed in order of decreasing length. */
14211 static enum neon_shape
14212 neon_select_shape (enum neon_shape shape
, ...)
14215 enum neon_shape first_shape
= shape
;
14217 /* Fix missing optional operands. FIXME: we don't know at this point how
14218 many arguments we should have, so this makes the assumption that we have
14219 > 1. This is true of all current Neon opcodes, I think, but may not be
14220 true in the future. */
14221 if (!inst
.operands
[1].present
)
14222 inst
.operands
[1] = inst
.operands
[0];
14224 va_start (ap
, shape
);
14226 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
14231 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
14233 if (!inst
.operands
[j
].present
)
14239 switch (neon_shape_tab
[shape
].el
[j
])
14241 /* If a .f16, .16, .u16, .s16 type specifier is given over
14242 a VFP single precision register operand, it's essentially
14243 means only half of the register is used.
14245 If the type specifier is given after the mnemonics, the
14246 information is stored in inst.vectype. If the type specifier
14247 is given after register operand, the information is stored
14248 in inst.operands[].vectype.
14250 When there is only one type specifier, and all the register
14251 operands are the same type of hardware register, the type
14252 specifier applies to all register operands.
14254 If no type specifier is given, the shape is inferred from
14255 operand information.
14258 vadd.f16 s0, s1, s2: NS_HHH
14259 vabs.f16 s0, s1: NS_HH
14260 vmov.f16 s0, r1: NS_HR
14261 vmov.f16 r0, s1: NS_RH
14262 vcvt.f16 r0, s1: NS_RH
14263 vcvt.f16.s32 s2, s2, #29: NS_HFI
14264 vcvt.f16.s32 s2, s2: NS_HF
14267 if (!(inst
.operands
[j
].isreg
14268 && inst
.operands
[j
].isvec
14269 && inst
.operands
[j
].issingle
14270 && !inst
.operands
[j
].isquad
14271 && ((inst
.vectype
.elems
== 1
14272 && inst
.vectype
.el
[0].size
== 16)
14273 || (inst
.vectype
.elems
> 1
14274 && inst
.vectype
.el
[j
].size
== 16)
14275 || (inst
.vectype
.elems
== 0
14276 && inst
.operands
[j
].vectype
.type
!= NT_invtype
14277 && inst
.operands
[j
].vectype
.size
== 16))))
14282 if (!(inst
.operands
[j
].isreg
14283 && inst
.operands
[j
].isvec
14284 && inst
.operands
[j
].issingle
14285 && !inst
.operands
[j
].isquad
14286 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
14287 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
14288 || (inst
.vectype
.elems
== 0
14289 && (inst
.operands
[j
].vectype
.size
== 32
14290 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
14295 if (!(inst
.operands
[j
].isreg
14296 && inst
.operands
[j
].isvec
14297 && !inst
.operands
[j
].isquad
14298 && !inst
.operands
[j
].issingle
))
14303 if (!(inst
.operands
[j
].isreg
14304 && !inst
.operands
[j
].isvec
))
14309 if (!(inst
.operands
[j
].isreg
14310 && inst
.operands
[j
].isvec
14311 && inst
.operands
[j
].isquad
14312 && !inst
.operands
[j
].issingle
))
14317 if (!(!inst
.operands
[j
].isreg
14318 && !inst
.operands
[j
].isscalar
))
14323 if (!(!inst
.operands
[j
].isreg
14324 && inst
.operands
[j
].isscalar
))
14334 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
14335 /* We've matched all the entries in the shape table, and we don't
14336 have any left over operands which have not been matched. */
14342 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
14343 first_error (_("invalid instruction shape"));
14348 /* True if SHAPE is predominantly a quadword operation (most of the time, this
14349 means the Q bit should be set). */
14352 neon_quad (enum neon_shape shape
)
14354 return neon_shape_class
[shape
] == SC_QUAD
;
14358 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
14361 /* Allow modification to be made to types which are constrained to be
14362 based on the key element, based on bits set alongside N_EQK. */
14363 if ((typebits
& N_EQK
) != 0)
14365 if ((typebits
& N_HLF
) != 0)
14367 else if ((typebits
& N_DBL
) != 0)
14369 if ((typebits
& N_SGN
) != 0)
14370 *g_type
= NT_signed
;
14371 else if ((typebits
& N_UNS
) != 0)
14372 *g_type
= NT_unsigned
;
14373 else if ((typebits
& N_INT
) != 0)
14374 *g_type
= NT_integer
;
14375 else if ((typebits
& N_FLT
) != 0)
14376 *g_type
= NT_float
;
14377 else if ((typebits
& N_SIZ
) != 0)
14378 *g_type
= NT_untyped
;
14382 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
14383 operand type, i.e. the single type specified in a Neon instruction when it
14384 is the only one given. */
14386 static struct neon_type_el
14387 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
14389 struct neon_type_el dest
= *key
;
14391 gas_assert ((thisarg
& N_EQK
) != 0);
14393 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
14398 /* Convert Neon type and size into compact bitmask representation. */
14400 static enum neon_type_mask
14401 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
14408 case 8: return N_8
;
14409 case 16: return N_16
;
14410 case 32: return N_32
;
14411 case 64: return N_64
;
14419 case 8: return N_I8
;
14420 case 16: return N_I16
;
14421 case 32: return N_I32
;
14422 case 64: return N_I64
;
14430 case 16: return N_F16
;
14431 case 32: return N_F32
;
14432 case 64: return N_F64
;
14440 case 8: return N_P8
;
14441 case 16: return N_P16
;
14442 case 64: return N_P64
;
14450 case 8: return N_S8
;
14451 case 16: return N_S16
;
14452 case 32: return N_S32
;
14453 case 64: return N_S64
;
14461 case 8: return N_U8
;
14462 case 16: return N_U16
;
14463 case 32: return N_U32
;
14464 case 64: return N_U64
;
14475 /* Convert compact Neon bitmask type representation to a type and size. Only
14476 handles the case where a single bit is set in the mask. */
14479 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
14480 enum neon_type_mask mask
)
14482 if ((mask
& N_EQK
) != 0)
14485 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
14487 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
14489 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
14491 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
14496 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
14498 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
14499 *type
= NT_unsigned
;
14500 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
14501 *type
= NT_integer
;
14502 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
14503 *type
= NT_untyped
;
14504 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
14506 else if ((mask
& (N_F_ALL
)) != 0)
14514 /* Modify a bitmask of allowed types. This is only needed for type
14518 modify_types_allowed (unsigned allowed
, unsigned mods
)
14521 enum neon_el_type type
;
14527 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
14529 if (el_type_of_type_chk (&type
, &size
,
14530 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
14532 neon_modify_type_size (mods
, &type
, &size
);
14533 destmask
|= type_chk_of_el_type (type
, size
);
14540 /* Check type and return type classification.
14541 The manual states (paraphrase): If one datatype is given, it indicates the
14543 - the second operand, if there is one
14544 - the operand, if there is no second operand
14545 - the result, if there are no operands.
14546 This isn't quite good enough though, so we use a concept of a "key" datatype
14547 which is set on a per-instruction basis, which is the one which matters when
14548 only one data type is written.
14549 Note: this function has side-effects (e.g. filling in missing operands). All
14550 Neon instructions should call it before performing bit encoding. */
14552 static struct neon_type_el
14553 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
14556 unsigned i
, pass
, key_el
= 0;
14557 unsigned types
[NEON_MAX_TYPE_ELS
];
14558 enum neon_el_type k_type
= NT_invtype
;
14559 unsigned k_size
= -1u;
14560 struct neon_type_el badtype
= {NT_invtype
, -1};
14561 unsigned key_allowed
= 0;
14563 /* Optional registers in Neon instructions are always (not) in operand 1.
14564 Fill in the missing operand here, if it was omitted. */
14565 if (els
> 1 && !inst
.operands
[1].present
)
14566 inst
.operands
[1] = inst
.operands
[0];
14568 /* Suck up all the varargs. */
14570 for (i
= 0; i
< els
; i
++)
14572 unsigned thisarg
= va_arg (ap
, unsigned);
14573 if (thisarg
== N_IGNORE_TYPE
)
14578 types
[i
] = thisarg
;
14579 if ((thisarg
& N_KEY
) != 0)
14584 if (inst
.vectype
.elems
> 0)
14585 for (i
= 0; i
< els
; i
++)
14586 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
14588 first_error (_("types specified in both the mnemonic and operands"));
14592 /* Duplicate inst.vectype elements here as necessary.
14593 FIXME: No idea if this is exactly the same as the ARM assembler,
14594 particularly when an insn takes one register and one non-register
14596 if (inst
.vectype
.elems
== 1 && els
> 1)
14599 inst
.vectype
.elems
= els
;
14600 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
14601 for (j
= 0; j
< els
; j
++)
14603 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14606 else if (inst
.vectype
.elems
== 0 && els
> 0)
14609 /* No types were given after the mnemonic, so look for types specified
14610 after each operand. We allow some flexibility here; as long as the
14611 "key" operand has a type, we can infer the others. */
14612 for (j
= 0; j
< els
; j
++)
14613 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
14614 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
14616 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
14618 for (j
= 0; j
< els
; j
++)
14619 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
14620 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14625 first_error (_("operand types can't be inferred"));
14629 else if (inst
.vectype
.elems
!= els
)
14631 first_error (_("type specifier has the wrong number of parts"));
14635 for (pass
= 0; pass
< 2; pass
++)
14637 for (i
= 0; i
< els
; i
++)
14639 unsigned thisarg
= types
[i
];
14640 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
14641 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
14642 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
14643 unsigned g_size
= inst
.vectype
.el
[i
].size
;
14645 /* Decay more-specific signed & unsigned types to sign-insensitive
14646 integer types if sign-specific variants are unavailable. */
14647 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
14648 && (types_allowed
& N_SU_ALL
) == 0)
14649 g_type
= NT_integer
;
14651 /* If only untyped args are allowed, decay any more specific types to
14652 them. Some instructions only care about signs for some element
14653 sizes, so handle that properly. */
14654 if (((types_allowed
& N_UNT
) == 0)
14655 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
14656 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
14657 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
14658 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
14659 g_type
= NT_untyped
;
14663 if ((thisarg
& N_KEY
) != 0)
14667 key_allowed
= thisarg
& ~N_KEY
;
14669 /* Check architecture constraint on FP16 extension. */
14671 && k_type
== NT_float
14672 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14674 inst
.error
= _(BAD_FP16
);
14681 if ((thisarg
& N_VFP
) != 0)
14683 enum neon_shape_el regshape
;
14684 unsigned regwidth
, match
;
14686 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
14689 first_error (_("invalid instruction shape"));
14692 regshape
= neon_shape_tab
[ns
].el
[i
];
14693 regwidth
= neon_shape_el_size
[regshape
];
14695 /* In VFP mode, operands must match register widths. If we
14696 have a key operand, use its width, else use the width of
14697 the current operand. */
14703 /* FP16 will use a single precision register. */
14704 if (regwidth
== 32 && match
== 16)
14706 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14710 inst
.error
= _(BAD_FP16
);
14715 if (regwidth
!= match
)
14717 first_error (_("operand size must match register width"));
14722 if ((thisarg
& N_EQK
) == 0)
14724 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
14726 if ((given_type
& types_allowed
) == 0)
14728 first_error (BAD_SIMD_TYPE
);
14734 enum neon_el_type mod_k_type
= k_type
;
14735 unsigned mod_k_size
= k_size
;
14736 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
14737 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
14739 first_error (_("inconsistent types in Neon instruction"));
14747 return inst
.vectype
.el
[key_el
];
14750 /* Neon-style VFP instruction forwarding. */
14752 /* Thumb VFP instructions have 0xE in the condition field. */
14755 do_vfp_cond_or_thumb (void)
14760 inst
.instruction
|= 0xe0000000;
14762 inst
.instruction
|= inst
.cond
<< 28;
14765 /* Look up and encode a simple mnemonic, for use as a helper function for the
14766 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14767 etc. It is assumed that operand parsing has already been done, and that the
14768 operands are in the form expected by the given opcode (this isn't necessarily
14769 the same as the form in which they were parsed, hence some massaging must
14770 take place before this function is called).
14771 Checks current arch version against that in the looked-up opcode. */
14774 do_vfp_nsyn_opcode (const char *opname
)
14776 const struct asm_opcode
*opcode
;
14778 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
14783 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
14784 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
14791 inst
.instruction
= opcode
->tvalue
;
14792 opcode
->tencode ();
14796 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
14797 opcode
->aencode ();
14802 do_vfp_nsyn_add_sub (enum neon_shape rs
)
14804 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
14806 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14809 do_vfp_nsyn_opcode ("fadds");
14811 do_vfp_nsyn_opcode ("fsubs");
14813 /* ARMv8.2 fp16 instruction. */
14815 do_scalar_fp16_v82_encode ();
14820 do_vfp_nsyn_opcode ("faddd");
14822 do_vfp_nsyn_opcode ("fsubd");
14826 /* Check operand types to see if this is a VFP instruction, and if so call
14830 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
14832 enum neon_shape rs
;
14833 struct neon_type_el et
;
14838 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14839 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14843 rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14844 et
= neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14845 N_F_ALL
| N_KEY
| N_VFP
);
14852 if (et
.type
!= NT_invtype
)
14863 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
14865 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
14867 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14870 do_vfp_nsyn_opcode ("fmacs");
14872 do_vfp_nsyn_opcode ("fnmacs");
14874 /* ARMv8.2 fp16 instruction. */
14876 do_scalar_fp16_v82_encode ();
14881 do_vfp_nsyn_opcode ("fmacd");
14883 do_vfp_nsyn_opcode ("fnmacd");
14888 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
14890 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14892 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14895 do_vfp_nsyn_opcode ("ffmas");
14897 do_vfp_nsyn_opcode ("ffnmas");
14899 /* ARMv8.2 fp16 instruction. */
14901 do_scalar_fp16_v82_encode ();
14906 do_vfp_nsyn_opcode ("ffmad");
14908 do_vfp_nsyn_opcode ("ffnmad");
14913 do_vfp_nsyn_mul (enum neon_shape rs
)
14915 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14917 do_vfp_nsyn_opcode ("fmuls");
14919 /* ARMv8.2 fp16 instruction. */
14921 do_scalar_fp16_v82_encode ();
14924 do_vfp_nsyn_opcode ("fmuld");
14928 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14930 int is_neg
= (inst
.instruction
& 0x80) != 0;
14931 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_VFP
| N_KEY
);
14933 if (rs
== NS_FF
|| rs
== NS_HH
)
14936 do_vfp_nsyn_opcode ("fnegs");
14938 do_vfp_nsyn_opcode ("fabss");
14940 /* ARMv8.2 fp16 instruction. */
14942 do_scalar_fp16_v82_encode ();
14947 do_vfp_nsyn_opcode ("fnegd");
14949 do_vfp_nsyn_opcode ("fabsd");
14953 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14954 insns belong to Neon, and are handled elsewhere. */
14957 do_vfp_nsyn_ldm_stm (int is_dbmode
)
14959 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
14963 do_vfp_nsyn_opcode ("fldmdbs");
14965 do_vfp_nsyn_opcode ("fldmias");
14970 do_vfp_nsyn_opcode ("fstmdbs");
14972 do_vfp_nsyn_opcode ("fstmias");
14977 do_vfp_nsyn_sqrt (void)
14979 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14980 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14982 if (rs
== NS_FF
|| rs
== NS_HH
)
14984 do_vfp_nsyn_opcode ("fsqrts");
14986 /* ARMv8.2 fp16 instruction. */
14988 do_scalar_fp16_v82_encode ();
14991 do_vfp_nsyn_opcode ("fsqrtd");
14995 do_vfp_nsyn_div (void)
14997 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14998 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14999 N_F_ALL
| N_KEY
| N_VFP
);
15001 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15003 do_vfp_nsyn_opcode ("fdivs");
15005 /* ARMv8.2 fp16 instruction. */
15007 do_scalar_fp16_v82_encode ();
15010 do_vfp_nsyn_opcode ("fdivd");
15014 do_vfp_nsyn_nmul (void)
15016 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
15017 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
15018 N_F_ALL
| N_KEY
| N_VFP
);
15020 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15022 NEON_ENCODE (SINGLE
, inst
);
15023 do_vfp_sp_dyadic ();
15025 /* ARMv8.2 fp16 instruction. */
15027 do_scalar_fp16_v82_encode ();
15031 NEON_ENCODE (DOUBLE
, inst
);
15032 do_vfp_dp_rd_rn_rm ();
15034 do_vfp_cond_or_thumb ();
15039 do_vfp_nsyn_cmp (void)
15041 enum neon_shape rs
;
15042 if (inst
.operands
[1].isreg
)
15044 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
15045 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
15047 if (rs
== NS_FF
|| rs
== NS_HH
)
15049 NEON_ENCODE (SINGLE
, inst
);
15050 do_vfp_sp_monadic ();
15054 NEON_ENCODE (DOUBLE
, inst
);
15055 do_vfp_dp_rd_rm ();
15060 rs
= neon_select_shape (NS_HI
, NS_FI
, NS_DI
, NS_NULL
);
15061 neon_check_type (2, rs
, N_F_ALL
| N_KEY
| N_VFP
, N_EQK
);
15063 switch (inst
.instruction
& 0x0fffffff)
15066 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
15069 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
15075 if (rs
== NS_FI
|| rs
== NS_HI
)
15077 NEON_ENCODE (SINGLE
, inst
);
15078 do_vfp_sp_compare_z ();
15082 NEON_ENCODE (DOUBLE
, inst
);
15086 do_vfp_cond_or_thumb ();
15088 /* ARMv8.2 fp16 instruction. */
15089 if (rs
== NS_HI
|| rs
== NS_HH
)
15090 do_scalar_fp16_v82_encode ();
15094 nsyn_insert_sp (void)
15096 inst
.operands
[1] = inst
.operands
[0];
15097 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
15098 inst
.operands
[0].reg
= REG_SP
;
15099 inst
.operands
[0].isreg
= 1;
15100 inst
.operands
[0].writeback
= 1;
15101 inst
.operands
[0].present
= 1;
15105 do_vfp_nsyn_push (void)
15109 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
15110 _("register list must contain at least 1 and at most 16 "
15113 if (inst
.operands
[1].issingle
)
15114 do_vfp_nsyn_opcode ("fstmdbs");
15116 do_vfp_nsyn_opcode ("fstmdbd");
15120 do_vfp_nsyn_pop (void)
15124 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
15125 _("register list must contain at least 1 and at most 16 "
15128 if (inst
.operands
[1].issingle
)
15129 do_vfp_nsyn_opcode ("fldmias");
15131 do_vfp_nsyn_opcode ("fldmiad");
15134 /* Fix up Neon data-processing instructions, ORing in the correct bits for
15135 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
15138 neon_dp_fixup (struct arm_it
* insn
)
15140 unsigned int i
= insn
->instruction
;
15145 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
15156 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3 respectively).  Relies on X being a power of two; ffs
   returns the 1-based position of the lowest set bit.  */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}
/* Split a 5-bit Neon register number into the 4-bit field (LOW4) and the
   single D/N/M extension bit (HI1) used by the instruction encodings.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
15172 mve_encode_qqr (int size
, int fp
)
15174 if (inst
.operands
[2].reg
== REG_SP
)
15175 as_tsktsk (MVE_BAD_SP
);
15176 else if (inst
.operands
[2].reg
== REG_PC
)
15177 as_tsktsk (MVE_BAD_PC
);
15182 if (((unsigned)inst
.instruction
) == 0xd00)
15183 inst
.instruction
= 0xee300f40;
15185 else if (((unsigned)inst
.instruction
) == 0x200d00)
15186 inst
.instruction
= 0xee301f40;
15188 /* Setting size which is 1 for F16 and 0 for F32. */
15189 inst
.instruction
|= (size
== 16) << 28;
15194 if (((unsigned)inst
.instruction
) == 0x800)
15195 inst
.instruction
= 0xee010f40;
15197 else if (((unsigned)inst
.instruction
) == 0x1000800)
15198 inst
.instruction
= 0xee011f40;
15199 /* Setting bits for size. */
15200 inst
.instruction
|= neon_logbits (size
) << 20;
15202 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15203 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15204 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15205 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15206 inst
.instruction
|= inst
.operands
[2].reg
;
15211 mve_encode_rqq (unsigned bit28
, unsigned size
)
15213 inst
.instruction
|= bit28
<< 28;
15214 inst
.instruction
|= neon_logbits (size
) << 20;
15215 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15216 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
15217 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15218 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15219 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15223 /* Encode insns with bit pattern:
15225 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15226 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
15228 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
15229 different meaning for some instruction. */
15232 neon_three_same (int isquad
, int ubit
, int size
)
15234 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15235 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15236 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15237 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15238 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15239 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15240 inst
.instruction
|= (isquad
!= 0) << 6;
15241 inst
.instruction
|= (ubit
!= 0) << 24;
15243 inst
.instruction
|= neon_logbits (size
) << 20;
15245 neon_dp_fixup (&inst
);
15248 /* Encode instructions of the form:
15250 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
15251 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
15253 Don't write size if SIZE == -1. */
15256 neon_two_same (int qbit
, int ubit
, int size
)
15258 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15259 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15260 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15261 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15262 inst
.instruction
|= (qbit
!= 0) << 6;
15263 inst
.instruction
|= (ubit
!= 0) << 24;
15266 inst
.instruction
|= neon_logbits (size
) << 18;
15268 neon_dp_fixup (&inst
);
15271 /* Neon instruction encoders, in approximate order of appearance. */
15274 do_neon_dyadic_i_su (void)
15276 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15277 struct neon_type_el et
= neon_check_type (3, rs
,
15278 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
15279 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15283 do_neon_dyadic_i64_su (void)
15285 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15286 struct neon_type_el et
= neon_check_type (3, rs
,
15287 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
15288 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15292 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
15295 unsigned size
= et
.size
>> 3;
15296 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15297 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15298 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15299 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15300 inst
.instruction
|= (isquad
!= 0) << 6;
15301 inst
.instruction
|= immbits
<< 16;
15302 inst
.instruction
|= (size
>> 3) << 7;
15303 inst
.instruction
|= (size
& 0x7) << 19;
15305 inst
.instruction
|= (uval
!= 0) << 24;
15307 neon_dp_fixup (&inst
);
15311 do_neon_shl_imm (void)
15313 if (!inst
.operands
[2].isreg
)
15315 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15316 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
15317 int imm
= inst
.operands
[2].imm
;
15319 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15320 _("immediate out of range for shift"));
15321 NEON_ENCODE (IMMED
, inst
);
15322 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15326 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15327 struct neon_type_el et
= neon_check_type (3, rs
,
15328 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
15331 /* VSHL/VQSHL 3-register variants have syntax such as:
15333 whereas other 3-register operations encoded by neon_three_same have
15336 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
15338 tmp
= inst
.operands
[2].reg
;
15339 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15340 inst
.operands
[1].reg
= tmp
;
15341 NEON_ENCODE (INTEGER
, inst
);
15342 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15347 do_neon_qshl_imm (void)
15349 if (!inst
.operands
[2].isreg
)
15351 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15352 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
15353 int imm
= inst
.operands
[2].imm
;
15355 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15356 _("immediate out of range for shift"));
15357 NEON_ENCODE (IMMED
, inst
);
15358 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
15362 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15363 struct neon_type_el et
= neon_check_type (3, rs
,
15364 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
15367 /* See note in do_neon_shl_imm. */
15368 tmp
= inst
.operands
[2].reg
;
15369 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15370 inst
.operands
[1].reg
= tmp
;
15371 NEON_ENCODE (INTEGER
, inst
);
15372 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15377 do_neon_rshl (void)
15379 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15380 struct neon_type_el et
= neon_check_type (3, rs
,
15381 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
15384 tmp
= inst
.operands
[2].reg
;
15385 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15386 inst
.operands
[1].reg
= tmp
;
15387 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15391 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
15393 /* Handle .I8 pseudo-instructions. */
15396 /* Unfortunately, this will make everything apart from zero out-of-range.
15397 FIXME is this the intended semantics? There doesn't seem much point in
15398 accepting .I8 if so. */
15399 immediate
|= immediate
<< 8;
15405 if (immediate
== (immediate
& 0x000000ff))
15407 *immbits
= immediate
;
15410 else if (immediate
== (immediate
& 0x0000ff00))
15412 *immbits
= immediate
>> 8;
15415 else if (immediate
== (immediate
& 0x00ff0000))
15417 *immbits
= immediate
>> 16;
15420 else if (immediate
== (immediate
& 0xff000000))
15422 *immbits
= immediate
>> 24;
15425 if ((immediate
& 0xffff) != (immediate
>> 16))
15426 goto bad_immediate
;
15427 immediate
&= 0xffff;
15430 if (immediate
== (immediate
& 0x000000ff))
15432 *immbits
= immediate
;
15435 else if (immediate
== (immediate
& 0x0000ff00))
15437 *immbits
= immediate
>> 8;
15442 first_error (_("immediate value out of range"));
15447 do_neon_logic (void)
15449 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
15451 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15452 neon_check_type (3, rs
, N_IGNORE_TYPE
);
15453 /* U bit and size field were set as part of the bitmask. */
15454 NEON_ENCODE (INTEGER
, inst
);
15455 neon_three_same (neon_quad (rs
), 0, -1);
15459 const int three_ops_form
= (inst
.operands
[2].present
15460 && !inst
.operands
[2].isreg
);
15461 const int immoperand
= (three_ops_form
? 2 : 1);
15462 enum neon_shape rs
= (three_ops_form
15463 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
15464 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
15465 struct neon_type_el et
= neon_check_type (2, rs
,
15466 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15467 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
15471 if (et
.type
== NT_invtype
)
15474 if (three_ops_form
)
15475 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15476 _("first and second operands shall be the same register"));
15478 NEON_ENCODE (IMMED
, inst
);
15480 immbits
= inst
.operands
[immoperand
].imm
;
15483 /* .i64 is a pseudo-op, so the immediate must be a repeating
15485 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
15486 inst
.operands
[immoperand
].reg
: 0))
15488 /* Set immbits to an invalid constant. */
15489 immbits
= 0xdeadbeef;
15496 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15500 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15504 /* Pseudo-instruction for VBIC. */
15505 neon_invert_size (&immbits
, 0, et
.size
);
15506 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15510 /* Pseudo-instruction for VORR. */
15511 neon_invert_size (&immbits
, 0, et
.size
);
15512 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15522 inst
.instruction
|= neon_quad (rs
) << 6;
15523 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15524 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15525 inst
.instruction
|= cmode
<< 8;
15526 neon_write_immbits (immbits
);
15528 neon_dp_fixup (&inst
);
15533 do_neon_bitfield (void)
15535 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15536 neon_check_type (3, rs
, N_IGNORE_TYPE
);
15537 neon_three_same (neon_quad (rs
), 0, -1);
15541 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
15544 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
15545 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
15547 if (et
.type
== NT_float
)
15549 NEON_ENCODE (FLOAT
, inst
);
15551 mve_encode_qqr (et
.size
, 1);
15553 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
15557 NEON_ENCODE (INTEGER
, inst
);
15559 mve_encode_qqr (et
.size
, 0);
15561 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
15567 do_neon_dyadic_if_su_d (void)
15569 /* This version only allow D registers, but that constraint is enforced during
15570 operand parsing so we don't need to do anything extra here. */
15571 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
15575 do_neon_dyadic_if_i_d (void)
15577 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15578 affected if we specify unsigned args. */
15579 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* Bit flags selecting which checks vfp_or_neon_is_neon performs.  */
enum vfp_or_neon_is_neon_bits
{
  /* NOTE(review): the NEON_CHECK_CC enumerator was lost in extraction;
     value 1 reconstructed from the sequence 2, 4 below — confirm.  */
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2,
  NEON_CHECK_ARCH8 = 4
};
15589 /* Call this function if an instruction which may have belonged to the VFP or
15590 Neon instruction sets, but turned out to be a Neon instruction (due to the
15591 operand types involved, etc.). We have to check and/or fix-up a couple of
15594 - Make sure the user hasn't attempted to make a Neon instruction
15596 - Alter the value in the condition code field if necessary.
15597 - Make sure that the arch supports Neon instructions.
15599 Which of these operations take place depends on bits from enum
15600 vfp_or_neon_is_neon_bits.
15602 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
15603 current instruction's condition is COND_ALWAYS, the condition field is
15604 changed to inst.uncond_value. This is necessary because instructions shared
15605 between VFP and Neon may be conditional for the VFP variants only, and the
15606 unconditional Neon version must have, e.g., 0xF in the condition field. */
15609 vfp_or_neon_is_neon (unsigned check
)
15611 /* Conditions are always legal in Thumb mode (IT blocks). */
15612 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
15614 if (inst
.cond
!= COND_ALWAYS
)
15616 first_error (_(BAD_COND
));
15619 if (inst
.uncond_value
!= -1)
15620 inst
.instruction
|= inst
.uncond_value
<< 28;
15624 if (((check
& NEON_CHECK_ARCH
) && !mark_feature_used (&fpu_neon_ext_v1
))
15625 || ((check
& NEON_CHECK_ARCH8
)
15626 && !mark_feature_used (&fpu_neon_ext_armv8
)))
15628 first_error (_(BAD_FPU
));
15636 check_simd_pred_availability (int fp
, unsigned check
)
15638 if (inst
.cond
> COND_ALWAYS
)
15640 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15642 inst
.error
= BAD_FPU
;
15645 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
15647 else if (inst
.cond
< COND_ALWAYS
)
15649 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15650 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
15651 else if (vfp_or_neon_is_neon (check
) == FAIL
)
15656 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fp
? mve_fp_ext
: mve_ext
)
15657 && vfp_or_neon_is_neon (check
) == FAIL
)
15660 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15661 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
15667 do_neon_dyadic_if_su (void)
15669 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
15670 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15673 if (check_simd_pred_availability (et
.type
== NT_float
,
15674 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
15677 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
15681 do_neon_addsub_if_i (void)
15683 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1xd
)
15684 && try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
15687 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
15688 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
,
15689 N_EQK
, N_IF_32
| N_I64
| N_KEY
);
15691 constraint (rs
== NS_QQR
&& et
.size
== 64, BAD_FPU
);
15692 /* If we are parsing Q registers and the element types match MVE, which NEON
15693 also supports, then we must check whether this is an instruction that can
15694 be used by both MVE/NEON. This distinction can be made based on whether
15695 they are predicated or not. */
15696 if ((rs
== NS_QQQ
|| rs
== NS_QQR
) && et
.size
!= 64)
15698 if (check_simd_pred_availability (et
.type
== NT_float
,
15699 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
15704 /* If they are either in a D register or are using an unsupported. */
15706 && vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15710 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15711 affected if we specify unsigned args. */
15712 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
15715 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
15717 V<op> A,B (A is operand 0, B is operand 2)
15722 so handle that case specially. */
15725 neon_exchange_operands (void)
15727 if (inst
.operands
[1].present
)
15729 void *scratch
= xmalloc (sizeof (inst
.operands
[0]));
15731 /* Swap operands[1] and operands[2]. */
15732 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
15733 inst
.operands
[1] = inst
.operands
[2];
15734 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
15739 inst
.operands
[1] = inst
.operands
[2];
15740 inst
.operands
[2] = inst
.operands
[0];
15745 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
15747 if (inst
.operands
[2].isreg
)
15750 neon_exchange_operands ();
15751 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
15755 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15756 struct neon_type_el et
= neon_check_type (2, rs
,
15757 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
15759 NEON_ENCODE (IMMED
, inst
);
15760 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15761 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15762 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15763 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15764 inst
.instruction
|= neon_quad (rs
) << 6;
15765 inst
.instruction
|= (et
.type
== NT_float
) << 10;
15766 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15768 neon_dp_fixup (&inst
);
15775 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, FALSE
);
15779 do_neon_cmp_inv (void)
15781 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, TRUE
);
15787 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.

   Dot Product instructions are similar to multiply instructions except elsize
   should always be 32.

   This function translates SCALAR, which is GAS's internal encoding of indexed
   scalar register, to raw encoding.  There is also register and index range
   check based on ELSIZE.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
15829 /* Encode multiply / multiply-accumulate scalar instructions. */
15832 neon_mul_mac (struct neon_type_el et
, int ubit
)
15836 /* Give a more helpful error message if we have an invalid type. */
15837 if (et
.type
== NT_invtype
)
15840 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
15841 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15842 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15843 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15844 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15845 inst
.instruction
|= LOW4 (scalar
);
15846 inst
.instruction
|= HI1 (scalar
) << 5;
15847 inst
.instruction
|= (et
.type
== NT_float
) << 8;
15848 inst
.instruction
|= neon_logbits (et
.size
) << 20;
15849 inst
.instruction
|= (ubit
!= 0) << 24;
15851 neon_dp_fixup (&inst
);
15855 do_neon_mac_maybe_scalar (void)
15857 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
15860 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15863 if (inst
.operands
[2].isscalar
)
15865 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15866 struct neon_type_el et
= neon_check_type (3, rs
,
15867 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F_16_32
| N_KEY
);
15868 NEON_ENCODE (SCALAR
, inst
);
15869 neon_mul_mac (et
, neon_quad (rs
));
15873 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15874 affected if we specify unsigned args. */
15875 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15880 do_neon_fmac (void)
15882 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
15885 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15888 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15894 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15895 struct neon_type_el et
= neon_check_type (3, rs
,
15896 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15897 neon_three_same (neon_quad (rs
), 0, et
.size
);
15900 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15901 same types as the MAC equivalents. The polynomial type for this instruction
15902 is encoded the same as the integer type. */
15907 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
15910 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15913 if (inst
.operands
[2].isscalar
)
15914 do_neon_mac_maybe_scalar ();
15916 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F16
| N_F32
| N_P8
, 0);
15920 do_neon_qdmulh (void)
15922 if (inst
.operands
[2].isscalar
)
15924 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15925 struct neon_type_el et
= neon_check_type (3, rs
,
15926 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15927 NEON_ENCODE (SCALAR
, inst
);
15928 neon_mul_mac (et
, neon_quad (rs
));
15932 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15933 struct neon_type_el et
= neon_check_type (3, rs
,
15934 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15935 NEON_ENCODE (INTEGER
, inst
);
15936 /* The U bit (rounding) comes from bit mask. */
15937 neon_three_same (neon_quad (rs
), 0, et
.size
);
15942 do_mve_vabav (void)
15944 enum neon_shape rs
= neon_select_shape (NS_RQQ
, NS_NULL
);
15949 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15952 struct neon_type_el et
= neon_check_type (2, NS_NULL
, N_EQK
, N_KEY
| N_S8
15953 | N_S16
| N_S32
| N_U8
| N_U16
15956 if (inst
.cond
> COND_ALWAYS
)
15957 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
15959 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
15961 mve_encode_rqq (et
.type
== NT_unsigned
, et
.size
);
15965 do_mve_vmladav (void)
15967 enum neon_shape rs
= neon_select_shape (NS_RQQ
, NS_NULL
);
15968 struct neon_type_el et
= neon_check_type (3, rs
,
15969 N_EQK
, N_EQK
, N_SU_MVE
| N_KEY
);
15971 if (et
.type
== NT_unsigned
15972 && (inst
.instruction
== M_MNEM_vmladavx
15973 || inst
.instruction
== M_MNEM_vmladavax
15974 || inst
.instruction
== M_MNEM_vmlsdav
15975 || inst
.instruction
== M_MNEM_vmlsdava
15976 || inst
.instruction
== M_MNEM_vmlsdavx
15977 || inst
.instruction
== M_MNEM_vmlsdavax
))
15978 first_error (BAD_SIMD_TYPE
);
15980 constraint (inst
.operands
[2].reg
> 14,
15981 _("MVE vector register in the range [Q0..Q7] expected"));
15983 if (inst
.cond
> COND_ALWAYS
)
15984 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
15986 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
15988 if (inst
.instruction
== M_MNEM_vmlsdav
15989 || inst
.instruction
== M_MNEM_vmlsdava
15990 || inst
.instruction
== M_MNEM_vmlsdavx
15991 || inst
.instruction
== M_MNEM_vmlsdavax
)
15992 inst
.instruction
|= (et
.size
== 8) << 28;
15994 inst
.instruction
|= (et
.size
== 8) << 8;
15996 mve_encode_rqq (et
.type
== NT_unsigned
, 64);
15997 inst
.instruction
|= (et
.size
== 32) << 16;
16001 do_neon_qrdmlah (void)
16003 /* Check we're on the correct architecture. */
16004 if (!mark_feature_used (&fpu_neon_ext_armv8
))
16006 _("instruction form not available on this architecture.");
16007 else if (!mark_feature_used (&fpu_neon_ext_v8_1
))
16009 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
16010 record_feature_use (&fpu_neon_ext_v8_1
);
16013 if (inst
.operands
[2].isscalar
)
16015 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
16016 struct neon_type_el et
= neon_check_type (3, rs
,
16017 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16018 NEON_ENCODE (SCALAR
, inst
);
16019 neon_mul_mac (et
, neon_quad (rs
));
16023 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16024 struct neon_type_el et
= neon_check_type (3, rs
,
16025 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16026 NEON_ENCODE (INTEGER
, inst
);
16027 /* The U bit (rounding) comes from bit mask. */
16028 neon_three_same (neon_quad (rs
), 0, et
.size
);
16033 do_neon_fcmp_absolute (void)
16035 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16036 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16037 N_F_16_32
| N_KEY
);
16038 /* Size field comes from bit mask. */
16039 neon_three_same (neon_quad (rs
), 1, et
.size
== 16 ? (int) et
.size
: -1);
/* VACLE/VACLT: as VACGE/VACGT with the operands exchanged.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
16050 do_neon_step (void)
16052 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16053 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16054 N_F_16_32
| N_KEY
);
16055 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
16059 do_neon_abs_neg (void)
16061 enum neon_shape rs
;
16062 struct neon_type_el et
;
16064 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
16067 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16068 et
= neon_check_type (2, rs
, N_EQK
, N_S_32
| N_F_16_32
| N_KEY
);
16070 if (check_simd_pred_availability (et
.type
== NT_float
,
16071 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
16074 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16075 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16076 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16077 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16078 inst
.instruction
|= neon_quad (rs
) << 6;
16079 inst
.instruction
|= (et
.type
== NT_float
) << 10;
16080 inst
.instruction
|= neon_logbits (et
.size
) << 18;
16082 neon_dp_fixup (&inst
);
16088 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16089 struct neon_type_el et
= neon_check_type (2, rs
,
16090 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16091 int imm
= inst
.operands
[2].imm
;
16092 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
16093 _("immediate out of range for insert"));
16094 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
16100 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16101 struct neon_type_el et
= neon_check_type (2, rs
,
16102 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16103 int imm
= inst
.operands
[2].imm
;
16104 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16105 _("immediate out of range for insert"));
16106 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
16110 do_neon_qshlu_imm (void)
16112 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16113 struct neon_type_el et
= neon_check_type (2, rs
,
16114 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
16115 int imm
= inst
.operands
[2].imm
;
16116 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
16117 _("immediate out of range for shift"));
16118 /* Only encodes the 'U present' variant of the instruction.
16119 In this case, signed types have OP (bit 8) set to 0.
16120 Unsigned types have OP set to 1. */
16121 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
16122 /* The rest of the bits are the same as other immediate shifts. */
16123 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
16127 do_neon_qmovn (void)
16129 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
16130 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
16131 /* Saturating move where operands can be signed or unsigned, and the
16132 destination has the same signedness. */
16133 NEON_ENCODE (INTEGER
, inst
);
16134 if (et
.type
== NT_unsigned
)
16135 inst
.instruction
|= 0xc0;
16137 inst
.instruction
|= 0x80;
16138 neon_two_same (0, 1, et
.size
/ 2);
16142 do_neon_qmovun (void)
16144 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
16145 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
16146 /* Saturating move with unsigned results. Operands must be signed. */
16147 NEON_ENCODE (INTEGER
, inst
);
16148 neon_two_same (0, 1, et
.size
/ 2);
16152 do_neon_rshift_sat_narrow (void)
16154 /* FIXME: Types for narrowing. If operands are signed, results can be signed
16155 or unsigned. If operands are unsigned, results must also be unsigned. */
16156 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
16157 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
16158 int imm
= inst
.operands
[2].imm
;
16159 /* This gets the bounds check, size encoding and immediate bits calculation
16163 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
16164 VQMOVN.I<size> <Dd>, <Qm>. */
16167 inst
.operands
[2].present
= 0;
16168 inst
.instruction
= N_MNEM_vqmovn
;
16173 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16174 _("immediate out of range"));
16175 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
16179 do_neon_rshift_sat_narrow_u (void)
16181 /* FIXME: Types for narrowing. If operands are signed, results can be signed
16182 or unsigned. If operands are unsigned, results must also be unsigned. */
16183 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
16184 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
16185 int imm
= inst
.operands
[2].imm
;
16186 /* This gets the bounds check, size encoding and immediate bits calculation
16190 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
16191 VQMOVUN.I<size> <Dd>, <Qm>. */
16194 inst
.operands
[2].present
= 0;
16195 inst
.instruction
= N_MNEM_vqmovun
;
16200 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16201 _("immediate out of range"));
16202 /* FIXME: The manual is kind of unclear about what value U should have in
16203 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
16205 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
16209 do_neon_movn (void)
16211 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
16212 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
16213 NEON_ENCODE (INTEGER
, inst
);
16214 neon_two_same (0, 1, et
.size
/ 2);
16218 do_neon_rshift_narrow (void)
16220 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
16221 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
16222 int imm
= inst
.operands
[2].imm
;
16223 /* This gets the bounds check, size encoding and immediate bits calculation
16227 /* If immediate is zero then we are a pseudo-instruction for
16228 VMOVN.I<size> <Dd>, <Qm> */
16231 inst
.operands
[2].present
= 0;
16232 inst
.instruction
= N_MNEM_vmovn
;
16237 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16238 _("immediate out of range for narrowing operation"));
16239 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
16243 do_neon_shll (void)
16245 /* FIXME: Type checking when lengthening. */
16246 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
16247 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
16248 unsigned imm
= inst
.operands
[2].imm
;
16250 if (imm
== et
.size
)
16252 /* Maximum shift variant. */
16253 NEON_ENCODE (INTEGER
, inst
);
16254 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16255 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16256 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16257 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16258 inst
.instruction
|= neon_logbits (et
.size
) << 18;
16260 neon_dp_fixup (&inst
);
16264 /* A more-specific type check for non-max versions. */
16265 et
= neon_check_type (2, NS_QDI
,
16266 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16267 NEON_ENCODE (IMMED
, inst
);
16268 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
16272 /* Check the various types for the VCVT instruction, and return which version
16273 the current instruction is. */
16275 #define CVT_FLAVOUR_VAR \
16276 CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
16277 CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
16278 CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
16279 CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
16280 /* Half-precision conversions. */ \
16281 CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
16282 CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
16283 CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
16284 CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
16285 CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
16286 CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
16287 /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
16288 Compared with single/double precision variants, only the co-processor \
16289 field is different, so the encoding flow is reused here. */ \
16290 CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
16291 CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
16292 CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
16293 CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
16294 /* VFP instructions. */ \
16295 CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
16296 CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
16297 CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
16298 CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
16299 CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
16300 CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
16301 /* VFP instructions with bitshift. */ \
16302 CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
16303 CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
16304 CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
16305 CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
16306 CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
16307 CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
16308 CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
16309 CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
16311 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
16312 neon_cvt_flavour_##C,
16314 /* The different types of conversions we can do. */
16315 enum neon_cvt_flavour
16318 neon_cvt_flavour_invalid
,
16319 neon_cvt_flavour_first_fp
= neon_cvt_flavour_f32_f64
16324 static enum neon_cvt_flavour
16325 get_neon_cvt_flavour (enum neon_shape rs
)
16327 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
16328 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
16329 if (et.type != NT_invtype) \
16331 inst.error = NULL; \
16332 return (neon_cvt_flavour_##C); \
16335 struct neon_type_el et
;
16336 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
16337 || rs
== NS_FF
) ? N_VFP
: 0;
16338 /* The instruction versions which take an immediate take one register
16339 argument, which is extended to the width of the full register. Thus the
16340 "source" and "destination" registers must have the same width. Hack that
16341 here by making the size equal to the key (wider, in this case) operand. */
16342 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
16346 return neon_cvt_flavour_invalid
;
16361 /* Neon-syntax VFP conversions. */
16364 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
16366 const char *opname
= 0;
16368 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
16369 || rs
== NS_FHI
|| rs
== NS_HFI
)
16371 /* Conversions with immediate bitshift. */
16372 const char *enc
[] =
16374 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
16380 if (flavour
< (int) ARRAY_SIZE (enc
))
16382 opname
= enc
[flavour
];
16383 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
16384 _("operands 0 and 1 must be the same register"));
16385 inst
.operands
[1] = inst
.operands
[2];
16386 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
16391 /* Conversions without bitshift. */
16392 const char *enc
[] =
16394 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
16400 if (flavour
< (int) ARRAY_SIZE (enc
))
16401 opname
= enc
[flavour
];
16405 do_vfp_nsyn_opcode (opname
);
16407 /* ARMv8.2 fp16 VCVT instruction. */
16408 if (flavour
== neon_cvt_flavour_s32_f16
16409 || flavour
== neon_cvt_flavour_u32_f16
16410 || flavour
== neon_cvt_flavour_f16_u32
16411 || flavour
== neon_cvt_flavour_f16_s32
)
16412 do_scalar_fp16_v82_encode ();
16416 do_vfp_nsyn_cvtz (void)
16418 enum neon_shape rs
= neon_select_shape (NS_FH
, NS_FF
, NS_FD
, NS_NULL
);
16419 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16420 const char *enc
[] =
16422 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
16428 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
16429 do_vfp_nsyn_opcode (enc
[flavour
]);
16433 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
16434 enum neon_cvt_mode mode
)
16439 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16440 D register operands. */
16441 if (flavour
== neon_cvt_flavour_s32_f64
16442 || flavour
== neon_cvt_flavour_u32_f64
)
16443 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16446 if (flavour
== neon_cvt_flavour_s32_f16
16447 || flavour
== neon_cvt_flavour_u32_f16
)
16448 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
16451 set_pred_insn_type (OUTSIDE_PRED_INSN
);
16455 case neon_cvt_flavour_s32_f64
:
16459 case neon_cvt_flavour_s32_f32
:
16463 case neon_cvt_flavour_s32_f16
:
16467 case neon_cvt_flavour_u32_f64
:
16471 case neon_cvt_flavour_u32_f32
:
16475 case neon_cvt_flavour_u32_f16
:
16480 first_error (_("invalid instruction shape"));
16486 case neon_cvt_mode_a
: rm
= 0; break;
16487 case neon_cvt_mode_n
: rm
= 1; break;
16488 case neon_cvt_mode_p
: rm
= 2; break;
16489 case neon_cvt_mode_m
: rm
= 3; break;
16490 default: first_error (_("invalid rounding mode")); return;
16493 NEON_ENCODE (FPV8
, inst
);
16494 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
16495 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
16496 inst
.instruction
|= sz
<< 8;
16498 /* ARMv8.2 fp16 VCVT instruction. */
16499 if (flavour
== neon_cvt_flavour_s32_f16
16500 ||flavour
== neon_cvt_flavour_u32_f16
)
16501 do_scalar_fp16_v82_encode ();
16502 inst
.instruction
|= op
<< 7;
16503 inst
.instruction
|= rm
<< 16;
16504 inst
.instruction
|= 0xf0000000;
16505 inst
.is_neon
= TRUE
;
16509 do_neon_cvt_1 (enum neon_cvt_mode mode
)
16511 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
16512 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
,
16513 NS_FH
, NS_HF
, NS_FHI
, NS_HFI
,
16515 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16517 if (flavour
== neon_cvt_flavour_invalid
)
16520 /* PR11109: Handle round-to-zero for VCVT conversions. */
16521 if (mode
== neon_cvt_mode_z
16522 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
16523 && (flavour
== neon_cvt_flavour_s16_f16
16524 || flavour
== neon_cvt_flavour_u16_f16
16525 || flavour
== neon_cvt_flavour_s32_f32
16526 || flavour
== neon_cvt_flavour_u32_f32
16527 || flavour
== neon_cvt_flavour_s32_f64
16528 || flavour
== neon_cvt_flavour_u32_f64
)
16529 && (rs
== NS_FD
|| rs
== NS_FF
))
16531 do_vfp_nsyn_cvtz ();
16535 /* ARMv8.2 fp16 VCVT conversions. */
16536 if (mode
== neon_cvt_mode_z
16537 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
)
16538 && (flavour
== neon_cvt_flavour_s32_f16
16539 || flavour
== neon_cvt_flavour_u32_f16
)
16542 do_vfp_nsyn_cvtz ();
16543 do_scalar_fp16_v82_encode ();
16547 /* VFP rather than Neon conversions. */
16548 if (flavour
>= neon_cvt_flavour_first_fp
)
16550 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
16551 do_vfp_nsyn_cvt (rs
, flavour
);
16553 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
16564 unsigned enctab
[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
16565 0x0000100, 0x1000100, 0x0, 0x1000000};
16567 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16570 /* Fixed-point conversion with #0 immediate is encoded as an
16571 integer conversion. */
16572 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
16574 NEON_ENCODE (IMMED
, inst
);
16575 if (flavour
!= neon_cvt_flavour_invalid
)
16576 inst
.instruction
|= enctab
[flavour
];
16577 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16578 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16579 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16580 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16581 inst
.instruction
|= neon_quad (rs
) << 6;
16582 inst
.instruction
|= 1 << 21;
16583 if (flavour
< neon_cvt_flavour_s16_f16
)
16585 inst
.instruction
|= 1 << 21;
16586 immbits
= 32 - inst
.operands
[2].imm
;
16587 inst
.instruction
|= immbits
<< 16;
16591 inst
.instruction
|= 3 << 20;
16592 immbits
= 16 - inst
.operands
[2].imm
;
16593 inst
.instruction
|= immbits
<< 16;
16594 inst
.instruction
&= ~(1 << 9);
16597 neon_dp_fixup (&inst
);
16603 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
16605 NEON_ENCODE (FLOAT
, inst
);
16606 set_pred_insn_type (OUTSIDE_PRED_INSN
);
16608 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16611 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16612 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16613 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16614 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16615 inst
.instruction
|= neon_quad (rs
) << 6;
16616 inst
.instruction
|= (flavour
== neon_cvt_flavour_u16_f16
16617 || flavour
== neon_cvt_flavour_u32_f32
) << 7;
16618 inst
.instruction
|= mode
<< 8;
16619 if (flavour
== neon_cvt_flavour_u16_f16
16620 || flavour
== neon_cvt_flavour_s16_f16
)
16621 /* Mask off the original size bits and reencode them. */
16622 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff) | (1 << 18));
16625 inst
.instruction
|= 0xfc000000;
16627 inst
.instruction
|= 0xf0000000;
16633 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080,
16634 0x100, 0x180, 0x0, 0x080};
16636 NEON_ENCODE (INTEGER
, inst
);
16638 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16641 if (flavour
!= neon_cvt_flavour_invalid
)
16642 inst
.instruction
|= enctab
[flavour
];
16644 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16645 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16646 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16647 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16648 inst
.instruction
|= neon_quad (rs
) << 6;
16649 if (flavour
>= neon_cvt_flavour_s16_f16
16650 && flavour
<= neon_cvt_flavour_f16_u16
)
16651 /* Half precision. */
16652 inst
.instruction
|= 1 << 18;
16654 inst
.instruction
|= 2 << 18;
16656 neon_dp_fixup (&inst
);
16661 /* Half-precision conversions for Advanced SIMD -- neon. */
16664 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16668 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
16670 as_bad (_("operand size must match register width"));
16675 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
16677 as_bad (_("operand size must match register width"));
16682 inst
.instruction
= 0x3b60600;
16684 inst
.instruction
= 0x3b60700;
16686 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16687 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16688 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16689 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16690 neon_dp_fixup (&inst
);
16694 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
16695 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
16696 do_vfp_nsyn_cvt (rs
, flavour
);
16698 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
16703 do_neon_cvtr (void)
16705 do_neon_cvt_1 (neon_cvt_mode_x
);
16711 do_neon_cvt_1 (neon_cvt_mode_z
);
16715 do_neon_cvta (void)
16717 do_neon_cvt_1 (neon_cvt_mode_a
);
16721 do_neon_cvtn (void)
16723 do_neon_cvt_1 (neon_cvt_mode_n
);
16727 do_neon_cvtp (void)
16729 do_neon_cvt_1 (neon_cvt_mode_p
);
16733 do_neon_cvtm (void)
16735 do_neon_cvt_1 (neon_cvt_mode_m
);
16739 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
16742 mark_feature_used (&fpu_vfp_ext_armv8
);
16744 encode_arm_vfp_reg (inst
.operands
[0].reg
,
16745 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
16746 encode_arm_vfp_reg (inst
.operands
[1].reg
,
16747 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
16748 inst
.instruction
|= to
? 0x10000 : 0;
16749 inst
.instruction
|= t
? 0x80 : 0;
16750 inst
.instruction
|= is_double
? 0x100 : 0;
16751 do_vfp_cond_or_thumb ();
16755 do_neon_cvttb_1 (bfd_boolean t
)
16757 enum neon_shape rs
= neon_select_shape (NS_HF
, NS_HD
, NS_FH
, NS_FF
, NS_FD
,
16758 NS_DF
, NS_DH
, NS_NULL
);
16762 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
16765 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
16767 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
16770 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
16772 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
16774 /* The VCVTB and VCVTT instructions with D-register operands
16775 don't work for SP only targets. */
16776 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16780 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
16782 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
16784 /* The VCVTB and VCVTT instructions with D-register operands
16785 don't work for SP only targets. */
16786 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16790 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
16797 do_neon_cvtb (void)
16799 do_neon_cvttb_1 (FALSE
);
16804 do_neon_cvtt (void)
16806 do_neon_cvttb_1 (TRUE
);
16810 neon_move_immediate (void)
16812 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
16813 struct neon_type_el et
= neon_check_type (2, rs
,
16814 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
16815 unsigned immlo
, immhi
= 0, immbits
;
16816 int op
, cmode
, float_p
;
16818 constraint (et
.type
== NT_invtype
,
16819 _("operand size must be specified for immediate VMOV"));
16821 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
16822 op
= (inst
.instruction
& (1 << 5)) != 0;
16824 immlo
= inst
.operands
[1].imm
;
16825 if (inst
.operands
[1].regisimm
)
16826 immhi
= inst
.operands
[1].reg
;
16828 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
16829 _("immediate has bits set outside the operand size"));
16831 float_p
= inst
.operands
[1].immisfloat
;
16833 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
16834 et
.size
, et
.type
)) == FAIL
)
16836 /* Invert relevant bits only. */
16837 neon_invert_size (&immlo
, &immhi
, et
.size
);
16838 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
16839 with one or the other; those cases are caught by
16840 neon_cmode_for_move_imm. */
16842 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
16843 &op
, et
.size
, et
.type
)) == FAIL
)
16845 first_error (_("immediate out of range"));
16850 inst
.instruction
&= ~(1 << 5);
16851 inst
.instruction
|= op
<< 5;
16853 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16854 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16855 inst
.instruction
|= neon_quad (rs
) << 6;
16856 inst
.instruction
|= cmode
<< 8;
16858 neon_write_immbits (immbits
);
16864 if (inst
.operands
[1].isreg
)
16866 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16868 NEON_ENCODE (INTEGER
, inst
);
16869 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16870 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16871 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16872 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16873 inst
.instruction
|= neon_quad (rs
) << 6;
16877 NEON_ENCODE (IMMED
, inst
);
16878 neon_move_immediate ();
16881 neon_dp_fixup (&inst
);
16884 /* Encode instructions of form:
16886 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16887 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16890 neon_mixed_length (struct neon_type_el et
, unsigned size
)
16892 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16893 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16894 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16895 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16896 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16897 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16898 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
16899 inst
.instruction
|= neon_logbits (size
) << 20;
16901 neon_dp_fixup (&inst
);
16905 do_neon_dyadic_long (void)
16907 enum neon_shape rs
= neon_select_shape (NS_QDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
16910 if (vfp_or_neon_is_neon (NEON_CHECK_ARCH
| NEON_CHECK_CC
) == FAIL
)
16913 NEON_ENCODE (INTEGER
, inst
);
16914 /* FIXME: Type checking for lengthening op. */
16915 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16916 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16917 neon_mixed_length (et
, et
.size
);
16919 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
16920 && (inst
.cond
== 0xf || inst
.cond
== 0x10))
16922 /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
16923 in an IT block with le/lt conditions. */
16925 if (inst
.cond
== 0xf)
16927 else if (inst
.cond
== 0x10)
16930 inst
.pred_insn_type
= INSIDE_IT_INSN
;
16932 if (inst
.instruction
== N_MNEM_vaddl
)
16934 inst
.instruction
= N_MNEM_vadd
;
16935 do_neon_addsub_if_i ();
16937 else if (inst
.instruction
== N_MNEM_vsubl
)
16939 inst
.instruction
= N_MNEM_vsub
;
16940 do_neon_addsub_if_i ();
16942 else if (inst
.instruction
== N_MNEM_vabdl
)
16944 inst
.instruction
= N_MNEM_vabd
;
16945 do_neon_dyadic_if_su ();
16949 first_error (BAD_FPU
);
16953 do_neon_abal (void)
16955 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16956 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16957 neon_mixed_length (et
, et
.size
);
16961 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
16963 if (inst
.operands
[2].isscalar
)
16965 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
16966 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
16967 NEON_ENCODE (SCALAR
, inst
);
16968 neon_mul_mac (et
, et
.type
== NT_unsigned
);
16972 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16973 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
16974 NEON_ENCODE (INTEGER
, inst
);
16975 neon_mixed_length (et
, et
.size
);
16980 do_neon_mac_maybe_scalar_long (void)
16982 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
16985 /* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
16986 internal SCALAR. QUAD_P is 1 if it's for Q format, otherwise it's 0. */
16989 neon_scalar_for_fmac_fp16_long (unsigned scalar
, unsigned quad_p
)
16991 unsigned regno
= NEON_SCALAR_REG (scalar
);
16992 unsigned elno
= NEON_SCALAR_INDEX (scalar
);
16996 if (regno
> 7 || elno
> 3)
16999 return ((regno
& 0x7)
17000 | ((elno
& 0x1) << 3)
17001 | (((elno
>> 1) & 0x1) << 5));
17005 if (regno
> 15 || elno
> 1)
17008 return (((regno
& 0x1) << 5)
17009 | ((regno
>> 1) & 0x7)
17010 | ((elno
& 0x1) << 3));
17014 first_error (_("scalar out of range for multiply instruction"));
17019 do_neon_fmac_maybe_scalar_long (int subtype
)
17021 enum neon_shape rs
;
17023 /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding. 'size"
17024 field (bits[21:20]) has different meaning. For scalar index variant, it's
17025 used to differentiate add and subtract, otherwise it's with fixed value
17029 if (inst
.cond
!= COND_ALWAYS
)
17030 as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
17031 "behaviour is UNPREDICTABLE"));
17033 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16_fml
),
17036 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
17039 /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
17040 be a scalar index register. */
17041 if (inst
.operands
[2].isscalar
)
17043 high8
= 0xfe000000;
17046 rs
= neon_select_shape (NS_DHS
, NS_QDS
, NS_NULL
);
17050 high8
= 0xfc000000;
17053 inst
.instruction
|= (0x1 << 23);
17054 rs
= neon_select_shape (NS_DHH
, NS_QDD
, NS_NULL
);
17057 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_F16
);
17059 /* "opcode" from template has included "ubit", so simply pass 0 here. Also,
17060 the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
17061 so we simply pass -1 as size. */
17062 unsigned quad_p
= (rs
== NS_QDD
|| rs
== NS_QDS
);
17063 neon_three_same (quad_p
, 0, size
);
17065 /* Undo neon_dp_fixup. Redo the high eight bits. */
17066 inst
.instruction
&= 0x00ffffff;
17067 inst
.instruction
|= high8
;
17069 #define LOW1(R) ((R) & 0x1)
17070 #define HI4(R) (((R) >> 1) & 0xf)
17071 /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
17072 whether the instruction is in Q form and whether Vm is a scalar indexed
17074 if (inst
.operands
[2].isscalar
)
17077 = neon_scalar_for_fmac_fp16_long (inst
.operands
[2].reg
, quad_p
);
17078 inst
.instruction
&= 0xffffffd0;
17079 inst
.instruction
|= rm
;
17083 /* Redo Rn as well. */
17084 inst
.instruction
&= 0xfff0ff7f;
17085 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
17086 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
17091 /* Redo Rn and Rm. */
17092 inst
.instruction
&= 0xfff0ff50;
17093 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
17094 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
17095 inst
.instruction
|= HI4 (inst
.operands
[2].reg
);
17096 inst
.instruction
|= LOW1 (inst
.operands
[2].reg
) << 5;
17101 do_neon_vfmal (void)
17103 return do_neon_fmac_maybe_scalar_long (0);
17107 do_neon_vfmsl (void)
17109 return do_neon_fmac_maybe_scalar_long (1);
17113 do_neon_dyadic_wide (void)
17115 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
17116 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
17117 neon_mixed_length (et
, et
.size
);
17121 do_neon_dyadic_narrow (void)
17123 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17124 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
17125 /* Operand sign is unimportant, and the U bit is part of the opcode,
17126 so force the operand type to integer. */
17127 et
.type
= NT_integer
;
17128 neon_mixed_length (et
, et
.size
/ 2);
17132 do_neon_mul_sat_scalar_long (void)
17134 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
17138 do_neon_vmull (void)
17140 if (inst
.operands
[2].isscalar
)
17141 do_neon_mac_maybe_scalar_long ();
17144 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17145 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
17147 if (et
.type
== NT_poly
)
17148 NEON_ENCODE (POLY
, inst
);
17150 NEON_ENCODE (INTEGER
, inst
);
17152 /* For polynomial encoding the U bit must be zero, and the size must
17153 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
17154 obviously, as 0b10). */
17157 /* Check we're on the correct architecture. */
17158 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
17160 _("Instruction form not available on this architecture.");
17165 neon_mixed_length (et
, et
.size
);
17172 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
17173 struct neon_type_el et
= neon_check_type (3, rs
,
17174 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
17175 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
17177 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
17178 _("shift out of range"));
17179 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17180 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17181 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17182 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17183 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17184 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17185 inst
.instruction
|= neon_quad (rs
) << 6;
17186 inst
.instruction
|= imm
<< 8;
17188 neon_dp_fixup (&inst
);
17194 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17195 struct neon_type_el et
= neon_check_type (2, rs
,
17196 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17197 unsigned op
= (inst
.instruction
>> 7) & 3;
17198 /* N (width of reversed regions) is encoded as part of the bitmask. We
17199 extract it here to check the elements to be reversed are smaller.
17200 Otherwise we'd get a reserved instruction. */
17201 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
17202 gas_assert (elsize
!= 0);
17203 constraint (et
.size
>= elsize
,
17204 _("elements must be smaller than reversal region"));
17205 neon_two_same (neon_quad (rs
), 1, et
.size
);
17211 if (inst
.operands
[1].isscalar
)
17213 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
17214 struct neon_type_el et
= neon_check_type (2, rs
,
17215 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17216 unsigned sizebits
= et
.size
>> 3;
17217 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
17218 int logsize
= neon_logbits (et
.size
);
17219 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
17221 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
17224 NEON_ENCODE (SCALAR
, inst
);
17225 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17226 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17227 inst
.instruction
|= LOW4 (dm
);
17228 inst
.instruction
|= HI1 (dm
) << 5;
17229 inst
.instruction
|= neon_quad (rs
) << 6;
17230 inst
.instruction
|= x
<< 17;
17231 inst
.instruction
|= sizebits
<< 16;
17233 neon_dp_fixup (&inst
);
17237 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
17238 struct neon_type_el et
= neon_check_type (2, rs
,
17239 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
17240 /* Duplicate ARM register to lanes of vector. */
17241 NEON_ENCODE (ARMREG
, inst
);
17244 case 8: inst
.instruction
|= 0x400000; break;
17245 case 16: inst
.instruction
|= 0x000020; break;
17246 case 32: inst
.instruction
|= 0x000000; break;
17249 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
17250 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
17251 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
17252 inst
.instruction
|= neon_quad (rs
) << 21;
17253 /* The encoding for this instruction is identical for the ARM and Thumb
17254 variants, except for the condition field. */
17255 do_vfp_cond_or_thumb ();
17259 /* VMOV has particularly many variations. It can be one of:
17260 0. VMOV<c><q> <Qd>, <Qm>
17261 1. VMOV<c><q> <Dd>, <Dm>
17262 (Register operations, which are VORR with Rm = Rn.)
17263 2. VMOV<c><q>.<dt> <Qd>, #<imm>
17264 3. VMOV<c><q>.<dt> <Dd>, #<imm>
17266 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
17267 (ARM register to scalar.)
17268 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
17269 (Two ARM registers to vector.)
17270 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
17271 (Scalar to ARM register.)
17272 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
17273 (Vector to two ARM registers.)
17274 8. VMOV.F32 <Sd>, <Sm>
17275 9. VMOV.F64 <Dd>, <Dm>
17276 (VFP register moves.)
17277 10. VMOV.F32 <Sd>, #imm
17278 11. VMOV.F64 <Dd>, #imm
17279 (VFP float immediate load.)
17280 12. VMOV <Rd>, <Sm>
17281 (VFP single to ARM reg.)
17282 13. VMOV <Sd>, <Rm>
17283 (ARM reg to VFP single.)
17284 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
17285 (Two ARM regs to two VFP singles.)
17286 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
17287 (Two VFP singles to two ARM regs.)
17289 These cases can be disambiguated using neon_select_shape, except cases 1/9
17290 and 3/11 which depend on the operand type too.
17292 All the encoded bits are hardcoded by this function.
17294 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
17295 Cases 5, 7 may be used with VFPv2 and above.
17297 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
17298 can specify a type where it doesn't make sense to, and is ignored). */
17303 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
17304 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
,
17305 NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
17306 NS_HR
, NS_RH
, NS_HI
, NS_NULL
);
17307 struct neon_type_el et
;
17308 const char *ldconst
= 0;
17312 case NS_DD
: /* case 1/9. */
17313 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
17314 /* It is not an error here if no type is given. */
17316 if (et
.type
== NT_float
&& et
.size
== 64)
17318 do_vfp_nsyn_opcode ("fcpyd");
17321 /* fall through. */
17323 case NS_QQ
: /* case 0/1. */
17325 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17327 /* The architecture manual I have doesn't explicitly state which
17328 value the U bit should have for register->register moves, but
17329 the equivalent VORR instruction has U = 0, so do that. */
17330 inst
.instruction
= 0x0200110;
17331 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17332 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17333 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17334 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17335 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17336 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17337 inst
.instruction
|= neon_quad (rs
) << 6;
17339 neon_dp_fixup (&inst
);
17343 case NS_DI
: /* case 3/11. */
17344 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
17346 if (et
.type
== NT_float
&& et
.size
== 64)
17348 /* case 11 (fconstd). */
17349 ldconst
= "fconstd";
17350 goto encode_fconstd
;
17352 /* fall through. */
17354 case NS_QI
: /* case 2/3. */
17355 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17357 inst
.instruction
= 0x0800010;
17358 neon_move_immediate ();
17359 neon_dp_fixup (&inst
);
17362 case NS_SR
: /* case 4. */
17364 unsigned bcdebits
= 0;
17366 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
17367 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
17369 /* .<size> is optional here, defaulting to .32. */
17370 if (inst
.vectype
.elems
== 0
17371 && inst
.operands
[0].vectype
.type
== NT_invtype
17372 && inst
.operands
[1].vectype
.type
== NT_invtype
)
17374 inst
.vectype
.el
[0].type
= NT_untyped
;
17375 inst
.vectype
.el
[0].size
= 32;
17376 inst
.vectype
.elems
= 1;
17379 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
17380 logsize
= neon_logbits (et
.size
);
17382 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
17384 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
17385 && et
.size
!= 32, _(BAD_FPU
));
17386 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
17387 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
17391 case 8: bcdebits
= 0x8; break;
17392 case 16: bcdebits
= 0x1; break;
17393 case 32: bcdebits
= 0x0; break;
17397 bcdebits
|= x
<< logsize
;
17399 inst
.instruction
= 0xe000b10;
17400 do_vfp_cond_or_thumb ();
17401 inst
.instruction
|= LOW4 (dn
) << 16;
17402 inst
.instruction
|= HI1 (dn
) << 7;
17403 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
17404 inst
.instruction
|= (bcdebits
& 3) << 5;
17405 inst
.instruction
|= (bcdebits
>> 2) << 21;
17409 case NS_DRR
: /* case 5 (fmdrr). */
17410 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
17413 inst
.instruction
= 0xc400b10;
17414 do_vfp_cond_or_thumb ();
17415 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
17416 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
17417 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
17418 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
17421 case NS_RS
: /* case 6. */
17424 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
17425 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
17426 unsigned abcdebits
= 0;
17428 /* .<dt> is optional here, defaulting to .32. */
17429 if (inst
.vectype
.elems
== 0
17430 && inst
.operands
[0].vectype
.type
== NT_invtype
17431 && inst
.operands
[1].vectype
.type
== NT_invtype
)
17433 inst
.vectype
.el
[0].type
= NT_untyped
;
17434 inst
.vectype
.el
[0].size
= 32;
17435 inst
.vectype
.elems
= 1;
17438 et
= neon_check_type (2, NS_NULL
,
17439 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
17440 logsize
= neon_logbits (et
.size
);
17442 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
17444 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
17445 && et
.size
!= 32, _(BAD_FPU
));
17446 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
17447 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
17451 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
17452 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
17453 case 32: abcdebits
= 0x00; break;
17457 abcdebits
|= x
<< logsize
;
17458 inst
.instruction
= 0xe100b10;
17459 do_vfp_cond_or_thumb ();
17460 inst
.instruction
|= LOW4 (dn
) << 16;
17461 inst
.instruction
|= HI1 (dn
) << 7;
17462 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
17463 inst
.instruction
|= (abcdebits
& 3) << 5;
17464 inst
.instruction
|= (abcdebits
>> 2) << 21;
17468 case NS_RRD
: /* case 7 (fmrrd). */
17469 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
17472 inst
.instruction
= 0xc500b10;
17473 do_vfp_cond_or_thumb ();
17474 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
17475 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17476 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17477 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17480 case NS_FF
: /* case 8 (fcpys). */
17481 do_vfp_nsyn_opcode ("fcpys");
17485 case NS_FI
: /* case 10 (fconsts). */
17486 ldconst
= "fconsts";
17488 if (!inst
.operands
[1].immisfloat
)
17491 /* Immediate has to fit in 8 bits so float is enough. */
17492 float imm
= (float) inst
.operands
[1].imm
;
17493 memcpy (&new_imm
, &imm
, sizeof (float));
17494 /* But the assembly may have been written to provide an integer
17495 bit pattern that equates to a float, so check that the
17496 conversion has worked. */
17497 if (is_quarter_float (new_imm
))
17499 if (is_quarter_float (inst
.operands
[1].imm
))
17500 as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));
17502 inst
.operands
[1].imm
= new_imm
;
17503 inst
.operands
[1].immisfloat
= 1;
17507 if (is_quarter_float (inst
.operands
[1].imm
))
17509 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
17510 do_vfp_nsyn_opcode (ldconst
);
17512 /* ARMv8.2 fp16 vmov.f16 instruction. */
17514 do_scalar_fp16_v82_encode ();
17517 first_error (_("immediate out of range"));
17521 case NS_RF
: /* case 12 (fmrs). */
17522 do_vfp_nsyn_opcode ("fmrs");
17523 /* ARMv8.2 fp16 vmov.f16 instruction. */
17525 do_scalar_fp16_v82_encode ();
17529 case NS_FR
: /* case 13 (fmsr). */
17530 do_vfp_nsyn_opcode ("fmsr");
17531 /* ARMv8.2 fp16 vmov.f16 instruction. */
17533 do_scalar_fp16_v82_encode ();
17536 /* The encoders for the fmrrs and fmsrr instructions expect three operands
17537 (one of which is a list), but we have parsed four. Do some fiddling to
17538 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
17540 case NS_RRFF
: /* case 14 (fmrrs). */
17541 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
17542 _("VFP registers must be adjacent"));
17543 inst
.operands
[2].imm
= 2;
17544 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
17545 do_vfp_nsyn_opcode ("fmrrs");
17548 case NS_FFRR
: /* case 15 (fmsrr). */
17549 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
17550 _("VFP registers must be adjacent"));
17551 inst
.operands
[1] = inst
.operands
[2];
17552 inst
.operands
[2] = inst
.operands
[3];
17553 inst
.operands
[0].imm
= 2;
17554 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
17555 do_vfp_nsyn_opcode ("fmsrr");
17559 /* neon_select_shape has determined that the instruction
17560 shape is wrong and has already set the error message. */
17569 do_neon_rshift_round_imm (void)
17571 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
17572 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
17573 int imm
= inst
.operands
[2].imm
;
17575 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
17578 inst
.operands
[2].present
= 0;
17583 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
17584 _("immediate out of range for shift"));
17585 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
17590 do_neon_movhf (void)
17592 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_NULL
);
17593 constraint (rs
!= NS_HH
, _("invalid suffix"));
17595 if (inst
.cond
!= COND_ALWAYS
)
17599 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
17600 " the behaviour is UNPREDICTABLE"));
17604 inst
.error
= BAD_COND
;
17609 do_vfp_sp_monadic ();
17612 inst
.instruction
|= 0xf0000000;
17616 do_neon_movl (void)
17618 struct neon_type_el et
= neon_check_type (2, NS_QD
,
17619 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
17620 unsigned sizebits
= et
.size
>> 3;
17621 inst
.instruction
|= sizebits
<< 19;
17622 neon_two_same (0, et
.type
== NT_unsigned
, -1);
17628 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17629 struct neon_type_el et
= neon_check_type (2, rs
,
17630 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17631 NEON_ENCODE (INTEGER
, inst
);
17632 neon_two_same (neon_quad (rs
), 1, et
.size
);
17636 do_neon_zip_uzp (void)
17638 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17639 struct neon_type_el et
= neon_check_type (2, rs
,
17640 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17641 if (rs
== NS_DD
&& et
.size
== 32)
17643 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
17644 inst
.instruction
= N_MNEM_vtrn
;
17648 neon_two_same (neon_quad (rs
), 1, et
.size
);
17652 do_neon_sat_abs_neg (void)
17654 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17655 struct neon_type_el et
= neon_check_type (2, rs
,
17656 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
17657 neon_two_same (neon_quad (rs
), 1, et
.size
);
17661 do_neon_pair_long (void)
17663 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17664 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
17665 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
17666 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
17667 neon_two_same (neon_quad (rs
), 1, et
.size
);
17671 do_neon_recip_est (void)
17673 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17674 struct neon_type_el et
= neon_check_type (2, rs
,
17675 N_EQK
| N_FLT
, N_F_16_32
| N_U32
| N_KEY
);
17676 inst
.instruction
|= (et
.type
== NT_float
) << 8;
17677 neon_two_same (neon_quad (rs
), 1, et
.size
);
17683 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17684 struct neon_type_el et
= neon_check_type (2, rs
,
17685 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
17686 neon_two_same (neon_quad (rs
), 1, et
.size
);
17692 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17693 struct neon_type_el et
= neon_check_type (2, rs
,
17694 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
17695 neon_two_same (neon_quad (rs
), 1, et
.size
);
17701 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17702 struct neon_type_el et
= neon_check_type (2, rs
,
17703 N_EQK
| N_INT
, N_8
| N_KEY
);
17704 neon_two_same (neon_quad (rs
), 1, et
.size
);
17710 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17711 neon_two_same (neon_quad (rs
), 1, -1);
17715 do_neon_tbl_tbx (void)
17717 unsigned listlenbits
;
17718 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
17720 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
17722 first_error (_("bad list length for table lookup"));
17726 listlenbits
= inst
.operands
[1].imm
- 1;
17727 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17728 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17729 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17730 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17731 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17732 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17733 inst
.instruction
|= listlenbits
<< 8;
17735 neon_dp_fixup (&inst
);
17739 do_neon_ldm_stm (void)
17741 /* P, U and L bits are part of bitmask. */
17742 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
17743 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
17745 if (inst
.operands
[1].issingle
)
17747 do_vfp_nsyn_ldm_stm (is_dbmode
);
17751 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
17752 _("writeback (!) must be used for VLDMDB and VSTMDB"));
17754 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
17755 _("register list must contain at least 1 and at most 16 "
17758 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
17759 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
17760 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
17761 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
17763 inst
.instruction
|= offsetbits
;
17765 do_vfp_cond_or_thumb ();
17769 do_neon_ldr_str (void)
17771 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
17773 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
17774 And is UNPREDICTABLE in thumb mode. */
17776 && inst
.operands
[1].reg
== REG_PC
17777 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
17780 inst
.error
= _("Use of PC here is UNPREDICTABLE");
17781 else if (warn_on_deprecated
)
17782 as_tsktsk (_("Use of PC here is deprecated"));
17785 if (inst
.operands
[0].issingle
)
17788 do_vfp_nsyn_opcode ("flds");
17790 do_vfp_nsyn_opcode ("fsts");
17792 /* ARMv8.2 vldr.16/vstr.16 instruction. */
17793 if (inst
.vectype
.el
[0].size
== 16)
17794 do_scalar_fp16_v82_encode ();
17799 do_vfp_nsyn_opcode ("fldd");
17801 do_vfp_nsyn_opcode ("fstd");
17806 do_t_vldr_vstr_sysreg (void)
17808 int fp_vldr_bitno
= 20, sysreg_vldr_bitno
= 20;
17809 bfd_boolean is_vldr
= ((inst
.instruction
& (1 << fp_vldr_bitno
)) != 0);
17811 /* Use of PC is UNPREDICTABLE. */
17812 if (inst
.operands
[1].reg
== REG_PC
)
17813 inst
.error
= _("Use of PC here is UNPREDICTABLE");
17815 if (inst
.operands
[1].immisreg
)
17816 inst
.error
= _("instruction does not accept register index");
17818 if (!inst
.operands
[1].isreg
)
17819 inst
.error
= _("instruction does not accept PC-relative addressing");
17821 if (abs (inst
.operands
[1].imm
) >= (1 << 7))
17822 inst
.error
= _("immediate value out of range");
17824 inst
.instruction
= 0xec000f80;
17826 inst
.instruction
|= 1 << sysreg_vldr_bitno
;
17827 encode_arm_cp_address (1, TRUE
, FALSE
, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
);
17828 inst
.instruction
|= (inst
.operands
[0].imm
& 0x7) << 13;
17829 inst
.instruction
|= (inst
.operands
[0].imm
& 0x8) << 19;
17833 do_vldr_vstr (void)
17835 bfd_boolean sysreg_op
= !inst
.operands
[0].isreg
;
17837 /* VLDR/VSTR (System Register). */
17840 if (!mark_feature_used (&arm_ext_v8_1m_main
))
17841 as_bad (_("Instruction not permitted on this architecture"));
17843 do_t_vldr_vstr_sysreg ();
17848 if (!mark_feature_used (&fpu_vfp_ext_v1xd
))
17849 as_bad (_("Instruction not permitted on this architecture"));
17850 do_neon_ldr_str ();
17854 /* "interleave" version also handles non-interleaving register VLD1/VST1
17858 do_neon_ld_st_interleave (void)
17860 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
17861 N_8
| N_16
| N_32
| N_64
);
17862 unsigned alignbits
= 0;
17864 /* The bits in this table go:
17865 0: register stride of one (0) or two (1)
17866 1,2: register list length, minus one (1, 2, 3, 4).
17867 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
17868 We use -1 for invalid entries. */
17869 const int typetable
[] =
17871 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
17872 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
17873 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
17874 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
17878 if (et
.type
== NT_invtype
)
17881 if (inst
.operands
[1].immisalign
)
17882 switch (inst
.operands
[1].imm
>> 8)
17884 case 64: alignbits
= 1; break;
17886 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
17887 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
17888 goto bad_alignment
;
17892 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
17893 goto bad_alignment
;
17898 first_error (_("bad alignment"));
17902 inst
.instruction
|= alignbits
<< 4;
17903 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17905 /* Bits [4:6] of the immediate in a list specifier encode register stride
17906 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
17907 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
17908 up the right value for "type" in a table based on this value and the given
17909 list style, then stick it back. */
17910 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
17911 | (((inst
.instruction
>> 8) & 3) << 3);
17913 typebits
= typetable
[idx
];
17915 constraint (typebits
== -1, _("bad list type for instruction"));
17916 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
17917 _("bad element type for instruction"));
17919 inst
.instruction
&= ~0xf00;
17920 inst
.instruction
|= typebits
<< 8;
17923 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
17924 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
17925 otherwise. The variable arguments are a list of pairs of legal (size, align)
17926 values, terminated with -1. */
17929 neon_alignment_bit (int size
, int align
, int *do_alignment
, ...)
17932 int result
= FAIL
, thissize
, thisalign
;
17934 if (!inst
.operands
[1].immisalign
)
17940 va_start (ap
, do_alignment
);
17944 thissize
= va_arg (ap
, int);
17945 if (thissize
== -1)
17947 thisalign
= va_arg (ap
, int);
17949 if (size
== thissize
&& align
== thisalign
)
17952 while (result
!= SUCCESS
);
17956 if (result
== SUCCESS
)
17959 first_error (_("unsupported alignment for instruction"));
17965 do_neon_ld_st_lane (void)
17967 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
17968 int align_good
, do_alignment
= 0;
17969 int logsize
= neon_logbits (et
.size
);
17970 int align
= inst
.operands
[1].imm
>> 8;
17971 int n
= (inst
.instruction
>> 8) & 3;
17972 int max_el
= 64 / et
.size
;
17974 if (et
.type
== NT_invtype
)
17977 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
17978 _("bad list length"));
17979 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
17980 _("scalar index out of range"));
17981 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
17983 _("stride of 2 unavailable when element size is 8"));
17987 case 0: /* VLD1 / VST1. */
17988 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 16, 16,
17990 if (align_good
== FAIL
)
17994 unsigned alignbits
= 0;
17997 case 16: alignbits
= 0x1; break;
17998 case 32: alignbits
= 0x3; break;
18001 inst
.instruction
|= alignbits
<< 4;
18005 case 1: /* VLD2 / VST2. */
18006 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 16,
18007 16, 32, 32, 64, -1);
18008 if (align_good
== FAIL
)
18011 inst
.instruction
|= 1 << 4;
18014 case 2: /* VLD3 / VST3. */
18015 constraint (inst
.operands
[1].immisalign
,
18016 _("can't use alignment with this instruction"));
18019 case 3: /* VLD4 / VST4. */
18020 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
18021 16, 64, 32, 64, 32, 128, -1);
18022 if (align_good
== FAIL
)
18026 unsigned alignbits
= 0;
18029 case 8: alignbits
= 0x1; break;
18030 case 16: alignbits
= 0x1; break;
18031 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
18034 inst
.instruction
|= alignbits
<< 4;
18041 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
18042 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18043 inst
.instruction
|= 1 << (4 + logsize
);
18045 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
18046 inst
.instruction
|= logsize
<< 10;
18049 /* Encode single n-element structure to all lanes VLD<n> instructions. */
18052 do_neon_ld_dup (void)
18054 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
18055 int align_good
, do_alignment
= 0;
18057 if (et
.type
== NT_invtype
)
18060 switch ((inst
.instruction
>> 8) & 3)
18062 case 0: /* VLD1. */
18063 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
18064 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
18065 &do_alignment
, 16, 16, 32, 32, -1);
18066 if (align_good
== FAIL
)
18068 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
18071 case 2: inst
.instruction
|= 1 << 5; break;
18072 default: first_error (_("bad list length")); return;
18074 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18077 case 1: /* VLD2. */
18078 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
18079 &do_alignment
, 8, 16, 16, 32, 32, 64,
18081 if (align_good
== FAIL
)
18083 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
18084 _("bad list length"));
18085 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18086 inst
.instruction
|= 1 << 5;
18087 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18090 case 2: /* VLD3. */
18091 constraint (inst
.operands
[1].immisalign
,
18092 _("can't use alignment with this instruction"));
18093 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
18094 _("bad list length"));
18095 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18096 inst
.instruction
|= 1 << 5;
18097 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18100 case 3: /* VLD4. */
18102 int align
= inst
.operands
[1].imm
>> 8;
18103 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
18104 16, 64, 32, 64, 32, 128, -1);
18105 if (align_good
== FAIL
)
18107 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
18108 _("bad list length"));
18109 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18110 inst
.instruction
|= 1 << 5;
18111 if (et
.size
== 32 && align
== 128)
18112 inst
.instruction
|= 0x3 << 6;
18114 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18121 inst
.instruction
|= do_alignment
<< 4;
18124 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
18125 apart from bits [11:4]. */
18128 do_neon_ldx_stx (void)
18130 if (inst
.operands
[1].isreg
)
18131 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
18133 switch (NEON_LANE (inst
.operands
[0].imm
))
18135 case NEON_INTERLEAVE_LANES
:
18136 NEON_ENCODE (INTERLV
, inst
);
18137 do_neon_ld_st_interleave ();
18140 case NEON_ALL_LANES
:
18141 NEON_ENCODE (DUP
, inst
);
18142 if (inst
.instruction
== N_INV
)
18144 first_error ("only loads support such operands");
18151 NEON_ENCODE (LANE
, inst
);
18152 do_neon_ld_st_lane ();
18155 /* L bit comes from bit mask. */
18156 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18157 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18158 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
18160 if (inst
.operands
[1].postind
)
18162 int postreg
= inst
.operands
[1].imm
& 0xf;
18163 constraint (!inst
.operands
[1].immisreg
,
18164 _("post-index must be a register"));
18165 constraint (postreg
== 0xd || postreg
== 0xf,
18166 _("bad register for post-index"));
18167 inst
.instruction
|= postreg
;
18171 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
18172 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
18173 || inst
.relocs
[0].exp
.X_add_number
!= 0,
18176 if (inst
.operands
[1].writeback
)
18178 inst
.instruction
|= 0xd;
18181 inst
.instruction
|= 0xf;
18185 inst
.instruction
|= 0xf9000000;
18187 inst
.instruction
|= 0xf4000000;
18192 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
18194 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
18195 D register operands. */
18196 if (neon_shape_class
[rs
] == SC_DOUBLE
)
18197 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
18200 NEON_ENCODE (FPV8
, inst
);
18202 if (rs
== NS_FFF
|| rs
== NS_HHH
)
18204 do_vfp_sp_dyadic ();
18206 /* ARMv8.2 fp16 instruction. */
18208 do_scalar_fp16_v82_encode ();
18211 do_vfp_dp_rd_rn_rm ();
18214 inst
.instruction
|= 0x100;
18216 inst
.instruction
|= 0xf0000000;
18222 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18224 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
18225 first_error (_("invalid instruction shape"));
18231 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18233 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
18236 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
18239 neon_dyadic_misc (NT_untyped
, N_F_16_32
, 0);
18243 do_vrint_1 (enum neon_cvt_mode mode
)
18245 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
18246 struct neon_type_el et
;
18251 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
18252 D register operands. */
18253 if (neon_shape_class
[rs
] == SC_DOUBLE
)
18254 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
18257 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
18259 if (et
.type
!= NT_invtype
)
18261 /* VFP encodings. */
18262 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
18263 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
18264 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18266 NEON_ENCODE (FPV8
, inst
);
18267 if (rs
== NS_FF
|| rs
== NS_HH
)
18268 do_vfp_sp_monadic ();
18270 do_vfp_dp_rd_rm ();
18274 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
18275 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
18276 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
18277 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
18278 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
18279 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
18280 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
18284 inst
.instruction
|= (rs
== NS_DD
) << 8;
18285 do_vfp_cond_or_thumb ();
18287 /* ARMv8.2 fp16 vrint instruction. */
18289 do_scalar_fp16_v82_encode ();
18293 /* Neon encodings (or something broken...). */
18295 et
= neon_check_type (2, rs
, N_EQK
, N_F_16_32
| N_KEY
);
18297 if (et
.type
== NT_invtype
)
18300 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18301 NEON_ENCODE (FLOAT
, inst
);
18303 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
18306 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18307 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18308 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
18309 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
18310 inst
.instruction
|= neon_quad (rs
) << 6;
18311 /* Mask off the original size bits and reencode them. */
18312 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff)
18313 | neon_logbits (et
.size
) << 18);
18317 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
18318 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
18319 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
18320 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
18321 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
18322 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
18323 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
18328 inst
.instruction
|= 0xfc000000;
18330 inst
.instruction
|= 0xf0000000;
18337 do_vrint_1 (neon_cvt_mode_x
);
18343 do_vrint_1 (neon_cvt_mode_z
);
18349 do_vrint_1 (neon_cvt_mode_r
);
18355 do_vrint_1 (neon_cvt_mode_a
);
18361 do_vrint_1 (neon_cvt_mode_n
);
18367 do_vrint_1 (neon_cvt_mode_p
);
18373 do_vrint_1 (neon_cvt_mode_m
);
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (opnd);
  unsigned elno = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16 && elno < 2 && regno < 16)
    return regno | (elno << 4);
  else if (elsize == 32 && elno == 0)
    return regno;

  first_error (_("scalar out of range"));
  return 0;
}
18394 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18396 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
18397 _("expression too complex"));
18398 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
18399 constraint (rot
!= 0 && rot
!= 90 && rot
!= 180 && rot
!= 270,
18400 _("immediate out of range"));
18402 if (inst
.operands
[2].isscalar
)
18404 enum neon_shape rs
= neon_select_shape (NS_DDSI
, NS_QQSI
, NS_NULL
);
18405 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18406 N_KEY
| N_F16
| N_F32
).size
;
18407 unsigned m
= neon_scalar_for_vcmla (inst
.operands
[2].reg
, size
);
18409 inst
.instruction
= 0xfe000800;
18410 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18411 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18412 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
18413 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
18414 inst
.instruction
|= LOW4 (m
);
18415 inst
.instruction
|= HI1 (m
) << 5;
18416 inst
.instruction
|= neon_quad (rs
) << 6;
18417 inst
.instruction
|= rot
<< 20;
18418 inst
.instruction
|= (size
== 32) << 23;
18422 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
18423 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18424 N_KEY
| N_F16
| N_F32
).size
;
18425 neon_three_same (neon_quad (rs
), 0, -1);
18426 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
18427 inst
.instruction
|= 0xfc200800;
18428 inst
.instruction
|= rot
<< 23;
18429 inst
.instruction
|= (size
== 32) << 20;
18436 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18438 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
18439 _("expression too complex"));
18440 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
18441 constraint (rot
!= 90 && rot
!= 270, _("immediate out of range"));
18442 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
18443 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18444 N_KEY
| N_F16
| N_F32
).size
;
18445 neon_three_same (neon_quad (rs
), 0, -1);
18446 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
18447 inst
.instruction
|= 0xfc800800;
18448 inst
.instruction
|= (rot
== 270) << 24;
18449 inst
.instruction
|= (size
== 32) << 20;
18452 /* Dot Product instructions encoding support. */
18455 do_neon_dotproduct (int unsigned_p
)
18457 enum neon_shape rs
;
18458 unsigned scalar_oprd2
= 0;
18461 if (inst
.cond
!= COND_ALWAYS
)
18462 as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
18463 "is UNPREDICTABLE"));
18465 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18468 /* Dot Product instructions are in three-same D/Q register format or the third
18469 operand can be a scalar index register. */
18470 if (inst
.operands
[2].isscalar
)
18472 scalar_oprd2
= neon_scalar_for_mul (inst
.operands
[2].reg
, 32);
18473 high8
= 0xfe000000;
18474 rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
18478 high8
= 0xfc000000;
18479 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
18483 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_U8
);
18485 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_S8
);
18487 /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
18488 Product instruction, so we pass 0 as the "ubit" parameter. And the
18489 "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter. */
18490 neon_three_same (neon_quad (rs
), 0, 32);
18492 /* Undo neon_dp_fixup. Dot Product instructions are using a slightly
18493 different NEON three-same encoding. */
18494 inst
.instruction
&= 0x00ffffff;
18495 inst
.instruction
|= high8
;
18496 /* Encode 'U' bit which indicates signedness. */
18497 inst
.instruction
|= (unsigned_p
? 1 : 0) << 4;
18498 /* Re-encode operand2 if it's indexed scalar operand. What has been encoded
18499 from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
18500 the instruction encoding. */
18501 if (inst
.operands
[2].isscalar
)
18503 inst
.instruction
&= 0xffffffd0;
18504 inst
.instruction
|= LOW4 (scalar_oprd2
);
18505 inst
.instruction
|= HI1 (scalar_oprd2
) << 5;
/* Dot Product instructions for signed integer.  */

static void
do_neon_dotproduct_s (void)
{
  return do_neon_dotproduct (0);
}
/* Dot Product instructions for unsigned integer.  */

static void
do_neon_dotproduct_u (void)
{
  return do_neon_dotproduct (1);
}
18525 /* Crypto v1 instructions. */
18527 do_crypto_2op_1 (unsigned elttype
, int op
)
18529 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18531 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
18537 NEON_ENCODE (INTEGER
, inst
);
18538 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18539 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18540 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
18541 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
18543 inst
.instruction
|= op
<< 6;
18546 inst
.instruction
|= 0xfc000000;
18548 inst
.instruction
|= 0xf0000000;
18552 do_crypto_3op_1 (int u
, int op
)
18554 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18556 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
18557 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
18562 NEON_ENCODE (INTEGER
, inst
);
18563 neon_three_same (1, u
, 8 << op
);
18569 do_crypto_2op_1 (N_8
, 0);
18575 do_crypto_2op_1 (N_8
, 1);
18581 do_crypto_2op_1 (N_8
, 2);
18587 do_crypto_2op_1 (N_8
, 3);
18593 do_crypto_3op_1 (0, 0);
18599 do_crypto_3op_1 (0, 1);
18605 do_crypto_3op_1 (0, 2);
18611 do_crypto_3op_1 (0, 3);
18617 do_crypto_3op_1 (1, 0);
18623 do_crypto_3op_1 (1, 1);
18627 do_sha256su1 (void)
18629 do_crypto_3op_1 (1, 2);
18635 do_crypto_2op_1 (N_32
, -1);
18641 do_crypto_2op_1 (N_32
, 0);
18645 do_sha256su0 (void)
18647 do_crypto_2op_1 (N_32
, 1);
18651 do_crc32_1 (unsigned int poly
, unsigned int sz
)
18653 unsigned int Rd
= inst
.operands
[0].reg
;
18654 unsigned int Rn
= inst
.operands
[1].reg
;
18655 unsigned int Rm
= inst
.operands
[2].reg
;
18657 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18658 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
18659 inst
.instruction
|= LOW4 (Rn
) << 16;
18660 inst
.instruction
|= LOW4 (Rm
);
18661 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
18662 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
18664 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
18665 as_warn (UNPRED_REG ("r15"));
18707 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
18709 neon_check_type (2, NS_FD
, N_S32
, N_F64
);
18710 do_vfp_sp_dp_cvt ();
18711 do_vfp_cond_or_thumb ();
18715 /* Overall per-instruction processing. */
18717 /* We need to be able to fix up arbitrary expressions in some statements.
18718 This is so that we can handle symbols that are an arbitrary distance from
18719 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
18720 which returns part of an address in a form which will be valid for
18721 a data instruction. We do this by pushing the expression into a symbol
18722 in the expr_section, and creating a fix for that. */
18725 fix_new_arm (fragS
* frag
,
18739 /* Create an absolute valued symbol, so we have something to
18740 refer to in the object file. Unfortunately for us, gas's
18741 generic expression parsing will already have folded out
18742 any use of .set foo/.type foo %function that may have
18743 been used to set type information of the target location,
18744 that's being specified symbolically. We have to presume
18745 the user knows what they are doing. */
18749 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
18751 symbol
= symbol_find_or_make (name
);
18752 S_SET_SEGMENT (symbol
, absolute_section
);
18753 symbol_set_frag (symbol
, &zero_address_frag
);
18754 S_SET_VALUE (symbol
, exp
->X_add_number
);
18755 exp
->X_op
= O_symbol
;
18756 exp
->X_add_symbol
= symbol
;
18757 exp
->X_add_number
= 0;
18763 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
18764 (enum bfd_reloc_code_real
) reloc
);
18768 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
18769 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
18773 /* Mark whether the fix is to a THUMB instruction, or an ARM
18775 new_fix
->tc_fix_data
= thumb_mode
;
18778 /* Create a frg for an instruction requiring relaxation. */
18780 output_relax_insn (void)
18786 /* The size of the instruction is unknown, so tie the debug info to the
18787 start of the instruction. */
18788 dwarf2_emit_insn (0);
18790 switch (inst
.relocs
[0].exp
.X_op
)
18793 sym
= inst
.relocs
[0].exp
.X_add_symbol
;
18794 offset
= inst
.relocs
[0].exp
.X_add_number
;
18798 offset
= inst
.relocs
[0].exp
.X_add_number
;
18801 sym
= make_expr_symbol (&inst
.relocs
[0].exp
);
18805 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
18806 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
18807 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
18810 /* Write a 32-bit thumb instruction to buf. */
18812 put_thumb32_insn (char * buf
, unsigned long insn
)
18814 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
18815 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
18819 output_inst (const char * str
)
18825 as_bad ("%s -- `%s'", inst
.error
, str
);
18830 output_relax_insn ();
18833 if (inst
.size
== 0)
18836 to
= frag_more (inst
.size
);
18837 /* PR 9814: Record the thumb mode into the current frag so that we know
18838 what type of NOP padding to use, if necessary. We override any previous
18839 setting so that if the mode has changed then the NOPS that we use will
18840 match the encoding of the last instruction in the frag. */
18841 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
18843 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
18845 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
18846 put_thumb32_insn (to
, inst
.instruction
);
18848 else if (inst
.size
> INSN_SIZE
)
18850 gas_assert (inst
.size
== (2 * INSN_SIZE
));
18851 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
18852 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
18855 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
18858 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
18860 if (inst
.relocs
[r
].type
!= BFD_RELOC_UNUSED
)
18861 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
18862 inst
.size
, & inst
.relocs
[r
].exp
, inst
.relocs
[r
].pc_rel
,
18863 inst
.relocs
[r
].type
);
18866 dwarf2_emit_insn (inst
.size
);
18870 output_it_inst (int cond
, int mask
, char * to
)
18872 unsigned long instruction
= 0xbf00;
18875 instruction
|= mask
;
18876 instruction
|= cond
<< 4;
18880 to
= frag_more (2);
18882 dwarf2_emit_insn (2);
18886 md_number_to_chars (to
, instruction
, 2);
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a scalar
			   conditional suffix, others place 0xF where the
			   condition field would be, others take a vector
			   conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
18926 /* Subroutine of md_assemble, responsible for looking up the primary
18927 opcode from the mnemonic the user wrote. STR points to the
18928 beginning of the mnemonic.
18930 This is not simply a hash table lookup, because of conditional
18931 variants. Most instructions have conditional variants, which are
18932 expressed with a _conditional affix_ to the mnemonic. If we were
18933 to encode each conditional variant as a literal string in the opcode
18934 table, it would have approximately 20,000 entries.
18936 Most mnemonics take this affix as a suffix, and in unified syntax,
18937 'most' is upgraded to 'all'. However, in the divided syntax, some
18938 instructions take the affix as an infix, notably the s-variants of
18939 the arithmetic instructions. Of those instructions, all but six
18940 have the infix appear after the third character of the mnemonic.
18942 Accordingly, the algorithm for looking up primary opcodes given
18945 1. Look up the identifier in the opcode table.
18946 If we find a match, go to step U.
18948 2. Look up the last two characters of the identifier in the
18949 conditions table. If we find a match, look up the first N-2
18950 characters of the identifier in the opcode table. If we
18951 find a match, go to step CE.
18953 3. Look up the fourth and fifth characters of the identifier in
18954 the conditions table. If we find a match, extract those
18955 characters from the identifier, and look up the remaining
18956 characters in the opcode table. If we find a match, go
18961 U. Examine the tag field of the opcode structure, in case this is
18962 one of the six instructions with its conditional infix in an
18963 unusual place. If it is, the tag tells us where to find the
18964 infix; look it up in the conditions table and set inst.cond
18965 accordingly. Otherwise, this is an unconditional instruction.
18966 Again set inst.cond accordingly. Return the opcode structure.
18968 CE. Examine the tag field to make sure this is an instruction that
18969 should receive a conditional suffix. If it is not, fail.
18970 Otherwise, set inst.cond from the suffix we already looked up,
18971 and return the opcode structure.
18973 CM. Examine the tag field to make sure this is an instruction that
18974 should receive a conditional infix after the third character.
18975 If it is not, fail. Otherwise, undo the edits to the current
18976 line of input and proceed as for case CE. */
18978 static const struct asm_opcode
*
18979 opcode_lookup (char **str
)
18983 const struct asm_opcode
*opcode
;
18984 const struct asm_cond
*cond
;
18987 /* Scan up to the end of the mnemonic, which must end in white space,
18988 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
18989 for (base
= end
= *str
; *end
!= '\0'; end
++)
18990 if (*end
== ' ' || *end
== '.')
18996 /* Handle a possible width suffix and/or Neon type suffix. */
19001 /* The .w and .n suffixes are only valid if the unified syntax is in
19003 if (unified_syntax
&& end
[1] == 'w')
19005 else if (unified_syntax
&& end
[1] == 'n')
19010 inst
.vectype
.elems
= 0;
19012 *str
= end
+ offset
;
19014 if (end
[offset
] == '.')
19016 /* See if we have a Neon type suffix (possible in either unified or
19017 non-unified ARM syntax mode). */
19018 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
19021 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
19027 /* Look for unaffixed or special-case affixed mnemonic. */
19028 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19033 if (opcode
->tag
< OT_odd_infix_0
)
19035 inst
.cond
= COND_ALWAYS
;
19039 if (warn_on_deprecated
&& unified_syntax
)
19040 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
19041 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
19042 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
19045 inst
.cond
= cond
->value
;
19048 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
19050 /* Cannot have a conditional suffix on a mnemonic of less than a character.
19052 if (end
- base
< 2)
19055 cond
= (const struct asm_cond
*) hash_find_n (arm_vcond_hsh
, affix
, 1);
19056 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19058 /* If this opcode can not be vector predicated then don't accept it with a
19059 vector predication code. */
19060 if (opcode
&& !opcode
->mayBeVecPred
)
19063 if (!opcode
|| !cond
)
19065 /* Cannot have a conditional suffix on a mnemonic of less than two
19067 if (end
- base
< 3)
19070 /* Look for suffixed mnemonic. */
19072 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
19073 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19077 if (opcode
&& cond
)
19080 switch (opcode
->tag
)
19082 case OT_cinfix3_legacy
:
19083 /* Ignore conditional suffixes matched on infix only mnemonics. */
19087 case OT_cinfix3_deprecated
:
19088 case OT_odd_infix_unc
:
19089 if (!unified_syntax
)
19091 /* Fall through. */
19095 case OT_csuf_or_in3
:
19096 inst
.cond
= cond
->value
;
19099 case OT_unconditional
:
19100 case OT_unconditionalF
:
19102 inst
.cond
= cond
->value
;
19105 /* Delayed diagnostic. */
19106 inst
.error
= BAD_COND
;
19107 inst
.cond
= COND_ALWAYS
;
19116 /* Cannot have a usual-position infix on a mnemonic of less than
19117 six characters (five would be a suffix). */
19118 if (end
- base
< 6)
19121 /* Look for infixed mnemonic in the usual position. */
19123 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
19127 memcpy (save
, affix
, 2);
19128 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
19129 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19131 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
19132 memcpy (affix
, save
, 2);
19135 && (opcode
->tag
== OT_cinfix3
19136 || opcode
->tag
== OT_cinfix3_deprecated
19137 || opcode
->tag
== OT_csuf_or_in3
19138 || opcode
->tag
== OT_cinfix3_legacy
))
19141 if (warn_on_deprecated
&& unified_syntax
19142 && (opcode
->tag
== OT_cinfix3
19143 || opcode
->tag
== OT_cinfix3_deprecated
))
19144 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
19146 inst
.cond
= cond
->value
;
19153 /* This function generates an initial IT instruction, leaving its block
19154 virtually open for the new instructions. Eventually,
19155 the mask will be updated by now_pred_add_mask () each time
19156 a new instruction needs to be included in the IT block.
19157 Finally, the block is closed with close_automatic_it_block ().
19158 The block closure can be requested either from md_assemble (),
19159 a tencode (), or due to a label hook. */
19162 new_automatic_it_block (int cond
)
19164 now_pred
.state
= AUTOMATIC_PRED_BLOCK
;
19165 now_pred
.mask
= 0x18;
19166 now_pred
.cc
= cond
;
19167 now_pred
.block_length
= 1;
19168 mapping_state (MAP_THUMB
);
19169 now_pred
.insn
= output_it_inst (cond
, now_pred
.mask
, NULL
);
19170 now_pred
.warn_deprecated
= FALSE
;
19171 now_pred
.insn_cond
= TRUE
;
19174 /* Close an automatic IT block.
19175 See comments in new_automatic_it_block (). */
19178 close_automatic_it_block (void)
19180 now_pred
.mask
= 0x10;
19181 now_pred
.block_length
= 0;
19184 /* Update the mask of the current automatically-generated IT
19185 instruction. See comments in new_automatic_it_block (). */
19188 now_pred_add_mask (int cond
)
19190 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
19191 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
19192 | ((bitvalue) << (nbit)))
19193 const int resulting_bit
= (cond
& 1);
19195 now_pred
.mask
&= 0xf;
19196 now_pred
.mask
= SET_BIT_VALUE (now_pred
.mask
,
19198 (5 - now_pred
.block_length
));
19199 now_pred
.mask
= SET_BIT_VALUE (now_pred
.mask
,
19201 ((5 - now_pred
.block_length
) - 1));
19202 output_it_inst (now_pred
.cc
, now_pred
.mask
, now_pred
.insn
);
19205 #undef SET_BIT_VALUE
19208 /* The IT blocks handling machinery is accessed through the these functions:
19209 it_fsm_pre_encode () from md_assemble ()
19210 set_pred_insn_type () optional, from the tencode functions
19211 set_pred_insn_type_last () ditto
19212 in_pred_block () ditto
19213 it_fsm_post_encode () from md_assemble ()
19214 force_automatic_it_block_close () from label handling functions
19217 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
19218 initializing the IT insn type with a generic initial value depending
19219 on the inst.condition.
19220 2) During the tencode function, two things may happen:
19221 a) The tencode function overrides the IT insn type by
19222 calling either set_pred_insn_type (type) or
19223 set_pred_insn_type_last ().
19224 b) The tencode function queries the IT block state by
19225 calling in_pred_block () (i.e. to determine narrow/not narrow mode).
19227 Both set_pred_insn_type and in_pred_block run the internal FSM state
19228 handling function (handle_pred_state), because: a) setting the IT insn
19229 type may incur in an invalid state (exiting the function),
19230 and b) querying the state requires the FSM to be updated.
19231 Specifically we want to avoid creating an IT block for conditional
19232 branches, so it_fsm_pre_encode is actually a guess and we can't
19233 determine whether an IT block is required until the tencode () routine
19234 has decided what type of instruction this actually is.
19235 Because of this, if set_pred_insn_type and in_pred_block have to be
19236 used, set_pred_insn_type has to be called first.
19238 set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
19239 that determines the insn IT type depending on the inst.cond code.
19240 When a tencode () routine encodes an instruction that can be
19241 either outside an IT block, or, in the case of being inside, has to be
19242 the last one, set_pred_insn_type_last () will determine the proper
19243 IT instruction type based on the inst.cond code. Otherwise,
19244 set_pred_insn_type can be called for overriding that logic or
19245 for covering other cases.
19247 Calling handle_pred_state () may not transition the IT block state to
19248 OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
19249 still queried. Instead, if the FSM determines that the state should
19250 be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
19251 after the tencode () function: that's what it_fsm_post_encode () does.
19253 Since in_pred_block () calls the state handling function to get an
19254 updated state, an error may occur (due to invalid insns combination).
19255 In that case, inst.error is set.
19256 Therefore, inst.error has to be checked after the execution of
19257 the tencode () routine.
19259 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
19260 any pending state change (if any) that didn't take place in
19261 handle_pred_state () as explained above. */
19264 it_fsm_pre_encode (void)
19266 if (inst
.cond
!= COND_ALWAYS
)
19267 inst
.pred_insn_type
= INSIDE_IT_INSN
;
19269 inst
.pred_insn_type
= OUTSIDE_PRED_INSN
;
19271 now_pred
.state_handled
= 0;
19274 /* IT state FSM handling function. */
19275 /* MVE instructions and non-MVE instructions are handled differently because of
19276 the introduction of VPT blocks.
19277 Specifications say that any non-MVE instruction inside a VPT block is
19278 UNPREDICTABLE, with the exception of the BKPT instruction. Whereas most MVE
19279 instructions are deemed to be UNPREDICTABLE if inside an IT block. For the
19280 few exceptions this will be handled at their respective handler functions.
19281 The error messages provided depending on the different combinations possible
19282 are described in the cases below:
19283 For 'most' MVE instructions:
19284 1) In an IT block, with an IT code: syntax error
19285 2) In an IT block, with a VPT code: error: must be in a VPT block
19286 3) In an IT block, with no code: warning: UNPREDICTABLE
19287 4) In a VPT block, with an IT code: syntax error
19288 5) In a VPT block, with a VPT code: OK!
19289 6) In a VPT block, with no code: error: missing code
19290 7) Outside a pred block, with an IT code: error: syntax error
19291 8) Outside a pred block, with a VPT code: error: should be in a VPT block
19292 9) Outside a pred block, with no code: OK!
19293 For non-MVE instructions:
19294 10) In an IT block, with an IT code: OK!
19295 11) In an IT block, with a VPT code: syntax error
19296 12) In an IT block, with no code: error: missing code
19297 13) In a VPT block, with an IT code: error: should be in an IT block
19298 14) In a VPT block, with a VPT code: syntax error
19299 15) In a VPT block, with no code: UNPREDICTABLE
19300 16) Outside a pred block, with an IT code: error: should be in an IT block
19301 17) Outside a pred block, with a VPT code: syntax error
19302 18) Outside a pred block, with no code: OK!
19307 handle_pred_state (void)
19309 now_pred
.state_handled
= 1;
19310 now_pred
.insn_cond
= FALSE
;
19312 switch (now_pred
.state
)
19314 case OUTSIDE_PRED_BLOCK
:
19315 switch (inst
.pred_insn_type
)
19317 case MVE_OUTSIDE_PRED_INSN
:
19318 if (inst
.cond
< COND_ALWAYS
)
19320 /* Case 7: Outside a pred block, with an IT code: error: syntax
19322 inst
.error
= BAD_SYNTAX
;
19325 /* Case 9: Outside a pred block, with no code: OK! */
19327 case OUTSIDE_PRED_INSN
:
19328 if (inst
.cond
> COND_ALWAYS
)
19330 /* Case 17: Outside a pred block, with a VPT code: syntax error.
19332 inst
.error
= BAD_SYNTAX
;
19335 /* Case 18: Outside a pred block, with no code: OK! */
19338 case INSIDE_VPT_INSN
:
19339 /* Case 8: Outside a pred block, with a VPT code: error: should be in
19341 inst
.error
= BAD_OUT_VPT
;
19344 case INSIDE_IT_INSN
:
19345 case INSIDE_IT_LAST_INSN
:
19346 if (inst
.cond
< COND_ALWAYS
)
19348 /* Case 16: Outside a pred block, with an IT code: error: should
19349 be in an IT block. */
19350 if (thumb_mode
== 0)
19353 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
19354 as_tsktsk (_("Warning: conditional outside an IT block"\
19359 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
19360 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
19362 /* Automatically generate the IT instruction. */
19363 new_automatic_it_block (inst
.cond
);
19364 if (inst
.pred_insn_type
== INSIDE_IT_LAST_INSN
)
19365 close_automatic_it_block ();
19369 inst
.error
= BAD_OUT_IT
;
19375 else if (inst
.cond
> COND_ALWAYS
)
19377 /* Case 17: Outside a pred block, with a VPT code: syntax error.
19379 inst
.error
= BAD_SYNTAX
;
19384 case IF_INSIDE_IT_LAST_INSN
:
19385 case NEUTRAL_IT_INSN
:
19389 if (inst
.cond
!= COND_ALWAYS
)
19390 first_error (BAD_SYNTAX
);
19391 now_pred
.state
= MANUAL_PRED_BLOCK
;
19392 now_pred
.block_length
= 0;
19393 now_pred
.type
= VECTOR_PRED
;
19397 now_pred
.state
= MANUAL_PRED_BLOCK
;
19398 now_pred
.block_length
= 0;
19399 now_pred
.type
= SCALAR_PRED
;
19404 case AUTOMATIC_PRED_BLOCK
:
19405 /* Three things may happen now:
19406 a) We should increment current it block size;
19407 b) We should close current it block (closing insn or 4 insns);
19408 c) We should close current it block and start a new one (due
19409 to incompatible conditions or
19410 4 insns-length block reached). */
19412 switch (inst
.pred_insn_type
)
19414 case INSIDE_VPT_INSN
:
19416 case MVE_OUTSIDE_PRED_INSN
:
19418 case OUTSIDE_PRED_INSN
:
19419 /* The closure of the block shall happen immediately,
19420 so any in_pred_block () call reports the block as closed. */
19421 force_automatic_it_block_close ();
19424 case INSIDE_IT_INSN
:
19425 case INSIDE_IT_LAST_INSN
:
19426 case IF_INSIDE_IT_LAST_INSN
:
19427 now_pred
.block_length
++;
19429 if (now_pred
.block_length
> 4
19430 || !now_pred_compatible (inst
.cond
))
19432 force_automatic_it_block_close ();
19433 if (inst
.pred_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
19434 new_automatic_it_block (inst
.cond
);
19438 now_pred
.insn_cond
= TRUE
;
19439 now_pred_add_mask (inst
.cond
);
19442 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
19443 && (inst
.pred_insn_type
== INSIDE_IT_LAST_INSN
19444 || inst
.pred_insn_type
== IF_INSIDE_IT_LAST_INSN
))
19445 close_automatic_it_block ();
19448 case NEUTRAL_IT_INSN
:
19449 now_pred
.block_length
++;
19450 now_pred
.insn_cond
= TRUE
;
19452 if (now_pred
.block_length
> 4)
19453 force_automatic_it_block_close ();
19455 now_pred_add_mask (now_pred
.cc
& 1);
19459 close_automatic_it_block ();
19460 now_pred
.state
= MANUAL_PRED_BLOCK
;
19465 case MANUAL_PRED_BLOCK
:
19468 if (now_pred
.type
== SCALAR_PRED
)
19470 /* Check conditional suffixes. */
19471 cond
= now_pred
.cc
^ ((now_pred
.mask
>> 4) & 1) ^ 1;
19472 now_pred
.mask
<<= 1;
19473 now_pred
.mask
&= 0x1f;
19474 is_last
= (now_pred
.mask
== 0x10);
19478 now_pred
.cc
^= (now_pred
.mask
>> 4);
19479 cond
= now_pred
.cc
+ 0xf;
19480 now_pred
.mask
<<= 1;
19481 now_pred
.mask
&= 0x1f;
19482 is_last
= now_pred
.mask
== 0x10;
19484 now_pred
.insn_cond
= TRUE
;
19486 switch (inst
.pred_insn_type
)
19488 case OUTSIDE_PRED_INSN
:
19489 if (now_pred
.type
== SCALAR_PRED
)
19491 if (inst
.cond
== COND_ALWAYS
)
19493 /* Case 12: In an IT block, with no code: error: missing
19495 inst
.error
= BAD_NOT_IT
;
19498 else if (inst
.cond
> COND_ALWAYS
)
19500 /* Case 11: In an IT block, with a VPT code: syntax error.
19502 inst
.error
= BAD_SYNTAX
;
19505 else if (thumb_mode
)
19507 /* This is for some special cases where a non-MVE
19508 instruction is not allowed in an IT block, such as cbz,
19509 but are put into one with a condition code.
19510 You could argue this should be a syntax error, but we
19511 gave the 'not allowed in IT block' diagnostic in the
19512 past so we will keep doing so. */
19513 inst
.error
= BAD_NOT_IT
;
19520 /* Case 15: In a VPT block, with no code: UNPREDICTABLE. */
19521 as_tsktsk (MVE_NOT_VPT
);
19524 case MVE_OUTSIDE_PRED_INSN
:
19525 if (now_pred
.type
== SCALAR_PRED
)
19527 if (inst
.cond
== COND_ALWAYS
)
19529 /* Case 3: In an IT block, with no code: warning:
19531 as_tsktsk (MVE_NOT_IT
);
19534 else if (inst
.cond
< COND_ALWAYS
)
19536 /* Case 1: In an IT block, with an IT code: syntax error.
19538 inst
.error
= BAD_SYNTAX
;
19546 if (inst
.cond
< COND_ALWAYS
)
19548 /* Case 4: In a VPT block, with an IT code: syntax error.
19550 inst
.error
= BAD_SYNTAX
;
19553 else if (inst
.cond
== COND_ALWAYS
)
19555 /* Case 6: In a VPT block, with no code: error: missing
19557 inst
.error
= BAD_NOT_VPT
;
19565 case INSIDE_IT_INSN
:
19566 if (inst
.cond
> COND_ALWAYS
)
19568 /* Case 11: In an IT block, with a VPT code: syntax error. */
19569 /* Case 14: In a VPT block, with a VPT code: syntax error. */
19570 inst
.error
= BAD_SYNTAX
;
19573 else if (now_pred
.type
== SCALAR_PRED
)
19575 /* Case 10: In an IT block, with an IT code: OK! */
19576 if (cond
!= inst
.cond
)
19578 inst
.error
= now_pred
.type
== SCALAR_PRED
? BAD_IT_COND
:
19585 /* Case 13: In a VPT block, with an IT code: error: should be
19587 inst
.error
= BAD_OUT_IT
;
19592 case INSIDE_VPT_INSN
:
19593 if (now_pred
.type
== SCALAR_PRED
)
19595 /* Case 2: In an IT block, with a VPT code: error: must be in a
19597 inst
.error
= BAD_OUT_VPT
;
19600 /* Case 5: In a VPT block, with a VPT code: OK! */
19601 else if (cond
!= inst
.cond
)
19603 inst
.error
= BAD_VPT_COND
;
19607 case INSIDE_IT_LAST_INSN
:
19608 case IF_INSIDE_IT_LAST_INSN
:
19609 if (now_pred
.type
== VECTOR_PRED
|| inst
.cond
> COND_ALWAYS
)
19611 /* Case 4: In a VPT block, with an IT code: syntax error. */
19612 /* Case 11: In an IT block, with a VPT code: syntax error. */
19613 inst
.error
= BAD_SYNTAX
;
19616 else if (cond
!= inst
.cond
)
19618 inst
.error
= BAD_IT_COND
;
19623 inst
.error
= BAD_BRANCH
;
19628 case NEUTRAL_IT_INSN
:
19629 /* The BKPT instruction is unconditional even in a IT or VPT
19634 if (now_pred
.type
== SCALAR_PRED
)
19636 inst
.error
= BAD_IT_IT
;
19639 /* fall through. */
19641 if (inst
.cond
== COND_ALWAYS
)
19643 /* Executing a VPT/VPST instruction inside an IT block or a
19644 VPT/VPST/IT instruction inside a VPT block is UNPREDICTABLE.
19646 if (now_pred
.type
== SCALAR_PRED
)
19647 as_tsktsk (MVE_NOT_IT
);
19649 as_tsktsk (MVE_NOT_VPT
);
19654 /* VPT/VPST do not accept condition codes. */
19655 inst
.error
= BAD_SYNTAX
;
19666 struct depr_insn_mask
19668 unsigned long pattern
;
19669 unsigned long mask
;
19670 const char* description
;
19673 /* List of 16-bit instruction patterns deprecated in an IT block in
19675 static const struct depr_insn_mask depr_it_insns
[] = {
19676 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
19677 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
19678 { 0xa000, 0xb800, N_("ADR") },
19679 { 0x4800, 0xf800, N_("Literal loads") },
19680 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
19681 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
19682 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
19683 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
19684 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
/* Post-encoding step of the IT/VPT finite-state machine: run after an
   instruction has been encoded.  Ensures the predication state was
   handled, emits ARMv8-A/-R performance-deprecation warnings for
   instructions inside IT blocks, and closes the block when its last
   slot (mask == 0x10) has been consumed.
   NOTE(review): structural lines (braces, `else`, `++p`, `break`) were
   dropped by extraction and are reconstructed here from the visible
   token stream — confirm against the pristine source.  */
static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_pred.state_handled)
    handle_pred_state ();

  /* Deprecation warnings only apply to A/R-profile ARMv8 (arm_ext_v8
     present, arm_ext_m absent), for conditional insns not yet warned
     about in this block.  */
  if (now_pred.insn_cond
      && !now_pred.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
      && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
    {
      if (inst.instruction >= 0x10000)
	{
	  /* Any 32-bit Thumb encoding in an IT block is deprecated.  */
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		       "performance deprecated in ARMv8-A and ARMv8-R"));
	  now_pred.warn_deprecated = TRUE;
	}
      else
	{
	  /* 16-bit encodings: check against the deprecated-pattern
	     table (zero mask terminates).  */
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb "
			       "instructions of the following class are "
			       "performance deprecated in ARMv8-A and "
			       "ARMv8-R: %s"), p->description);
		  now_pred.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      /* More than one conditional instruction per IT block is also
	 deprecated.  */
      if (now_pred.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		       "instruction are performance deprecated in ARMv8-A and "
		       "ARMv8-R"));
	  now_pred.warn_deprecated = TRUE;
	}
    }

  /* mask == 0x10 means the last slot of the block was just used.  */
  is_last = (now_pred.mask == 0x10);
  if (is_last)
    {
      now_pred.state = OUTSIDE_PRED_BLOCK;
      now_pred.mask = 0;
    }
}
/* If an automatically-opened IT block is in progress, close it now and
   return the predication FSM to the outside-block state.  Called e.g.
   when a label is defined (arm_frob_label), since a branch target must
   not land mid-IT-block.  */
static void
force_automatic_it_block_close (void)
{
  if (now_pred.state == AUTOMATIC_PRED_BLOCK)
    {
      close_automatic_it_block ();
      now_pred.state = OUTSIDE_PRED_BLOCK;
    }
}
/* Return nonzero if assembly is currently inside an IT/VPT predication
   block.  Forces any pending predication-state transition first so the
   answer reflects the instruction just processed.  */
static int
in_pred_block (void)
{
  if (!now_pred.state_handled)
    handle_pred_state ();

  return now_pred.state != OUTSIDE_PRED_BLOCK;
}
/* Whether OPCODE only has T32 encoding.  Since this function is only used by
   t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
   here, hence the "known" in the function name.
   NOTE(review): the `return TRUE;` / `return FALSE;` statements were
   dropped by extraction and are reconstructed — each feature test is a
   "wide-only" predicate, so a match must return TRUE.  */
static bfd_boolean
known_t32_only_insn (const struct asm_opcode *opcode)
{
  /* Original Thumb-1 wide instruction.  */
  if (opcode->tencode == do_t_blx
      || opcode->tencode == do_t_branch23
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
    return TRUE;

  /* Wide-only instruction added to ARMv8-M Baseline.  */
  if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
    return TRUE;

  return FALSE;
}
/* Whether wide instruction variant can be used if available for a valid OPCODE
   in ARCH.
   NOTE(review): dropped `return TRUE;`/`return FALSE;` lines are
   reconstructed from the surrounding comments and visible conditions.  */
static bfd_boolean
t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
{
  if (known_t32_only_insn (opcode))
    return TRUE;

  /* Instruction with narrow and wide encoding added to ARMv8-M.  Availability
     of variant T3 of B.W is checked in do_t_branch.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
      && opcode->tencode == do_t_branch)
    return TRUE;

  /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
      && opcode->tencode == do_t_mov_cmp
      /* Make sure CMP instruction is not affected.  */
      && opcode->aencode == do_mov)
    return TRUE;

  /* Wide instruction variants of all instructions with narrow *and* wide
     variants become available with ARMv6t2.  Other opcodes are either
     narrow-only or wide-only and are thus available if OPCODE is valid.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
    return TRUE;

  /* OPCODE with narrow only instruction variant or wide variant not
     available.  */
  return FALSE;
}
/* Main entry point for assembling one ARM/Thumb instruction.  STR is
   the source line (mnemonic + operands).  Looks the mnemonic up,
   validates it against the selected CPU/architecture and current
   instruction-set state (thumb_mode), parses operands, runs the
   per-opcode encode function bracketed by the IT/VPT FSM hooks, and
   records which architecture features the instruction used.
   NOTE(review): a number of lines (declarations, braces, `else`
   branches, `return` statements, the trailing output call) were
   dropped by extraction; this reconstruction follows the visible
   tokens and the standard structure of this function — verify against
   the pristine source before relying on it.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;
  int r;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction scratch state.  */
  memset (&inst, '\0', sizeof (inst));
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the pred_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
    }
  output_inst (str);
}
/* Warn (at end of assembly) about IT/VPT blocks left open by the user.
   For ELF the per-section predication state is checked; otherwise only
   the global state.
   NOTE(review): the #ifdef OBJ_ELF / #else / #endif scaffolding and
   the sect->name arguments were dropped by extraction and are
   reconstructed — the two as_warn format strings each take a section
   name ('%s'), which only the per-section loop can supply.  */
static void
check_pred_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_pred.state
	== MANUAL_PRED_BLOCK)
      {
	if (now_pred.type == SCALAR_PRED)
	  as_warn (_("section '%s' finished with an open IT block."),
		   sect->name);
	else
	  as_warn (_("section '%s' finished with an open VPT/VPST block."),
		   sect->name);
      }
#else
  if (now_pred.state == MANUAL_PRED_BLOCK)
    {
      if (now_pred.type == SCALAR_PRED)
	as_warn (_("file finished with an open IT block."));
      else
	as_warn (_("file finished with an open VPT/VPST block."));
    }
#endif
}
/* Various frobbings of labels and their addresses.  */

/* Called at the start of each input line; forget the label seen on the
   previous line so md_assemble only re-aligns labels defined on the
   current line.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
/* Called whenever a label SYM is defined.  Records it for later
   alignment fixups, tags it with the current Thumb/interwork state,
   closes any automatically-opened IT block (a branch target must not
   land inside one), marks non-local code labels as Thumb functions
   when .thumb_func is pending, and emits DWARF line info.
   NOTE(review): bracing and part of the worked example in the long
   comment were dropped by extraction and are reconstructed.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */
      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
20113 arm_data_in_code (void)
20115 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
20117 *input_line_pointer
= '/';
20118 input_line_pointer
+= 5;
20119 *input_line_pointer
= 0;
/* Strip a trailing "/data" suffix (added by the data-in-code marker
   handling above) from symbol NAME when assembling Thumb code, by
   truncating the string in place.  Returns NAME.  */
char *
arm_canonicalize_symbol_name (char * name)
{
  int len;

  if (thumb_mode && (len = strlen (name)) > 5
      && streq (name + len - 5, "/data"))
    *(name + len - 5) = 0;

  return name;
}
20138 /* Table of all register names defined by default. The user can
20139 define additional names with .req. Note that all register names
20140 should appear in both upper and lowercase variants. Some registers
20141 also have mixed-case names. */
20143 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
20144 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
20145 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
20146 #define REGSET(p,t) \
20147 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
20148 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
20149 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
20150 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
20151 #define REGSETH(p,t) \
20152 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
20153 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
20154 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
20155 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
20156 #define REGSET2(p,t) \
20157 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
20158 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
20159 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
20160 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
20161 #define SPLRBANK(base,bank,t) \
20162 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
20163 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
20164 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
20165 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
20166 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
20167 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
20169 static const struct reg_entry reg_names
[] =
20171 /* ARM integer registers. */
20172 REGSET(r
, RN
), REGSET(R
, RN
),
20174 /* ATPCS synonyms. */
20175 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
20176 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
20177 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
20179 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
20180 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
20181 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
20183 /* Well-known aliases. */
20184 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
20185 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
20187 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
20188 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
20190 /* Coprocessor numbers. */
20191 REGSET(p
, CP
), REGSET(P
, CP
),
20193 /* Coprocessor register numbers. The "cr" variants are for backward
20195 REGSET(c
, CN
), REGSET(C
, CN
),
20196 REGSET(cr
, CN
), REGSET(CR
, CN
),
20198 /* ARM banked registers. */
20199 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
20200 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
20201 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
20202 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
20203 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
20204 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
20205 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
20207 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
20208 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
20209 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
20210 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
20211 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
20212 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
20213 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
20214 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
20216 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
20217 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
20218 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
20219 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
20220 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
20221 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
20222 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
20223 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
20224 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
20226 /* FPA registers. */
20227 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
20228 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
20230 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
20231 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
20233 /* VFP SP registers. */
20234 REGSET(s
,VFS
), REGSET(S
,VFS
),
20235 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
20237 /* VFP DP Registers. */
20238 REGSET(d
,VFD
), REGSET(D
,VFD
),
20239 /* Extra Neon DP registers. */
20240 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
20242 /* Neon QP registers. */
20243 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
20245 /* VFP control registers. */
20246 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
20247 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
20248 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
20249 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
20250 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
20251 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
20252 REGDEF(mvfr2
,5,VFC
), REGDEF(MVFR2
,5,VFC
),
20254 /* Maverick DSP coprocessor registers. */
20255 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
20256 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
20258 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
20259 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
20260 REGDEF(dspsc
,0,DSPSC
),
20262 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
20263 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
20264 REGDEF(DSPSC
,0,DSPSC
),
20266 /* iWMMXt data registers - p0, c0-15. */
20267 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
20269 /* iWMMXt control registers - p1, c0-3. */
20270 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
20271 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
20272 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
20273 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
20275 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
20276 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
20277 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
20278 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
20279 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
20281 /* XScale accumulator registers. */
20282 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
20288 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
20289 within psr_required_here. */
20290 static const struct asm_psr psrs
[] =
20292 /* Backward compatibility notation. Note that "all" is no longer
20293 truly all possible PSR bits. */
20294 {"all", PSR_c
| PSR_f
},
20298 /* Individual flags. */
20304 /* Combinations of flags. */
20305 {"fs", PSR_f
| PSR_s
},
20306 {"fx", PSR_f
| PSR_x
},
20307 {"fc", PSR_f
| PSR_c
},
20308 {"sf", PSR_s
| PSR_f
},
20309 {"sx", PSR_s
| PSR_x
},
20310 {"sc", PSR_s
| PSR_c
},
20311 {"xf", PSR_x
| PSR_f
},
20312 {"xs", PSR_x
| PSR_s
},
20313 {"xc", PSR_x
| PSR_c
},
20314 {"cf", PSR_c
| PSR_f
},
20315 {"cs", PSR_c
| PSR_s
},
20316 {"cx", PSR_c
| PSR_x
},
20317 {"fsx", PSR_f
| PSR_s
| PSR_x
},
20318 {"fsc", PSR_f
| PSR_s
| PSR_c
},
20319 {"fxs", PSR_f
| PSR_x
| PSR_s
},
20320 {"fxc", PSR_f
| PSR_x
| PSR_c
},
20321 {"fcs", PSR_f
| PSR_c
| PSR_s
},
20322 {"fcx", PSR_f
| PSR_c
| PSR_x
},
20323 {"sfx", PSR_s
| PSR_f
| PSR_x
},
20324 {"sfc", PSR_s
| PSR_f
| PSR_c
},
20325 {"sxf", PSR_s
| PSR_x
| PSR_f
},
20326 {"sxc", PSR_s
| PSR_x
| PSR_c
},
20327 {"scf", PSR_s
| PSR_c
| PSR_f
},
20328 {"scx", PSR_s
| PSR_c
| PSR_x
},
20329 {"xfs", PSR_x
| PSR_f
| PSR_s
},
20330 {"xfc", PSR_x
| PSR_f
| PSR_c
},
20331 {"xsf", PSR_x
| PSR_s
| PSR_f
},
20332 {"xsc", PSR_x
| PSR_s
| PSR_c
},
20333 {"xcf", PSR_x
| PSR_c
| PSR_f
},
20334 {"xcs", PSR_x
| PSR_c
| PSR_s
},
20335 {"cfs", PSR_c
| PSR_f
| PSR_s
},
20336 {"cfx", PSR_c
| PSR_f
| PSR_x
},
20337 {"csf", PSR_c
| PSR_s
| PSR_f
},
20338 {"csx", PSR_c
| PSR_s
| PSR_x
},
20339 {"cxf", PSR_c
| PSR_x
| PSR_f
},
20340 {"cxs", PSR_c
| PSR_x
| PSR_s
},
20341 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
20342 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
20343 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
20344 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
20345 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
20346 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
20347 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
20348 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
20349 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
20350 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
20351 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
20352 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
20353 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
20354 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
20355 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
20356 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
20357 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
20358 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
20359 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
20360 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
20361 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
20362 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
20363 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
20364 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
/* Table of V7M psr names.  Values are the SYSm encodings used by
   MRS/MSR on M-profile; the 0x80 bit selects the Non-secure alias
   (_NS names) — presumably for ARMv8-M Security Extensions, confirm
   against the architecture manual.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	 0x0 }, {"APSR",	 0x0 },
  {"iapsr",	 0x1 }, {"IAPSR",	 0x1 },
  {"eapsr",	 0x2 }, {"EAPSR",	 0x2 },
  {"psr",	 0x3 }, {"PSR",		 0x3 },
  {"xpsr",	 0x3 }, {"XPSR",	 0x3 }, {"xPSR", 3 },
  {"ipsr",	 0x5 }, {"IPSR",	 0x5 },
  {"epsr",	 0x6 }, {"EPSR",	 0x6 },
  {"iepsr",	 0x7 }, {"IEPSR",	 0x7 },
  {"msp",	 0x8 }, {"MSP",		 0x8 },
  {"psp",	 0x9 }, {"PSP",		 0x9 },
  {"msplim",	 0xa }, {"MSPLIM",	 0xa },
  {"psplim",	 0xb }, {"PSPLIM",	 0xb },
  {"primask",	 0x10}, {"PRIMASK",	 0x10},
  {"basepri",	 0x11}, {"BASEPRI",	 0x11},
  {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
  {"faultmask",	 0x13}, {"FAULTMASK",	 0x13},
  {"control",	 0x14}, {"CONTROL",	 0x14},
  {"msp_ns",	 0x88}, {"MSP_NS",	 0x88},
  {"psp_ns",	 0x89}, {"PSP_NS",	 0x89},
  {"msplim_ns",	 0x8a}, {"MSPLIM_NS",	 0x8a},
  {"psplim_ns",	 0x8b}, {"PSPLIM_NS",	 0x8b},
  {"primask_ns", 0x90}, {"PRIMASK_NS",	 0x90},
  {"basepri_ns", 0x91}, {"BASEPRI_NS",	 0x91},
  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
  {"control_ns", 0x94}, {"CONTROL_NS",	 0x94},
  {"sp_ns",	 0x98}, {"SP_NS",	 0x98 }
};
/* Table of all shift-in-operand names.  Both cases are listed; "asl"
   is accepted as a synonym for LSL.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
/* Table of all explicit relocation names, mapping the operator text
   accepted in assembly source (e.g. ":got:") to the BFD relocation it
   selects.  Every lowercase name is paired with its exact uppercase
   form.  Fix: the uppercase partner of "gottpoff_fdpic" was misspelled
   "GOTTPOFF_FDIC"; the correct "GOTTPOFF_FDPIC" is added and the old
   misspelling is retained as a deprecated alias so existing sources
   still assemble.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
	{ "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
  { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
	{ "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
	{ "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
  { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC },
	{ "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
  { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
	{ "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
  { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC },
	{ "GOTTPOFF_FDPIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
	/* Deprecated: historical misspelling of the entry above.  */
	{ "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
};
#endif
20443 /* Table of all conditional affixes. */
20444 static const struct asm_cond conds
[] =
20448 {"cs", 0x2}, {"hs", 0x2},
20449 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
20462 static const struct asm_cond vconds
[] =
/* Helper: expand to a pair of table entries, one for the lowercase and
   one for the uppercase spelling of a barrier option, sharing the same
   encoding CODE and feature requirement FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of barrier option names (DMB/DSB option field encodings).
   Load-only variants (ld/ishld/nshld/oshld) require ARMv8; the rest
   are available with the basic barrier extension.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
20494 /* Table of ARM-format instructions. */
20496 /* Macros for gluing together operand strings. N.B. In all cases
20497 other than OPS0, the trailing OP_stop comes from default
20498 zero-initialization of the unspecified elements of the array. */
20499 #define OPS0() { OP_stop, }
20500 #define OPS1(a) { OP_##a, }
20501 #define OPS2(a,b) { OP_##a,OP_##b, }
20502 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
20503 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
20504 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
20505 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
20507 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
20508 This is useful when mixing operands for ARM and THUMB, i.e. using the
20509 MIX_ARM_THUMB_OPERANDS macro.
20510 In order to use these macros, prefix the number of operands with _
20512 #define OPS_1(a) { a, }
20513 #define OPS_2(a,b) { a,b, }
20514 #define OPS_3(a,b,c) { a,b,c, }
20515 #define OPS_4(a,b,c,d) { a,b,c,d, }
20516 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
20517 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
20519 /* These macros abstract out the exact format of the mnemonic table and
20520 save some repeated characters. */
20522 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
20523 #define TxCE(mnem, op, top, nops, ops, ae, te) \
20524 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
20525 THUMB_VARIANT, do_##ae, do_##te, 0 }
20527 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
20528 a T_MNEM_xyz enumerator. */
20529 #define TCE(mnem, aop, top, nops, ops, ae, te) \
20530 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
20531 #define tCE(mnem, aop, top, nops, ops, ae, te) \
20532 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
20534 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
20535 infix after the third character. */
20536 #define TxC3(mnem, op, top, nops, ops, ae, te) \
20537 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
20538 THUMB_VARIANT, do_##ae, do_##te, 0 }
20539 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
20540 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
20541 THUMB_VARIANT, do_##ae, do_##te, 0 }
20542 #define TC3(mnem, aop, top, nops, ops, ae, te) \
20543 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
20544 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
20545 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
20546 #define tC3(mnem, aop, top, nops, ops, ae, te) \
20547 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
20548 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
20549 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
20551 /* Mnemonic that cannot be conditionalized. The ARM condition-code
20552 field is still 0xE. Many of the Thumb variants can be executed
20553 conditionally, so this is checked separately. */
20554 #define TUE(mnem, op, top, nops, ops, ae, te) \
20555 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
20556 THUMB_VARIANT, do_##ae, do_##te, 0 }
20558 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
20559 Used by mnemonics that have very minimal differences in the encoding for
20560 ARM and Thumb variants and can be handled in a common function. */
20561 #define TUEc(mnem, op, top, nops, ops, en) \
20562 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
20563 THUMB_VARIANT, do_##en, do_##en, 0 }
20565 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
20566 condition code field. */
20567 #define TUF(mnem, op, top, nops, ops, ae, te) \
20568 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
20569 THUMB_VARIANT, do_##ae, do_##te, 0 }
20571 /* ARM-only variants of all the above. */
20572 #define CE(mnem, op, nops, ops, ae) \
20573 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
20575 #define C3(mnem, op, nops, ops, ae) \
20576 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
20578 /* Thumb-only variants of TCE and TUE. */
20579 #define ToC(mnem, top, nops, ops, te) \
20580 { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
20583 #define ToU(mnem, top, nops, ops, te) \
20584 { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
20587 /* T_MNEM_xyz enumerator variants of ToC. */
20588 #define toC(mnem, top, nops, ops, te) \
20589 { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
20592 /* T_MNEM_xyz enumerator variants of ToU. */
20593 #define toU(mnem, top, nops, ops, te) \
20594 { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
20597 /* Legacy mnemonics that always have conditional infix after the third
20599 #define CL(mnem, op, nops, ops, ae) \
20600 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
20601 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2: the
   Thumb opcode is the ARM opcode with 0xE in the condition field.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }
/* Helper for CM below: glues mnemonic head M1, condition M2 and tail M3
   into one table name.  sizeof (#M2) == 1 means M2 expanded to nothing
   (the empty, unconditional entry); otherwise record where the condition
   infix sits (OT_odd_infix_0 + strlen of the head).  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Expand one entry per condition code (plus the bare form) for mnemonics
   whose condition infix sits at an odd position.  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1,   , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)
/* ARM-only, unconditional (no condition suffix parsed).  */
#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* ARM-only, condition field forced to 0xF.  */
#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }
/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  TAG selects the suffix style; MVE_P marks MVE predicability.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)
/* Neon insn with conditional suffix for the ARM version, overloaded types
   (opcodes are N_MNEM enumerators resolved via neon_enc_tab).  */
#define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)
/* MVE-predicated entry (mve_p = 1) whose opcodes are M_MNEM enumerators.  */
#define mCEF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }
/* nCEF but for MVE predicated instructions.  */
#define mnCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* nCE but for MVE predicated instructions.  */
#define mnCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)
/* NUF but for potentially MVE predicated instructions.  */
#define MNUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nUF but for potentially MVE predicated instructions.  */
#define mnUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }
20712 /* ToC but for potentially MVE predicated instructions. */
20713 #define mToC(mnem, top, nops, ops, te) \
20714 { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
/* NCE but for MVE predicated instructions.  */
#define MNCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NCEF but for MVE predicated instructions.  */
#define MNCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)
20726 static const struct asm_opcode insns
[] =
20728 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
20729 #define THUMB_VARIANT & arm_ext_v4t
20730 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20731 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20732 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20733 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20734 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
20735 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
20736 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
20737 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
20738 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20739 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20740 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20741 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20742 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20743 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20744 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20745 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20747 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
20748 for setting PSR flag bits. They are obsolete in V6 and do not
20749 have Thumb equivalents. */
20750 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20751 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20752 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
20753 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
20754 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
20755 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
20756 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20757 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20758 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
20760 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
20761 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
20762 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
20763 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
20765 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
20766 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
20767 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
20769 OP_ADDRGLDR
),ldst
, t_ldst
),
20770 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
20772 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20773 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20774 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20775 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20776 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20777 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20779 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
20780 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
20783 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
20784 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
20785 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
20786 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
20788 /* Thumb-compatibility pseudo ops. */
20789 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20790 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20791 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20792 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20793 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20794 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20795 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20796 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20797 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
20798 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
20799 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
20800 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
20802 /* These may simplify to neg. */
20803 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
20804 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
20806 #undef THUMB_VARIANT
20807 #define THUMB_VARIANT & arm_ext_os
20809 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
20810 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
20812 #undef THUMB_VARIANT
20813 #define THUMB_VARIANT & arm_ext_v6
20815 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
20817 /* V1 instructions with no Thumb analogue prior to V6T2. */
20818 #undef THUMB_VARIANT
20819 #define THUMB_VARIANT & arm_ext_v6t2
20821 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20822 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20823 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
20825 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
20826 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
20827 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
20828 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
20830 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20831 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20833 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20834 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20836 /* V1 instructions with no Thumb analogue at all. */
20837 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
20838 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
20840 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
20841 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
20842 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
20843 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
20844 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
20845 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
20846 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
20847 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
20850 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
20851 #undef THUMB_VARIANT
20852 #define THUMB_VARIANT & arm_ext_v4t
20854 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
20855 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
20857 #undef THUMB_VARIANT
20858 #define THUMB_VARIANT & arm_ext_v6t2
20860 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
20861 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
20863 /* Generic coprocessor instructions. */
20864 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
20865 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20866 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20867 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20868 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20869 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20870 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20873 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
20875 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
20876 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
20879 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
20880 #undef THUMB_VARIANT
20881 #define THUMB_VARIANT & arm_ext_msr
20883 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
20884 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
20887 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
20888 #undef THUMB_VARIANT
20889 #define THUMB_VARIANT & arm_ext_v6t2
20891 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20892 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20893 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20894 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20895 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20896 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20897 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20898 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20901 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
20902 #undef THUMB_VARIANT
20903 #define THUMB_VARIANT & arm_ext_v4t
20905 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20906 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20907 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20908 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20909 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20910 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20913 #define ARM_VARIANT & arm_ext_v4t_5
20915 /* ARM Architecture 4T. */
20916 /* Note: bx (and blx) are required on V5, even if the processor does
20917 not support Thumb. */
20918 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
20921 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
20922 #undef THUMB_VARIANT
20923 #define THUMB_VARIANT & arm_ext_v5t
20925 /* Note: blx has 2 variants; the .value coded here is for
20926 BLX(2). Only this variant has conditional execution. */
20927 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
20928 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
20930 #undef THUMB_VARIANT
20931 #define THUMB_VARIANT & arm_ext_v6t2
20933 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
20934 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20935 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20936 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20937 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20938 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
20939 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20940 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20943 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
20944 #undef THUMB_VARIANT
20945 #define THUMB_VARIANT & arm_ext_v5exp
20947 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20948 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20949 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20950 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20952 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20953 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20955 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20956 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20957 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20958 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20960 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20961 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20962 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20963 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20965 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20966 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20968 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20969 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20970 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20971 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20974 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
20975 #undef THUMB_VARIANT
20976 #define THUMB_VARIANT & arm_ext_v6t2
20978 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
20979 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
20981 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
20982 ADDRGLDRS
), ldrd
, t_ldstd
),
20984 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20985 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20988 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
20990 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
20993 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
20994 #undef THUMB_VARIANT
20995 #define THUMB_VARIANT & arm_ext_v6
20997 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
20998 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
20999 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
21000 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
21001 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
21002 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21003 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21004 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21005 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21006 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
21008 #undef THUMB_VARIANT
21009 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21011 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
21012 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
21014 #undef THUMB_VARIANT
21015 #define THUMB_VARIANT & arm_ext_v6t2
21017 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21018 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21020 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
21021 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
21023 /* ARM V6 not included in V7M. */
21024 #undef THUMB_VARIANT
21025 #define THUMB_VARIANT & arm_ext_v6_notm
21026 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
21027 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
21028 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
21029 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
21030 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
21031 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
21032 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
21033 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
21034 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
21035 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
21036 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
21037 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
21038 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
21039 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
21040 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
21041 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
21042 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
21043 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
21044 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
21046 /* ARM V6 not included in V7M (eg. integer SIMD). */
21047 #undef THUMB_VARIANT
21048 #define THUMB_VARIANT & arm_ext_v6_dsp
21049 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
21050 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
21051 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21052 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21053 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21054 /* Old name for QASX. */
21055 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21056 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21057 /* Old name for QSAX. */
21058 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21059 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21060 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21061 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21062 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21063 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21064 /* Old name for SASX. */
21065 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21066 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21067 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21068 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21069 /* Old name for SHASX. */
21070 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21071 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21072 /* Old name for SHSAX. */
21073 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21074 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21075 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21076 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21077 /* Old name for SSAX. */
21078 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21079 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21080 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21081 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21082 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21083 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21084 /* Old name for UASX. */
21085 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21086 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21087 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21088 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21089 /* Old name for UHASX. */
21090 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21091 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21092 /* Old name for UHSAX. */
21093 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21094 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21095 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21096 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21097 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21098 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21099 /* Old name for UQASX. */
21100 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21101 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21102 /* Old name for UQSAX. */
21103 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21104 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21105 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21106 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21107 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21108 /* Old name for USAX. */
21109 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21110 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21111 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21112 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21113 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21114 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21115 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21116 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21117 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21118 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21119 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21120 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21121 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21122 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21123 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21124 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21125 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21126 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21127 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21128 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21129 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21130 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21131 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21132 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21133 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21134 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21135 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21136 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21137 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21138 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
21139 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
21140 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21141 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21142 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
21145 #define ARM_VARIANT & arm_ext_v6k_v6t2
21146 #undef THUMB_VARIANT
21147 #define THUMB_VARIANT & arm_ext_v6k_v6t2
21149 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
21150 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
21151 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
21152 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
21154 #undef THUMB_VARIANT
21155 #define THUMB_VARIANT & arm_ext_v6_notm
21156 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
21158 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
21159 RRnpcb
), strexd
, t_strexd
),
21161 #undef THUMB_VARIANT
21162 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21163 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
21165 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
21167 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
21169 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
21171 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
21174 #define ARM_VARIANT & arm_ext_sec
21175 #undef THUMB_VARIANT
21176 #define THUMB_VARIANT & arm_ext_sec
21178 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
21181 #define ARM_VARIANT & arm_ext_virt
21182 #undef THUMB_VARIANT
21183 #define THUMB_VARIANT & arm_ext_virt
21185 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
21186 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
21189 #define ARM_VARIANT & arm_ext_pan
21190 #undef THUMB_VARIANT
21191 #define THUMB_VARIANT & arm_ext_pan
21193 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
21196 #define ARM_VARIANT & arm_ext_v6t2
21197 #undef THUMB_VARIANT
21198 #define THUMB_VARIANT & arm_ext_v6t2
21200 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
21201 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
21202 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
21203 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
21205 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
21206 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
21208 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21209 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21210 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21211 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21214 #define ARM_VARIANT & arm_ext_v3
21215 #undef THUMB_VARIANT
21216 #define THUMB_VARIANT & arm_ext_v6t2
21218 TUE("csdb", 320f014
, f3af8014
, 0, (), noargs
, t_csdb
),
21219 TUF("ssbb", 57ff040
, f3bf8f40
, 0, (), noargs
, t_csdb
),
21220 TUF("pssbb", 57ff044
, f3bf8f44
, 0, (), noargs
, t_csdb
),
21223 #define ARM_VARIANT & arm_ext_v6t2
21224 #undef THUMB_VARIANT
21225 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21226 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
21227 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
21229 /* Thumb-only instructions. */
21231 #define ARM_VARIANT NULL
21232 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
21233 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
21235 /* ARM does not really have an IT instruction, so always allow it.
21236 The opcode is copied from Thumb in order to allow warnings in
21237 -mimplicit-it=[never | arm] modes. */
21239 #define ARM_VARIANT & arm_ext_v1
21240 #undef THUMB_VARIANT
21241 #define THUMB_VARIANT & arm_ext_v6t2
21243 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
21244 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
21245 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
21246 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
21247 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
21248 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
21249 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
21250 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
21251 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
21252 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
21253 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
21254 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
21255 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
21256 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
21257 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
21258 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
21259 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
21260 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
21262 /* Thumb2 only instructions. */
21264 #define ARM_VARIANT NULL
21266 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
21267 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
21268 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
21269 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
21270 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
21271 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
21273 /* Hardware division instructions. */
21275 #define ARM_VARIANT & arm_ext_adiv
21276 #undef THUMB_VARIANT
21277 #define THUMB_VARIANT & arm_ext_div
21279 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
21280 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
21282 /* ARM V6M/V7 instructions. */
21284 #define ARM_VARIANT & arm_ext_barrier
21285 #undef THUMB_VARIANT
21286 #define THUMB_VARIANT & arm_ext_barrier
21288 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
21289 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
21290 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
21292 /* ARM V7 instructions. */
21294 #define ARM_VARIANT & arm_ext_v7
21295 #undef THUMB_VARIANT
21296 #define THUMB_VARIANT & arm_ext_v7
21298 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
21299 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
21302 #define ARM_VARIANT & arm_ext_mp
21303 #undef THUMB_VARIANT
21304 #define THUMB_VARIANT & arm_ext_mp
21306 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
21308 /* AArchv8 instructions. */
21310 #define ARM_VARIANT & arm_ext_v8
21312 /* Instructions shared between armv8-a and armv8-m. */
21313 #undef THUMB_VARIANT
21314 #define THUMB_VARIANT & arm_ext_atomics
21316 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21317 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21318 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21319 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
21320 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
21321 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
21322 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21323 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
21324 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21325 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
21327 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
21329 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
21331 #undef THUMB_VARIANT
21332 #define THUMB_VARIANT & arm_ext_v8
21334 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
21335 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
21337 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
21340 /* Defined in V8 but is in undefined encoding space for earlier
21341 architectures. However earlier architectures are required to treat
21342 this instuction as a semihosting trap as well. Hence while not explicitly
21343 defined as such, it is in fact correct to define the instruction for all
21345 #undef THUMB_VARIANT
21346 #define THUMB_VARIANT & arm_ext_v1
21348 #define ARM_VARIANT & arm_ext_v1
21349 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
21351 /* ARMv8 T32 only. */
21353 #define ARM_VARIANT NULL
21354 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
21355 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
21356 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
21358 /* FP for ARMv8. */
21360 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
21361 #undef THUMB_VARIANT
21362 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
21364 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21365 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21366 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21367 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21368 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
21369 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
21370 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
21371 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
21372 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
21373 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
21374 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
21375 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
21376 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
21377 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
21378 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
21379 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
21380 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
21382 /* Crypto v1 extensions. */
21384 #define ARM_VARIANT & fpu_crypto_ext_armv8
21385 #undef THUMB_VARIANT
21386 #define THUMB_VARIANT & fpu_crypto_ext_armv8
21388 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
21389 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
21390 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
21391 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
21392 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
21393 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
21394 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
21395 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
21396 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
21397 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
21398 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
21399 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
21400 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
21401 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
21404 #define ARM_VARIANT & crc_ext_armv8
21405 #undef THUMB_VARIANT
21406 #define THUMB_VARIANT & crc_ext_armv8
21407 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
21408 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
21409 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
21410 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
21411 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
21412 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
21414 /* ARMv8.2 RAS extension. */
21416 #define ARM_VARIANT & arm_ext_ras
21417 #undef THUMB_VARIANT
21418 #define THUMB_VARIANT & arm_ext_ras
21419 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
21422 #define ARM_VARIANT & arm_ext_v8_3
21423 #undef THUMB_VARIANT
21424 #define THUMB_VARIANT & arm_ext_v8_3
21425 NCE (vjcvt
, eb90bc0
, 2, (RVS
, RVD
), vjcvt
),
21426 NUF (vcmla
, 0, 4, (RNDQ
, RNDQ
, RNDQ_RNSC
, EXPi
), vcmla
),
21427 NUF (vcadd
, 0, 4, (RNDQ
, RNDQ
, RNDQ
, EXPi
), vcadd
),
21430 #define ARM_VARIANT & fpu_neon_ext_dotprod
21431 #undef THUMB_VARIANT
21432 #define THUMB_VARIANT & fpu_neon_ext_dotprod
21433 NUF (vsdot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_s
),
21434 NUF (vudot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_u
),
21437 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
21438 #undef THUMB_VARIANT
21439 #define THUMB_VARIANT NULL
21441 cCE("wfs", e200110
, 1, (RR
), rd
),
21442 cCE("rfs", e300110
, 1, (RR
), rd
),
21443 cCE("wfc", e400110
, 1, (RR
), rd
),
21444 cCE("rfc", e500110
, 1, (RR
), rd
),
21446 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21447 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21448 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21449 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21451 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21452 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21453 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21454 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21456 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
21457 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
21458 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
21459 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
21460 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
21461 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
21462 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
21463 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
21464 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
21465 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
21466 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
21467 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
21469 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
21470 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
21471 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
21472 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
21473 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
21474 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
21475 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
21476 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
21477 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
21478 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
21479 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
21480 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
21482 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
21483 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
21484 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
21485 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
21486 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
21487 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
21488 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
21489 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
21490 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
21491 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
21492 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
21493 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
21495 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
21496 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
21497 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
21498 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
21499 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
21500 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
21501 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
21502 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
21503 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
21504 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
21505 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
21506 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
21508 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
21509 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
21510 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
21511 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
21512 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
21513 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
21514 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
21515 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
21516 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
21517 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
21518 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
21519 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
21521 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
21522 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
21523 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
21524 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
21525 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
21526 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
21527 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
21528 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
21529 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
21530 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
21531 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
21532 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
21534 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
21535 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
21536 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
21537 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
21538 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
21539 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
21540 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
21541 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
21542 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
21543 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
21544 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
21545 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
21547 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
21548 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
21549 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
21550 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
21551 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
21552 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
21553 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
21554 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
21555 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
21556 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
21557 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
21558 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
21560 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
21561 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
21562 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
21563 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
21564 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
21565 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
21566 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
21567 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
21568 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
21569 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
21570 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
21571 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
21573 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
21574 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
21575 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
21576 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
21577 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
21578 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
21579 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
21580 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
21581 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
21582 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
21583 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
21584 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
21586 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
21587 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
21588 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
21589 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
21590 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
21591 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
21592 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
21593 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
21594 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
21595 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
21596 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
21597 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
21599 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
21600 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
21601 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
21602 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
21603 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
21604 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
21605 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
21606 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
21607 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
21608 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
21609 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
21610 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
21612 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
21613 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
21614 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
21615 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
21616 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
21617 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
21618 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
21619 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
21620 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
21621 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
21622 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
21623 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
21625 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
21626 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
21627 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
21628 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
21629 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
21630 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
21631 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
21632 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
21633 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
21634 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
21635 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
21636 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
21638 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
21639 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
21640 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
21641 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
21642 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
21643 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
21644 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
21645 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
21646 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
21647 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
21648 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
21649 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
21651 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
21652 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
21653 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
21654 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
21655 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
21656 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
21657 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
21658 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
21659 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
21660 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
21661 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
21662 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
21664 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21665 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21666 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21667 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21668 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21669 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21670 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21671 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21672 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21673 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21674 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21675 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21677 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21678 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21679 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21680 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21681 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21682 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21683 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21684 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21685 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21686 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21687 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21688 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21690 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21691 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21692 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21693 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21694 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21695 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21696 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21697 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21698 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21699 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21700 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21701 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21703 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21704 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21705 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21706 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21707 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21708 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21709 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21710 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21711 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21712 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21713 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21714 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21716 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21717 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21718 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21719 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21720 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21721 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21722 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21723 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21724 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21725 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21726 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21727 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21729 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21730 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21731 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21732 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21733 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21734 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21735 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21736 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21737 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21738 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21739 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21740 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21742 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21743 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21744 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21745 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21746 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21747 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21748 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21749 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21750 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21751 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21752 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21753 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21755 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21756 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21757 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21758 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21759 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21760 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21761 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21762 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21763 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21764 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21765 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21766 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21768 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21769 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21770 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21771 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21772 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21773 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21774 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21775 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21776 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21777 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21778 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21779 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21781 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21782 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21783 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21784 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21785 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21786 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21787 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21788 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21789 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21790 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21791 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21792 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21794 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21795 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21796 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21797 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21798 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21799 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21800 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21801 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21802 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21803 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21804 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21805 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21807 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21808 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21809 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21810 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21811 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21812 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21813 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21814 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21815 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21816 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21817 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21818 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21820 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21821 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21822 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21823 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21824 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21825 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21826 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21827 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21828 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21829 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21830 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21831 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21833 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21834 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21835 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21836 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21838 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
21839 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
21840 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
21841 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
21842 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
21843 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
21844 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
21845 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
21846 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
21847 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
21848 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
21849 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
21851 /* The implementation of the FIX instruction is broken on some
21852 assemblers, in that it accepts a precision specifier as well as a
21853 rounding specifier, despite the fact that this is meaningless.
21854 To be more compatible, we accept it as well, though of course it
21855 does not set any bits. */
21856 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
21857 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
21858 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
21859 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
21860 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
21861 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
21862 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
21863 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
21864 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
21865 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
21866 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
21867 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
21868 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
21870 /* Instructions that were new with the real FPA, call them V2. */
21872 #define ARM_VARIANT & fpu_fpa_ext_v2
21874 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21875 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21876 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21877 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21878 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21879 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21882 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
21884 /* Moves and type conversions. */
21885 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21886 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
21887 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
21888 cCE("fmstat", ef1fa10
, 0, (), noargs
),
21889 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
21890 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
21891 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21892 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21893 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21894 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21895 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21896 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21897 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
21898 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
21900 /* Memory operations. */
21901 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
21902 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
21903 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21904 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21905 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21906 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21907 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21908 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21909 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21910 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21911 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21912 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21913 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21914 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21915 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21916 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21917 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21918 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21920 /* Monadic operations. */
21921 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21922 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21923 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21925 /* Dyadic operations. */
21926 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21927 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21928 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21929 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21930 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21931 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21932 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21933 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21934 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21937 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21938 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
21939 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21940 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
21942 /* Double precision load/store are still present on single precision
21943 implementations. */
21944 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
21945 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
21946 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21947 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21948 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21949 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21950 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21951 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21952 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21953 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21956 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
21958 /* Moves and type conversions. */
21959 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21960 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
21961 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21962 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
21963 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
21964 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
21965 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
21966 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
21967 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
21968 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21969 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21970 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21971 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21973 /* Monadic operations. */
21974 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21975 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21976 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21978 /* Dyadic operations. */
21979 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21980 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21981 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21982 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21983 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21984 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21985 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21986 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21987 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21990 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21991 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
21992 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21993 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
21996 #define ARM_VARIANT & fpu_vfp_ext_v2
21998 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
21999 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
22000 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
22001 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
22003 /* Instructions which may belong to either the Neon or VFP instruction sets.
22004 Individual encoder functions perform additional architecture checks. */
22006 #define ARM_VARIANT & fpu_vfp_ext_v1xd
22007 #undef THUMB_VARIANT
22008 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
22010 /* These mnemonics are unique to VFP. */
22011 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
22012 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
22013 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22014 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22015 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22016 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
22017 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
22018 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
22019 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
22020 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
22022 /* Mnemonics shared by Neon and VFP. */
22023 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
22024 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
22025 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
22027 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22028 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22029 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22030 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22031 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22032 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22034 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
22035 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
22036 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
22037 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
22040 /* NOTE: All VMOV encoding is special-cased! */
22041 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
22042 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
22044 #undef THUMB_VARIANT
22045 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
22046 by different feature bits. Since we are setting the Thumb guard, we can
22047 require Thumb-1 which makes it a nop guard and set the right feature bit in
22048 do_vldr_vstr (). */
22049 #define THUMB_VARIANT & arm_ext_v4t
22050 NCE(vldr
, d100b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
22051 NCE(vstr
, d000b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
22054 #define ARM_VARIANT & arm_ext_fp16
22055 #undef THUMB_VARIANT
22056 #define THUMB_VARIANT & arm_ext_fp16
22057 /* New instructions added from v8.2, allowing the extraction and insertion of
22058 the upper 16 bits of a 32-bit vector register. */
22059 NCE (vmovx
, eb00a40
, 2, (RVS
, RVS
), neon_movhf
),
22060 NCE (vins
, eb00ac0
, 2, (RVS
, RVS
), neon_movhf
),
22062 /* New backported fma/fms instructions optional in v8.2. */
22063 NCE (vfmal
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmal
),
22064 NCE (vfmsl
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmsl
),
22066 #undef THUMB_VARIANT
22067 #define THUMB_VARIANT & fpu_neon_ext_v1
22069 #define ARM_VARIANT & fpu_neon_ext_v1
22071 /* Data processing with three registers of the same length. */
22072 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
22073 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
22074 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
22075 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
22076 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
22077 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
22078 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
22079 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
22080 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
22081 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
22082 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
22083 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
22084 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
22085 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
22086 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
22087 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
22088 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
22089 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
22090 /* If not immediate, fall back to neon_dyadic_i64_su.
22091 shl_imm should accept I8 I16 I32 I64,
22092 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
22093 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
22094 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
22095 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
22096 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
22097 /* Logic ops, types optional & ignored. */
22098 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22099 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22100 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22101 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22102 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22103 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22104 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22105 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22106 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
22107 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
22108 /* Bitfield ops, untyped. */
22109 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
22110 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
22111 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
22112 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
22113 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
22114 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
22115 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
22116 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
22117 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
22118 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
22119 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
22120 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
22121 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
22122 back to neon_dyadic_if_su. */
22123 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
22124 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
22125 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
22126 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
22127 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
22128 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
22129 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
22130 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
22131 /* Comparison. Type I8 I16 I32 F32. */
22132 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
22133 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
22134 /* As above, D registers only. */
22135 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
22136 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
22137 /* Int and float variants, signedness unimportant. */
22138 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
22139 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
22140 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
22141 /* Add/sub take types I8 I16 I32 I64 F32. */
22142 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
22143 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
22144 /* vtst takes sizes 8, 16, 32. */
22145 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
22146 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
22147 /* VMUL takes I8 I16 I32 F32 P8. */
22148 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
22149 /* VQD{R}MULH takes S16 S32. */
22150 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
22151 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
22152 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
22153 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
22154 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
22155 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
22156 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
22157 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
22158 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
22159 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
22160 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
22161 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
22162 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
22163 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
22164 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
22165 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
22166 /* ARM v8.1 extension. */
22167 nUF (vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
22168 nUF (vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
22169 nUF (vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
22170 nUF (vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
22172 /* Two address, int/float. Types S8 S16 S32 F32. */
22173 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
22174 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
22176 /* Data processing with two registers and a shift amount. */
22177 /* Right shifts, and variants with rounding.
22178 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
22179 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
22180 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
22181 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
22182 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
22183 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
22184 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
22185 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
22186 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
22187 /* Shift and insert. Sizes accepted 8 16 32 64. */
22188 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
22189 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
22190 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
22191 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
22192 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
22193 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
22194 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
22195 /* Right shift immediate, saturating & narrowing, with rounding variants.
22196 Types accepted S16 S32 S64 U16 U32 U64. */
22197 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
22198 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
22199 /* As above, unsigned. Types accepted S16 S32 S64. */
22200 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
22201 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
22202 /* Right shift narrowing. Types accepted I16 I32 I64. */
22203 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
22204 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
22205 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
22206 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
22207 /* CVT with optional immediate for fixed-point variant. */
22208 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
22210 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
22211 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
22213 /* Data processing, three registers of different lengths. */
22214 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
22215 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
22216 /* If not scalar, fall back to neon_dyadic_long.
22217 Vector types as above, scalar types S16 S32 U16 U32. */
22218 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
22219 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
22220 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
22221 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
22222 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
22223 /* Dyadic, narrowing insns. Types I16 I32 I64. */
22224 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22225 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22226 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22227 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22228 /* Saturating doubling multiplies. Types S16 S32. */
22229 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
22230 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
22231 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
22232 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
22233 S16 S32 U16 U32. */
22234 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
22236 /* Extract. Size 8. */
22237 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
22238 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
22240 /* Two registers, miscellaneous. */
22241 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
22242 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
22243 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
22244 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
22245 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
22246 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
22247 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
22248 /* Vector replicate. Sizes 8 16 32. */
22249 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
22250 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
22251 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
22252 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
22253 /* VMOVN. Types I16 I32 I64. */
22254 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
22255 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
22256 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
22257 /* VQMOVUN. Types S16 S32 S64. */
22258 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
22259 /* VZIP / VUZP. Sizes 8 16 32. */
22260 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
22261 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
22262 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
22263 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
22264 /* VQABS / VQNEG. Types S8 S16 S32. */
22265 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
22266 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
22267 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
22268 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
22269 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
22270 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
22271 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
22272 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
22273 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
22274 /* Reciprocal estimates. Types U32 F16 F32. */
22275 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
22276 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
22277 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
22278 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
22279 /* VCLS. Types S8 S16 S32. */
22280 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
22281 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
22282 /* VCLZ. Types I8 I16 I32. */
22283 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
22284 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
22285 /* VCNT. Size 8. */
22286 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
22287 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
22288 /* Two address, untyped. */
22289 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
22290 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
22291 /* VTRN. Sizes 8 16 32. */
22292 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
22293 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
22295 /* Table lookup. Size 8. */
22296 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
22297 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
22299 #undef THUMB_VARIANT
22300 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
22302 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
22304 /* Neon element/structure load/store. */
22305 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22306 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22307 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22308 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22309 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22310 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22311 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22312 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22314 #undef THUMB_VARIANT
22315 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
22317 #define ARM_VARIANT & fpu_vfp_ext_v3xd
22318 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
22319 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22320 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22321 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22322 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22323 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22324 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22325 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22326 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22328 #undef THUMB_VARIANT
22329 #define THUMB_VARIANT & fpu_vfp_ext_v3
22331 #define ARM_VARIANT & fpu_vfp_ext_v3
22333 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
22334 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22335 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22336 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22337 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22338 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22339 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22340 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22341 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22344 #define ARM_VARIANT & fpu_vfp_ext_fma
22345 #undef THUMB_VARIANT
22346 #define THUMB_VARIANT & fpu_vfp_ext_fma
22347 /* Mnemonics shared by Neon and VFP. These are included in the
22348 VFP FMA variant; NEON and VFP FMA always includes the NEON
22349 FMA instructions. */
22350 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
22351 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
22352 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
22353 the v form should always be used. */
22354 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22355 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22356 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22357 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22358 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22359 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22361 #undef THUMB_VARIANT
22363 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
22365 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22366 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22367 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22368 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22369 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22370 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22371 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
22372 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
22375 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
22377 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
22378 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
22379 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
22380 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
22381 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
22382 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
22383 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
22384 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
22385 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
22386 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22387 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22388 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22389 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22390 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22391 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22392 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
22393 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
22394 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
22395 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
22396 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
22397 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22398 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22399 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22400 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22401 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22402 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22403 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
22404 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
22405 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
22406 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
22407 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
22408 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
22409 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
22410 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
22411 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22412 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22413 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22414 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22415 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22416 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22417 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22418 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22419 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22420 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22421 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22422 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22423 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
22424 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22425 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22426 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22427 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22428 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22429 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22430 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22431 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22432 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22433 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22434 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22435 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22436 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22437 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22438 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22439 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22440 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22441 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22442 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22443 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
22444 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
22445 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
22446 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
22447 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22448 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22449 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22450 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22451 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22452 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22453 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22454 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22455 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22456 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22457 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22458 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22459 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22460 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22461 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22462 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22463 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22464 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22465 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
22466 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22467 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22468 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22469 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22470 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22471 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22472 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22473 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22474 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22475 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22476 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22477 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22478 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22479 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22480 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22481 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22482 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22483 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22484 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22485 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22486 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22487 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
22488 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22489 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22490 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22491 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22492 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22493 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22494 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22495 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22496 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22497 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22498 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22499 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22500 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22501 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22502 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22503 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22504 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22505 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22506 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
22507 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
22508 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
22509 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
22510 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22511 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22512 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22513 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22514 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22515 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22516 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22517 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22518 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22519 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22520 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22521 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22522 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22523 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22524 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
22525 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22526 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22527 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22528 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22529 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22530 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22531 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22532 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22533 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
22534 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22535 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22536 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22537 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22538 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
22541 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
22543 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
22544 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
22545 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
22546 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22547 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22548 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22549 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22550 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22551 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22552 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22553 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22554 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22555 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22556 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22557 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22558 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22559 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22560 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22561 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22562 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22563 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
22564 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22565 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22566 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22567 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22568 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22569 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22570 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22571 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22572 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22573 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22574 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22575 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22576 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22577 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22578 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22579 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22580 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22581 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22582 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22583 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22584 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22585 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22586 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22587 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22588 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22589 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22590 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22591 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22592 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22593 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22594 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22595 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22596 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22597 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22598 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22599 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22602 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
22604 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
22605 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
22606 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
22607 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
22608 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
22609 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
22610 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
22611 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
22612 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
22613 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
22614 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
22615 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
22616 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
22617 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
22618 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
22619 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
22620 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
22621 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
22622 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
22623 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
22624 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
22625 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
22626 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
22627 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
22628 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
22629 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
22630 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
22631 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
22632 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
22633 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
22634 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
22635 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
22636 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
22637 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
22638 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
22639 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
22640 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
22641 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
22642 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
22643 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
22644 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
22645 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
22646 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
22647 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
22648 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
22649 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
22650 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
22651 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
22652 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
22653 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
22654 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
22655 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
22656 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
22657 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
22658 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22659 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22660 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22661 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22662 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22663 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22664 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
22665 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
22666 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
22667 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
22668 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22669 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22670 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22671 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22672 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22673 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22674 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22675 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22676 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
22677 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
22678 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
22679 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
22681 /* ARMv8.5-A instructions. */
22683 #define ARM_VARIANT & arm_ext_sb
22684 #undef THUMB_VARIANT
22685 #define THUMB_VARIANT & arm_ext_sb
22686 TUF("sb", 57ff070
, f3bf8f70
, 0, (), noargs
, noargs
),
22689 #define ARM_VARIANT & arm_ext_predres
22690 #undef THUMB_VARIANT
22691 #define THUMB_VARIANT & arm_ext_predres
22692 CE("cfprctx", e070f93
, 1, (RRnpc
), rd
),
22693 CE("dvprctx", e070fb3
, 1, (RRnpc
), rd
),
22694 CE("cpprctx", e070ff3
, 1, (RRnpc
), rd
),
22696 /* ARMv8-M instructions. */
22698 #define ARM_VARIANT NULL
22699 #undef THUMB_VARIANT
22700 #define THUMB_VARIANT & arm_ext_v8m
22701 ToU("sg", e97fe97f
, 0, (), noargs
),
22702 ToC("blxns", 4784, 1, (RRnpc
), t_blx
),
22703 ToC("bxns", 4704, 1, (RRnpc
), t_bx
),
22704 ToC("tt", e840f000
, 2, (RRnpc
, RRnpc
), tt
),
22705 ToC("ttt", e840f040
, 2, (RRnpc
, RRnpc
), tt
),
22706 ToC("tta", e840f080
, 2, (RRnpc
, RRnpc
), tt
),
22707 ToC("ttat", e840f0c0
, 2, (RRnpc
, RRnpc
), tt
),
22709 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
22710 instructions behave as nop if no VFP is present. */
22711 #undef THUMB_VARIANT
22712 #define THUMB_VARIANT & arm_ext_v8m_main
22713 ToC("vlldm", ec300a00
, 1, (RRnpc
), rn
),
22714 ToC("vlstm", ec200a00
, 1, (RRnpc
), rn
),
22716 /* Armv8.1-M Mainline instructions. */
22717 #undef THUMB_VARIANT
22718 #define THUMB_VARIANT & arm_ext_v8_1m_main
22719 toC("bf", _bf
, 2, (EXPs
, EXPs
), t_branch_future
),
22720 toU("bfcsel", _bfcsel
, 4, (EXPs
, EXPs
, EXPs
, COND
), t_branch_future
),
22721 toC("bfx", _bfx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
22722 toC("bfl", _bfl
, 2, (EXPs
, EXPs
), t_branch_future
),
22723 toC("bflx", _bflx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
22725 toU("dls", _dls
, 2, (LR
, RRnpcsp
), t_loloop
),
22726 toU("wls", _wls
, 3, (LR
, RRnpcsp
, EXP
), t_loloop
),
22727 toU("le", _le
, 2, (oLR
, EXP
), t_loloop
),
22729 ToC("clrm", e89f0000
, 1, (CLRMLST
), t_clrm
),
22730 ToC("vscclrm", ec9f0a00
, 1, (VRSDVLST
), t_vscclrm
),
22732 #undef THUMB_VARIANT
22733 #define THUMB_VARIANT & mve_ext
22734 ToC("vpst", fe710f4d
, 0, (), mve_vpt
),
22735 ToC("vpstt", fe318f4d
, 0, (), mve_vpt
),
22736 ToC("vpste", fe718f4d
, 0, (), mve_vpt
),
22737 ToC("vpsttt", fe314f4d
, 0, (), mve_vpt
),
22738 ToC("vpstte", fe31cf4d
, 0, (), mve_vpt
),
22739 ToC("vpstet", fe71cf4d
, 0, (), mve_vpt
),
22740 ToC("vpstee", fe714f4d
, 0, (), mve_vpt
),
22741 ToC("vpstttt", fe312f4d
, 0, (), mve_vpt
),
22742 ToC("vpsttte", fe316f4d
, 0, (), mve_vpt
),
22743 ToC("vpsttet", fe31ef4d
, 0, (), mve_vpt
),
22744 ToC("vpsttee", fe31af4d
, 0, (), mve_vpt
),
22745 ToC("vpstett", fe71af4d
, 0, (), mve_vpt
),
22746 ToC("vpstete", fe71ef4d
, 0, (), mve_vpt
),
22747 ToC("vpsteet", fe716f4d
, 0, (), mve_vpt
),
22748 ToC("vpsteee", fe712f4d
, 0, (), mve_vpt
),
22750 /* MVE and MVE FP only. */
22751 mCEF(vabav
, _vabav
, 3, (RRnpcsp
, RMQ
, RMQ
), mve_vabav
),
22752 mCEF(vmladav
, _vmladav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22753 mCEF(vmladava
, _vmladava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22754 mCEF(vmladavx
, _vmladavx
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22755 mCEF(vmladavax
, _vmladavax
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22756 mCEF(vmlav
, _vmladav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22757 mCEF(vmlava
, _vmladava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22758 mCEF(vmlsdav
, _vmlsdav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22759 mCEF(vmlsdava
, _vmlsdava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22760 mCEF(vmlsdavx
, _vmlsdavx
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22761 mCEF(vmlsdavax
, _vmlsdavax
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
22764 #define ARM_VARIANT & fpu_vfp_ext_v1xd
22765 #undef THUMB_VARIANT
22766 #define THUMB_VARIANT & arm_ext_v6t2
22768 mnCEF(vadd
, _vadd
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQMQR
), neon_addsub_if_i
),
22769 mnCEF(vsub
, _vsub
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQMQR
), neon_addsub_if_i
),
22771 MNCEF(vabs
, 1b10300
, 2, (RNSDQMQ
, RNSDQMQ
), neon_abs_neg
),
22772 MNCEF(vneg
, 1b10380
, 2, (RNSDQMQ
, RNSDQMQ
), neon_abs_neg
),
22775 #define ARM_VARIANT & fpu_neon_ext_v1
22776 mnUF(vabd
, _vabd
, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ
), neon_dyadic_if_su
),
22777 mnUF(vabdl
, _vabdl
, 3, (RNQMQ
, RNDMQ
, RNDMQ
), neon_dyadic_long
),
22778 mnUF(vaddl
, _vaddl
, 3, (RNQMQ
, RNDMQ
, RNDMQR
), neon_dyadic_long
),
22779 mnUF(vsubl
, _vsubl
, 3, (RNQMQ
, RNDMQ
, RNDMQR
), neon_dyadic_long
),
22782 #undef THUMB_VARIANT
22814 /* MD interface: bits in the object file. */
22816 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
22817 for use in the a.out file, and stores them in the array pointed to by buf.
22818 This knows about the endian-ness of the target machine and does
22819 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
22820 2 (short) and 4 (long) Floating numbers are put out as a series of
22821 LITTLENUMS (shorts, here at least). */
22824 md_number_to_chars (char * buf
, valueT val
, int n
)
22826 if (target_big_endian
)
22827 number_to_chars_bigendian (buf
, val
, n
);
22829 number_to_chars_littleendian (buf
, val
, n
);
22833 md_chars_to_number (char * buf
, int n
)
22836 unsigned char * where
= (unsigned char *) buf
;
22838 if (target_big_endian
)
22843 result
|= (*where
++ & 255);
22851 result
|= (where
[n
] & 255);
22858 /* MD interface: Sections. */
22860 /* Calculate the maximum variable size (i.e., excluding fr_fix)
22861 that an rs_machine_dependent frag may reach. */
22864 arm_frag_max_var (fragS
*fragp
)
22866 /* We only use rs_machine_dependent for variable-size Thumb instructions,
22867 which are either THUMB_SIZE (2) or INSN_SIZE (4).
22869 Note that we generate relaxable instructions even for cases that don't
22870 really need it, like an immediate that's a trivial constant. So we're
22871 overestimating the instruction size for some of those cases. Rather
22872 than putting more intelligence here, it would probably be better to
22873 avoid generating a relaxation frag in the first place when it can be
22874 determined up front that a short instruction will suffice. */
22876 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
22880 /* Estimate the size of a frag before relaxing. Assume everything fits in
22884 md_estimate_size_before_relax (fragS
* fragp
,
22885 segT segtype ATTRIBUTE_UNUSED
)
22891 /* Convert a machine dependent frag. */
22894 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
22896 unsigned long insn
;
22897 unsigned long old_op
;
22905 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
22907 old_op
= bfd_get_16(abfd
, buf
);
22908 if (fragp
->fr_symbol
)
22910 exp
.X_op
= O_symbol
;
22911 exp
.X_add_symbol
= fragp
->fr_symbol
;
22915 exp
.X_op
= O_constant
;
22917 exp
.X_add_number
= fragp
->fr_offset
;
22918 opcode
= fragp
->fr_subtype
;
22921 case T_MNEM_ldr_pc
:
22922 case T_MNEM_ldr_pc2
:
22923 case T_MNEM_ldr_sp
:
22924 case T_MNEM_str_sp
:
22931 if (fragp
->fr_var
== 4)
22933 insn
= THUMB_OP32 (opcode
);
22934 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
22936 insn
|= (old_op
& 0x700) << 4;
22940 insn
|= (old_op
& 7) << 12;
22941 insn
|= (old_op
& 0x38) << 13;
22943 insn
|= 0x00000c00;
22944 put_thumb32_insn (buf
, insn
);
22945 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
22949 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
22951 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
22954 if (fragp
->fr_var
== 4)
22956 insn
= THUMB_OP32 (opcode
);
22957 insn
|= (old_op
& 0xf0) << 4;
22958 put_thumb32_insn (buf
, insn
);
22959 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
22963 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
22964 exp
.X_add_number
-= 4;
22972 if (fragp
->fr_var
== 4)
22974 int r0off
= (opcode
== T_MNEM_mov
22975 || opcode
== T_MNEM_movs
) ? 0 : 8;
22976 insn
= THUMB_OP32 (opcode
);
22977 insn
= (insn
& 0xe1ffffff) | 0x10000000;
22978 insn
|= (old_op
& 0x700) << r0off
;
22979 put_thumb32_insn (buf
, insn
);
22980 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
22984 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
22989 if (fragp
->fr_var
== 4)
22991 insn
= THUMB_OP32(opcode
);
22992 put_thumb32_insn (buf
, insn
);
22993 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
22996 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
23000 if (fragp
->fr_var
== 4)
23002 insn
= THUMB_OP32(opcode
);
23003 insn
|= (old_op
& 0xf00) << 14;
23004 put_thumb32_insn (buf
, insn
);
23005 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
23008 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
23011 case T_MNEM_add_sp
:
23012 case T_MNEM_add_pc
:
23013 case T_MNEM_inc_sp
:
23014 case T_MNEM_dec_sp
:
23015 if (fragp
->fr_var
== 4)
23017 /* ??? Choose between add and addw. */
23018 insn
= THUMB_OP32 (opcode
);
23019 insn
|= (old_op
& 0xf0) << 4;
23020 put_thumb32_insn (buf
, insn
);
23021 if (opcode
== T_MNEM_add_pc
)
23022 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
23024 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
23027 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
23035 if (fragp
->fr_var
== 4)
23037 insn
= THUMB_OP32 (opcode
);
23038 insn
|= (old_op
& 0xf0) << 4;
23039 insn
|= (old_op
& 0xf) << 16;
23040 put_thumb32_insn (buf
, insn
);
23041 if (insn
& (1 << 20))
23042 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
23044 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
23047 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
23053 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
23054 (enum bfd_reloc_code_real
) reloc_type
);
23055 fixp
->fx_file
= fragp
->fr_file
;
23056 fixp
->fx_line
= fragp
->fr_line
;
23057 fragp
->fr_fix
+= fragp
->fr_var
;
23059 /* Set whether we use thumb-2 ISA based on final relaxation results. */
23060 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
23061 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
23062 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
23065 /* Return the size of a relaxable immediate operand instruction.
23066 SHIFT and SIZE specify the form of the allowable immediate. */
23068 relax_immediate (fragS
*fragp
, int size
, int shift
)
23074 /* ??? Should be able to do better than this. */
23075 if (fragp
->fr_symbol
)
23078 low
= (1 << shift
) - 1;
23079 mask
= (1 << (shift
+ size
)) - (1 << shift
);
23080 offset
= fragp
->fr_offset
;
23081 /* Force misaligned offsets to 32-bit variant. */
23084 if (offset
& ~mask
)
23089 /* Get the address of a symbol during relaxation. */
23091 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
23097 sym
= fragp
->fr_symbol
;
23098 sym_frag
= symbol_get_frag (sym
);
23099 know (S_GET_SEGMENT (sym
) != absolute_section
23100 || sym_frag
== &zero_address_frag
);
23101 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
23103 /* If frag has yet to be reached on this pass, assume it will
23104 move by STRETCH just as we did. If this is not so, it will
23105 be because some frag between grows, and that will force
23109 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
23113 /* Adjust stretch for any alignment frag. Note that if have
23114 been expanding the earlier code, the symbol may be
23115 defined in what appears to be an earlier frag. FIXME:
23116 This doesn't handle the fr_subtype field, which specifies
23117 a maximum number of bytes to skip when doing an
23119 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
23121 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
23124 stretch
= - ((- stretch
)
23125 & ~ ((1 << (int) f
->fr_offset
) - 1));
23127 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
23139 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
23142 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
23147 /* Assume worst case for symbols not known to be in the same section. */
23148 if (fragp
->fr_symbol
== NULL
23149 || !S_IS_DEFINED (fragp
->fr_symbol
)
23150 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
23151 || S_IS_WEAK (fragp
->fr_symbol
))
23154 val
= relaxed_symbol_addr (fragp
, stretch
);
23155 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
23156 addr
= (addr
+ 4) & ~3;
23157 /* Force misaligned targets to 32-bit variant. */
23161 if (val
< 0 || val
> 1020)
23166 /* Return the size of a relaxable add/sub immediate instruction. */
23168 relax_addsub (fragS
*fragp
, asection
*sec
)
23173 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
23174 op
= bfd_get_16(sec
->owner
, buf
);
23175 if ((op
& 0xf) == ((op
>> 4) & 0xf))
23176 return relax_immediate (fragp
, 8, 0);
23178 return relax_immediate (fragp
, 3, 0);
23181 /* Return TRUE iff the definition of symbol S could be pre-empted
23182 (overridden) at link or load time. */
23184 symbol_preemptible (symbolS
*s
)
23186 /* Weak symbols can always be pre-empted. */
23190 /* Non-global symbols cannot be pre-empted. */
23191 if (! S_IS_EXTERNAL (s
))
23195 /* In ELF, a global symbol can be marked protected, or private. In that
23196 case it can't be pre-empted (other definitions in the same link unit
23197 would violate the ODR). */
23198 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
23202 /* Other global symbols might be pre-empted. */
23206 /* Return the size of a relaxable branch instruction. BITS is the
23207 size of the offset field in the narrow instruction. */
23210 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
23216 /* Assume worst case for symbols not known to be in the same section. */
23217 if (!S_IS_DEFINED (fragp
->fr_symbol
)
23218 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
23219 || S_IS_WEAK (fragp
->fr_symbol
))
23223 /* A branch to a function in ARM state will require interworking. */
23224 if (S_IS_DEFINED (fragp
->fr_symbol
)
23225 && ARM_IS_FUNC (fragp
->fr_symbol
))
23229 if (symbol_preemptible (fragp
->fr_symbol
))
23232 val
= relaxed_symbol_addr (fragp
, stretch
);
23233 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
23236 /* Offset is a signed value *2 */
23238 if (val
>= limit
|| val
< -limit
)
23244 /* Relax a machine dependent frag. This returns the amount by which
23245 the current size of the frag should change. */
23248 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
23253 oldsize
= fragp
->fr_var
;
23254 switch (fragp
->fr_subtype
)
23256 case T_MNEM_ldr_pc2
:
23257 newsize
= relax_adr (fragp
, sec
, stretch
);
23259 case T_MNEM_ldr_pc
:
23260 case T_MNEM_ldr_sp
:
23261 case T_MNEM_str_sp
:
23262 newsize
= relax_immediate (fragp
, 8, 2);
23266 newsize
= relax_immediate (fragp
, 5, 2);
23270 newsize
= relax_immediate (fragp
, 5, 1);
23274 newsize
= relax_immediate (fragp
, 5, 0);
23277 newsize
= relax_adr (fragp
, sec
, stretch
);
23283 newsize
= relax_immediate (fragp
, 8, 0);
23286 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
23289 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
23291 case T_MNEM_add_sp
:
23292 case T_MNEM_add_pc
:
23293 newsize
= relax_immediate (fragp
, 8, 2);
23295 case T_MNEM_inc_sp
:
23296 case T_MNEM_dec_sp
:
23297 newsize
= relax_immediate (fragp
, 7, 2);
23303 newsize
= relax_addsub (fragp
, sec
);
23309 fragp
->fr_var
= newsize
;
23310 /* Freeze wide instructions that are at or before the same location as
23311 in the previous pass. This avoids infinite loops.
23312 Don't freeze them unconditionally because targets may be artificially
23313 misaligned by the expansion of preceding frags. */
23314 if (stretch
<= 0 && newsize
> 2)
23316 md_convert_frag (sec
->owner
, sec
, fragp
);
23320 return newsize
- oldsize
;
23323 /* Round up a section size to the appropriate boundary. */
23326 md_section_align (segT segment ATTRIBUTE_UNUSED
,
23332 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
23333 of an rs_align_code fragment. */
23336 arm_handle_align (fragS
* fragP
)
23338 static unsigned char const arm_noop
[2][2][4] =
23341 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
23342 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
23345 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
23346 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
23349 static unsigned char const thumb_noop
[2][2][2] =
23352 {0xc0, 0x46}, /* LE */
23353 {0x46, 0xc0}, /* BE */
23356 {0x00, 0xbf}, /* LE */
23357 {0xbf, 0x00} /* BE */
23360 static unsigned char const wide_thumb_noop
[2][4] =
23361 { /* Wide Thumb-2 */
23362 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
23363 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
23366 unsigned bytes
, fix
, noop_size
;
23368 const unsigned char * noop
;
23369 const unsigned char *narrow_noop
= NULL
;
23374 if (fragP
->fr_type
!= rs_align_code
)
23377 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
23378 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
23381 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
23382 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
23384 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
23386 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
23388 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
23389 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
23391 narrow_noop
= thumb_noop
[1][target_big_endian
];
23392 noop
= wide_thumb_noop
[target_big_endian
];
23395 noop
= thumb_noop
[0][target_big_endian
];
23403 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
23404 ? selected_cpu
: arm_arch_none
,
23406 [target_big_endian
];
23413 fragP
->fr_var
= noop_size
;
23415 if (bytes
& (noop_size
- 1))
23417 fix
= bytes
& (noop_size
- 1);
23419 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
23421 memset (p
, 0, fix
);
23428 if (bytes
& noop_size
)
23430 /* Insert a narrow noop. */
23431 memcpy (p
, narrow_noop
, noop_size
);
23433 bytes
-= noop_size
;
23437 /* Use wide noops for the remainder */
23441 while (bytes
>= noop_size
)
23443 memcpy (p
, noop
, noop_size
);
23445 bytes
-= noop_size
;
23449 fragP
->fr_fix
+= fix
;
23452 /* Called from md_do_align. Used to create an alignment
23453 frag in a code section. */
23456 arm_frag_align_code (int n
, int max
)
23460 /* We assume that there will never be a requirement
23461 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
23462 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
23467 _("alignments greater than %d bytes not supported in .text sections."),
23468 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
23469 as_fatal ("%s", err_msg
);
23472 p
= frag_var (rs_align_code
,
23473 MAX_MEM_FOR_RS_ALIGN_CODE
,
23475 (relax_substateT
) max
,
23482 /* Perform target specific initialisation of a frag.
23483 Note - despite the name this initialisation is not done when the frag
23484 is created, but only when its type is assigned. A frag can be created
23485 and used a long time before its type is set, so beware of assuming that
23486 this initialisation is performed first. */
23490 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
23492 /* Record whether this frag is in an ARM or a THUMB area. */
23493 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
23496 #else /* OBJ_ELF is defined. */
23498 arm_init_frag (fragS
* fragP
, int max_chars
)
23500 bfd_boolean frag_thumb_mode
;
23502 /* If the current ARM vs THUMB mode has not already
23503 been recorded into this frag then do so now. */
23504 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
23505 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
23507 /* PR 21809: Do not set a mapping state for debug sections
23508 - it just confuses other tools. */
23509 if (bfd_get_section_flags (NULL
, now_seg
) & SEC_DEBUGGING
)
23512 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
23514 /* Record a mapping symbol for alignment frags. We will delete this
23515 later if the alignment ends up empty. */
23516 switch (fragP
->fr_type
)
23519 case rs_align_test
:
23521 mapping_state_2 (MAP_DATA
, max_chars
);
23523 case rs_align_code
:
23524 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
23531 /* When we change sections we need to issue a new mapping symbol. */
23534 arm_elf_change_section (void)
23536 /* Link an unlinked unwind index table section to the .text section. */
23537 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
23538 && elf_linked_to_section (now_seg
) == NULL
)
23539 elf_linked_to_section (now_seg
) = text_section
;
23543 arm_elf_section_type (const char * str
, size_t len
)
23545 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
23546 return SHT_ARM_EXIDX
;
23551 /* Code to deal with unwinding tables. */
23553 static void add_unwind_adjustsp (offsetT
);
23555 /* Generate any deferred unwind frame offset. */
23558 flush_pending_unwind (void)
23562 offset
= unwind
.pending_offset
;
23563 unwind
.pending_offset
= 0;
23565 add_unwind_adjustsp (offset
);
23568 /* Add an opcode to this list for this function. Two-byte opcodes should
23569 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
23573 add_unwind_opcode (valueT op
, int length
)
23575 /* Add any deferred stack adjustment. */
23576 if (unwind
.pending_offset
)
23577 flush_pending_unwind ();
23579 unwind
.sp_restored
= 0;
23581 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
23583 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
23584 if (unwind
.opcodes
)
23585 unwind
.opcodes
= XRESIZEVEC (unsigned char, unwind
.opcodes
,
23586 unwind
.opcode_alloc
);
23588 unwind
.opcodes
= XNEWVEC (unsigned char, unwind
.opcode_alloc
);
23593 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
23595 unwind
.opcode_count
++;
23599 /* Add unwind opcodes to adjust the stack pointer. */
23602 add_unwind_adjustsp (offsetT offset
)
23606 if (offset
> 0x200)
23608 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
23613 /* Long form: 0xb2, uleb128. */
23614 /* This might not fit in a word so add the individual bytes,
23615 remembering the list is built in reverse order. */
23616 o
= (valueT
) ((offset
- 0x204) >> 2);
23618 add_unwind_opcode (0, 1);
23620 /* Calculate the uleb128 encoding of the offset. */
23624 bytes
[n
] = o
& 0x7f;
23630 /* Add the insn. */
23632 add_unwind_opcode (bytes
[n
- 1], 1);
23633 add_unwind_opcode (0xb2, 1);
23635 else if (offset
> 0x100)
23637 /* Two short opcodes. */
23638 add_unwind_opcode (0x3f, 1);
23639 op
= (offset
- 0x104) >> 2;
23640 add_unwind_opcode (op
, 1);
23642 else if (offset
> 0)
23644 /* Short opcode. */
23645 op
= (offset
- 4) >> 2;
23646 add_unwind_opcode (op
, 1);
23648 else if (offset
< 0)
23651 while (offset
> 0x100)
23653 add_unwind_opcode (0x7f, 1);
23656 op
= ((offset
- 4) >> 2) | 0x40;
23657 add_unwind_opcode (op
, 1);
23661 /* Finish the list of unwind opcodes for this function. */
23664 finish_unwind_opcodes (void)
23668 if (unwind
.fp_used
)
23670 /* Adjust sp as necessary. */
23671 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
23672 flush_pending_unwind ();
23674 /* After restoring sp from the frame pointer. */
23675 op
= 0x90 | unwind
.fp_reg
;
23676 add_unwind_opcode (op
, 1);
23679 flush_pending_unwind ();
23683 /* Start an exception table entry. If idx is nonzero this is an index table
23687 start_unwind_section (const segT text_seg
, int idx
)
23689 const char * text_name
;
23690 const char * prefix
;
23691 const char * prefix_once
;
23692 const char * group_name
;
23700 prefix
= ELF_STRING_ARM_unwind
;
23701 prefix_once
= ELF_STRING_ARM_unwind_once
;
23702 type
= SHT_ARM_EXIDX
;
23706 prefix
= ELF_STRING_ARM_unwind_info
;
23707 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
23708 type
= SHT_PROGBITS
;
23711 text_name
= segment_name (text_seg
);
23712 if (streq (text_name
, ".text"))
23715 if (strncmp (text_name
, ".gnu.linkonce.t.",
23716 strlen (".gnu.linkonce.t.")) == 0)
23718 prefix
= prefix_once
;
23719 text_name
+= strlen (".gnu.linkonce.t.");
23722 sec_name
= concat (prefix
, text_name
, (char *) NULL
);
23728 /* Handle COMDAT group. */
23729 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
23731 group_name
= elf_group_name (text_seg
);
23732 if (group_name
== NULL
)
23734 as_bad (_("Group section `%s' has no group signature"),
23735 segment_name (text_seg
));
23736 ignore_rest_of_line ();
23739 flags
|= SHF_GROUP
;
23743 obj_elf_change_section (sec_name
, type
, 0, flags
, 0, group_name
,
23746 /* Set the section link for index tables. */
23748 elf_linked_to_section (now_seg
) = text_seg
;
23752 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
23753 personality routine data. Returns zero, or the index table value for
23754 an inline entry. */
23757 create_unwind_entry (int have_data
)
23762 /* The current word of data. */
23764 /* The number of bytes left in this word. */
23767 finish_unwind_opcodes ();
23769 /* Remember the current text section. */
23770 unwind
.saved_seg
= now_seg
;
23771 unwind
.saved_subseg
= now_subseg
;
23773 start_unwind_section (now_seg
, 0);
23775 if (unwind
.personality_routine
== NULL
)
23777 if (unwind
.personality_index
== -2)
23780 as_bad (_("handlerdata in cantunwind frame"));
23781 return 1; /* EXIDX_CANTUNWIND. */
23784 /* Use a default personality routine if none is specified. */
23785 if (unwind
.personality_index
== -1)
23787 if (unwind
.opcode_count
> 3)
23788 unwind
.personality_index
= 1;
23790 unwind
.personality_index
= 0;
23793 /* Space for the personality routine entry. */
23794 if (unwind
.personality_index
== 0)
23796 if (unwind
.opcode_count
> 3)
23797 as_bad (_("too many unwind opcodes for personality routine 0"));
23801 /* All the data is inline in the index table. */
23804 while (unwind
.opcode_count
> 0)
23806 unwind
.opcode_count
--;
23807 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
23811 /* Pad with "finish" opcodes. */
23813 data
= (data
<< 8) | 0xb0;
23820 /* We get two opcodes "free" in the first word. */
23821 size
= unwind
.opcode_count
- 2;
23825 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
23826 if (unwind
.personality_index
!= -1)
23828 as_bad (_("attempt to recreate an unwind entry"));
23832 /* An extra byte is required for the opcode count. */
23833 size
= unwind
.opcode_count
+ 1;
23836 size
= (size
+ 3) >> 2;
23838 as_bad (_("too many unwind opcodes"));
23840 frag_align (2, 0, 0);
23841 record_alignment (now_seg
, 2);
23842 unwind
.table_entry
= expr_build_dot ();
23844 /* Allocate the table entry. */
23845 ptr
= frag_more ((size
<< 2) + 4);
23846 /* PR 13449: Zero the table entries in case some of them are not used. */
23847 memset (ptr
, 0, (size
<< 2) + 4);
23848 where
= frag_now_fix () - ((size
<< 2) + 4);
23850 switch (unwind
.personality_index
)
23853 /* ??? Should this be a PLT generating relocation? */
23854 /* Custom personality routine. */
23855 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
23856 BFD_RELOC_ARM_PREL31
);
23861 /* Set the first byte to the number of additional words. */
23862 data
= size
> 0 ? size
- 1 : 0;
23866 /* ABI defined personality routines. */
23868 /* Three opcodes bytes are packed into the first word. */
23875 /* The size and first two opcode bytes go in the first word. */
23876 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
23881 /* Should never happen. */
23885 /* Pack the opcodes into words (MSB first), reversing the list at the same
23887 while (unwind
.opcode_count
> 0)
23891 md_number_to_chars (ptr
, data
, 4);
23896 unwind
.opcode_count
--;
23898 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
23901 /* Finish off the last word. */
23904 /* Pad with "finish" opcodes. */
23906 data
= (data
<< 8) | 0xb0;
23908 md_number_to_chars (ptr
, data
, 4);
23913 /* Add an empty descriptor if there is no user-specified data. */
23914 ptr
= frag_more (4);
23915 md_number_to_chars (ptr
, 0, 4);
23922 /* Initialize the DWARF-2 unwind information for this procedure. */
23925 tc_arm_frame_initial_instructions (void)
23927 cfi_add_CFA_def_cfa (REG_SP
, 0);
23929 #endif /* OBJ_ELF */
23931 /* Convert REGNAME to a DWARF-2 register number. */
23934 tc_arm_regname_to_dw2regnum (char *regname
)
23936 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
23940 /* PR 16694: Allow VFP registers as well. */
23941 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
23945 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
23954 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
23958 exp
.X_op
= O_secrel
;
23959 exp
.X_add_symbol
= symbol
;
23960 exp
.X_add_number
= 0;
23961 emit_expr (&exp
, size
);
23965 /* MD interface: Symbol and relocation handling. */
23967 /* Return the address within the segment that a PC-relative fixup is
23968 relative to. For ARM, PC-relative fixups applied to instructions
23969 are generally relative to the location of the fixup plus 8 bytes.
23970 Thumb branches are offset by 4, and Thumb loads relative to PC
23971 require special handling. */
23974 md_pcrel_from_section (fixS
* fixP
, segT seg
)
23976 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23978 /* If this is pc-relative and we are going to emit a relocation
23979 then we just want to put out any pipeline compensation that the linker
23980 will need. Otherwise we want to use the calculated base.
23981 For WinCE we skip the bias for externals as well, since this
23982 is how the MS ARM-CE assembler behaves and we want to be compatible. */
23984 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
23985 || (arm_force_relocation (fixP
)
23987 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
23993 switch (fixP
->fx_r_type
)
23995 /* PC relative addressing on the Thumb is slightly odd as the
23996 bottom two bits of the PC are forced to zero for the
23997 calculation. This happens *after* application of the
23998 pipeline offset. However, Thumb adrl already adjusts for
23999 this, so we need not do it again. */
24000 case BFD_RELOC_ARM_THUMB_ADD
:
24003 case BFD_RELOC_ARM_THUMB_OFFSET
:
24004 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
24005 case BFD_RELOC_ARM_T32_ADD_PC12
:
24006 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
24007 return (base
+ 4) & ~3;
24009 /* Thumb branches are simply offset by +4. */
24010 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
24011 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
24012 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
24013 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
24014 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24015 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24016 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
24017 case BFD_RELOC_ARM_THUMB_BF17
:
24018 case BFD_RELOC_ARM_THUMB_BF19
:
24019 case BFD_RELOC_ARM_THUMB_BF13
:
24020 case BFD_RELOC_ARM_THUMB_LOOP12
:
24023 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24025 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24026 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24027 && ARM_IS_FUNC (fixP
->fx_addsy
)
24028 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24029 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24032 /* BLX is like branches above, but forces the low two bits of PC to
24034 case BFD_RELOC_THUMB_PCREL_BLX
:
24036 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24037 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24038 && THUMB_IS_FUNC (fixP
->fx_addsy
)
24039 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24040 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24041 return (base
+ 4) & ~3;
24043 /* ARM mode branches are offset by +8. However, the Windows CE
24044 loader expects the relocation not to take this into account. */
24045 case BFD_RELOC_ARM_PCREL_BLX
:
24047 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24048 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24049 && ARM_IS_FUNC (fixP
->fx_addsy
)
24050 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24051 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24054 case BFD_RELOC_ARM_PCREL_CALL
:
24056 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24057 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24058 && THUMB_IS_FUNC (fixP
->fx_addsy
)
24059 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24060 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24063 case BFD_RELOC_ARM_PCREL_BRANCH
:
24064 case BFD_RELOC_ARM_PCREL_JUMP
:
24065 case BFD_RELOC_ARM_PLT32
:
24067 /* When handling fixups immediately, because we have already
24068 discovered the value of a symbol, or the address of the frag involved
24069 we must account for the offset by +8, as the OS loader will never see the reloc.
24070 see fixup_segment() in write.c
24071 The S_IS_EXTERNAL test handles the case of global symbols.
24072 Those need the calculated base, not just the pipe compensation the linker will need. */
24074 && fixP
->fx_addsy
!= NULL
24075 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24076 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
24084 /* ARM mode loads relative to PC are also offset by +8. Unlike
24085 branches, the Windows CE loader *does* expect the relocation
24086 to take this into account. */
24087 case BFD_RELOC_ARM_OFFSET_IMM
:
24088 case BFD_RELOC_ARM_OFFSET_IMM8
:
24089 case BFD_RELOC_ARM_HWLITERAL
:
24090 case BFD_RELOC_ARM_LITERAL
:
24091 case BFD_RELOC_ARM_CP_OFF_IMM
:
24095 /* Other PC-relative relocations are un-offset. */
24101 static bfd_boolean flag_warn_syms
= TRUE
;
24104 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
24106 /* PR 18347 - Warn if the user attempts to create a symbol with the same
24107 name as an ARM instruction. Whilst strictly speaking it is allowed, it
24108 does mean that the resulting code might be very confusing to the reader.
24109 Also this warning can be triggered if the user omits an operand before
24110 an immediate address, eg:
24114 GAS treats this as an assignment of the value of the symbol foo to a
24115 symbol LDR, and so (without this code) it will not issue any kind of
24116 warning or error message.
24118 Note - ARM instructions are case-insensitive but the strings in the hash
24119 table are all stored in lower case, so we must first ensure that name is
24121 if (flag_warn_syms
&& arm_ops_hsh
)
24123 char * nbuf
= strdup (name
);
24126 for (p
= nbuf
; *p
; p
++)
24128 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
24130 static struct hash_control
* already_warned
= NULL
;
24132 if (already_warned
== NULL
)
24133 already_warned
= hash_new ();
24134 /* Only warn about the symbol once. To keep the code
24135 simple we let hash_insert do the lookup for us. */
24136 if (hash_insert (already_warned
, nbuf
, NULL
) == NULL
)
24137 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
24146 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
24147 Otherwise we have no need to default values of symbols. */
24150 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
24153 if (name
[0] == '_' && name
[1] == 'G'
24154 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
24158 if (symbol_find (name
))
24159 as_bad (_("GOT already in the symbol table"));
24161 GOT_symbol
= symbol_new (name
, undefined_section
,
24162 (valueT
) 0, & zero_address_frag
);
24172 /* Subroutine of md_apply_fix. Check to see if an immediate can be
24173 computed as two separate immediate values, added together. We
24174 already know that this value cannot be computed by just one ARM
24177 static unsigned int
24178 validate_immediate_twopart (unsigned int val
,
24179 unsigned int * highpart
)
24184 for (i
= 0; i
< 32; i
+= 2)
24185 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
24191 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
24193 else if (a
& 0xff0000)
24195 if (a
& 0xff000000)
24197 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
24201 gas_assert (a
& 0xff000000);
24202 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
24205 return (a
& 0xff) | (i
<< 7);
24212 validate_offset_imm (unsigned int val
, int hwse
)
24214 if ((hwse
&& val
> 255) || val
> 4095)
24219 /* Subroutine of md_apply_fix. Do those data_ops which can take a
24220 negative immediate constant by altering the instruction. A bit of
24225 by inverting the second operand, and
24228 by negating the second operand. */
24231 negate_data_op (unsigned long * instruction
,
24232 unsigned long value
)
24235 unsigned long negated
, inverted
;
24237 negated
= encode_arm_immediate (-value
);
24238 inverted
= encode_arm_immediate (~value
);
24240 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
24243 /* First negates. */
24244 case OPCODE_SUB
: /* ADD <-> SUB */
24245 new_inst
= OPCODE_ADD
;
24250 new_inst
= OPCODE_SUB
;
24254 case OPCODE_CMP
: /* CMP <-> CMN */
24255 new_inst
= OPCODE_CMN
;
24260 new_inst
= OPCODE_CMP
;
24264 /* Now Inverted ops. */
24265 case OPCODE_MOV
: /* MOV <-> MVN */
24266 new_inst
= OPCODE_MVN
;
24271 new_inst
= OPCODE_MOV
;
24275 case OPCODE_AND
: /* AND <-> BIC */
24276 new_inst
= OPCODE_BIC
;
24281 new_inst
= OPCODE_AND
;
24285 case OPCODE_ADC
: /* ADC <-> SBC */
24286 new_inst
= OPCODE_SBC
;
24291 new_inst
= OPCODE_ADC
;
24295 /* We cannot do anything. */
24300 if (value
== (unsigned) FAIL
)
24303 *instruction
&= OPCODE_MASK
;
24304 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
24308 /* Like negate_data_op, but for Thumb-2. */
24310 static unsigned int
24311 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
24315 unsigned int negated
, inverted
;
24317 negated
= encode_thumb32_immediate (-value
);
24318 inverted
= encode_thumb32_immediate (~value
);
24320 rd
= (*instruction
>> 8) & 0xf;
24321 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
24324 /* ADD <-> SUB. Includes CMP <-> CMN. */
24325 case T2_OPCODE_SUB
:
24326 new_inst
= T2_OPCODE_ADD
;
24330 case T2_OPCODE_ADD
:
24331 new_inst
= T2_OPCODE_SUB
;
24335 /* ORR <-> ORN. Includes MOV <-> MVN. */
24336 case T2_OPCODE_ORR
:
24337 new_inst
= T2_OPCODE_ORN
;
24341 case T2_OPCODE_ORN
:
24342 new_inst
= T2_OPCODE_ORR
;
24346 /* AND <-> BIC. TST has no inverted equivalent. */
24347 case T2_OPCODE_AND
:
24348 new_inst
= T2_OPCODE_BIC
;
24355 case T2_OPCODE_BIC
:
24356 new_inst
= T2_OPCODE_AND
;
24361 case T2_OPCODE_ADC
:
24362 new_inst
= T2_OPCODE_SBC
;
24366 case T2_OPCODE_SBC
:
24367 new_inst
= T2_OPCODE_ADC
;
24371 /* We cannot do anything. */
24376 if (value
== (unsigned int)FAIL
)
24379 *instruction
&= T2_OPCODE_MASK
;
24380 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
24384 /* Read a 32-bit thumb instruction from buf. */
24386 static unsigned long
24387 get_thumb32_insn (char * buf
)
24389 unsigned long insn
;
24390 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
24391 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24396 /* We usually want to set the low bit on the address of thumb function
24397 symbols. In particular .word foo - . should have the low bit set.
24398 Generic code tries to fold the difference of two symbols to
24399 a constant. Prevent this and force a relocation when the first symbols
24400 is a thumb function. */
24403 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
24405 if (op
== O_subtract
24406 && l
->X_op
== O_symbol
24407 && r
->X_op
== O_symbol
24408 && THUMB_IS_FUNC (l
->X_add_symbol
))
24410 l
->X_op
= O_subtract
;
24411 l
->X_op_symbol
= r
->X_add_symbol
;
24412 l
->X_add_number
-= r
->X_add_number
;
24416 /* Process as normal. */
24420 /* Encode Thumb2 unconditional branches and calls. The encoding
24421 for the 2 are identical for the immediate values. */
24424 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
24426 #define T2I1I2MASK ((1 << 13) | (1 << 11))
24429 addressT S
, I1
, I2
, lo
, hi
;
24431 S
= (value
>> 24) & 0x01;
24432 I1
= (value
>> 23) & 0x01;
24433 I2
= (value
>> 22) & 0x01;
24434 hi
= (value
>> 12) & 0x3ff;
24435 lo
= (value
>> 1) & 0x7ff;
24436 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24437 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24438 newval
|= (S
<< 10) | hi
;
24439 newval2
&= ~T2I1I2MASK
;
24440 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
24441 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24442 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
24446 md_apply_fix (fixS
* fixP
,
24450 offsetT value
= * valP
;
24452 unsigned int newimm
;
24453 unsigned long temp
;
24455 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
24457 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
24459 /* Note whether this will delete the relocation. */
24461 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
24464 /* On a 64-bit host, silently truncate 'value' to 32 bits for
24465 consistency with the behaviour on 32-bit hosts. Remember value
24467 value
&= 0xffffffff;
24468 value
^= 0x80000000;
24469 value
-= 0x80000000;
24472 fixP
->fx_addnumber
= value
;
24474 /* Same treatment for fixP->fx_offset. */
24475 fixP
->fx_offset
&= 0xffffffff;
24476 fixP
->fx_offset
^= 0x80000000;
24477 fixP
->fx_offset
-= 0x80000000;
24479 switch (fixP
->fx_r_type
)
24481 case BFD_RELOC_NONE
:
24482 /* This will need to go in the object file. */
24486 case BFD_RELOC_ARM_IMMEDIATE
:
24487 /* We claim that this fixup has been processed here,
24488 even if in fact we generate an error because we do
24489 not have a reloc for it, so tc_gen_reloc will reject it. */
24492 if (fixP
->fx_addsy
)
24494 const char *msg
= 0;
24496 if (! S_IS_DEFINED (fixP
->fx_addsy
))
24497 msg
= _("undefined symbol %s used as an immediate value");
24498 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
24499 msg
= _("symbol %s is in a different section");
24500 else if (S_IS_WEAK (fixP
->fx_addsy
))
24501 msg
= _("symbol %s is weak and may be overridden later");
24505 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24506 msg
, S_GET_NAME (fixP
->fx_addsy
));
24511 temp
= md_chars_to_number (buf
, INSN_SIZE
);
24513 /* If the offset is negative, we should use encoding A2 for ADR. */
24514 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
24515 newimm
= negate_data_op (&temp
, value
);
24518 newimm
= encode_arm_immediate (value
);
24520 /* If the instruction will fail, see if we can fix things up by
24521 changing the opcode. */
24522 if (newimm
== (unsigned int) FAIL
)
24523 newimm
= negate_data_op (&temp
, value
);
24524 /* MOV accepts both ARM modified immediate (A1 encoding) and
24525 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
24526 When disassembling, MOV is preferred when there is no encoding
24528 if (newimm
== (unsigned int) FAIL
24529 && ((temp
>> DATA_OP_SHIFT
) & 0xf) == OPCODE_MOV
24530 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
24531 && !((temp
>> SBIT_SHIFT
) & 0x1)
24532 && value
>= 0 && value
<= 0xffff)
24534 /* Clear bits[23:20] to change encoding from A1 to A2. */
24535 temp
&= 0xff0fffff;
24536 /* Encoding high 4bits imm. Code below will encode the remaining
24538 temp
|= (value
& 0x0000f000) << 4;
24539 newimm
= value
& 0x00000fff;
24543 if (newimm
== (unsigned int) FAIL
)
24545 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24546 _("invalid constant (%lx) after fixup"),
24547 (unsigned long) value
);
24551 newimm
|= (temp
& 0xfffff000);
24552 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
24555 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
24557 unsigned int highpart
= 0;
24558 unsigned int newinsn
= 0xe1a00000; /* nop. */
24560 if (fixP
->fx_addsy
)
24562 const char *msg
= 0;
24564 if (! S_IS_DEFINED (fixP
->fx_addsy
))
24565 msg
= _("undefined symbol %s used as an immediate value");
24566 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
24567 msg
= _("symbol %s is in a different section");
24568 else if (S_IS_WEAK (fixP
->fx_addsy
))
24569 msg
= _("symbol %s is weak and may be overridden later");
24573 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24574 msg
, S_GET_NAME (fixP
->fx_addsy
));
24579 newimm
= encode_arm_immediate (value
);
24580 temp
= md_chars_to_number (buf
, INSN_SIZE
);
24582 /* If the instruction will fail, see if we can fix things up by
24583 changing the opcode. */
24584 if (newimm
== (unsigned int) FAIL
24585 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
24587 /* No ? OK - try using two ADD instructions to generate
24589 newimm
= validate_immediate_twopart (value
, & highpart
);
24591 /* Yes - then make sure that the second instruction is
24593 if (newimm
!= (unsigned int) FAIL
)
24595 /* Still No ? Try using a negated value. */
24596 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
24597 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
24598 /* Otherwise - give up. */
24601 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24602 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
24607 /* Replace the first operand in the 2nd instruction (which
24608 is the PC) with the destination register. We have
24609 already added in the PC in the first instruction and we
24610 do not want to do it again. */
24611 newinsn
&= ~ 0xf0000;
24612 newinsn
|= ((newinsn
& 0x0f000) << 4);
24615 newimm
|= (temp
& 0xfffff000);
24616 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
24618 highpart
|= (newinsn
& 0xfffff000);
24619 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
24623 case BFD_RELOC_ARM_OFFSET_IMM
:
24624 if (!fixP
->fx_done
&& seg
->use_rela_p
)
24626 /* Fall through. */
24628 case BFD_RELOC_ARM_LITERAL
:
24634 if (validate_offset_imm (value
, 0) == FAIL
)
24636 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
24637 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24638 _("invalid literal constant: pool needs to be closer"));
24640 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24641 _("bad immediate value for offset (%ld)"),
24646 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24648 newval
&= 0xfffff000;
24651 newval
&= 0xff7ff000;
24652 newval
|= value
| (sign
? INDEX_UP
: 0);
24654 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24657 case BFD_RELOC_ARM_OFFSET_IMM8
:
24658 case BFD_RELOC_ARM_HWLITERAL
:
24664 if (validate_offset_imm (value
, 1) == FAIL
)
24666 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
24667 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24668 _("invalid literal constant: pool needs to be closer"));
24670 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24671 _("bad immediate value for 8-bit offset (%ld)"),
24676 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24678 newval
&= 0xfffff0f0;
24681 newval
&= 0xff7ff0f0;
24682 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
24684 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24687 case BFD_RELOC_ARM_T32_OFFSET_U8
:
24688 if (value
< 0 || value
> 1020 || value
% 4 != 0)
24689 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24690 _("bad immediate value for offset (%ld)"), (long) value
);
24693 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
24695 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
24698 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
24699 /* This is a complicated relocation used for all varieties of Thumb32
24700 load/store instruction with immediate offset:
24702 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
24703 *4, optional writeback(W)
24704 (doubleword load/store)
24706 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
24707 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
24708 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
24709 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
24710 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
24712 Uppercase letters indicate bits that are already encoded at
24713 this point. Lowercase letters are our problem. For the
24714 second block of instructions, the secondary opcode nybble
24715 (bits 8..11) is present, and bit 23 is zero, even if this is
24716 a PC-relative operation. */
24717 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24719 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
24721 if ((newval
& 0xf0000000) == 0xe0000000)
24723 /* Doubleword load/store: 8-bit offset, scaled by 4. */
24725 newval
|= (1 << 23);
24728 if (value
% 4 != 0)
24730 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24731 _("offset not a multiple of 4"));
24737 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24738 _("offset out of range"));
24743 else if ((newval
& 0x000f0000) == 0x000f0000)
24745 /* PC-relative, 12-bit offset. */
24747 newval
|= (1 << 23);
24752 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24753 _("offset out of range"));
24758 else if ((newval
& 0x00000100) == 0x00000100)
24760 /* Writeback: 8-bit, +/- offset. */
24762 newval
|= (1 << 9);
24767 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24768 _("offset out of range"));
24773 else if ((newval
& 0x00000f00) == 0x00000e00)
24775 /* T-instruction: positive 8-bit offset. */
24776 if (value
< 0 || value
> 0xff)
24778 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24779 _("offset out of range"));
24787 /* Positive 12-bit or negative 8-bit offset. */
24791 newval
|= (1 << 23);
24801 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24802 _("offset out of range"));
24809 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
24810 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
24813 case BFD_RELOC_ARM_SHIFT_IMM
:
24814 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24815 if (((unsigned long) value
) > 32
24817 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
24819 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24820 _("shift expression is too large"));
24825 /* Shifts of zero must be done as lsl. */
24827 else if (value
== 32)
24829 newval
&= 0xfffff07f;
24830 newval
|= (value
& 0x1f) << 7;
24831 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24834 case BFD_RELOC_ARM_T32_IMMEDIATE
:
24835 case BFD_RELOC_ARM_T32_ADD_IMM
:
24836 case BFD_RELOC_ARM_T32_IMM12
:
24837 case BFD_RELOC_ARM_T32_ADD_PC12
:
24838 /* We claim that this fixup has been processed here,
24839 even if in fact we generate an error because we do
24840 not have a reloc for it, so tc_gen_reloc will reject it. */
24844 && ! S_IS_DEFINED (fixP
->fx_addsy
))
24846 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24847 _("undefined symbol %s used as an immediate value"),
24848 S_GET_NAME (fixP
->fx_addsy
));
24852 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24854 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
24857 if ((fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
24858 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
24859 Thumb2 modified immediate encoding (T2). */
24860 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
24861 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
24863 newimm
= encode_thumb32_immediate (value
);
24864 if (newimm
== (unsigned int) FAIL
)
24865 newimm
= thumb32_negate_data_op (&newval
, value
);
24867 if (newimm
== (unsigned int) FAIL
)
24869 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
)
24871 /* Turn add/sum into addw/subw. */
24872 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
24873 newval
= (newval
& 0xfeffffff) | 0x02000000;
24874 /* No flat 12-bit imm encoding for addsw/subsw. */
24875 if ((newval
& 0x00100000) == 0)
24877 /* 12 bit immediate for addw/subw. */
24881 newval
^= 0x00a00000;
24884 newimm
= (unsigned int) FAIL
;
24891 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
24892 UINT16 (T3 encoding), MOVW only accepts UINT16. When
24893 disassembling, MOV is preferred when there is no encoding
24895 if (((newval
>> T2_DATA_OP_SHIFT
) & 0xf) == T2_OPCODE_ORR
24896 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
24897 but with the Rn field [19:16] set to 1111. */
24898 && (((newval
>> 16) & 0xf) == 0xf)
24899 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
)
24900 && !((newval
>> T2_SBIT_SHIFT
) & 0x1)
24901 && value
>= 0 && value
<= 0xffff)
24903 /* Toggle bit[25] to change encoding from T2 to T3. */
24905 /* Clear bits[19:16]. */
24906 newval
&= 0xfff0ffff;
24907 /* Encoding high 4bits imm. Code below will encode the
24908 remaining low 12bits. */
24909 newval
|= (value
& 0x0000f000) << 4;
24910 newimm
= value
& 0x00000fff;
24915 if (newimm
== (unsigned int)FAIL
)
24917 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24918 _("invalid constant (%lx) after fixup"),
24919 (unsigned long) value
);
24923 newval
|= (newimm
& 0x800) << 15;
24924 newval
|= (newimm
& 0x700) << 4;
24925 newval
|= (newimm
& 0x0ff);
24927 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
24928 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
24931 case BFD_RELOC_ARM_SMC
:
24932 if (((unsigned long) value
) > 0xffff)
24933 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24934 _("invalid smc expression"));
24935 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24936 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
24937 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24940 case BFD_RELOC_ARM_HVC
:
24941 if (((unsigned long) value
) > 0xffff)
24942 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24943 _("invalid hvc expression"));
24944 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24945 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
24946 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24949 case BFD_RELOC_ARM_SWI
:
24950 if (fixP
->tc_fix_data
!= 0)
24952 if (((unsigned long) value
) > 0xff)
24953 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24954 _("invalid swi expression"));
24955 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24957 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24961 if (((unsigned long) value
) > 0x00ffffff)
24962 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24963 _("invalid swi expression"));
24964 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24966 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24970 case BFD_RELOC_ARM_MULTI
:
24971 if (((unsigned long) value
) > 0xffff)
24972 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24973 _("invalid expression in load/store multiple"));
24974 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
24975 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24979 case BFD_RELOC_ARM_PCREL_CALL
:
24981 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
24983 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24984 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24985 && THUMB_IS_FUNC (fixP
->fx_addsy
))
24986 /* Flip the bl to blx. This is a simple flip
24987 bit here because we generate PCREL_CALL for
24988 unconditional bls. */
24990 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24991 newval
= newval
| 0x10000000;
24992 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24998 goto arm_branch_common
;
25000 case BFD_RELOC_ARM_PCREL_JUMP
:
25001 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
25003 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25004 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25005 && THUMB_IS_FUNC (fixP
->fx_addsy
))
25007 /* This would map to a bl<cond>, b<cond>,
25008 b<always> to a Thumb function. We
25009 need to force a relocation for this particular
25011 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25014 /* Fall through. */
25016 case BFD_RELOC_ARM_PLT32
:
25018 case BFD_RELOC_ARM_PCREL_BRANCH
:
25020 goto arm_branch_common
;
25022 case BFD_RELOC_ARM_PCREL_BLX
:
25025 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
25027 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25028 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25029 && ARM_IS_FUNC (fixP
->fx_addsy
))
25031 /* Flip the blx to a bl and warn. */
25032 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
25033 newval
= 0xeb000000;
25034 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
25035 _("blx to '%s' an ARM ISA state function changed to bl"),
25037 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25043 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
25044 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
25048 /* We are going to store value (shifted right by two) in the
25049 instruction, in a 24 bit, signed field. Bits 26 through 32 either
25050 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
25053 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25054 _("misaligned branch destination"));
25055 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
25056 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
25057 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25059 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25061 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25062 newval
|= (value
>> 2) & 0x00ffffff;
25063 /* Set the H bit on BLX instructions. */
25067 newval
|= 0x01000000;
25069 newval
&= ~0x01000000;
25071 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25075 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
25076 /* CBZ can only branch forward. */
25078 /* Attempts to use CBZ to branch to the next instruction
25079 (which, strictly speaking, are prohibited) will be turned into
25082 FIXME: It may be better to remove the instruction completely and
25083 perform relaxation. */
25086 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25087 newval
= 0xbf00; /* NOP encoding T1 */
25088 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25093 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25095 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25097 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25098 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
25099 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25104 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
25105 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
25106 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25108 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25110 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25111 newval
|= (value
& 0x1ff) >> 1;
25112 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25116 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
25117 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
25118 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25120 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25122 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25123 newval
|= (value
& 0xfff) >> 1;
25124 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25128 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
25130 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25131 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25132 && ARM_IS_FUNC (fixP
->fx_addsy
)
25133 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
25135 /* Force a relocation for a branch 20 bits wide. */
25138 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
25139 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25140 _("conditional branch out of range"));
25142 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25145 addressT S
, J1
, J2
, lo
, hi
;
25147 S
= (value
& 0x00100000) >> 20;
25148 J2
= (value
& 0x00080000) >> 19;
25149 J1
= (value
& 0x00040000) >> 18;
25150 hi
= (value
& 0x0003f000) >> 12;
25151 lo
= (value
& 0x00000ffe) >> 1;
25153 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25154 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25155 newval
|= (S
<< 10) | hi
;
25156 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
25157 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25158 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25162 case BFD_RELOC_THUMB_PCREL_BLX
:
25163 /* If there is a blx from a thumb state function to
25164 another thumb function flip this to a bl and warn
25168 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25169 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25170 && THUMB_IS_FUNC (fixP
->fx_addsy
))
25172 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
25173 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
25174 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
25176 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25177 newval
= newval
| 0x1000;
25178 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
25179 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
25184 goto thumb_bl_common
;
25186 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
25187 /* A bl from Thumb state ISA to an internal ARM state function
25188 is converted to a blx. */
25190 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25191 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25192 && ARM_IS_FUNC (fixP
->fx_addsy
)
25193 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
25195 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25196 newval
= newval
& ~0x1000;
25197 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
25198 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
25204 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
25205 /* For a BLX instruction, make sure that the relocation is rounded up
25206 to a word boundary. This follows the semantics of the instruction
25207 which specifies that bit 1 of the target address will come from bit
25208 1 of the base address. */
25209 value
= (value
+ 3) & ~ 3;
25212 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
25213 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
25214 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
25217 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
25219 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
25220 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25221 else if ((value
& ~0x1ffffff)
25222 && ((value
& ~0x1ffffff) != ~0x1ffffff))
25223 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25224 _("Thumb2 branch out of range"));
25227 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25228 encode_thumb2_b_bl_offset (buf
, value
);
25232 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
25233 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
25234 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25236 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25237 encode_thumb2_b_bl_offset (buf
, value
);
25242 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25247 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25248 md_number_to_chars (buf
, value
, 2);
25252 case BFD_RELOC_ARM_TLS_CALL
:
25253 case BFD_RELOC_ARM_THM_TLS_CALL
:
25254 case BFD_RELOC_ARM_TLS_DESCSEQ
:
25255 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
25256 case BFD_RELOC_ARM_TLS_GOTDESC
:
25257 case BFD_RELOC_ARM_TLS_GD32
:
25258 case BFD_RELOC_ARM_TLS_LE32
:
25259 case BFD_RELOC_ARM_TLS_IE32
:
25260 case BFD_RELOC_ARM_TLS_LDM32
:
25261 case BFD_RELOC_ARM_TLS_LDO32
:
25262 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
25265 /* Same handling as above, but with the arm_fdpic guard. */
25266 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
25267 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
25268 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
25271 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
25275 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25276 _("Relocation supported only in FDPIC mode"));
25280 case BFD_RELOC_ARM_GOT32
:
25281 case BFD_RELOC_ARM_GOTOFF
:
25284 case BFD_RELOC_ARM_GOT_PREL
:
25285 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25286 md_number_to_chars (buf
, value
, 4);
25289 case BFD_RELOC_ARM_TARGET2
:
25290 /* TARGET2 is not partial-inplace, so we need to write the
25291 addend here for REL targets, because it won't be written out
25292 during reloc processing later. */
25293 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25294 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
25297 /* Relocations for FDPIC. */
25298 case BFD_RELOC_ARM_GOTFUNCDESC
:
25299 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
25300 case BFD_RELOC_ARM_FUNCDESC
:
25303 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25304 md_number_to_chars (buf
, 0, 4);
25308 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25309 _("Relocation supported only in FDPIC mode"));
25314 case BFD_RELOC_RVA
:
25316 case BFD_RELOC_ARM_TARGET1
:
25317 case BFD_RELOC_ARM_ROSEGREL32
:
25318 case BFD_RELOC_ARM_SBREL32
:
25319 case BFD_RELOC_32_PCREL
:
25321 case BFD_RELOC_32_SECREL
:
25323 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25325 /* For WinCE we only do this for pcrel fixups. */
25326 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
25328 md_number_to_chars (buf
, value
, 4);
25332 case BFD_RELOC_ARM_PREL31
:
25333 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25335 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
25336 if ((value
^ (value
>> 1)) & 0x40000000)
25338 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25339 _("rel31 relocation overflow"));
25341 newval
|= value
& 0x7fffffff;
25342 md_number_to_chars (buf
, newval
, 4);
25347 case BFD_RELOC_ARM_CP_OFF_IMM
:
25348 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
25349 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
:
25350 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
)
25351 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25353 newval
= get_thumb32_insn (buf
);
25354 if ((newval
& 0x0f200f00) == 0x0d000900)
25356 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
25357 has permitted values that are multiples of 2, in the range 0
25359 if (value
< -510 || value
> 510 || (value
& 1))
25360 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25361 _("co-processor offset out of range"));
25363 else if ((newval
& 0xfe001f80) == 0xec000f80)
25365 if (value
< -511 || value
> 512 || (value
& 3))
25366 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25367 _("co-processor offset out of range"));
25369 else if (value
< -1023 || value
> 1023 || (value
& 3))
25370 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25371 _("co-processor offset out of range"));
25376 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
25377 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
25378 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25380 newval
= get_thumb32_insn (buf
);
25383 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
25384 newval
&= 0xffffff80;
25386 newval
&= 0xffffff00;
25390 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
25391 newval
&= 0xff7fff80;
25393 newval
&= 0xff7fff00;
25394 if ((newval
& 0x0f200f00) == 0x0d000900)
25396 /* This is a fp16 vstr/vldr.
25398 It requires the immediate offset in the instruction is shifted
25399 left by 1 to be a half-word offset.
25401 Here, left shift by 1 first, and later right shift by 2
25402 should get the right offset. */
25405 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
25407 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
25408 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
25409 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25411 put_thumb32_insn (buf
, newval
);
25414 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
25415 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
25416 if (value
< -255 || value
> 255)
25417 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25418 _("co-processor offset out of range"));
25420 goto cp_off_common
;
25422 case BFD_RELOC_ARM_THUMB_OFFSET
:
25423 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25424 /* Exactly what ranges, and where the offset is inserted depends
25425 on the type of instruction, we can establish this from the
25427 switch (newval
>> 12)
25429 case 4: /* PC load. */
25430 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
25431 forced to zero for these loads; md_pcrel_from has already
25432 compensated for this. */
25434 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25435 _("invalid offset, target not word aligned (0x%08lX)"),
25436 (((unsigned long) fixP
->fx_frag
->fr_address
25437 + (unsigned long) fixP
->fx_where
) & ~3)
25438 + (unsigned long) value
);
25440 if (value
& ~0x3fc)
25441 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25442 _("invalid offset, value too big (0x%08lX)"),
25445 newval
|= value
>> 2;
25448 case 9: /* SP load/store. */
25449 if (value
& ~0x3fc)
25450 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25451 _("invalid offset, value too big (0x%08lX)"),
25453 newval
|= value
>> 2;
25456 case 6: /* Word load/store. */
25458 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25459 _("invalid offset, value too big (0x%08lX)"),
25461 newval
|= value
<< 4; /* 6 - 2. */
25464 case 7: /* Byte load/store. */
25466 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25467 _("invalid offset, value too big (0x%08lX)"),
25469 newval
|= value
<< 6;
25472 case 8: /* Halfword load/store. */
25474 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25475 _("invalid offset, value too big (0x%08lX)"),
25477 newval
|= value
<< 5; /* 6 - 1. */
25481 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25482 "Unable to process relocation for thumb opcode: %lx",
25483 (unsigned long) newval
);
25486 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25489 case BFD_RELOC_ARM_THUMB_ADD
:
25490 /* This is a complicated relocation, since we use it for all of
25491 the following immediate relocations:
25495 9bit ADD/SUB SP word-aligned
25496 10bit ADD PC/SP word-aligned
25498 The type of instruction being processed is encoded in the
25505 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25507 int rd
= (newval
>> 4) & 0xf;
25508 int rs
= newval
& 0xf;
25509 int subtract
= !!(newval
& 0x8000);
25511 /* Check for HI regs, only very restricted cases allowed:
25512 Adjusting SP, and using PC or SP to get an address. */
25513 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
25514 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
25515 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25516 _("invalid Hi register with immediate"));
25518 /* If value is negative, choose the opposite instruction. */
25522 subtract
= !subtract
;
25524 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25525 _("immediate value out of range"));
25530 if (value
& ~0x1fc)
25531 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25532 _("invalid immediate for stack address calculation"));
25533 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
25534 newval
|= value
>> 2;
25536 else if (rs
== REG_PC
|| rs
== REG_SP
)
25538 /* PR gas/18541. If the addition is for a defined symbol
25539 within range of an ADR instruction then accept it. */
25542 && fixP
->fx_addsy
!= NULL
)
25546 if (! S_IS_DEFINED (fixP
->fx_addsy
)
25547 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
25548 || S_IS_WEAK (fixP
->fx_addsy
))
25550 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25551 _("address calculation needs a strongly defined nearby symbol"));
25555 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
25557 /* Round up to the next 4-byte boundary. */
25562 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
25566 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25567 _("symbol too far away"));
25577 if (subtract
|| value
& ~0x3fc)
25578 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25579 _("invalid immediate for address calculation (value = 0x%08lX)"),
25580 (unsigned long) (subtract
? - value
: value
));
25581 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
25583 newval
|= value
>> 2;
25588 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25589 _("immediate value out of range"));
25590 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
25591 newval
|= (rd
<< 8) | value
;
25596 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25597 _("immediate value out of range"));
25598 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
25599 newval
|= rd
| (rs
<< 3) | (value
<< 6);
25602 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25605 case BFD_RELOC_ARM_THUMB_IMM
:
25606 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25607 if (value
< 0 || value
> 255)
25608 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25609 _("invalid immediate: %ld is out of range"),
25612 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25615 case BFD_RELOC_ARM_THUMB_SHIFT
:
25616 /* 5bit shift value (0..32). LSL cannot take 32. */
25617 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
25618 temp
= newval
& 0xf800;
25619 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
25620 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25621 _("invalid shift value: %ld"), (long) value
);
25622 /* Shifts of zero must be encoded as LSL. */
25624 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
25625 /* Shifts of 32 are encoded as zero. */
25626 else if (value
== 32)
25628 newval
|= value
<< 6;
25629 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25632 case BFD_RELOC_VTABLE_INHERIT
:
25633 case BFD_RELOC_VTABLE_ENTRY
:
25637 case BFD_RELOC_ARM_MOVW
:
25638 case BFD_RELOC_ARM_MOVT
:
25639 case BFD_RELOC_ARM_THUMB_MOVW
:
25640 case BFD_RELOC_ARM_THUMB_MOVT
:
25641 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25643 /* REL format relocations are limited to a 16-bit addend. */
25644 if (!fixP
->fx_done
)
25646 if (value
< -0x8000 || value
> 0x7fff)
25647 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25648 _("offset out of range"));
25650 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
25651 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
25656 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
25657 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
25659 newval
= get_thumb32_insn (buf
);
25660 newval
&= 0xfbf08f00;
25661 newval
|= (value
& 0xf000) << 4;
25662 newval
|= (value
& 0x0800) << 15;
25663 newval
|= (value
& 0x0700) << 4;
25664 newval
|= (value
& 0x00ff);
25665 put_thumb32_insn (buf
, newval
);
25669 newval
= md_chars_to_number (buf
, 4);
25670 newval
&= 0xfff0f000;
25671 newval
|= value
& 0x0fff;
25672 newval
|= (value
& 0xf000) << 4;
25673 md_number_to_chars (buf
, newval
, 4);
25678 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
25679 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
25680 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
25681 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
25682 gas_assert (!fixP
->fx_done
);
25685 bfd_boolean is_mov
;
25686 bfd_vma encoded_addend
= value
;
25688 /* Check that addend can be encoded in instruction. */
25689 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
25690 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25691 _("the offset 0x%08lX is not representable"),
25692 (unsigned long) encoded_addend
);
25694 /* Extract the instruction. */
25695 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
25696 is_mov
= (insn
& 0xf800) == 0x2000;
25701 if (!seg
->use_rela_p
)
25702 insn
|= encoded_addend
;
25708 /* Extract the instruction. */
25709 /* Encoding is the following
25714 /* The following conditions must be true :
25719 rd
= (insn
>> 4) & 0xf;
25721 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
25722 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25723 _("Unable to process relocation for thumb opcode: %lx"),
25724 (unsigned long) insn
);
25726 /* Encode as ADD immediate8 thumb 1 code. */
25727 insn
= 0x3000 | (rd
<< 8);
25729 /* Place the encoded addend into the first 8 bits of the
25731 if (!seg
->use_rela_p
)
25732 insn
|= encoded_addend
;
25735 /* Update the instruction. */
25736 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
25740 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
25741 case BFD_RELOC_ARM_ALU_PC_G0
:
25742 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
25743 case BFD_RELOC_ARM_ALU_PC_G1
:
25744 case BFD_RELOC_ARM_ALU_PC_G2
:
25745 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
25746 case BFD_RELOC_ARM_ALU_SB_G0
:
25747 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
25748 case BFD_RELOC_ARM_ALU_SB_G1
:
25749 case BFD_RELOC_ARM_ALU_SB_G2
:
25750 gas_assert (!fixP
->fx_done
);
25751 if (!seg
->use_rela_p
)
25754 bfd_vma encoded_addend
;
25755 bfd_vma addend_abs
= llabs (value
);
25757 /* Check that the absolute value of the addend can be
25758 expressed as an 8-bit constant plus a rotation. */
25759 encoded_addend
= encode_arm_immediate (addend_abs
);
25760 if (encoded_addend
== (unsigned int) FAIL
)
25761 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25762 _("the offset 0x%08lX is not representable"),
25763 (unsigned long) addend_abs
);
25765 /* Extract the instruction. */
25766 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25768 /* If the addend is positive, use an ADD instruction.
25769 Otherwise use a SUB. Take care not to destroy the S bit. */
25770 insn
&= 0xff1fffff;
25776 /* Place the encoded addend into the first 12 bits of the
25778 insn
&= 0xfffff000;
25779 insn
|= encoded_addend
;
25781 /* Update the instruction. */
25782 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25786 case BFD_RELOC_ARM_LDR_PC_G0
:
25787 case BFD_RELOC_ARM_LDR_PC_G1
:
25788 case BFD_RELOC_ARM_LDR_PC_G2
:
25789 case BFD_RELOC_ARM_LDR_SB_G0
:
25790 case BFD_RELOC_ARM_LDR_SB_G1
:
25791 case BFD_RELOC_ARM_LDR_SB_G2
:
25792 gas_assert (!fixP
->fx_done
);
25793 if (!seg
->use_rela_p
)
25796 bfd_vma addend_abs
= llabs (value
);
25798 /* Check that the absolute value of the addend can be
25799 encoded in 12 bits. */
25800 if (addend_abs
>= 0x1000)
25801 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25802 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
25803 (unsigned long) addend_abs
);
25805 /* Extract the instruction. */
25806 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25808 /* If the addend is negative, clear bit 23 of the instruction.
25809 Otherwise set it. */
25811 insn
&= ~(1 << 23);
25815 /* Place the absolute value of the addend into the first 12 bits
25816 of the instruction. */
25817 insn
&= 0xfffff000;
25818 insn
|= addend_abs
;
25820 /* Update the instruction. */
25821 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25825 case BFD_RELOC_ARM_LDRS_PC_G0
:
25826 case BFD_RELOC_ARM_LDRS_PC_G1
:
25827 case BFD_RELOC_ARM_LDRS_PC_G2
:
25828 case BFD_RELOC_ARM_LDRS_SB_G0
:
25829 case BFD_RELOC_ARM_LDRS_SB_G1
:
25830 case BFD_RELOC_ARM_LDRS_SB_G2
:
25831 gas_assert (!fixP
->fx_done
);
25832 if (!seg
->use_rela_p
)
25835 bfd_vma addend_abs
= llabs (value
);
25837 /* Check that the absolute value of the addend can be
25838 encoded in 8 bits. */
25839 if (addend_abs
>= 0x100)
25840 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25841 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
25842 (unsigned long) addend_abs
);
25844 /* Extract the instruction. */
25845 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25847 /* If the addend is negative, clear bit 23 of the instruction.
25848 Otherwise set it. */
25850 insn
&= ~(1 << 23);
25854 /* Place the first four bits of the absolute value of the addend
25855 into the first 4 bits of the instruction, and the remaining
25856 four into bits 8 .. 11. */
25857 insn
&= 0xfffff0f0;
25858 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
25860 /* Update the instruction. */
25861 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25865 case BFD_RELOC_ARM_LDC_PC_G0
:
25866 case BFD_RELOC_ARM_LDC_PC_G1
:
25867 case BFD_RELOC_ARM_LDC_PC_G2
:
25868 case BFD_RELOC_ARM_LDC_SB_G0
:
25869 case BFD_RELOC_ARM_LDC_SB_G1
:
25870 case BFD_RELOC_ARM_LDC_SB_G2
:
25871 gas_assert (!fixP
->fx_done
);
25872 if (!seg
->use_rela_p
)
25875 bfd_vma addend_abs
= llabs (value
);
25877 /* Check that the absolute value of the addend is a multiple of
25878 four and, when divided by four, fits in 8 bits. */
25879 if (addend_abs
& 0x3)
25880 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25881 _("bad offset 0x%08lX (must be word-aligned)"),
25882 (unsigned long) addend_abs
);
25884 if ((addend_abs
>> 2) > 0xff)
25885 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25886 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
25887 (unsigned long) addend_abs
);
25889 /* Extract the instruction. */
25890 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25892 /* If the addend is negative, clear bit 23 of the instruction.
25893 Otherwise set it. */
25895 insn
&= ~(1 << 23);
25899 /* Place the addend (divided by four) into the first eight
25900 bits of the instruction. */
25901 insn
&= 0xfffffff0;
25902 insn
|= addend_abs
>> 2;
25904 /* Update the instruction. */
25905 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25909 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
25911 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25912 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25913 && ARM_IS_FUNC (fixP
->fx_addsy
)
25914 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25916 /* Force a relocation for a branch 5 bits wide. */
25919 if (v8_1_branch_value_check (value
, 5, FALSE
) == FAIL
)
25920 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25923 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25925 addressT boff
= value
>> 1;
25927 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25928 newval
|= (boff
<< 7);
25929 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25933 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
25935 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25936 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25937 && ARM_IS_FUNC (fixP
->fx_addsy
)
25938 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25942 if ((value
& ~0x7f) && ((value
& ~0x3f) != ~0x3f))
25943 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25944 _("branch out of range"));
25946 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25948 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25950 addressT boff
= ((newval
& 0x0780) >> 7) << 1;
25951 addressT diff
= value
- boff
;
25955 newval
|= 1 << 1; /* T bit. */
25957 else if (diff
!= 2)
25959 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25960 _("out of range label-relative fixup value"));
25962 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25966 case BFD_RELOC_ARM_THUMB_BF17
:
25968 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25969 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25970 && ARM_IS_FUNC (fixP
->fx_addsy
)
25971 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25973 /* Force a relocation for a branch 17 bits wide. */
25977 if (v8_1_branch_value_check (value
, 17, TRUE
) == FAIL
)
25978 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25981 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25984 addressT immA
, immB
, immC
;
25986 immA
= (value
& 0x0001f000) >> 12;
25987 immB
= (value
& 0x00000ffc) >> 2;
25988 immC
= (value
& 0x00000002) >> 1;
25990 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25991 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25993 newval2
|= (immC
<< 11) | (immB
<< 1);
25994 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25995 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25999 case BFD_RELOC_ARM_THUMB_BF19
:
26001 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26002 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26003 && ARM_IS_FUNC (fixP
->fx_addsy
)
26004 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26006 /* Force a relocation for a branch 19 bits wide. */
26010 if (v8_1_branch_value_check (value
, 19, TRUE
) == FAIL
)
26011 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26014 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26017 addressT immA
, immB
, immC
;
26019 immA
= (value
& 0x0007f000) >> 12;
26020 immB
= (value
& 0x00000ffc) >> 2;
26021 immC
= (value
& 0x00000002) >> 1;
26023 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26024 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26026 newval2
|= (immC
<< 11) | (immB
<< 1);
26027 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26028 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
26032 case BFD_RELOC_ARM_THUMB_BF13
:
26034 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26035 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26036 && ARM_IS_FUNC (fixP
->fx_addsy
)
26037 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26039 /* Force a relocation for a branch 13 bits wide. */
26043 if (v8_1_branch_value_check (value
, 13, TRUE
) == FAIL
)
26044 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26047 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26050 addressT immA
, immB
, immC
;
26052 immA
= (value
& 0x00001000) >> 12;
26053 immB
= (value
& 0x00000ffc) >> 2;
26054 immC
= (value
& 0x00000002) >> 1;
26056 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26057 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26059 newval2
|= (immC
<< 11) | (immB
<< 1);
26060 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26061 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
26065 case BFD_RELOC_ARM_THUMB_LOOP12
:
26067 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26068 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26069 && ARM_IS_FUNC (fixP
->fx_addsy
)
26070 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26072 /* Force a relocation for a branch 12 bits wide. */
26076 bfd_vma insn
= get_thumb32_insn (buf
);
26077 /* le lr, <label> or le <label> */
26078 if (((insn
& 0xffffffff) == 0xf00fc001)
26079 || ((insn
& 0xffffffff) == 0xf02fc001))
26082 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
26083 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26085 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26087 addressT imml
, immh
;
26089 immh
= (value
& 0x00000ffc) >> 2;
26090 imml
= (value
& 0x00000002) >> 1;
26092 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26093 newval
|= (imml
<< 11) | (immh
<< 1);
26094 md_number_to_chars (buf
+ THUMB_SIZE
, newval
, THUMB_SIZE
);
26098 case BFD_RELOC_ARM_V4BX
:
26099 /* This will need to go in the object file. */
26103 case BFD_RELOC_UNUSED
:
26105 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26106 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
26110 /* Translate internal representation of relocation info to BFD target
26114 tc_gen_reloc (asection
*section
, fixS
*fixp
)
26117 bfd_reloc_code_real_type code
;
26119 reloc
= XNEW (arelent
);
26121 reloc
->sym_ptr_ptr
= XNEW (asymbol
*);
26122 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
26123 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
26125 if (fixp
->fx_pcrel
)
26127 if (section
->use_rela_p
)
26128 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
26130 fixp
->fx_offset
= reloc
->address
;
26132 reloc
->addend
= fixp
->fx_offset
;
26134 switch (fixp
->fx_r_type
)
26137 if (fixp
->fx_pcrel
)
26139 code
= BFD_RELOC_8_PCREL
;
26142 /* Fall through. */
26145 if (fixp
->fx_pcrel
)
26147 code
= BFD_RELOC_16_PCREL
;
26150 /* Fall through. */
26153 if (fixp
->fx_pcrel
)
26155 code
= BFD_RELOC_32_PCREL
;
26158 /* Fall through. */
26160 case BFD_RELOC_ARM_MOVW
:
26161 if (fixp
->fx_pcrel
)
26163 code
= BFD_RELOC_ARM_MOVW_PCREL
;
26166 /* Fall through. */
26168 case BFD_RELOC_ARM_MOVT
:
26169 if (fixp
->fx_pcrel
)
26171 code
= BFD_RELOC_ARM_MOVT_PCREL
;
26174 /* Fall through. */
26176 case BFD_RELOC_ARM_THUMB_MOVW
:
26177 if (fixp
->fx_pcrel
)
26179 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
26182 /* Fall through. */
26184 case BFD_RELOC_ARM_THUMB_MOVT
:
26185 if (fixp
->fx_pcrel
)
26187 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
26190 /* Fall through. */
26192 case BFD_RELOC_NONE
:
26193 case BFD_RELOC_ARM_PCREL_BRANCH
:
26194 case BFD_RELOC_ARM_PCREL_BLX
:
26195 case BFD_RELOC_RVA
:
26196 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
26197 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
26198 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
26199 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
26200 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26201 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
26202 case BFD_RELOC_VTABLE_ENTRY
:
26203 case BFD_RELOC_VTABLE_INHERIT
:
26205 case BFD_RELOC_32_SECREL
:
26207 code
= fixp
->fx_r_type
;
26210 case BFD_RELOC_THUMB_PCREL_BLX
:
26212 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
26213 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
26216 code
= BFD_RELOC_THUMB_PCREL_BLX
;
26219 case BFD_RELOC_ARM_LITERAL
:
26220 case BFD_RELOC_ARM_HWLITERAL
:
26221 /* If this is called then the a literal has
26222 been referenced across a section boundary. */
26223 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26224 _("literal referenced across section boundary"));
26228 case BFD_RELOC_ARM_TLS_CALL
:
26229 case BFD_RELOC_ARM_THM_TLS_CALL
:
26230 case BFD_RELOC_ARM_TLS_DESCSEQ
:
26231 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
26232 case BFD_RELOC_ARM_GOT32
:
26233 case BFD_RELOC_ARM_GOTOFF
:
26234 case BFD_RELOC_ARM_GOT_PREL
:
26235 case BFD_RELOC_ARM_PLT32
:
26236 case BFD_RELOC_ARM_TARGET1
:
26237 case BFD_RELOC_ARM_ROSEGREL32
:
26238 case BFD_RELOC_ARM_SBREL32
:
26239 case BFD_RELOC_ARM_PREL31
:
26240 case BFD_RELOC_ARM_TARGET2
:
26241 case BFD_RELOC_ARM_TLS_LDO32
:
26242 case BFD_RELOC_ARM_PCREL_CALL
:
26243 case BFD_RELOC_ARM_PCREL_JUMP
:
26244 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
26245 case BFD_RELOC_ARM_ALU_PC_G0
:
26246 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
26247 case BFD_RELOC_ARM_ALU_PC_G1
:
26248 case BFD_RELOC_ARM_ALU_PC_G2
:
26249 case BFD_RELOC_ARM_LDR_PC_G0
:
26250 case BFD_RELOC_ARM_LDR_PC_G1
:
26251 case BFD_RELOC_ARM_LDR_PC_G2
:
26252 case BFD_RELOC_ARM_LDRS_PC_G0
:
26253 case BFD_RELOC_ARM_LDRS_PC_G1
:
26254 case BFD_RELOC_ARM_LDRS_PC_G2
:
26255 case BFD_RELOC_ARM_LDC_PC_G0
:
26256 case BFD_RELOC_ARM_LDC_PC_G1
:
26257 case BFD_RELOC_ARM_LDC_PC_G2
:
26258 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
26259 case BFD_RELOC_ARM_ALU_SB_G0
:
26260 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
26261 case BFD_RELOC_ARM_ALU_SB_G1
:
26262 case BFD_RELOC_ARM_ALU_SB_G2
:
26263 case BFD_RELOC_ARM_LDR_SB_G0
:
26264 case BFD_RELOC_ARM_LDR_SB_G1
:
26265 case BFD_RELOC_ARM_LDR_SB_G2
:
26266 case BFD_RELOC_ARM_LDRS_SB_G0
:
26267 case BFD_RELOC_ARM_LDRS_SB_G1
:
26268 case BFD_RELOC_ARM_LDRS_SB_G2
:
26269 case BFD_RELOC_ARM_LDC_SB_G0
:
26270 case BFD_RELOC_ARM_LDC_SB_G1
:
26271 case BFD_RELOC_ARM_LDC_SB_G2
:
26272 case BFD_RELOC_ARM_V4BX
:
26273 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
26274 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
26275 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
26276 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
26277 case BFD_RELOC_ARM_GOTFUNCDESC
:
26278 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
26279 case BFD_RELOC_ARM_FUNCDESC
:
26280 case BFD_RELOC_ARM_THUMB_BF17
:
26281 case BFD_RELOC_ARM_THUMB_BF19
:
26282 case BFD_RELOC_ARM_THUMB_BF13
:
26283 code
= fixp
->fx_r_type
;
26286 case BFD_RELOC_ARM_TLS_GOTDESC
:
26287 case BFD_RELOC_ARM_TLS_GD32
:
26288 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
26289 case BFD_RELOC_ARM_TLS_LE32
:
26290 case BFD_RELOC_ARM_TLS_IE32
:
26291 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
26292 case BFD_RELOC_ARM_TLS_LDM32
:
26293 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
26294 /* BFD will include the symbol's address in the addend.
26295 But we don't want that, so subtract it out again here. */
26296 if (!S_IS_COMMON (fixp
->fx_addsy
))
26297 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
26298 code
= fixp
->fx_r_type
;
26302 case BFD_RELOC_ARM_IMMEDIATE
:
26303 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26304 _("internal relocation (type: IMMEDIATE) not fixed up"));
26307 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
26308 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26309 _("ADRL used for a symbol not defined in the same file"));
26312 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
26313 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
26314 case BFD_RELOC_ARM_THUMB_LOOP12
:
26315 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26316 _("%s used for a symbol not defined in the same file"),
26317 bfd_get_reloc_code_name (fixp
->fx_r_type
));
26320 case BFD_RELOC_ARM_OFFSET_IMM
:
26321 if (section
->use_rela_p
)
26323 code
= fixp
->fx_r_type
;
26327 if (fixp
->fx_addsy
!= NULL
26328 && !S_IS_DEFINED (fixp
->fx_addsy
)
26329 && S_IS_LOCAL (fixp
->fx_addsy
))
26331 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26332 _("undefined local label `%s'"),
26333 S_GET_NAME (fixp
->fx_addsy
));
26337 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26338 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
26345 switch (fixp
->fx_r_type
)
26347 case BFD_RELOC_NONE
: type
= "NONE"; break;
26348 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
26349 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
26350 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
26351 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
26352 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
26353 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
26354 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
26355 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
26356 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
26357 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
26358 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
26359 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
26360 default: type
= _("<unknown>"); break;
26362 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26363 _("cannot represent %s relocation in this object file format"),
26370 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
26372 && fixp
->fx_addsy
== GOT_symbol
)
26374 code
= BFD_RELOC_ARM_GOTPC
;
26375 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
26379 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
26381 if (reloc
->howto
== NULL
)
26383 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26384 _("cannot represent %s relocation in this object file format"),
26385 bfd_get_reloc_code_name (code
));
26389 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
26390 vtable entry to be used in the relocation's section offset. */
26391 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
26392 reloc
->address
= fixp
->fx_offset
;
26397 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
26400 cons_fix_new_arm (fragS
* frag
,
26404 bfd_reloc_code_real_type reloc
)
26409 FIXME: @@ Should look at CPU word size. */
26413 reloc
= BFD_RELOC_8
;
26416 reloc
= BFD_RELOC_16
;
26420 reloc
= BFD_RELOC_32
;
26423 reloc
= BFD_RELOC_64
;
26428 if (exp
->X_op
== O_secrel
)
26430 exp
->X_op
= O_symbol
;
26431 reloc
= BFD_RELOC_32_SECREL
;
26435 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
26438 #if defined (OBJ_COFF)
26440 arm_validate_fix (fixS
* fixP
)
26442 /* If the destination of the branch is a defined symbol which does not have
26443 the THUMB_FUNC attribute, then we must be calling a function which has
26444 the (interfacearm) attribute. We look for the Thumb entry point to that
26445 function and change the branch to refer to that function instead. */
26446 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
26447 && fixP
->fx_addsy
!= NULL
26448 && S_IS_DEFINED (fixP
->fx_addsy
)
26449 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
26451 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
26458 arm_force_relocation (struct fix
* fixp
)
26460 #if defined (OBJ_COFF) && defined (TE_PE)
26461 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
26465 /* In case we have a call or a branch to a function in ARM ISA mode from
26466 a thumb function or vice-versa force the relocation. These relocations
26467 are cleared off for some cores that might have blx and simple transformations
26471 switch (fixp
->fx_r_type
)
26473 case BFD_RELOC_ARM_PCREL_JUMP
:
26474 case BFD_RELOC_ARM_PCREL_CALL
:
26475 case BFD_RELOC_THUMB_PCREL_BLX
:
26476 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
26480 case BFD_RELOC_ARM_PCREL_BLX
:
26481 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
26482 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
26483 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26484 if (ARM_IS_FUNC (fixp
->fx_addsy
))
26493 /* Resolve these relocations even if the symbol is extern or weak.
26494 Technically this is probably wrong due to symbol preemption.
26495 In practice these relocations do not have enough range to be useful
26496 at dynamic link time, and some code (e.g. in the Linux kernel)
26497 expects these references to be resolved. */
26498 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
26499 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
26500 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
26501 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
26502 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
26503 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
26504 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
26505 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
26506 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
26507 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
26508 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
26509 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
26510 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
26511 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
26514 /* Always leave these relocations for the linker. */
26515 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
26516 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
26517 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
26520 /* Always generate relocations against function symbols. */
26521 if (fixp
->fx_r_type
== BFD_RELOC_32
26523 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
26526 return generic_force_reloc (fixp
);
26529 #if defined (OBJ_ELF) || defined (OBJ_COFF)
26530 /* Relocations against function names must be left unadjusted,
26531 so that the linker can use this information to generate interworking
26532 stubs. The MIPS version of this function
26533 also prevents relocations that are mips-16 specific, but I do not
26534 know why it does this.
26537 There is one other problem that ought to be addressed here, but
26538 which currently is not: Taking the address of a label (rather
26539 than a function) and then later jumping to that address. Such
26540 addresses also ought to have their bottom bit set (assuming that
26541 they reside in Thumb code), but at the moment they will not. */
26544 arm_fix_adjustable (fixS
* fixP
)
26546 if (fixP
->fx_addsy
== NULL
)
26549 /* Preserve relocations against symbols with function type. */
26550 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
26553 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
26554 && fixP
->fx_subsy
== NULL
)
26557 /* We need the symbol name for the VTABLE entries. */
26558 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
26559 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
26562 /* Don't allow symbols to be discarded on GOT related relocs. */
26563 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
26564 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
26565 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
26566 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
26567 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32_FDPIC
26568 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
26569 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
26570 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32_FDPIC
26571 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
26572 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32_FDPIC
26573 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
26574 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GOTDESC
26575 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_CALL
26576 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_CALL
26577 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_DESCSEQ
26578 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_DESCSEQ
26579 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
26582 /* Similarly for group relocations. */
26583 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
26584 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
26585 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
26588 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
26589 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
26590 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
26591 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
26592 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
26593 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
26594 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
26595 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
26596 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
26599 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
26600 offsets, so keep these symbols. */
26601 if (fixP
->fx_r_type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
26602 && fixP
->fx_r_type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
26607 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
26611 elf32_arm_target_format (void)
26614 return (target_big_endian
26615 ? "elf32-bigarm-symbian"
26616 : "elf32-littlearm-symbian");
26617 #elif defined (TE_VXWORKS)
26618 return (target_big_endian
26619 ? "elf32-bigarm-vxworks"
26620 : "elf32-littlearm-vxworks");
26621 #elif defined (TE_NACL)
26622 return (target_big_endian
26623 ? "elf32-bigarm-nacl"
26624 : "elf32-littlearm-nacl");
26628 if (target_big_endian
)
26629 return "elf32-bigarm-fdpic";
26631 return "elf32-littlearm-fdpic";
26635 if (target_big_endian
)
26636 return "elf32-bigarm";
26638 return "elf32-littlearm";
26644 armelf_frob_symbol (symbolS
* symp
,
26647 elf_frob_symbol (symp
, puntp
);
26651 /* MD interface: Finalization. */
26656 literal_pool
* pool
;
26658 /* Ensure that all the predication blocks are properly closed. */
26659 check_pred_blocks_finished ();
26661 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
26663 /* Put it at the end of the relevant section. */
26664 subseg_set (pool
->section
, pool
->sub_section
);
26666 arm_elf_change_section ();
26673 /* Remove any excess mapping symbols generated for alignment frags in
26674 SEC. We may have created a mapping symbol before a zero byte
26675 alignment; remove it if there's a mapping symbol after the
26678 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
26679 void *dummy ATTRIBUTE_UNUSED
)
26681 segment_info_type
*seginfo
= seg_info (sec
);
26684 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
26687 for (fragp
= seginfo
->frchainP
->frch_root
;
26689 fragp
= fragp
->fr_next
)
26691 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
26692 fragS
*next
= fragp
->fr_next
;
26694 /* Variable-sized frags have been converted to fixed size by
26695 this point. But if this was variable-sized to start with,
26696 there will be a fixed-size frag after it. So don't handle
26698 if (sym
== NULL
|| next
== NULL
)
26701 if (S_GET_VALUE (sym
) < next
->fr_address
)
26702 /* Not at the end of this frag. */
26704 know (S_GET_VALUE (sym
) == next
->fr_address
);
26708 if (next
->tc_frag_data
.first_map
!= NULL
)
26710 /* Next frag starts with a mapping symbol. Discard this
26712 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
26716 if (next
->fr_next
== NULL
)
26718 /* This mapping symbol is at the end of the section. Discard
26720 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
26721 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
26725 /* As long as we have empty frags without any mapping symbols,
26727 /* If the next frag is non-empty and does not start with a
26728 mapping symbol, then this mapping symbol is required. */
26729 if (next
->fr_address
!= next
->fr_next
->fr_address
)
26732 next
= next
->fr_next
;
26734 while (next
!= NULL
);
26739 /* Adjust the symbol table. This marks Thumb symbols as distinct from
26743 arm_adjust_symtab (void)
26748 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
26750 if (ARM_IS_THUMB (sym
))
26752 if (THUMB_IS_FUNC (sym
))
26754 /* Mark the symbol as a Thumb function. */
26755 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
26756 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
26757 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
26759 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
26760 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
26762 as_bad (_("%s: unexpected function type: %d"),
26763 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
26765 else switch (S_GET_STORAGE_CLASS (sym
))
26768 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
26771 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
26774 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
26782 if (ARM_IS_INTERWORK (sym
))
26783 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
26790 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
26792 if (ARM_IS_THUMB (sym
))
26794 elf_symbol_type
* elf_sym
;
26796 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
26797 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
26799 if (! bfd_is_arm_special_symbol_name (elf_sym
->symbol
.name
,
26800 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
26802 /* If it's a .thumb_func, declare it as so,
26803 otherwise tag label as .code 16. */
26804 if (THUMB_IS_FUNC (sym
))
26805 ARM_SET_SYM_BRANCH_TYPE (elf_sym
->internal_elf_sym
.st_target_internal
,
26806 ST_BRANCH_TO_THUMB
);
26807 else if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
26808 elf_sym
->internal_elf_sym
.st_info
=
26809 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
26814 /* Remove any overlapping mapping symbols generated by alignment frags. */
26815 bfd_map_over_sections (stdoutput
, check_mapping_symbols
, (char *) 0);
26816 /* Now do generic ELF adjustments. */
26817 elf_adjust_symtab ();
26821 /* MD interface: Initialization. */
26824 set_constant_flonums (void)
26828 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
26829 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
26833 /* Auto-select Thumb mode if it's the only available instruction set for the
26834 given architecture. */
26837 autoselect_thumb_from_cpu_variant (void)
26839 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
26840 opcode_select (16);
26849 if ( (arm_ops_hsh
= hash_new ()) == NULL
26850 || (arm_cond_hsh
= hash_new ()) == NULL
26851 || (arm_vcond_hsh
= hash_new ()) == NULL
26852 || (arm_shift_hsh
= hash_new ()) == NULL
26853 || (arm_psr_hsh
= hash_new ()) == NULL
26854 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
26855 || (arm_reg_hsh
= hash_new ()) == NULL
26856 || (arm_reloc_hsh
= hash_new ()) == NULL
26857 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
26858 as_fatal (_("virtual memory exhausted"));
26860 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
26861 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
26862 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
26863 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
26864 for (i
= 0; i
< sizeof (vconds
) / sizeof (struct asm_cond
); i
++)
26865 hash_insert (arm_vcond_hsh
, vconds
[i
].template_name
, (void *) (vconds
+ i
));
26866 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
26867 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
26868 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
26869 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
26870 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
26871 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
26872 (void *) (v7m_psrs
+ i
));
26873 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
26874 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
26876 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
26878 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
26879 (void *) (barrier_opt_names
+ i
));
26881 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
26883 struct reloc_entry
* entry
= reloc_names
+ i
;
26885 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
26886 /* This makes encode_branch() use the EABI versions of this relocation. */
26887 entry
->reloc
= BFD_RELOC_UNUSED
;
26889 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
26893 set_constant_flonums ();
26895 /* Set the cpu variant based on the command-line options. We prefer
26896 -mcpu= over -march= if both are set (as for GCC); and we prefer
26897 -mfpu= over any other way of setting the floating point unit.
26898 Use of legacy options with new options are faulted. */
26901 if (mcpu_cpu_opt
|| march_cpu_opt
)
26902 as_bad (_("use of old and new-style options to set CPU type"));
26904 selected_arch
= *legacy_cpu
;
26906 else if (mcpu_cpu_opt
)
26908 selected_arch
= *mcpu_cpu_opt
;
26909 selected_ext
= *mcpu_ext_opt
;
26911 else if (march_cpu_opt
)
26913 selected_arch
= *march_cpu_opt
;
26914 selected_ext
= *march_ext_opt
;
26916 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
26921 as_bad (_("use of old and new-style options to set FPU type"));
26923 selected_fpu
= *legacy_fpu
;
26926 selected_fpu
= *mfpu_opt
;
26929 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
26930 || defined (TE_NetBSD) || defined (TE_VXWORKS))
26931 /* Some environments specify a default FPU. If they don't, infer it
26932 from the processor. */
26934 selected_fpu
= *mcpu_fpu_opt
;
26935 else if (march_fpu_opt
)
26936 selected_fpu
= *march_fpu_opt
;
26938 selected_fpu
= fpu_default
;
26942 if (ARM_FEATURE_ZERO (selected_fpu
))
26944 if (!no_cpu_selected ())
26945 selected_fpu
= fpu_default
;
26947 selected_fpu
= fpu_arch_fpa
;
26951 if (ARM_FEATURE_ZERO (selected_arch
))
26953 selected_arch
= cpu_default
;
26954 selected_cpu
= selected_arch
;
26956 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
26958 /* Autodection of feature mode: allow all features in cpu_variant but leave
26959 selected_cpu unset. It will be set in aeabi_set_public_attributes ()
26960 after all instruction have been processed and we can decide what CPU
26961 should be selected. */
26962 if (ARM_FEATURE_ZERO (selected_arch
))
26963 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
26965 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
26968 autoselect_thumb_from_cpu_variant ();
26970 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
26972 #if defined OBJ_COFF || defined OBJ_ELF
26974 unsigned int flags
= 0;
26976 #if defined OBJ_ELF
26977 flags
= meabi_flags
;
26979 switch (meabi_flags
)
26981 case EF_ARM_EABI_UNKNOWN
:
26983 /* Set the flags in the private structure. */
26984 if (uses_apcs_26
) flags
|= F_APCS26
;
26985 if (support_interwork
) flags
|= F_INTERWORK
;
26986 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
26987 if (pic_code
) flags
|= F_PIC
;
26988 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
26989 flags
|= F_SOFT_FLOAT
;
26991 switch (mfloat_abi_opt
)
26993 case ARM_FLOAT_ABI_SOFT
:
26994 case ARM_FLOAT_ABI_SOFTFP
:
26995 flags
|= F_SOFT_FLOAT
;
26998 case ARM_FLOAT_ABI_HARD
:
26999 if (flags
& F_SOFT_FLOAT
)
27000 as_bad (_("hard-float conflicts with specified fpu"));
27004 /* Using pure-endian doubles (even if soft-float). */
27005 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
27006 flags
|= F_VFP_FLOAT
;
27008 #if defined OBJ_ELF
27009 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
27010 flags
|= EF_ARM_MAVERICK_FLOAT
;
27013 case EF_ARM_EABI_VER4
:
27014 case EF_ARM_EABI_VER5
:
27015 /* No additional flags to set. */
27022 bfd_set_private_flags (stdoutput
, flags
);
27024 /* We have run out flags in the COFF header to encode the
27025 status of ATPCS support, so instead we create a dummy,
27026 empty, debug section called .arm.atpcs. */
27031 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
27035 bfd_set_section_flags
27036 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
27037 bfd_set_section_size (stdoutput
, sec
, 0);
27038 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
27044 /* Record the CPU type as well. */
27045 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
27046 mach
= bfd_mach_arm_iWMMXt2
;
27047 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
27048 mach
= bfd_mach_arm_iWMMXt
;
27049 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
27050 mach
= bfd_mach_arm_XScale
;
27051 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
27052 mach
= bfd_mach_arm_ep9312
;
27053 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
27054 mach
= bfd_mach_arm_5TE
;
27055 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
27057 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
27058 mach
= bfd_mach_arm_5T
;
27060 mach
= bfd_mach_arm_5
;
27062 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
27064 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
27065 mach
= bfd_mach_arm_4T
;
27067 mach
= bfd_mach_arm_4
;
27069 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
27070 mach
= bfd_mach_arm_3M
;
27071 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
27072 mach
= bfd_mach_arm_3
;
27073 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
27074 mach
= bfd_mach_arm_2a
;
27075 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
27076 mach
= bfd_mach_arm_2
;
27078 mach
= bfd_mach_arm_unknown
;
27080 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
27083 /* Command line processing. */
27086 Invocation line includes a switch not recognized by the base assembler.
27087 See if it's a processor-specific option.
27089 This routine is somewhat complicated by the need for backwards
27090 compatibility (since older releases of gcc can't be changed).
27091 The new options try to make the interface as compatible as
27094 New options (supported) are:
27096 -mcpu=<cpu name> Assemble for selected processor
27097 -march=<architecture name> Assemble for selected architecture
27098 -mfpu=<fpu architecture> Assemble for selected FPU.
27099 -EB/-mbig-endian Big-endian
27100 -EL/-mlittle-endian Little-endian
27101 -k Generate PIC code
27102 -mthumb Start in Thumb mode
27103 -mthumb-interwork Code supports ARM/Thumb interworking
27105 -m[no-]warn-deprecated Warn about deprecated features
27106 -m[no-]warn-syms Warn when symbols match instructions
27108 For now we will also provide support for:
27110 -mapcs-32 32-bit Program counter
27111 -mapcs-26 26-bit Program counter
27112 -mapcs-float Floats passed in FP registers
27113 -mapcs-reentrant Reentrant code
27115 (sometime these will probably be replaced with -mapcs=<list of options>
27116 and -matpcs=<list of options>)
27118 The remaining options are only supported for backwards compatibility.
27119 Cpu variants, the arm part is optional:
27120 -m[arm]1 Currently not supported.
27121 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
27122 -m[arm]3 Arm 3 processor
27123 -m[arm]6[xx], Arm 6 processors
27124 -m[arm]7[xx][t][[d]m] Arm 7 processors
27125 -m[arm]8[10] Arm 8 processors
27126 -m[arm]9[20][tdmi] Arm 9 processors
27127 -mstrongarm[110[0]] StrongARM processors
27128 -mxscale XScale processors
27129 -m[arm]v[2345[t[e]]] Arm architectures
27130 -mall All (except the ARM1)
27132 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
27133 -mfpe-old (No float load/store multiples)
27134 -mvfpxd VFP Single precision
27136 -mno-fpu Disable all floating point instructions
27138 The following CPU names are recognized:
27139 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
27140 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
27141 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
27142 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
27143 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
27144 arm10t arm10e, arm1020t, arm1020e, arm10200e,
27145 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
/* Short options recognized by this backend: -m<arg> (dispatched to the
   option tables below by md_parse_option) and -k (generate PIC code).  */
const char * md_shortopts = "m:k";
/* Long-option codes private to this backend, allocated above
   OPTION_MD_BASE.  -EB/-EL only exist when the target can be (or is)
   big-endian; the extraction had lost the #else/#endif lines of this
   conditional, restored here.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
#define OPTION_FDPIC (OPTION_MD_BASE + 3)
27164 struct option md_longopts
[] =
27167 {"EB", no_argument
, NULL
, OPTION_EB
},
27170 {"EL", no_argument
, NULL
, OPTION_EL
},
27172 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
27174 {"fdpic", no_argument
, NULL
, OPTION_FDPIC
},
27176 {NULL
, no_argument
, NULL
, 0}
/* Size in bytes of md_longopts, consumed by the generic gas option
   parser to iterate the table.  */
size_t md_longopts_size = sizeof (md_longopts);
/* Describes a simple command-line option that stores a fixed VALUE into
   an integer VAR when seen (see arm_opts below).  The brace lines of
   this definition were lost in extraction and are restored.  */
struct arm_option_table
{
  const char * option;		/* Option name to match.  */
  const char * help;		/* Help information.  */
  int * var;			/* Variable to change.  */
  int value;			/* What to change it to.  */
  const char * deprecated;	/* If non-null, print this message.  */
};
27190 struct arm_option_table arm_opts
[] =
27192 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
27193 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
27194 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
27195 &support_interwork
, 1, NULL
},
27196 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
27197 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
27198 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
27200 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
27201 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
27202 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
27203 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
27206 /* These are recognized by the assembler, but have no affect on code. */
27207 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
27208 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
27210 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
27211 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
27212 &warn_on_deprecated
, 0, NULL
},
27213 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
27214 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
27215 {NULL
, NULL
, NULL
, 0, NULL
}
27218 struct arm_legacy_option_table
27220 const char * option
; /* Option name to match. */
27221 const arm_feature_set
** var
; /* Variable to change. */
27222 const arm_feature_set value
; /* What to change it to. */
27223 const char * deprecated
; /* If non-null, print this message. */
27226 const struct arm_legacy_option_table arm_legacy_opts
[] =
27228 /* DON'T add any new processors to this list -- we want the whole list
27229 to go away... Add them to the processors table instead. */
27230 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
27231 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
27232 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
27233 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
27234 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
27235 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
27236 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
27237 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
27238 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
27239 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
27240 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
27241 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
27242 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
27243 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
27244 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
27245 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
27246 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
27247 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
27248 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
27249 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
27250 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
27251 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
27252 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
27253 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
27254 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
27255 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
27256 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
27257 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
27258 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
27259 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
27260 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
27261 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
27262 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
27263 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
27264 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
27265 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
27266 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
27267 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
27268 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
27269 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
27270 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
27271 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
27272 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
27273 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
27274 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
27275 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
27276 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27277 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27278 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27279 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27280 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
27281 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
27282 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
27283 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
27284 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
27285 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
27286 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
27287 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
27288 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
27289 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
27290 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
27291 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
27292 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
27293 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
27294 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
27295 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
27296 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
27297 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
27298 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
27299 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
27300 N_("use -mcpu=strongarm110")},
27301 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
27302 N_("use -mcpu=strongarm1100")},
27303 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
27304 N_("use -mcpu=strongarm1110")},
27305 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
27306 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
27307 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
27309 /* Architecture variants -- don't add any more to this list either. */
27310 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
27311 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
27312 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
27313 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
27314 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
27315 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
27316 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
27317 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
27318 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
27319 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
27320 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
27321 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
27322 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
27323 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
27324 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
27325 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
27326 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
27327 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
27329 /* Floating point variants -- don't add any more to this list either. */
27330 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
27331 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
27332 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
27333 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
27334 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
27336 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
27339 struct arm_cpu_option_table
27343 const arm_feature_set value
;
27344 const arm_feature_set ext
;
27345 /* For some CPUs we assume an FPU unless the user explicitly sets
27347 const arm_feature_set default_fpu
;
27348 /* The canonical name of the CPU, or NULL to use NAME converted to upper
27350 const char * canonical_name
;
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Builds an arm_cpu_option_table initializer: option name N, its length,
   core features V, extensions E, default FPU DF, canonical name CN.  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
27357 static const struct arm_cpu_option_table arm_cpus
[] =
27359 ARM_CPU_OPT ("all", NULL
, ARM_ANY
,
27362 ARM_CPU_OPT ("arm1", NULL
, ARM_ARCH_V1
,
27365 ARM_CPU_OPT ("arm2", NULL
, ARM_ARCH_V2
,
27368 ARM_CPU_OPT ("arm250", NULL
, ARM_ARCH_V2S
,
27371 ARM_CPU_OPT ("arm3", NULL
, ARM_ARCH_V2S
,
27374 ARM_CPU_OPT ("arm6", NULL
, ARM_ARCH_V3
,
27377 ARM_CPU_OPT ("arm60", NULL
, ARM_ARCH_V3
,
27380 ARM_CPU_OPT ("arm600", NULL
, ARM_ARCH_V3
,
27383 ARM_CPU_OPT ("arm610", NULL
, ARM_ARCH_V3
,
27386 ARM_CPU_OPT ("arm620", NULL
, ARM_ARCH_V3
,
27389 ARM_CPU_OPT ("arm7", NULL
, ARM_ARCH_V3
,
27392 ARM_CPU_OPT ("arm7m", NULL
, ARM_ARCH_V3M
,
27395 ARM_CPU_OPT ("arm7d", NULL
, ARM_ARCH_V3
,
27398 ARM_CPU_OPT ("arm7dm", NULL
, ARM_ARCH_V3M
,
27401 ARM_CPU_OPT ("arm7di", NULL
, ARM_ARCH_V3
,
27404 ARM_CPU_OPT ("arm7dmi", NULL
, ARM_ARCH_V3M
,
27407 ARM_CPU_OPT ("arm70", NULL
, ARM_ARCH_V3
,
27410 ARM_CPU_OPT ("arm700", NULL
, ARM_ARCH_V3
,
27413 ARM_CPU_OPT ("arm700i", NULL
, ARM_ARCH_V3
,
27416 ARM_CPU_OPT ("arm710", NULL
, ARM_ARCH_V3
,
27419 ARM_CPU_OPT ("arm710t", NULL
, ARM_ARCH_V4T
,
27422 ARM_CPU_OPT ("arm720", NULL
, ARM_ARCH_V3
,
27425 ARM_CPU_OPT ("arm720t", NULL
, ARM_ARCH_V4T
,
27428 ARM_CPU_OPT ("arm740t", NULL
, ARM_ARCH_V4T
,
27431 ARM_CPU_OPT ("arm710c", NULL
, ARM_ARCH_V3
,
27434 ARM_CPU_OPT ("arm7100", NULL
, ARM_ARCH_V3
,
27437 ARM_CPU_OPT ("arm7500", NULL
, ARM_ARCH_V3
,
27440 ARM_CPU_OPT ("arm7500fe", NULL
, ARM_ARCH_V3
,
27443 ARM_CPU_OPT ("arm7t", NULL
, ARM_ARCH_V4T
,
27446 ARM_CPU_OPT ("arm7tdmi", NULL
, ARM_ARCH_V4T
,
27449 ARM_CPU_OPT ("arm7tdmi-s", NULL
, ARM_ARCH_V4T
,
27452 ARM_CPU_OPT ("arm8", NULL
, ARM_ARCH_V4
,
27455 ARM_CPU_OPT ("arm810", NULL
, ARM_ARCH_V4
,
27458 ARM_CPU_OPT ("strongarm", NULL
, ARM_ARCH_V4
,
27461 ARM_CPU_OPT ("strongarm1", NULL
, ARM_ARCH_V4
,
27464 ARM_CPU_OPT ("strongarm110", NULL
, ARM_ARCH_V4
,
27467 ARM_CPU_OPT ("strongarm1100", NULL
, ARM_ARCH_V4
,
27470 ARM_CPU_OPT ("strongarm1110", NULL
, ARM_ARCH_V4
,
27473 ARM_CPU_OPT ("arm9", NULL
, ARM_ARCH_V4T
,
27476 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T
,
27479 ARM_CPU_OPT ("arm920t", NULL
, ARM_ARCH_V4T
,
27482 ARM_CPU_OPT ("arm922t", NULL
, ARM_ARCH_V4T
,
27485 ARM_CPU_OPT ("arm940t", NULL
, ARM_ARCH_V4T
,
27488 ARM_CPU_OPT ("arm9tdmi", NULL
, ARM_ARCH_V4T
,
27491 ARM_CPU_OPT ("fa526", NULL
, ARM_ARCH_V4
,
27494 ARM_CPU_OPT ("fa626", NULL
, ARM_ARCH_V4
,
27498 /* For V5 or later processors we default to using VFP; but the user
27499 should really set the FPU type explicitly. */
27500 ARM_CPU_OPT ("arm9e-r0", NULL
, ARM_ARCH_V5TExP
,
27503 ARM_CPU_OPT ("arm9e", NULL
, ARM_ARCH_V5TE
,
27506 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
27509 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
27512 ARM_CPU_OPT ("arm926ej-s", NULL
, ARM_ARCH_V5TEJ
,
27515 ARM_CPU_OPT ("arm946e-r0", NULL
, ARM_ARCH_V5TExP
,
27518 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE
,
27521 ARM_CPU_OPT ("arm946e-s", NULL
, ARM_ARCH_V5TE
,
27524 ARM_CPU_OPT ("arm966e-r0", NULL
, ARM_ARCH_V5TExP
,
27527 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE
,
27530 ARM_CPU_OPT ("arm966e-s", NULL
, ARM_ARCH_V5TE
,
27533 ARM_CPU_OPT ("arm968e-s", NULL
, ARM_ARCH_V5TE
,
27536 ARM_CPU_OPT ("arm10t", NULL
, ARM_ARCH_V5T
,
27539 ARM_CPU_OPT ("arm10tdmi", NULL
, ARM_ARCH_V5T
,
27542 ARM_CPU_OPT ("arm10e", NULL
, ARM_ARCH_V5TE
,
27545 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE
,
27548 ARM_CPU_OPT ("arm1020t", NULL
, ARM_ARCH_V5T
,
27551 ARM_CPU_OPT ("arm1020e", NULL
, ARM_ARCH_V5TE
,
27554 ARM_CPU_OPT ("arm1022e", NULL
, ARM_ARCH_V5TE
,
27557 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ
,
27560 ARM_CPU_OPT ("arm1026ej-s", NULL
, ARM_ARCH_V5TEJ
,
27563 ARM_CPU_OPT ("fa606te", NULL
, ARM_ARCH_V5TE
,
27566 ARM_CPU_OPT ("fa616te", NULL
, ARM_ARCH_V5TE
,
27569 ARM_CPU_OPT ("fa626te", NULL
, ARM_ARCH_V5TE
,
27572 ARM_CPU_OPT ("fmp626", NULL
, ARM_ARCH_V5TE
,
27575 ARM_CPU_OPT ("fa726te", NULL
, ARM_ARCH_V5TE
,
27578 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6
,
27581 ARM_CPU_OPT ("arm1136j-s", NULL
, ARM_ARCH_V6
,
27584 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6
,
27587 ARM_CPU_OPT ("arm1136jf-s", NULL
, ARM_ARCH_V6
,
27590 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K
,
27593 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K
,
27596 ARM_CPU_OPT ("arm1156t2-s", NULL
, ARM_ARCH_V6T2
,
27599 ARM_CPU_OPT ("arm1156t2f-s", NULL
, ARM_ARCH_V6T2
,
27602 ARM_CPU_OPT ("arm1176jz-s", NULL
, ARM_ARCH_V6KZ
,
27605 ARM_CPU_OPT ("arm1176jzf-s", NULL
, ARM_ARCH_V6KZ
,
27608 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A
,
27609 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
27611 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE
,
27613 FPU_ARCH_NEON_VFP_V4
),
27614 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A
,
27615 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
27616 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
27617 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A
,
27618 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
27619 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
27620 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE
,
27622 FPU_ARCH_NEON_VFP_V4
),
27623 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE
,
27625 FPU_ARCH_NEON_VFP_V4
),
27626 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE
,
27628 FPU_ARCH_NEON_VFP_V4
),
27629 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A
,
27630 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27631 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27632 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A
,
27633 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27634 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27635 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A
,
27636 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27637 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27638 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A
,
27639 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27640 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27641 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A
,
27642 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27643 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27644 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A
,
27645 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27646 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27647 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A
,
27648 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27649 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27650 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A
,
27651 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27652 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27653 ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A
,
27654 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27655 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27656 ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A
,
27657 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27658 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27659 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R
,
27662 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R
,
27664 FPU_ARCH_VFP_V3D16
),
27665 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R
,
27666 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
27668 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R
,
27669 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
27670 FPU_ARCH_VFP_V3D16
),
27671 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R
,
27672 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
27673 FPU_ARCH_VFP_V3D16
),
27674 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R
,
27675 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27676 FPU_ARCH_NEON_VFP_ARMV8
),
27677 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN
,
27678 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27680 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE
,
27683 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM
,
27686 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM
,
27689 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M
,
27692 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM
,
27695 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM
,
27698 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM
,
27701 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A
,
27702 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27703 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27704 ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A
,
27705 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27706 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27707 /* ??? XSCALE is really an architecture. */
27708 ARM_CPU_OPT ("xscale", NULL
, ARM_ARCH_XSCALE
,
27712 /* ??? iwmmxt is not a processor. */
27713 ARM_CPU_OPT ("iwmmxt", NULL
, ARM_ARCH_IWMMXT
,
27716 ARM_CPU_OPT ("iwmmxt2", NULL
, ARM_ARCH_IWMMXT2
,
27719 ARM_CPU_OPT ("i80200", NULL
, ARM_ARCH_XSCALE
,
27724 ARM_CPU_OPT ("ep9312", "ARM920T",
27725 ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
27726 ARM_ARCH_NONE
, FPU_ARCH_MAVERICK
),
27728 /* Marvell processors. */
27729 ARM_CPU_OPT ("marvell-pj4", NULL
, ARM_ARCH_V7A
,
27730 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
27731 FPU_ARCH_VFP_V3D16
),
27732 ARM_CPU_OPT ("marvell-whitney", NULL
, ARM_ARCH_V7A
,
27733 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
27734 FPU_ARCH_NEON_VFP_V4
),
27736 /* APM X-Gene family. */
27737 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A
,
27739 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27740 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A
,
27741 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27742 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27744 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
27748 struct arm_ext_table
27752 const arm_feature_set merge
;
27753 const arm_feature_set clear
;
27756 struct arm_arch_option_table
27760 const arm_feature_set value
;
27761 const arm_feature_set default_fpu
;
27762 const struct arm_ext_table
* ext_table
;
/* Used to add support for +E and +noE extension.  */
#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
/* Used to add support for a +E extension.  */
#define ARM_ADD(E, M) { E, sizeof(E) - 1, M, ARM_ARCH_NONE }
/* Used to add support for a +noE extension.  */
#define ARM_REMOVE(E, C) { E, sizeof(E) -1, ARM_ARCH_NONE, C }

/* Mask covering the FP-related feature bits (used below as the "clear"
   set for +nofp), excluding the FPU endianness-selection bit.  */
#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
			    ~0 & ~FPU_ENDIAN_PURE)
27775 static const struct arm_ext_table armv5te_ext_table
[] =
27777 ARM_EXT ("fp", FPU_ARCH_VFP_V2
, ALL_FP
),
27778 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27781 static const struct arm_ext_table armv7_ext_table
[] =
27783 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
27784 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27787 static const struct arm_ext_table armv7ve_ext_table
[] =
27789 ARM_EXT ("fp", FPU_ARCH_VFP_V4D16
, ALL_FP
),
27790 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
),
27791 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3
),
27792 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
27793 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
),
27794 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16
), /* Alias for +fp. */
27795 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4
),
27797 ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4
,
27798 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_NEON_EXT_FMA
)),
27800 /* Aliases for +simd. */
27801 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4
),
27803 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
27804 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
27805 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16
),
27807 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27810 static const struct arm_ext_table armv7a_ext_table
[] =
27812 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
27813 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
27814 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3
),
27815 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
27816 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
),
27817 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16
),
27818 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4
),
27820 ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1
,
27821 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_NEON_EXT_FMA
)),
27823 /* Aliases for +simd. */
27824 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
27825 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
27827 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16
),
27828 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4
),
27830 ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
)),
27831 ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
)),
27832 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27835 static const struct arm_ext_table armv7r_ext_table
[] =
27837 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD
),
27838 ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD
), /* Alias for +fp.sp. */
27839 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
27840 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
27841 ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
),
27842 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
27843 ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
27844 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
)),
27845 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27848 static const struct arm_ext_table armv7em_ext_table
[] =
27850 ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16
, ALL_FP
),
27851 /* Alias for +fp, used to be known as fpv4-sp-d16. */
27852 ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
),
27853 ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16
),
27854 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
27855 ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16
),
27856 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27859 static const struct arm_ext_table armv8a_ext_table
[] =
27861 ARM_ADD ("crc", ARCH_CRC_ARMV8
),
27862 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
27863 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
27864 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27866 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27867 should use the +simd option to turn on FP. */
27868 ARM_REMOVE ("fp", ALL_FP
),
27869 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
27870 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
27871 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27875 static const struct arm_ext_table armv81a_ext_table
[] =
27877 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
27878 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
27879 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27881 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27882 should use the +simd option to turn on FP. */
27883 ARM_REMOVE ("fp", ALL_FP
),
27884 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
27885 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
27886 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27889 static const struct arm_ext_table armv82a_ext_table
[] =
27891 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
27892 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16
),
27893 ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML
),
27894 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
27895 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27896 ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
27898 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27899 should use the +simd option to turn on FP. */
27900 ARM_REMOVE ("fp", ALL_FP
),
27901 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
27902 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
27903 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27906 static const struct arm_ext_table armv84a_ext_table
[] =
27908 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
27909 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
27910 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
27911 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27913 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27914 should use the +simd option to turn on FP. */
27915 ARM_REMOVE ("fp", ALL_FP
),
27916 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
27917 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
27918 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27921 static const struct arm_ext_table armv85a_ext_table
[] =
27923 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
27924 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
27925 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
27926 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27928 /* Armv8-a does not allow an FP implementation without SIMD, so the user
27929 should use the +simd option to turn on FP. */
27930 ARM_REMOVE ("fp", ALL_FP
),
27931 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27934 static const struct arm_ext_table armv8m_main_ext_table
[] =
27936 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27937 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
27938 ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16
, ALL_FP
),
27939 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
27940 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27943 static const struct arm_ext_table armv8_1m_main_ext_table
[] =
27945 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27946 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
27948 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
27949 FPU_VFP_V5_SP_D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
),
27952 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
27953 FPU_VFP_V5D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
27954 ARM_EXT ("mve", ARM_FEATURE_COPROC (FPU_MVE
),
27955 ARM_FEATURE_COPROC (FPU_MVE
| FPU_MVE_FP
)),
27957 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
27958 FPU_MVE
| FPU_MVE_FP
| FPU_VFP_V5_SP_D16
|
27959 FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
27960 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
27963 static const struct arm_ext_table armv8r_ext_table
[] =
27965 ARM_ADD ("crc", ARCH_CRC_ARMV8
),
27966 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
27967 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
27968 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
27969 ARM_REMOVE ("fp", ALL_FP
),
27970 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16
),
27971 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
/* Builds an arm_arch_option_table initializer with no +ext table.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
/* As above, but links the architecture to <ext>_ext_table so that
   +extension suffixes can be parsed for it.  */
#define ARM_ARCH_OPT2(N, V, DF, ext) \
  { N, sizeof (N) - 1, V, DF, ext##_ext_table }
/* -march= name table mapping each architecture string to its feature set,
   default FPU, and (for OPT2 entries) its extension table.
   NOTE(review): garbled extraction — braces and parts of some entries
   (e.g. armv8-m.main, armv8.1-m.main) were dropped.  */
27980 static const struct arm_arch_option_table arm_archs
[] =
27982 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
27983 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
27984 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
27985 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
27986 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
27987 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
27988 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
27989 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
27990 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
27991 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
27992 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
27993 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
27994 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
27995 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
27996 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
, armv5te
),
27997 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
, armv5te
),
27998 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
, armv5te
),
27999 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
28000 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
28001 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
, armv5te
),
28002 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
, armv5te
),
28003 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
28004 kept to preserve existing behaviour. */
28005 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
28006 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
28007 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
, armv5te
),
28008 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
, armv5te
),
28009 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
, armv5te
),
28010 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
28011 kept to preserve existing behaviour. */
28012 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
28013 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
28014 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
28015 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
28016 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
, armv7
),
28017 /* The official spelling of the ARMv7 profile variants is the dashed form.
28018 Accept the non-dashed form for compatibility with old toolchains. */
28019 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
28020 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
, armv7ve
),
28021 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
28022 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
28023 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
28024 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
28025 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
28026 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
, armv7em
),
28027 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
/* NOTE(review): the trailing ext-table argument and closing paren of the
   next two entries were dropped by the extraction.  */
28028 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
,
28030 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN
, FPU_ARCH_VFP
,
28032 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
, armv8a
),
28033 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
, armv81a
),
28034 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
, armv82a
),
/* armv8.3-a deliberately reuses the armv82a extension table.  */
28035 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A
, FPU_ARCH_VFP
, armv82a
),
28036 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R
, FPU_ARCH_VFP
, armv8r
),
28037 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A
, FPU_ARCH_VFP
, armv84a
),
28038 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A
, FPU_ARCH_VFP
, armv85a
),
28039 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
28040 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
28041 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
, FPU_ARCH_VFP
),
/* Sentinel terminating the table.  */
28042 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
28044 #undef ARM_ARCH_OPT
28046 /* ISA extensions in the co-processor and main instruction set space. */
/* NOTE(review): leading name/name_len fields of this struct were dropped by
   the garbled extraction; verify against upstream.  */
28048 struct arm_option_extension_value_table
28052 const arm_feature_set merge_value
;
28053 const arm_feature_set clear_value
;
28054 /* List of architectures for which an extension is available. ARM_ARCH_NONE
28055 indicates that an extension is available for all architectures while
28056 ARM_ANY marks an empty entry. */
28057 const arm_feature_set allowed_archs
[2];
28060 /* The following table must be in alphabetical order with a NULL last entry. */
/* ARM_EXT_OPT allows one architecture constraint; ARM_EXT_OPT2 allows two.  */
28062 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
28063 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
28065 /* DEPRECATED: Refrain from using this table to add any new extensions, instead
28066 use the context sensitive approach using arm_ext_table's. */
/* Legacy global +<ext> table: each entry gives the feature bits to merge on
   add, the bits to clear on "no<ext>", and the architectures the extension
   applies to.  Must stay alphabetically sorted (arm_parse_extension relies
   on it).  NOTE(review): extraction dropped braces and some allowed_archs
   arguments below.  */
28067 static const struct arm_option_extension_value_table arm_extensions
[] =
28069 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28070 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28071 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
28072 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
28073 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28074 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
,
28075 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
),
28077 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28078 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28079 ARM_FEATURE_CORE (ARM_EXT_V7M
, ARM_EXT2_V8M
)),
28080 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
28081 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28082 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28083 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28085 ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
28086 | ARM_EXT2_FP16_FML
),
28087 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
28088 | ARM_EXT2_FP16_FML
),
28090 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
28091 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
28092 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
28093 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
28094 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
28095 Thumb divide instruction. Due to this having the same name as the
28096 previous entry, this will be ignored when doing command-line parsing and
28097 only considered by build attribute selection code. */
28098 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
28099 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
28100 ARM_FEATURE_CORE_LOW (ARM_EXT_V7
)),
28101 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
28102 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ARCH_NONE
),
28103 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
28104 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ARCH_NONE
),
28105 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
28106 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ARCH_NONE
),
28107 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
28108 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
28109 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
28110 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
28111 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
28112 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
28113 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
28114 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
28115 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
28116 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
28117 ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
28118 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
28120 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
),
28121 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_RAS
, 0),
28122 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
28123 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1
,
28124 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
28125 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
28126 ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
28127 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
28129 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
28130 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
28131 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
),
28132 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
28133 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
28134 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
28135 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
/* NOTE(review): virt's merge argument is truncated here (extraction
   dropped intervening lines).  */
28136 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
28138 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
28139 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
28140 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
28141 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ARCH_NONE
),
/* Sentinel terminating the table.  */
28142 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, { ARM_ARCH_NONE
, ARM_ARCH_NONE
} }
28146 /* ISA floating-point and Advanced SIMD extensions. */
/* NOTE(review): the name field of this struct was dropped by the garbled
   extraction; only the feature-set value field is visible.  */
28147 struct arm_option_fpu_value_table
28150 const arm_feature_set value
;
28153 /* This list should, at a minimum, contain all the fpu names
28154 recognized by GCC. */
/* -mfpu= name table mapping each FPU string to its coprocessor feature set.
   NULL-name sentinel terminates.  NOTE(review): braces dropped by the
   garbled extraction.  */
28155 static const struct arm_option_fpu_value_table arm_fpus
[] =
28157 {"softfpa", FPU_NONE
},
28158 {"fpe", FPU_ARCH_FPE
},
28159 {"fpe2", FPU_ARCH_FPE
},
28160 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
28161 {"fpa", FPU_ARCH_FPA
},
28162 {"fpa10", FPU_ARCH_FPA
},
28163 {"fpa11", FPU_ARCH_FPA
},
28164 {"arm7500fe", FPU_ARCH_FPA
},
28165 {"softvfp", FPU_ARCH_VFP
},
28166 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
28167 {"vfp", FPU_ARCH_VFP_V2
},
28168 {"vfp9", FPU_ARCH_VFP_V2
},
28169 {"vfp3", FPU_ARCH_VFP_V3
}, /* Undocumented, use vfpv3. */
28170 {"vfp10", FPU_ARCH_VFP_V2
},
28171 {"vfp10-r0", FPU_ARCH_VFP_V1
},
28172 {"vfpxd", FPU_ARCH_VFP_V1xD
},
28173 {"vfpv2", FPU_ARCH_VFP_V2
},
28174 {"vfpv3", FPU_ARCH_VFP_V3
},
28175 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
28176 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
28177 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
28178 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
28179 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
28180 {"arm1020t", FPU_ARCH_VFP_V1
},
28181 {"arm1020e", FPU_ARCH_VFP_V2
},
28182 {"arm1136jfs", FPU_ARCH_VFP_V2
}, /* Undocumented, use arm1136jf-s. */
28183 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
28184 {"maverick", FPU_ARCH_MAVERICK
},
28185 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
28186 {"neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
28187 {"neon-fp16", FPU_ARCH_NEON_FP16
},
28188 {"vfpv4", FPU_ARCH_VFP_V4
},
28189 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
28190 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
28191 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
28192 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
28193 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
28194 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
28195 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
28196 {"crypto-neon-fp-armv8",
28197 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
28198 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
28199 {"crypto-neon-fp-armv8.1",
28200 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
/* Sentinel terminating the table.  */
28201 {NULL
, ARM_ARCH_NONE
}
/* Generic name -> integer-value option table (fields dropped by the garbled
   extraction; presumably name and value — TODO confirm upstream).  */
28204 struct arm_option_value_table
/* -mfloat-abi= choices.  */
28210 static const struct arm_option_value_table arm_float_abis
[] =
28212 {"hard", ARM_FLOAT_ABI_HARD
},
28213 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
28214 {"soft", ARM_FLOAT_ABI_SOFT
},
28219 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
28220 static const struct arm_option_value_table arm_eabis
[] =
28222 {"gnu", EF_ARM_EABI_UNKNOWN
},
28223 {"4", EF_ARM_EABI_VER4
},
28224 {"5", EF_ARM_EABI_VER5
},
/* Long (string-prefixed) option descriptor used by md_parse_option.  */
28229 struct arm_long_option_table
28231 const char * option
; /* Substring to match. */
28232 const char * help
; /* Help information. */
28233 int (* func
) (const char * subopt
); /* Function to decode sub-option. */
28234 const char * deprecated
; /* If non-null, print this message. */
/* Parse a '+'-separated extension suffix STR (e.g. "+crc+nofp") applied on
   top of the architecture/CPU feature set *OPT_SET, accumulating the result
   into *EXT_SET.  When EXT_TABLE is non-NULL the per-architecture table is
   consulted first; otherwise only the legacy global arm_extensions table is
   used.  Errors are reported with as_bad.  NOTE(review): garbled extraction —
   braces, the return statements and several statements are missing below.  */
28238 arm_parse_extension (const char *str
, const arm_feature_set
*opt_set
,
28239 arm_feature_set
*ext_set
,
28240 const struct arm_ext_table
*ext_table
)
28242 /* We insist on extensions being specified in alphabetical order, and with
28243 extensions being added before being removed. We achieve this by having
28244 the global ARM_EXTENSIONS table in alphabetical order, and using the
28245 ADDING_VALUE variable to indicate whether we are adding an extension (1)
28246 or removing it (0) and only allowing it to change in the order
28248 const struct arm_option_extension_value_table
* opt
= NULL
;
28249 const arm_feature_set arm_any
= ARM_ANY
;
/* adding_value: -1 = undecided, 1 = adding, 0 = removing ("no" prefix).  */
28250 int adding_value
= -1;
28252 while (str
!= NULL
&& *str
!= 0)
/* Each chunk must start with '+'; otherwise it is malformed.  */
28259 as_bad (_("invalid architectural extension"));
28264 ext
= strchr (str
, '+');
28269 len
= strlen (str
);
/* A leading "no" flips us into removal mode.  */
28271 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
28273 if (adding_value
!= 0)
28276 opt
= arm_extensions
;
28284 if (adding_value
== -1)
28287 opt
= arm_extensions
;
28289 else if (adding_value
!= 1)
28291 as_bad (_("must specify extensions to add before specifying "
28292 "those to remove"));
28299 as_bad (_("missing architectural extension"));
28303 gas_assert (adding_value
!= -1);
28304 gas_assert (opt
!= NULL
);
/* Context-sensitive path: search the per-architecture table first.  */
28306 if (ext_table
!= NULL
)
28308 const struct arm_ext_table
* ext_opt
= ext_table
;
28309 bfd_boolean found
= FALSE
;
28310 for (; ext_opt
->name
!= NULL
; ext_opt
++)
28311 if (ext_opt
->name_len
== len
28312 && strncmp (ext_opt
->name
, str
, len
) == 0)
28316 if (ARM_FEATURE_ZERO (ext_opt
->merge
))
28317 /* TODO: Option not supported. When we remove the
28318 legacy table this case should error out. */
28321 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, ext_opt
->merge
);
28325 if (ARM_FEATURE_ZERO (ext_opt
->clear
))
28326 /* TODO: Option not supported. When we remove the
28327 legacy table this case should error out. */
28329 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, ext_opt
->clear
);
28341 /* Scan over the options table trying to find an exact match. */
28342 for (; opt
->name
!= NULL
; opt
++)
28343 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28345 int i
, nb_allowed_archs
=
28346 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
28347 /* Check we can apply the extension to this architecture. */
28348 for (i
= 0; i
< nb_allowed_archs
; i
++)
/* ARM_ANY slot marks an unused allowed_archs entry.  */
28351 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
28353 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *opt_set
))
28356 if (i
== nb_allowed_archs
)
28358 as_bad (_("extension does not apply to the base architecture"));
28362 /* Add or remove the extension. */
28364 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
28366 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
28368 /* Allowing Thumb division instructions for ARMv7 in autodetection
28369 rely on this break so that duplicate extensions (extensions
28370 with the same name as a previous extension in the list) are not
28371 considered for command-line parsing. */
28375 if (opt
->name
== NULL
)
28377 /* Did we fail to find an extension because it wasn't specified in
28378 alphabetical order, or because it does not exist? */
28380 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
28381 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28384 if (opt
->name
== NULL
)
28385 as_bad (_("unknown architectural extension `%s'"), str
);
28387 as_bad (_("architectural extensions must be specified in "
28388 "alphabetical order"));
28394 /* We should skip the extension we've just matched the next time
/* Handle -mcpu=<name>[+ext...]: look STR up in arm_cpus, record the CPU's
   feature set, extensions and default FPU in the mcpu_* globals, fill
   selected_cpu_name, then hand any '+' suffix to arm_parse_extension.
   NOTE(review): garbled extraction — braces, returns and the arm_cpus table
   itself are not visible here.  */
28406 arm_parse_cpu (const char *str
)
28408 const struct arm_cpu_option_table
*opt
;
/* Split off the "+ext" suffix, if any, before name matching.  */
28409 const char *ext
= strchr (str
, '+');
28415 len
= strlen (str
);
28419 as_bad (_("missing cpu name `%s'"), str
);
28423 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
28424 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28426 mcpu_cpu_opt
= &opt
->value
;
/* Lazily allocate the extension set the first time a CPU is chosen.  */
28427 if (mcpu_ext_opt
== NULL
)
28428 mcpu_ext_opt
= XNEW (arm_feature_set
);
28429 *mcpu_ext_opt
= opt
->ext
;
28430 mcpu_fpu_opt
= &opt
->default_fpu
;
28431 if (opt
->canonical_name
)
28433 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
28434 strcpy (selected_cpu_name
, opt
->canonical_name
);
/* No canonical name: upper-case the matched name, truncated to fit.  */
28440 if (len
>= sizeof selected_cpu_name
)
28441 len
= (sizeof selected_cpu_name
) - 1;
28443 for (i
= 0; i
< len
; i
++)
28444 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
28445 selected_cpu_name
[i
] = 0;
28449 return arm_parse_extension (ext
, mcpu_cpu_opt
, mcpu_ext_opt
, NULL
);
28454 as_bad (_("unknown cpu `%s'"), str
);
/* Handle -march=<name>[+ext...]: look STR up in arm_archs, record the
   architecture feature set and default FPU in the march_* globals, then hand
   any '+' suffix (plus the arch's context-sensitive ext table) to
   arm_parse_extension.  NOTE(review): garbled extraction — braces and the
   final arguments of the arm_parse_extension call are missing.  */
28459 arm_parse_arch (const char *str
)
28461 const struct arm_arch_option_table
*opt
;
28462 const char *ext
= strchr (str
, '+');
28468 len
= strlen (str
);
28472 as_bad (_("missing architecture name `%s'"), str
);
28476 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
28477 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28479 march_cpu_opt
= &opt
->value
;
/* Lazily allocate the extension set; -march starts with none.  */
28480 if (march_ext_opt
== NULL
)
28481 march_ext_opt
= XNEW (arm_feature_set
);
28482 *march_ext_opt
= arm_arch_none
;
28483 march_fpu_opt
= &opt
->default_fpu
;
28484 strcpy (selected_cpu_name
, opt
->name
);
28487 return arm_parse_extension (ext
, march_cpu_opt
, march_ext_opt
,
28493 as_bad (_("unknown architecture `%s'\n"), str
);
/* Handle -mfpu=<name>: exact-match STR in arm_fpus and point mfpu_opt at the
   matching feature set.  NOTE(review): garbled extraction — braces and
   return statements are missing.  */
28498 arm_parse_fpu (const char * str
)
28500 const struct arm_option_fpu_value_table
* opt
;
28502 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
28503 if (streq (opt
->name
, str
))
28505 mfpu_opt
= &opt
->value
;
28509 as_bad (_("unknown floating point format `%s'\n"), str
);
/* Handle -mfloat-abi=<abi>: exact-match STR in arm_float_abis and record the
   chosen ABI value.  NOTE(review): garbled extraction — braces and return
   statements are missing.  */
28514 arm_parse_float_abi (const char * str
)
28516 const struct arm_option_value_table
* opt
;
28518 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
28519 if (streq (opt
->name
, str
))
28521 mfloat_abi_opt
= opt
->value
;
28525 as_bad (_("unknown floating point abi `%s'\n"), str
);
/* Handle -meabi=<ver>: exact-match STR in arm_eabis and record the ELF
   header EABI flags.  NOTE(review): garbled extraction — braces and return
   statements are missing.  */
28531 arm_parse_eabi (const char * str
)
28533 const struct arm_option_value_table
*opt
;
28535 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
28536 if (streq (opt
->name
, str
))
28538 meabi_flags
= opt
->value
;
28541 as_bad (_("unknown EABI `%s'\n"), str
);
/* Handle -mimplicit-it=<mode>: map "arm"/"thumb"/"always"/"never" to the
   corresponding IMPLICIT_IT_MODE_* value; report anything else.
   NOTE(review): garbled extraction — braces and the final return of RET are
   missing.  */
28547 arm_parse_it_mode (const char * str
)
28549 bfd_boolean ret
= TRUE
;
28551 if (streq ("arm", str
))
28552 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
28553 else if (streq ("thumb", str
))
28554 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
28555 else if (streq ("always", str
))
28556 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
28557 else if (streq ("never", str
))
28558 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
28561 as_bad (_("unknown implicit IT mode `%s', should be "\
28562 "arm, thumb, always, or never."), str
);
/* Handle -mccs: switch lexing to TI CodeComposer Studio compatibility —
   ';' starts a comment and line separators are disabled.  */
28570 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED
)
28572 codecomposer_syntax
= TRUE
;
28573 arm_comment_chars
[0] = ';';
28574 arm_line_separator_chars
[0] = 0;
/* Long option dispatch table: prefix string, help text, and the sub-option
   parser invoked by md_parse_option.  NULL sentinel terminates.
   NOTE(review): braces dropped by the garbled extraction.  */
28578 struct arm_long_option_table arm_long_opts
[] =
28580 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
28581 arm_parse_cpu
, NULL
},
28582 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
28583 arm_parse_arch
, NULL
},
28584 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
28585 arm_parse_fpu
, NULL
},
28586 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
28587 arm_parse_float_abi
, NULL
},
28589 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
28590 arm_parse_eabi
, NULL
},
28592 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
28593 arm_parse_it_mode
, NULL
},
28594 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
28595 arm_ccs_mode
, NULL
},
28596 {NULL
, NULL
, 0, NULL
}
/* GAS target hook: parse one command-line option (character C, optional
   argument ARG).  Tries, in order: hard-coded cases (endianness, fix-v4bx),
   the simple arm_opts table, the legacy arm_legacy_opts table, then the
   prefix-matched arm_long_opts table whose func does the real parsing.
   NOTE(review): garbled extraction — the switch statement, braces and
   returns are largely missing.  */
28600 md_parse_option (int c
, const char * arg
)
28602 struct arm_option_table
*opt
;
28603 const struct arm_legacy_option_table
*fopt
;
28604 struct arm_long_option_table
*lopt
;
/* -EB / -EL select target endianness.  */
28610 target_big_endian
= 1;
28616 target_big_endian
= 0;
28620 case OPTION_FIX_V4BX
:
28628 #endif /* OBJ_ELF */
28631 /* Listing option. Just ignore these, we don't support additional
/* Simple one-character options with optional exact-match suffix.  */
28636 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
28638 if (c
== opt
->option
[0]
28639 && ((arg
== NULL
&& opt
->option
[1] == 0)
28640 || streq (arg
, opt
->option
+ 1)))
28642 /* If the option is deprecated, tell the user. */
28643 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
28644 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
28645 arg
? arg
: "", _(opt
->deprecated
));
28647 if (opt
->var
!= NULL
)
28648 *opt
->var
= opt
->value
;
/* Legacy options store a pointer to the table's value instead.  */
28654 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
28656 if (c
== fopt
->option
[0]
28657 && ((arg
== NULL
&& fopt
->option
[1] == 0)
28658 || streq (arg
, fopt
->option
+ 1)))
28660 /* If the option is deprecated, tell the user. */
28661 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
28662 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
28663 arg
? arg
: "", _(fopt
->deprecated
));
28665 if (fopt
->var
!= NULL
)
28666 *fopt
->var
= &fopt
->value
;
/* Long options are matched by prefix; the remainder of ARG is passed to
   the registered sub-option parser.  */
28672 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
28674 /* These options are expected to have an argument. */
28675 if (c
== lopt
->option
[0]
28677 && strncmp (arg
, lopt
->option
+ 1,
28678 strlen (lopt
->option
+ 1)) == 0)
28680 /* If the option is deprecated, tell the user. */
28681 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
28682 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
28683 _(lopt
->deprecated
));
28685 /* Call the sup-option parser. */
28686 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
/* GAS target hook: print ARM-specific option help to FP, iterating the short
   and long option tables, then the hard-coded -EB/-EL/--fix-v4bx/--fdpic
   lines.  NOTE(review): garbled extraction — braces and the fprintf calls
   wrapping the literal help strings below are missing.  */
28697 md_show_usage (FILE * fp
)
28699 struct arm_option_table
*opt
;
28700 struct arm_long_option_table
*lopt
;
28702 fprintf (fp
, _(" ARM-specific assembler options:\n"));
28704 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
28705 if (opt
->help
!= NULL
)
28706 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
28708 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
28709 if (lopt
->help
!= NULL
)
28710 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
28714 -EB assemble code for a big-endian cpu\n"));
28719 -EL assemble code for a little-endian cpu\n"));
28723 --fix-v4bx Allow BX in ARMv4 code\n"));
28727 --fdpic generate an FDPIC object file\n"));
28728 #endif /* OBJ_ELF */
/* NOTE(review): tail of the cpu_arch_ver_table typedef — the opening of the
   struct (and its integer val field) was dropped by the garbled
   extraction.  */
28736 arm_feature_set flags
;
28737 } cpu_arch_ver_table
;
28739 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
28740 chronologically for architectures, with an exception for ARMv6-M and
28741 ARMv6S-M due to legacy reasons. No new architecture should have a
28742 special case. This allows for build attribute selection results to be
28743 stable when new architectures are added. */
28744 static const cpu_arch_ver_table cpu_arch_ver
[] =
28746 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V1
},
28747 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2
},
28748 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2S
},
28749 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3
},
28750 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3M
},
28751 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4xM
},
28752 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4
},
28753 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4TxM
},
28754 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4T
},
28755 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5xM
},
28756 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5
},
28757 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5TxM
},
28758 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5T
},
28759 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TExP
},
28760 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TE
},
28761 {TAG_CPU_ARCH_V5TEJ
, ARM_ARCH_V5TEJ
},
28762 {TAG_CPU_ARCH_V6
, ARM_ARCH_V6
},
28763 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6Z
},
28764 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6KZ
},
28765 {TAG_CPU_ARCH_V6K
, ARM_ARCH_V6K
},
28766 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6T2
},
28767 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KT2
},
28768 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6ZT2
},
28769 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KZT2
},
28771 /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
28772 always selected build attributes to match those of ARMv6-M
28773 (resp. ARMv6S-M). However, due to these architectures being a strict
28774 subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
28775 would be selected when fully respecting chronology of architectures.
28776 It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
28777 move them before ARMv7 architectures. */
28778 {TAG_CPU_ARCH_V6_M
, ARM_ARCH_V6M
},
28779 {TAG_CPU_ARCH_V6S_M
, ARM_ARCH_V6SM
},
28781 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7
},
28782 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7A
},
28783 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7R
},
28784 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7M
},
28785 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7VE
},
28786 {TAG_CPU_ARCH_V7E_M
, ARM_ARCH_V7EM
},
28787 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8A
},
28788 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_1A
},
28789 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_2A
},
28790 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_3A
},
28791 {TAG_CPU_ARCH_V8M_BASE
, ARM_ARCH_V8M_BASE
},
28792 {TAG_CPU_ARCH_V8M_MAIN
, ARM_ARCH_V8M_MAIN
},
28793 {TAG_CPU_ARCH_V8R
, ARM_ARCH_V8R
},
28794 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_4A
},
28795 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_5A
},
28796 {TAG_CPU_ARCH_V8_1M_MAIN
, ARM_ARCH_V8_1M_MAIN
},
/* val == -1 terminates the table (see loops over cpu_arch_ver).  */
28797 {-1, ARM_ARCH_NONE
}
28800 /* Set an attribute if it has not already been set by the user. */
/* Sets build attribute TAG to integer VALUE unless a known tag was already
   set explicitly via a directive (attributes_set_explicitly).  */
28803 aeabi_set_attribute_int (int tag
, int value
)
28806 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
28807 || !attributes_set_explicitly
[tag
])
28808 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
/* String-valued counterpart of aeabi_set_attribute_int.  */
28812 aeabi_set_attribute_string (int tag
, const char *value
)
28815 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
28816 || !attributes_set_explicitly
[tag
])
28817 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
28820 /* Return whether features in the *NEEDED feature set are available via
28821 extensions for the architecture whose feature set is *ARCH_FSET. */
28824 have_ext_for_needed_feat_p (const arm_feature_set
*arch_fset
,
28825 const arm_feature_set
*needed
)
28827 int i
, nb_allowed_archs
;
28828 arm_feature_set ext_fset
;
28829 const struct arm_option_extension_value_table
*opt
;
/* Accumulate, into ext_fset, everything the legacy extension table could
   add on top of *arch_fset, then test *needed against that.  */
28831 ext_fset
= arm_arch_none
;
28832 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
28834 /* Extension does not provide any feature we need. */
28835 if (!ARM_CPU_HAS_FEATURE (*needed
, opt
->merge_value
))
28839 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
28840 for (i
= 0; i
< nb_allowed_archs
; i
++)
/* arm_arch_any marks an unused allowed_archs slot.  */
28843 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_arch_any
))
28846 /* Extension is available, add it. */
28847 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *arch_fset
))
28848 ARM_MERGE_FEATURE_SETS (ext_fset
, ext_fset
, opt
->merge_value
);
28852 /* Can we enable all features in *needed? */
28853 return ARM_FSET_CPU_SUBSET (*needed
, ext_fset
);
28856 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
28857 a given architecture feature set *ARCH_EXT_FSET including extension feature
28858 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
28859 - if true, check for an exact match of the architecture modulo extensions;
28860 - otherwise, select build attribute value of the first superset
28861 architecture released so that results remains stable when new architectures
28863 For -march/-mcpu=all the build attribute value of the most featureful
28864 architecture is returned. Tag_CPU_arch_profile result is returned in
/* NOTE(review): garbled extraction — braces, continue/break statements and
   the profile assignments ('A'/'R'/'M') are missing below.  */
28868 get_aeabi_cpu_arch_from_fset (const arm_feature_set
*arch_ext_fset
,
28869 const arm_feature_set
*ext_fset
,
28870 char *profile
, int exact_match
)
28872 arm_feature_set arch_fset
;
28873 const cpu_arch_ver_table
*p_ver
, *p_ver_ret
= NULL
;
28875 /* Select most featureful architecture with all its extensions if building
28876 for -march=all as the feature sets used to set build attributes. */
28877 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, arm_arch_any
))
28879 /* Force revisiting of decision for each new architecture. */
28880 gas_assert (MAX_TAG_CPU_ARCH
<= TAG_CPU_ARCH_V8_1M_MAIN
);
28882 return TAG_CPU_ARCH_V8
;
/* arch_fset = architecture features without the extension bits.  */
28885 ARM_CLEAR_FEATURE (arch_fset
, *arch_ext_fset
, *ext_fset
);
28887 for (p_ver
= cpu_arch_ver
; p_ver
->val
!= -1; p_ver
++)
28889 arm_feature_set known_arch_fset
;
/* Compare core features only; FPU bits are irrelevant here.  */
28891 ARM_CLEAR_FEATURE (known_arch_fset
, p_ver
->flags
, fpu_any
);
28894 /* Base architecture match user-specified architecture and
28895 extensions, eg. ARMv6S-M matching -march=armv6-m+os. */
28896 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, known_arch_fset
))
28901 /* Base architecture match user-specified architecture only
28902 (eg. ARMv6-M in the same case as above). Record it in case we
28903 find a match with above condition. */
28904 else if (p_ver_ret
== NULL
28905 && ARM_FEATURE_EQUAL (arch_fset
, known_arch_fset
))
28911 /* Architecture has all features wanted. */
28912 if (ARM_FSET_CPU_SUBSET (arch_fset
, known_arch_fset
))
28914 arm_feature_set added_fset
;
28916 /* Compute features added by this architecture over the one
28917 recorded in p_ver_ret. */
28918 if (p_ver_ret
!= NULL
)
28919 ARM_CLEAR_FEATURE (added_fset
, known_arch_fset
,
28921 /* First architecture that match incl. with extensions, or the
28922 only difference in features over the recorded match is
28923 features that were optional and are now mandatory. */
28924 if (p_ver_ret
== NULL
28925 || ARM_FSET_CPU_SUBSET (added_fset
, arch_fset
))
28931 else if (p_ver_ret
== NULL
)
28933 arm_feature_set needed_ext_fset
;
28935 ARM_CLEAR_FEATURE (needed_ext_fset
, arch_fset
, known_arch_fset
);
28937 /* Architecture has all features needed when using some
28938 extensions. Record it and continue searching in case there
28939 exist an architecture providing all needed features without
28940 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
28942 if (have_ext_for_needed_feat_p (&known_arch_fset
,
28949 if (p_ver_ret
== NULL
)
28953 /* Tag_CPU_arch_profile. */
/* A profile: v7-A, v8, or anything with atomics that is not M-only.  */
28954 if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7a
)
28955 || ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8
)
28956 || (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_atomics
)
28957 && !ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8m_m_only
)))
28959 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7r
))
28961 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_m
))
28965 return p_ver_ret
->val
;
28968 /* Set the public EABI object attributes. */
28971 aeabi_set_public_attributes (void)
28973 char profile
= '\0';
28976 int fp16_optional
= 0;
28977 int skip_exact_match
= 0;
28978 arm_feature_set flags
, flags_arch
, flags_ext
;
28980 /* Autodetection mode, choose the architecture based the instructions
28982 if (no_cpu_selected ())
28984 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
28986 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
28987 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
28989 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
28990 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
28992 /* Code run during relaxation relies on selected_cpu being set. */
28993 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
28994 flags_ext
= arm_arch_none
;
28995 ARM_CLEAR_FEATURE (selected_arch
, flags_arch
, flags_ext
);
28996 selected_ext
= flags_ext
;
28997 selected_cpu
= flags
;
28999 /* Otherwise, choose the architecture based on the capabilities of the
29003 ARM_MERGE_FEATURE_SETS (flags_arch
, selected_arch
, selected_ext
);
29004 ARM_CLEAR_FEATURE (flags_arch
, flags_arch
, fpu_any
);
29005 flags_ext
= selected_ext
;
29006 flags
= selected_cpu
;
29008 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_fpu
);
29010 /* Allow the user to override the reported architecture. */
29011 if (!ARM_FEATURE_ZERO (selected_object_arch
))
29013 ARM_CLEAR_FEATURE (flags_arch
, selected_object_arch
, fpu_any
);
29014 flags_ext
= arm_arch_none
;
29017 skip_exact_match
= ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_any
);
29019 /* When this function is run again after relaxation has happened there is no
29020 way to determine whether an architecture or CPU was specified by the user:
29021 - selected_cpu is set above for relaxation to work;
29022 - march_cpu_opt is not set if only -mcpu or .cpu is used;
29023 - mcpu_cpu_opt is set to arm_arch_any for autodetection.
29024 Therefore, if not in -march=all case we first try an exact match and fall
29025 back to autodetection. */
29026 if (!skip_exact_match
)
29027 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 1);
29029 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 0);
29031 as_bad (_("no architecture contains all the instructions used\n"));
29033 /* Tag_CPU_name. */
29034 if (selected_cpu_name
[0])
29038 q
= selected_cpu_name
;
29039 if (strncmp (q
, "armv", 4) == 0)
29044 for (i
= 0; q
[i
]; i
++)
29045 q
[i
] = TOUPPER (q
[i
]);
29047 aeabi_set_attribute_string (Tag_CPU_name
, q
);
29050 /* Tag_CPU_arch. */
29051 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
29053 /* Tag_CPU_arch_profile. */
29054 if (profile
!= '\0')
29055 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
29057 /* Tag_DSP_extension. */
29058 if (ARM_CPU_HAS_FEATURE (selected_ext
, arm_ext_dsp
))
29059 aeabi_set_attribute_int (Tag_DSP_extension
, 1);
29061 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
29062 /* Tag_ARM_ISA_use. */
29063 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
29064 || ARM_FEATURE_ZERO (flags_arch
))
29065 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
29067 /* Tag_THUMB_ISA_use. */
29068 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
29069 || ARM_FEATURE_ZERO (flags_arch
))
29073 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
29074 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
))
29076 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
29080 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
29083 /* Tag_VFP_arch. */
29084 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
29085 aeabi_set_attribute_int (Tag_VFP_arch
,
29086 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
29088 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
29089 aeabi_set_attribute_int (Tag_VFP_arch
,
29090 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
29092 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
29095 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
29097 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
29099 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
29102 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
29103 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
29104 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
29105 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
29106 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
29108 /* Tag_ABI_HardFP_use. */
29109 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
29110 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
29111 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
29113 /* Tag_WMMX_arch. */
29114 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
29115 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
29116 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
29117 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
29119 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
29120 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v8_1
))
29121 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 4);
29122 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
29123 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
29124 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
29126 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
29128 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
29132 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
29137 if (ARM_CPU_HAS_FEATURE (flags
, mve_fp_ext
))
29138 aeabi_set_attribute_int (Tag_MVE_arch
, 2);
29139 else if (ARM_CPU_HAS_FEATURE (flags
, mve_ext
))
29140 aeabi_set_attribute_int (Tag_MVE_arch
, 1);
29142 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
29143 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
29144 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
29148 We set Tag_DIV_use to two when integer divide instructions have been used
29149 in ARM state, or when Thumb integer divide instructions have been used,
29150 but we have no architecture profile set, nor have we any ARM instructions.
29152 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
29153 by the base architecture.
29155 For new architectures we will have to check these tests. */
29156 gas_assert (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
29157 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
29158 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
29159 aeabi_set_attribute_int (Tag_DIV_use
, 0);
29160 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
29161 || (profile
== '\0'
29162 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
29163 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
29164 aeabi_set_attribute_int (Tag_DIV_use
, 2);
29166 /* Tag_MP_extension_use. */
29167 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
29168 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
29170 /* Tag Virtualization_use. */
29171 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
29173 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
29176 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
/* Post relaxation hook.  Recompute ARM attributes now that relaxation is
   finished and free extension feature bits which will not be used anymore.  */

void
arm_md_post_relax (void)
{
  /* Re-derive the public build attributes: relaxation may have changed
     which instructions (and therefore which feature bits) were used.  */
  aeabi_set_public_attributes ();

  /* The -mcpu/-march extension option records are no longer needed once
     the attributes are final; release them and NULL the pointers so a
     stale pointer cannot be reused.  */
  XDELETE (mcpu_ext_opt);
  mcpu_ext_opt = NULL;
  XDELETE (march_ext_opt);
  march_ext_opt = NULL;
}
/* Add the default contents for the .ARM.attributes section.  */

void
arm_md_end (void)
{
  /* Build attributes only exist for EABI v4 and later; older EABI
     object files carry no .ARM.attributes section.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
    return;

  aeabi_set_public_attributes ();
}
29202 #endif /* OBJ_ELF */
/* Parse a .cpu directive.  */

static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char *name;
  char saved_char;

  /* Isolate the argument: temporarily NUL-terminate it in place,
     restoring the overwritten character before returning.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	/* Record the CPU's architecture and extension feature sets and
	   combine them into the selected-CPU feature set.  */
	selected_arch = opt->value;
	selected_ext = opt->ext;
	ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    /* No canonical name in the table: use the upper-cased
	       table entry name as the reported CPU name.  */
	    int i;

	    for (i = 0; opt->name[i]; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);

	    selected_cpu_name[i] = 0;
	  }

	ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);

	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  /* No table entry matched the argument.  */
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Parse a .arch directive.  */

static void
s_arm_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  /* Isolate the argument: temporarily NUL-terminate it in place,
     restoring the overwritten character before returning.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	/* A bare architecture selects no extensions; the selected CPU
	   feature set is exactly the architecture's.  */
	selected_arch = opt->value;
	selected_ext = arm_arch_none;
	selected_cpu = selected_arch;
	strcpy (selected_cpu_name, opt->name);
	ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  /* No table entry matched the argument.  */
  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Parse a .object_arch directive.  */

static void
s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  /* Isolate the argument: temporarily NUL-terminate it in place,
     restoring the overwritten character before returning.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	/* Only override the architecture reported in the object file's
	   attributes; the set of permitted instructions is unchanged.  */
	selected_object_arch = opt->value;
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  /* No table entry matched the argument.  */
  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
29311 /* Parse a .arch_extension directive. */
29314 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
29316 const struct arm_option_extension_value_table
*opt
;
29319 int adding_value
= 1;
29321 name
= input_line_pointer
;
29322 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29323 input_line_pointer
++;
29324 saved_char
= *input_line_pointer
;
29325 *input_line_pointer
= 0;
29327 if (strlen (name
) >= 2
29328 && strncmp (name
, "no", 2) == 0)
29334 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
29335 if (streq (opt
->name
, name
))
29337 int i
, nb_allowed_archs
=
29338 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[i
]);
29339 for (i
= 0; i
< nb_allowed_archs
; i
++)
29342 if (ARM_CPU_IS_ANY (opt
->allowed_archs
[i
]))
29344 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], selected_arch
))
29348 if (i
== nb_allowed_archs
)
29350 as_bad (_("architectural extension `%s' is not allowed for the "
29351 "current base architecture"), name
);
29356 ARM_MERGE_FEATURE_SETS (selected_ext
, selected_ext
,
29359 ARM_CLEAR_FEATURE (selected_ext
, selected_ext
, opt
->clear_value
);
29361 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
29362 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
29363 *input_line_pointer
= saved_char
;
29364 demand_empty_rest_of_line ();
29365 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
29366 on this return so that duplicate extensions (extensions with the
29367 same name as a previous extension in the list) are not considered
29368 for command-line parsing. */
29372 if (opt
->name
== NULL
)
29373 as_bad (_("unknown architecture extension `%s'\n"), name
);
29375 *input_line_pointer
= saved_char
;
29376 ignore_rest_of_line ();
/* Parse a .fpu directive.  */

static void
s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_option_fpu_value_table *opt;
  char saved_char;
  char *name;

  /* Isolate the argument: temporarily NUL-terminate it in place,
     restoring the overwritten character before returning.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	selected_fpu = opt->value;
#ifndef CPU_DEFAULT
	/* With no default CPU and none selected, permit any architecture
	   so the FPU choice alone does not restrict the instruction set.  */
	if (no_cpu_selected ())
	  ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
	else
#endif
	  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  /* No table entry matched the argument.  */
  as_bad (_("unknown floating point format `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Copy symbol information.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  /* Propagate the ARM target-specific symbol flag word from SRC to
     DEST; no other symbol fields are touched here.  */
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  static const struct
  {
    const char * name;
    const int    tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_FP_arch),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align_needed),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align_preserved),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_FP_HP_extension),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_MPextension_use),
      T (Tag_DIV_use),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_T2EE_use),
      T (Tag_Virtualization_use),
      T (Tag_DSP_extension),
      T (Tag_MVE_arch),
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
#undef T
    };
  unsigned int i;

  if (name == NULL)
    return -1;

  /* Linear scan; the table is small and this runs once per symbolic
     attribute in the source, so no faster lookup is needed.  */
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
/* Apply sym value for relocations only in the case that they are for
   local symbols in the same segment as the fixup and you have the
   respective architectural feature for blx and simple switches.
   Returns non-zero when the symbol value should be applied to the fixup.  */

int
arm_apply_sym_value (struct fix * fixP, segT this_seg)
{
  if (fixP->fx_addsy
      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
      /* PR 17444: If the local symbol is in a different section then a reloc
	 will always be generated for it, so applying the symbol value now
	 will result in a double offset being stored in the relocation.  */
      && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
      && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
    {
      switch (fixP->fx_r_type)
	{
	case BFD_RELOC_ARM_PCREL_BLX:
	case BFD_RELOC_THUMB_PCREL_BRANCH23:
	  /* ARM-state branch targets: apply only for ARM functions.  */
	  if (ARM_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	case BFD_RELOC_ARM_PCREL_CALL:
	case BFD_RELOC_THUMB_PCREL_BLX:
	  /* Thumb-state call targets: apply only for Thumb functions.  */
	  if (THUMB_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	default:
	  break;
	}
    }

  return 0;
}
29535 #endif /* OBJ_ELF */