1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2017 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
/* NOTE(review): several member lines of this struct were lost in
   extraction (including the struct header); only the members visible
   here are documented.  */
52 symbolS
/* Symbol marking this function's entry in the unwind index table.  */
* table_entry
;
53 symbolS
/* Personality routine symbol, when an explicit one is supplied.  */
* personality_routine
;
/* Index of the predefined personality routine; presumably a sentinel
   value when unset -- TODO confirm against the full file.  */
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
80 /* Results from operand parsing worker functions. */
84 PARSE_OPERAND_SUCCESS
,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result
;
96 /* Types of processor to assemble for. */
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
102 If you have a target that requires a default CPU option then you
103 should define CPU_DEFAULT here. */
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
122 #endif /* ifndef FPU_DEFAULT */
124 #define streq(a, b) (strcmp (a, b) == 0)
126 static arm_feature_set cpu_variant
;
127 static arm_feature_set arm_arch_used
;
128 static arm_feature_set thumb_arch_used
;
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26
= FALSE
;
132 static int atpcs
= FALSE
;
133 static int support_interwork
= FALSE
;
134 static int uses_apcs_float
= FALSE
;
135 static int pic_code
= FALSE
;
136 static int fix_v4bx
= FALSE
;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated
= TRUE
;
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax
= FALSE
;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set
* legacy_cpu
= NULL
;
147 static const arm_feature_set
* legacy_fpu
= NULL
;
149 static const arm_feature_set
* mcpu_cpu_opt
= NULL
;
150 static arm_feature_set
* dyn_mcpu_ext_opt
= NULL
;
151 static const arm_feature_set
* mcpu_fpu_opt
= NULL
;
152 static const arm_feature_set
* march_cpu_opt
= NULL
;
153 static arm_feature_set
* dyn_march_ext_opt
= NULL
;
154 static const arm_feature_set
* march_fpu_opt
= NULL
;
155 static const arm_feature_set
* mfpu_opt
= NULL
;
156 static const arm_feature_set
* object_arch
= NULL
;
158 /* Constants for known architecture features. */
159 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
160 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V1
;
161 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
162 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V3
;
163 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_NEON_V1
;
164 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
165 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
167 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
169 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
172 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
175 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
176 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2
);
177 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
178 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
179 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
180 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
181 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
182 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
183 static const arm_feature_set arm_ext_v4t_5
=
184 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
185 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
186 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
187 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
188 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
189 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
190 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
191 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
192 static const arm_feature_set arm_ext_v6_notm
=
193 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
194 static const arm_feature_set arm_ext_v6_dsp
=
195 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
196 static const arm_feature_set arm_ext_barrier
=
197 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
198 static const arm_feature_set arm_ext_msr
=
199 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
200 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
201 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
202 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
203 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
205 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
207 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
208 static const arm_feature_set arm_ext_m
=
209 ARM_FEATURE_CORE (ARM_EXT_V6M
| ARM_EXT_V7M
,
210 ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
211 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
212 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
213 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
214 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
215 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
216 static const arm_feature_set arm_ext_pan
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
);
217 static const arm_feature_set arm_ext_v8m
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
);
218 static const arm_feature_set arm_ext_v8m_main
=
219 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN
);
220 /* Instructions in ARMv8-M only found in M profile architectures. */
221 static const arm_feature_set arm_ext_v8m_m_only
=
222 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
223 static const arm_feature_set arm_ext_v6t2_v8m
=
224 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M
);
225 /* Instructions shared between ARMv8-A and ARMv8-M. */
226 static const arm_feature_set arm_ext_atomics
=
227 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS
);
229 /* DSP instructions Tag_DSP_extension refers to. */
230 static const arm_feature_set arm_ext_dsp
=
231 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
| ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
);
233 static const arm_feature_set arm_ext_ras
=
234 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
);
235 /* FP16 instructions. */
236 static const arm_feature_set arm_ext_fp16
=
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
);
238 static const arm_feature_set arm_ext_v8_3
=
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A
);
241 static const arm_feature_set arm_arch_any
= ARM_ANY
;
243 static const arm_feature_set fpu_any
= FPU_ANY
;
245 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED
= ARM_FEATURE (-1, -1, -1);
246 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
247 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
249 static const arm_feature_set arm_cext_iwmmxt2
=
250 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
251 static const arm_feature_set arm_cext_iwmmxt
=
252 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
253 static const arm_feature_set arm_cext_xscale
=
254 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
255 static const arm_feature_set arm_cext_maverick
=
256 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
257 static const arm_feature_set fpu_fpa_ext_v1
=
258 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
259 static const arm_feature_set fpu_fpa_ext_v2
=
260 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
261 static const arm_feature_set fpu_vfp_ext_v1xd
=
262 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
263 static const arm_feature_set fpu_vfp_ext_v1
=
264 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
265 static const arm_feature_set fpu_vfp_ext_v2
=
266 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
267 static const arm_feature_set fpu_vfp_ext_v3xd
=
268 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
269 static const arm_feature_set fpu_vfp_ext_v3
=
270 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
271 static const arm_feature_set fpu_vfp_ext_d32
=
272 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
273 static const arm_feature_set fpu_neon_ext_v1
=
274 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
275 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
276 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
278 static const arm_feature_set fpu_vfp_fp16
=
279 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
280 static const arm_feature_set fpu_neon_ext_fma
=
281 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
283 static const arm_feature_set fpu_vfp_ext_fma
=
284 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
285 static const arm_feature_set fpu_vfp_ext_armv8
=
286 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
287 static const arm_feature_set fpu_vfp_ext_armv8xd
=
288 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
289 static const arm_feature_set fpu_neon_ext_armv8
=
290 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
291 static const arm_feature_set fpu_crypto_ext_armv8
=
292 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
293 static const arm_feature_set crc_ext_armv8
=
294 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
);
295 static const arm_feature_set fpu_neon_ext_v8_1
=
296 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA
);
297 static const arm_feature_set fpu_neon_ext_dotprod
=
298 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
);
300 static int mfloat_abi_opt
= -1;
301 /* Record user cpu selection for object attributes. */
302 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
303 /* Must be long enough to hold any of the names in arm_cpus. */
304 static char selected_cpu_name
[20];
306 extern FLONUM_TYPE generic_floating_point_number
;
308 /* Return if no cpu was selected on command-line. */
/* True iff selected_cpu still equals the ARM_ARCH_NONE sentinel it was
   initialised with, i.e. no command-line option has updated it.
   NOTE(review): the return-type line and braces were lost in
   extraction -- TODO confirm signature against the full file.  */
310 no_cpu_selected (void)
312 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
317 static int meabi_flags
= EABI_DEFAULT
;
319 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
322 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
327 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
332 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
333 symbolS
* GOT_symbol
;
336 /* 0: assemble for ARM,
337 1: assemble for Thumb,
338 2: assemble for Thumb even though target CPU does not support thumb
340 static int thumb_mode
= 0;
341 /* A value distinct from the possible values for thumb_mode that we
342 can use to record whether thumb_mode has been copied into the
343 tc_frag_data field of a frag. */
344 #define MODE_RECORDED (1 << 4)
346 /* Specifies the intrinsic IT insn behavior mode. */
347 enum implicit_it_mode
349 IMPLICIT_IT_MODE_NEVER
= 0x00,
350 IMPLICIT_IT_MODE_ARM
= 0x01,
351 IMPLICIT_IT_MODE_THUMB
= 0x02,
352 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
354 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
356 /* If unified_syntax is true, we are processing the new unified
357 ARM/Thumb syntax. Important differences from the old ARM mode:
359 - Immediate operands do not require a # prefix.
360 - Conditional affixes always appear at the end of the
361 instruction. (For backward compatibility, those instructions
362 that formerly had them in the middle, continue to accept them
364 - The IT instruction may appear, and if it does is validated
365 against subsequent conditional affixes. It does not generate
368 Important differences from the old Thumb mode:
370 - Immediate operands do not require a # prefix.
371 - Most of the V6T2 instructions are only available in unified mode.
372 - The .N and .W suffixes are recognized and honored (it is an error
373 if they cannot be honored).
374 - All instructions set the flags if and only if they have an 's' affix.
375 - Conditional affixes may be used. They are validated against
376 preceding IT instructions. Unlike ARM mode, you cannot use a
377 conditional affix except in the scope of an IT instruction. */
379 static bfd_boolean unified_syntax
= FALSE
;
381 /* An immediate operand can start with #, and ld*, st*, pld operands
382 can contain [ and ]. We need to tell APP not to elide whitespace
383 before a [, which can appear as the first operand for pld.
384 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
385 const char arm_symbol_chars
[] = "#[]{}";
400 enum neon_el_type type
;
404 #define NEON_MAX_TYPE_ELS 4
408 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
412 enum it_instruction_type
417 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
418 if inside, should be the last one. */
419 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
420 i.e. BKPT and NOP. */
421 IT_INSN
/* The IT insn has been parsed. */
424 /* The maximum number of operands we need. */
425 #define ARM_IT_MAX_OPERANDS 6
430 unsigned long instruction
;
434 /* "uncond_value" is set to the value in place of the conditional field in
435 unconditional versions of the instruction, or -1 if nothing is
438 struct neon_type vectype
;
439 /* This does not indicate an actual NEON instruction, only that
440 the mnemonic accepts neon-style type suffixes. */
442 /* Set to the opcode if the instruction needs relaxation.
443 Zero if the instruction is not relaxed. */
447 bfd_reloc_code_real_type type
;
452 enum it_instruction_type it_insn_type
;
458 struct neon_type_el vectype
;
459 unsigned present
: 1; /* Operand present. */
460 unsigned isreg
: 1; /* Operand was a register. */
461 unsigned immisreg
: 1; /* .imm field is a second register. */
462 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
463 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
464 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
465 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
466 instructions. This allows us to disambiguate ARM <-> vector insns. */
467 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
468 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
469 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
470 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
471 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
472 unsigned writeback
: 1; /* Operand has trailing ! */
473 unsigned preind
: 1; /* Preindexed address. */
474 unsigned postind
: 1; /* Postindexed address. */
475 unsigned negative
: 1; /* Index register was negated. */
476 unsigned shifted
: 1; /* Shift applied to operation. */
477 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
478 } operands
[ARM_IT_MAX_OPERANDS
];
481 static struct arm_it inst
;
483 #define NUM_FLOAT_VALS 8
485 const char * fp_const
[] =
487 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
490 /* Number of littlenums required to hold an extended precision number. */
491 #define MAX_LITTLENUMS 6
493 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
503 #define CP_T_X 0x00008000
504 #define CP_T_Y 0x00400000
506 #define CONDS_BIT 0x00100000
507 #define LOAD_BIT 0x00100000
509 #define DOUBLE_LOAD_FLAG 0x00000001
513 const char * template_name
;
517 #define COND_ALWAYS 0xE
521 const char * template_name
;
525 struct asm_barrier_opt
527 const char * template_name
;
529 const arm_feature_set arch
;
532 /* The bit that distinguishes CPSR and SPSR. */
533 #define SPSR_BIT (1 << 22)
535 /* The individual PSR flag bits. */
536 #define PSR_c (1 << 16)
537 #define PSR_x (1 << 17)
538 #define PSR_s (1 << 18)
539 #define PSR_f (1 << 19)
544 bfd_reloc_code_real_type reloc
;
549 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
550 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
555 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
558 /* Bits for DEFINED field in neon_typed_alias. */
559 #define NTA_HASTYPE 1
560 #define NTA_HASINDEX 2
562 struct neon_typed_alias
564 unsigned char defined
;
566 struct neon_type_el eltype
;
569 /* ARM register categories. This includes coprocessor numbers and various
570 architecture extensions' registers. */
597 /* Structure for a hash table entry for a register.
598 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
599 information which states whether a vector type or index is specified (for a
600 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
606 unsigned char builtin
;
607 struct neon_typed_alias
* neon
;
610 /* Diagnostics used when we don't get a register of the expected type. */
611 const char * const reg_expected_msgs
[] =
613 N_("ARM register expected"),
614 N_("bad or missing co-processor number"),
615 N_("co-processor register expected"),
616 N_("FPA register expected"),
617 N_("VFP single precision register expected"),
618 N_("VFP/Neon double precision register expected"),
619 N_("Neon quad precision register expected"),
620 N_("VFP single or double precision register expected"),
621 N_("Neon double or quad precision register expected"),
622 N_("VFP single, double or Neon quad precision register expected"),
623 N_("VFP system register expected"),
624 N_("Maverick MVF register expected"),
625 N_("Maverick MVD register expected"),
626 N_("Maverick MVFX register expected"),
627 N_("Maverick MVDX register expected"),
628 N_("Maverick MVAX register expected"),
629 N_("Maverick DSPSC register expected"),
630 N_("iWMMXt data register expected"),
631 N_("iWMMXt control register expected"),
632 N_("iWMMXt scalar register expected"),
633 N_("XScale accumulator register expected"),
636 /* Some well known registers that we refer to directly elsewhere. */
642 /* ARM instructions take 4bytes in the object file, Thumb instructions
648 /* Basic string to match. */
649 const char * template_name
;
651 /* Parameters to instruction. */
652 unsigned int operands
[8];
654 /* Conditional tag - see opcode_lookup. */
655 unsigned int tag
: 4;
657 /* Basic instruction code. */
658 unsigned int avalue
: 28;
660 /* Thumb-format instruction code. */
663 /* Which architecture variant provides this instruction. */
664 const arm_feature_set
* avariant
;
665 const arm_feature_set
* tvariant
;
667 /* Function to call to encode instruction in ARM format. */
668 void (* aencode
) (void);
670 /* Function to call to encode instruction in Thumb format. */
671 void (* tencode
) (void);
674 /* Defines for various bits that we will want to toggle. */
675 #define INST_IMMEDIATE 0x02000000
676 #define OFFSET_REG 0x02000000
677 #define HWOFFSET_IMM 0x00400000
678 #define SHIFT_BY_REG 0x00000010
679 #define PRE_INDEX 0x01000000
680 #define INDEX_UP 0x00800000
681 #define WRITE_BACK 0x00200000
682 #define LDM_TYPE_2_OR_3 0x00400000
683 #define CPSI_MMOD 0x00020000
685 #define LITERAL_MASK 0xf000f000
686 #define OPCODE_MASK 0xfe1fffff
687 #define V4_STR_BIT 0x00000020
688 #define VLDR_VMOV_SAME 0x0040f000
690 #define T2_SUBS_PC_LR 0xf3de8f00
692 #define DATA_OP_SHIFT 21
693 #define SBIT_SHIFT 20
695 #define T2_OPCODE_MASK 0xfe1fffff
696 #define T2_DATA_OP_SHIFT 21
697 #define T2_SBIT_SHIFT 20
699 #define A_COND_MASK 0xf0000000
700 #define A_PUSH_POP_OP_MASK 0x0fff0000
702 /* Opcodes for pushing/popping registers to/from the stack. */
703 #define A1_OPCODE_PUSH 0x092d0000
704 #define A2_OPCODE_PUSH 0x052d0004
705 #define A2_OPCODE_POP 0x049d0004
707 /* Codes to distinguish the arithmetic instructions. */
718 #define OPCODE_CMP 10
719 #define OPCODE_CMN 11
720 #define OPCODE_ORR 12
721 #define OPCODE_MOV 13
722 #define OPCODE_BIC 14
723 #define OPCODE_MVN 15
725 #define T2_OPCODE_AND 0
726 #define T2_OPCODE_BIC 1
727 #define T2_OPCODE_ORR 2
728 #define T2_OPCODE_ORN 3
729 #define T2_OPCODE_EOR 4
730 #define T2_OPCODE_ADD 8
731 #define T2_OPCODE_ADC 10
732 #define T2_OPCODE_SBC 11
733 #define T2_OPCODE_SUB 13
734 #define T2_OPCODE_RSB 14
736 #define T_OPCODE_MUL 0x4340
737 #define T_OPCODE_TST 0x4200
738 #define T_OPCODE_CMN 0x42c0
739 #define T_OPCODE_NEG 0x4240
740 #define T_OPCODE_MVN 0x43c0
742 #define T_OPCODE_ADD_R3 0x1800
743 #define T_OPCODE_SUB_R3 0x1a00
744 #define T_OPCODE_ADD_HI 0x4400
745 #define T_OPCODE_ADD_ST 0xb000
746 #define T_OPCODE_SUB_ST 0xb080
747 #define T_OPCODE_ADD_SP 0xa800
748 #define T_OPCODE_ADD_PC 0xa000
749 #define T_OPCODE_ADD_I8 0x3000
750 #define T_OPCODE_SUB_I8 0x3800
751 #define T_OPCODE_ADD_I3 0x1c00
752 #define T_OPCODE_SUB_I3 0x1e00
754 #define T_OPCODE_ASR_R 0x4100
755 #define T_OPCODE_LSL_R 0x4080
756 #define T_OPCODE_LSR_R 0x40c0
757 #define T_OPCODE_ROR_R 0x41c0
758 #define T_OPCODE_ASR_I 0x1000
759 #define T_OPCODE_LSL_I 0x0000
760 #define T_OPCODE_LSR_I 0x0800
762 #define T_OPCODE_MOV_I8 0x2000
763 #define T_OPCODE_CMP_I8 0x2800
764 #define T_OPCODE_CMP_LR 0x4280
765 #define T_OPCODE_MOV_HR 0x4600
766 #define T_OPCODE_CMP_HR 0x4500
768 #define T_OPCODE_LDR_PC 0x4800
769 #define T_OPCODE_LDR_SP 0x9800
770 #define T_OPCODE_STR_SP 0x9000
771 #define T_OPCODE_LDR_IW 0x6800
772 #define T_OPCODE_STR_IW 0x6000
773 #define T_OPCODE_LDR_IH 0x8800
774 #define T_OPCODE_STR_IH 0x8000
775 #define T_OPCODE_LDR_IB 0x7800
776 #define T_OPCODE_STR_IB 0x7000
777 #define T_OPCODE_LDR_RW 0x5800
778 #define T_OPCODE_STR_RW 0x5000
779 #define T_OPCODE_LDR_RH 0x5a00
780 #define T_OPCODE_STR_RH 0x5200
781 #define T_OPCODE_LDR_RB 0x5c00
782 #define T_OPCODE_STR_RB 0x5400
784 #define T_OPCODE_PUSH 0xb400
785 #define T_OPCODE_POP 0xbc00
787 #define T_OPCODE_BRANCH 0xe000
789 #define THUMB_SIZE 2 /* Size of thumb instruction. */
790 #define THUMB_PP_PC_LR 0x0100
791 #define THUMB_LOAD_BIT 0x0800
792 #define THUMB2_LOAD_BIT 0x00100000
794 #define BAD_ARGS _("bad arguments to instruction")
795 #define BAD_SP _("r13 not allowed here")
796 #define BAD_PC _("r15 not allowed here")
797 #define BAD_COND _("instruction cannot be conditional")
798 #define BAD_OVERLAP _("registers may not be the same")
799 #define BAD_HIREG _("lo register required")
800 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
/* Note: no trailing semicolon.  The previous definition ended in ';',
   which made every use expand into a double statement -- harmless after
   "inst.error = BAD_ADDR_MODE;", but a syntax error if the macro is ever
   used in an unbraced if/else arm, and inconsistent with every other
   BAD_* diagnostic macro above.  */
801 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
802 #define BAD_BRANCH _("branch must be last instruction in IT block")
803 #define BAD_NOT_IT _("instruction not allowed in IT block")
804 #define BAD_FPU _("selected FPU does not support instruction")
805 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
806 #define BAD_IT_COND _("incorrect condition in IT block")
807 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
808 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
809 #define BAD_PC_ADDRESSING \
810 _("cannot use register index with PC-relative addressing")
811 #define BAD_PC_WRITEBACK \
812 _("cannot use writeback with PC-relative addressing")
813 #define BAD_RANGE _("branch out of range")
814 #define BAD_FP16 _("selected processor does not support fp16 instruction")
815 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
816 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
818 static struct hash_control
* arm_ops_hsh
;
819 static struct hash_control
* arm_cond_hsh
;
820 static struct hash_control
* arm_shift_hsh
;
821 static struct hash_control
* arm_psr_hsh
;
822 static struct hash_control
* arm_v7m_psr_hsh
;
823 static struct hash_control
* arm_reg_hsh
;
824 static struct hash_control
* arm_reloc_hsh
;
825 static struct hash_control
* arm_barrier_opt_hsh
;
827 /* Stuff needed to resolve the label ambiguity
836 symbolS
* last_label_seen
;
837 static int label_is_thumb_function_name
= FALSE
;
839 /* Literal pool structure. Held on a per-section
840 and per-sub-section basis. */
842 #define MAX_LITERAL_POOL_SIZE 1024
843 typedef struct literal_pool
845 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
846 unsigned int next_free_entry
;
852 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
854 struct literal_pool
* next
;
855 unsigned int alignment
;
858 /* Pointer to a linked list of literal pools. */
859 literal_pool
* list_of_pools
= NULL
;
861 typedef enum asmfunc_states
864 WAITING_ASMFUNC_NAME
,
868 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
871 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
873 static struct current_it now_it
;
877 now_it_compatible (int cond
)
/* Compare condition codes with the low bit masked off: adjacent even/odd
   condition codes are a condition and its inverse, so COND is compatible
   with the current IT block when it matches now_it.cc up to negation.  */
879 return (cond
& ~1) == (now_it
.cc
& ~1);
883 conditional_insn (void)
/* Nonzero when the instruction currently being assembled carries a
   condition other than AL (always).  */
885 return inst
.cond
!= COND_ALWAYS
;
888 static int in_it_block (void);
890 static int handle_it_state (void);
892 static void force_automatic_it_block_close (void);
894 static void it_fsm_post_encode (void);
896 #define set_it_insn_type(type) \
899 inst.it_insn_type = type; \
900 if (handle_it_state () == FAIL) \
905 #define set_it_insn_type_nonvoid(type, failret) \
908 inst.it_insn_type = type; \
909 if (handle_it_state () == FAIL) \
914 #define set_it_insn_type_last() \
917 if (inst.cond == COND_ALWAYS) \
918 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
920 set_it_insn_type (INSIDE_IT_LAST_INSN); \
926 /* This array holds the chars that always start a comment. If the
927 pre-processor is disabled, these aren't very useful. */
928 char arm_comment_chars
[] = "@";
930 /* This array holds the chars that only start a comment at the beginning of
931 a line. If the line seems to have the form '# 123 filename'
932 .line and .file directives will appear in the pre-processed output. */
933 /* Note that input_file.c hand checks for '#' at the beginning of the
934 first line of the input file. This is because the compiler outputs
935 #NO_APP at the beginning of its output. */
936 /* Also note that comments like this one will always work. */
937 const char line_comment_chars
[] = "#";
939 char arm_line_separator_chars
[] = ";";
941 /* Chars that can be used to separate mant
942 from exp in floating point numbers. */
943 const char EXP_CHARS
[] = "eE";
945 /* Chars that mean this number is a floating point constant. */
949 const char FLT_CHARS
[] = "rRsSfFdDxXeEpP";
951 /* Prefix characters that indicate the start of an immediate
953 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
955 /* Separator character handling. */
957 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
960 skip_past_char (char ** str
, char c
)
962 /* PR gas/14987: Allow for whitespace before the expected character. */
963 skip_whitespace (*str
);
974 #define skip_past_comma(str) skip_past_char (str, ',')
976 /* Arithmetic expressions (possibly involving symbols). */
978 /* Return TRUE if anything in the expression is a bignum. */
/* Recursively walks SP's value expression: checks the node itself for
   O_big, then descends into X_add_symbol and (when present) X_op_symbol.
   NOTE(review): the bodies of the two if-statements (the early returns)
   were lost in extraction.  */
981 walk_no_bignums (symbolS
* sp
)
983 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
986 if (symbol_get_value_expression (sp
)->X_add_symbol
)
988 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
989 || (symbol_get_value_expression (sp
)->X_op_symbol
990 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
996 static bfd_boolean in_my_get_expression
= FALSE
;
998 /* Third argument to my_get_expression. */
999 #define GE_NO_PREFIX 0
1000 #define GE_IMM_PREFIX 1
1001 #define GE_OPT_PREFIX 2
1002 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1003 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1004 #define GE_OPT_PREFIX_BIG 3
/* Parse an expression from *STR into EP, possibly led by an immediate
   prefix ('#' or '$').  PREFIX_MODE selects how the prefix is treated:
   GE_NO_PREFIX forbids it, GE_IMM_PREFIX requires it, and the OPT
   variants make it optional (GE_OPT_PREFIX_BIG additionally permits
   bignums).  Saves and restores the global input_line_pointer around the
   call to expression(), and sets in_my_get_expression so md_operand()
   knows to flag parse failures as O_illegal.
   NOTE(review): numerous lines (braces, returns, error paths) of this
   function were lost in extraction; the fragments below are kept
   verbatim.  */
1007 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
1012 /* In unified syntax, all prefixes are optional. */
1014 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
1017 switch (prefix_mode
)
1019 case GE_NO_PREFIX
: break;
1021 if (!is_immediate_prefix (**str
))
1023 inst
.error
= _("immediate expression requires a # prefix");
1029 case GE_OPT_PREFIX_BIG
:
1030 if (is_immediate_prefix (**str
))
/* Start from a fully-zeroed expression before handing it to the
   generic expression parser.  */
1037 memset (ep
, 0, sizeof (expressionS
));
1039 save_in
= input_line_pointer
;
1040 input_line_pointer
= *str
;
1041 in_my_get_expression
= TRUE
;
1042 seg
= expression (ep
);
1043 in_my_get_expression
= FALSE
;
1045 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1047 /* We found a bad or missing expression in md_operand(). */
1048 *str
= input_line_pointer
;
1049 input_line_pointer
= save_in
;
/* Preserve any more specific diagnostic md_operand() may already have
   recorded in inst.error.  */
1050 if (inst
.error
== NULL
)
1051 inst
.error
= (ep
->X_op
== O_absent
1052 ? _("missing expression") :_("bad expression"));
/* Reject expressions that resolved into an unexpected segment.  */
1057 if (seg
!= absolute_section
1058 && seg
!= text_section
1059 && seg
!= data_section
1060 && seg
!= bss_section
1061 && seg
!= undefined_section
)
1063 inst
.error
= _("bad segment");
1064 *str
= input_line_pointer
;
1065 input_line_pointer
= save_in
;
1072 /* Get rid of any bignums now, so that we don't generate an error for which
1073 we can't establish a line number later on. Big numbers are never valid
1074 in instructions, which is where this routine is always called. */
1075 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1076 && (ep
->X_op
== O_big
1077 || (ep
->X_add_symbol
1078 && (walk_no_bignums (ep
->X_add_symbol
)
1080 && walk_no_bignums (ep
->X_op_symbol
))))))
1082 inst
.error
= _("invalid constant");
1083 *str
= input_line_pointer
;
1084 input_line_pointer
= save_in
;
/* Success path: report how far we consumed and restore the saved
   input_line_pointer.  */
1088 *str
= input_line_pointer
;
1089 input_line_pointer
= save_in
;
1093 /* Turn a string in input_line_pointer into a floating point constant
1094 of type TYPE, and store the appropriate bytes in *LITP. The number
1095 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1096 returned, or NULL on OK.
1098 Note that fp constants aren't represented in the normal way on the ARM.
1099 In big endian mode, things are as expected. However, in little endian
1100 mode fp constants are big-endian word-wise, and little-endian byte-wise
1101 within the words. For example, (double) 1.1 in big endian mode is
1102 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1103 the byte sequence 99 99 f1 3f 9a 99 99 99.
1105 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1108 md_atof (int type
, char * litP
, int * sizeP
)
1111 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1143 return _("Unrecognized or unsupported floating point constant");
1146 t
= atof_ieee (input_line_pointer
, type
, words
);
1148 input_line_pointer
= t
;
1149 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1151 if (target_big_endian
)
1153 for (i
= 0; i
< prec
; i
++)
1155 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1156 litP
+= sizeof (LITTLENUM_TYPE
);
1161 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1162 for (i
= prec
- 1; i
>= 0; i
--)
1164 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1165 litP
+= sizeof (LITTLENUM_TYPE
);
1168 /* For a 4 byte float the order of elements in `words' is 1 0.
1169 For an 8 byte float the order is 1 0 3 2. */
1170 for (i
= 0; i
< prec
; i
+= 2)
1172 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1173 sizeof (LITTLENUM_TYPE
));
1174 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1175 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1176 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1183 /* We handle all bad expressions here, so that we can report the faulty
1184 instruction in the error message. */
1187 md_operand (expressionS
* exp
)
1189 if (in_my_get_expression
)
1190 exp
->X_op
= O_illegal
;
1193 /* Immediate values. */
1196 /* Generic immediate-value read function for use in directives.
1197 Accepts anything that 'expression' can fold to a constant.
1198 *val receives the number. */
1201 immediate_for_directive (int *val
)
1204 exp
.X_op
= O_illegal
;
1206 if (is_immediate_prefix (*input_line_pointer
))
1208 input_line_pointer
++;
1212 if (exp
.X_op
!= O_constant
)
1214 as_bad (_("expected #constant"));
1215 ignore_rest_of_line ();
1218 *val
= exp
.X_add_number
;
1223 /* Register parsing. */
1225 /* Generic register parser. CCP points to what should be the
1226 beginning of a register name. If it is indeed a valid register
1227 name, advance CCP over it and return the reg_entry structure;
1228 otherwise return NULL. Does not issue diagnostics. */
1230 static struct reg_entry
*
1231 arm_reg_parse_multi (char **ccp
)
1235 struct reg_entry
*reg
;
1237 skip_whitespace (start
);
1239 #ifdef REGISTER_PREFIX
1240 if (*start
!= REGISTER_PREFIX
)
1244 #ifdef OPTIONAL_REGISTER_PREFIX
1245 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1250 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1255 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1257 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1267 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1268 enum arm_reg_type type
)
1270 /* Alternative syntaxes are accepted for a few register classes. */
1277 /* Generic coprocessor register names are allowed for these. */
1278 if (reg
&& reg
->type
== REG_TYPE_CN
)
1283 /* For backward compatibility, a bare number is valid here. */
1285 unsigned long processor
= strtoul (start
, ccp
, 10);
1286 if (*ccp
!= start
&& processor
<= 15)
1291 case REG_TYPE_MMXWC
:
1292 /* WC includes WCG. ??? I'm not sure this is true for all
1293 instructions that take WC registers. */
1294 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1305 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1306 return value is the register number or FAIL. */
1309 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1312 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1315 /* Do not allow a scalar (reg+index) to parse as a register. */
1316 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1319 if (reg
&& reg
->type
== type
)
1322 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1329 /* Parse a Neon type specifier. *STR should point at the leading '.'
1330 character. Does no verification at this stage that the type fits the opcode
1337 Can all be legally parsed by this function.
1339 Fills in neon_type struct pointer with parsed information, and updates STR
1340 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1341 type, FAIL if not. */
1344 parse_neon_type (struct neon_type
*type
, char **str
)
1351 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1353 enum neon_el_type thistype
= NT_untyped
;
1354 unsigned thissize
= -1u;
1361 /* Just a size without an explicit type. */
1365 switch (TOLOWER (*ptr
))
1367 case 'i': thistype
= NT_integer
; break;
1368 case 'f': thistype
= NT_float
; break;
1369 case 'p': thistype
= NT_poly
; break;
1370 case 's': thistype
= NT_signed
; break;
1371 case 'u': thistype
= NT_unsigned
; break;
1373 thistype
= NT_float
;
1378 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1384 /* .f is an abbreviation for .f32. */
1385 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1390 thissize
= strtoul (ptr
, &ptr
, 10);
1392 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1395 as_bad (_("bad size %d in type specifier"), thissize
);
1403 type
->el
[type
->elems
].type
= thistype
;
1404 type
->el
[type
->elems
].size
= thissize
;
1409 /* Empty/missing type is not a successful parse. */
1410 if (type
->elems
== 0)
1418 /* Errors may be set multiple times during parsing or bit encoding
1419 (particularly in the Neon bits), but usually the earliest error which is set
1420 will be the most meaningful. Avoid overwriting it with later (cascading)
1421 errors by calling this function. */
1424 first_error (const char *err
)
1430 /* Parse a single type, e.g. ".s32", leading period included. */
1432 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1435 struct neon_type optype
;
1439 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1441 if (optype
.elems
== 1)
1442 *vectype
= optype
.el
[0];
1445 first_error (_("only one type should be specified for operand"));
1451 first_error (_("vector type expected"));
1463 /* Special meanings for indices (which have a range of 0-7), which will fit into
1466 #define NEON_ALL_LANES 15
1467 #define NEON_INTERLEAVE_LANES 14
1469 /* Parse either a register or a scalar, with an optional type. Return the
1470 register number, and optionally fill in the actual type of the register
1471 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1472 type/index information in *TYPEINFO. */
1475 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1476 enum arm_reg_type
*rtype
,
1477 struct neon_typed_alias
*typeinfo
)
1480 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1481 struct neon_typed_alias atype
;
1482 struct neon_type_el parsetype
;
1486 atype
.eltype
.type
= NT_invtype
;
1487 atype
.eltype
.size
= -1;
1489 /* Try alternate syntax for some types of register. Note these are mutually
1490 exclusive with the Neon syntax extensions. */
1493 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1501 /* Undo polymorphism when a set of register types may be accepted. */
1502 if ((type
== REG_TYPE_NDQ
1503 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1504 || (type
== REG_TYPE_VFSD
1505 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1506 || (type
== REG_TYPE_NSDQ
1507 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1508 || reg
->type
== REG_TYPE_NQ
))
1509 || (type
== REG_TYPE_MMXWC
1510 && (reg
->type
== REG_TYPE_MMXWCG
)))
1511 type
= (enum arm_reg_type
) reg
->type
;
1513 if (type
!= reg
->type
)
1519 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1521 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1523 first_error (_("can't redefine type for operand"));
1526 atype
.defined
|= NTA_HASTYPE
;
1527 atype
.eltype
= parsetype
;
1530 if (skip_past_char (&str
, '[') == SUCCESS
)
1532 if (type
!= REG_TYPE_VFD
)
1534 first_error (_("only D registers may be indexed"));
1538 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1540 first_error (_("can't change index for operand"));
1544 atype
.defined
|= NTA_HASINDEX
;
1546 if (skip_past_char (&str
, ']') == SUCCESS
)
1547 atype
.index
= NEON_ALL_LANES
;
1552 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1554 if (exp
.X_op
!= O_constant
)
1556 first_error (_("constant expression required"));
1560 if (skip_past_char (&str
, ']') == FAIL
)
1563 atype
.index
= exp
.X_add_number
;
1578 /* Like arm_reg_parse, but allow allow the following extra features:
1579 - If RTYPE is non-zero, return the (possibly restricted) type of the
1580 register (e.g. Neon double or quad reg when either has been requested).
1581 - If this is a Neon vector type with additional type information, fill
1582 in the struct pointed to by VECTYPE (if non-NULL).
1583 This function will fault on encountering a scalar. */
1586 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1587 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1589 struct neon_typed_alias atype
;
1591 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1596 /* Do not allow regname(... to parse as a register. */
1600 /* Do not allow a scalar (reg+index) to parse as a register. */
1601 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1603 first_error (_("register operand expected, but got scalar"));
1608 *vectype
= atype
.eltype
;
1615 #define NEON_SCALAR_REG(X) ((X) >> 4)
1616 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1618 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1619 have enough information to be able to do a good job bounds-checking. So, we
1620 just do easy checks here, and do further checks later. */
1623 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1627 struct neon_typed_alias atype
;
1629 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1631 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1634 if (atype
.index
== NEON_ALL_LANES
)
1636 first_error (_("scalar must have an index"));
1639 else if (atype
.index
>= 64 / elsize
)
1641 first_error (_("scalar index out of range"));
1646 *type
= atype
.eltype
;
1650 return reg
* 16 + atype
.index
;
1653 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1656 parse_reg_list (char ** strp
)
1658 char * str
= * strp
;
1662 /* We come back here if we get ranges concatenated by '+' or '|'. */
1665 skip_whitespace (str
);
1679 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1681 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1691 first_error (_("bad range in register list"));
1695 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1697 if (range
& (1 << i
))
1699 (_("Warning: duplicated register (r%d) in register list"),
1707 if (range
& (1 << reg
))
1708 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1710 else if (reg
<= cur_reg
)
1711 as_tsktsk (_("Warning: register range not in ascending order"));
1716 while (skip_past_comma (&str
) != FAIL
1717 || (in_range
= 1, *str
++ == '-'));
1720 if (skip_past_char (&str
, '}') == FAIL
)
1722 first_error (_("missing `}'"));
1730 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1733 if (exp
.X_op
== O_constant
)
1735 if (exp
.X_add_number
1736 != (exp
.X_add_number
& 0x0000ffff))
1738 inst
.error
= _("invalid register mask");
1742 if ((range
& exp
.X_add_number
) != 0)
1744 int regno
= range
& exp
.X_add_number
;
1747 regno
= (1 << regno
) - 1;
1749 (_("Warning: duplicated register (r%d) in register list"),
1753 range
|= exp
.X_add_number
;
1757 if (inst
.reloc
.type
!= 0)
1759 inst
.error
= _("expression too complex");
1763 memcpy (&inst
.reloc
.exp
, &exp
, sizeof (expressionS
));
1764 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1765 inst
.reloc
.pc_rel
= 0;
1769 if (*str
== '|' || *str
== '+')
1775 while (another_range
);
1781 /* Types of registers in a list. */
1790 /* Parse a VFP register list. If the string is invalid return FAIL.
1791 Otherwise return the number of registers, and set PBASE to the first
1792 register. Parses registers of type ETYPE.
1793 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1794 - Q registers can be used to specify pairs of D registers
1795 - { } can be omitted from around a singleton register list
1796 FIXME: This is not implemented, as it would require backtracking in
1799 This could be done (the meaning isn't really ambiguous), but doesn't
1800 fit in well with the current parsing framework.
1801 - 32 D registers may be used (also true for VFPv3).
1802 FIXME: Types are ignored in these register lists, which is probably a
1806 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1811 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1815 unsigned long mask
= 0;
1818 if (skip_past_char (&str
, '{') == FAIL
)
1820 inst
.error
= _("expecting {");
1827 regtype
= REG_TYPE_VFS
;
1832 regtype
= REG_TYPE_VFD
;
1835 case REGLIST_NEON_D
:
1836 regtype
= REG_TYPE_NDQ
;
1840 if (etype
!= REGLIST_VFP_S
)
1842 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1843 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1847 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1850 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1857 base_reg
= max_regs
;
1861 int setmask
= 1, addregs
= 1;
1863 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1865 if (new_base
== FAIL
)
1867 first_error (_(reg_expected_msgs
[regtype
]));
1871 if (new_base
>= max_regs
)
1873 first_error (_("register out of range in list"));
1877 /* Note: a value of 2 * n is returned for the register Q<n>. */
1878 if (regtype
== REG_TYPE_NQ
)
1884 if (new_base
< base_reg
)
1885 base_reg
= new_base
;
1887 if (mask
& (setmask
<< new_base
))
1889 first_error (_("invalid register list"));
1893 if ((mask
>> new_base
) != 0 && ! warned
)
1895 as_tsktsk (_("register list not in ascending order"));
1899 mask
|= setmask
<< new_base
;
1902 if (*str
== '-') /* We have the start of a range expression */
1908 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1911 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1915 if (high_range
>= max_regs
)
1917 first_error (_("register out of range in list"));
1921 if (regtype
== REG_TYPE_NQ
)
1922 high_range
= high_range
+ 1;
1924 if (high_range
<= new_base
)
1926 inst
.error
= _("register range not in ascending order");
1930 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1932 if (mask
& (setmask
<< new_base
))
1934 inst
.error
= _("invalid register list");
1938 mask
|= setmask
<< new_base
;
1943 while (skip_past_comma (&str
) != FAIL
);
1947 /* Sanity check -- should have raised a parse error above. */
1948 if (count
== 0 || count
> max_regs
)
1953 /* Final test -- the registers must be consecutive. */
1955 for (i
= 0; i
< count
; i
++)
1957 if ((mask
& (1u << i
)) == 0)
1959 inst
.error
= _("non-contiguous register range");
1969 /* True if two alias types are the same. */
1972 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1980 if (a
->defined
!= b
->defined
)
1983 if ((a
->defined
& NTA_HASTYPE
) != 0
1984 && (a
->eltype
.type
!= b
->eltype
.type
1985 || a
->eltype
.size
!= b
->eltype
.size
))
1988 if ((a
->defined
& NTA_HASINDEX
) != 0
1989 && (a
->index
!= b
->index
))
1995 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1996 The base register is put in *PBASE.
1997 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1999 The register stride (minus one) is put in bit 4 of the return value.
2000 Bits [6:5] encode the list length (minus one).
2001 The type of the list elements is put in *ELTYPE, if non-NULL. */
2003 #define NEON_LANE(X) ((X) & 0xf)
2004 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2005 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2008 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
2009 struct neon_type_el
*eltype
)
2016 int leading_brace
= 0;
2017 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
2018 const char *const incr_error
= _("register stride must be 1 or 2");
2019 const char *const type_error
= _("mismatched element/structure types in list");
2020 struct neon_typed_alias firsttype
;
2021 firsttype
.defined
= 0;
2022 firsttype
.eltype
.type
= NT_invtype
;
2023 firsttype
.eltype
.size
= -1;
2024 firsttype
.index
= -1;
2026 if (skip_past_char (&ptr
, '{') == SUCCESS
)
2031 struct neon_typed_alias atype
;
2032 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2036 first_error (_(reg_expected_msgs
[rtype
]));
2043 if (rtype
== REG_TYPE_NQ
)
2049 else if (reg_incr
== -1)
2051 reg_incr
= getreg
- base_reg
;
2052 if (reg_incr
< 1 || reg_incr
> 2)
2054 first_error (_(incr_error
));
2058 else if (getreg
!= base_reg
+ reg_incr
* count
)
2060 first_error (_(incr_error
));
2064 if (! neon_alias_types_same (&atype
, &firsttype
))
2066 first_error (_(type_error
));
2070 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2074 struct neon_typed_alias htype
;
2075 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2077 lane
= NEON_INTERLEAVE_LANES
;
2078 else if (lane
!= NEON_INTERLEAVE_LANES
)
2080 first_error (_(type_error
));
2085 else if (reg_incr
!= 1)
2087 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2091 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2094 first_error (_(reg_expected_msgs
[rtype
]));
2097 if (! neon_alias_types_same (&htype
, &firsttype
))
2099 first_error (_(type_error
));
2102 count
+= hireg
+ dregs
- getreg
;
2106 /* If we're using Q registers, we can't use [] or [n] syntax. */
2107 if (rtype
== REG_TYPE_NQ
)
2113 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2117 else if (lane
!= atype
.index
)
2119 first_error (_(type_error
));
2123 else if (lane
== -1)
2124 lane
= NEON_INTERLEAVE_LANES
;
2125 else if (lane
!= NEON_INTERLEAVE_LANES
)
2127 first_error (_(type_error
));
2132 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2134 /* No lane set by [x]. We must be interleaving structures. */
2136 lane
= NEON_INTERLEAVE_LANES
;
2139 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2140 || (count
> 1 && reg_incr
== -1))
2142 first_error (_("error parsing element/structure list"));
2146 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2148 first_error (_("expected }"));
2156 *eltype
= firsttype
.eltype
;
2161 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2164 /* Parse an explicit relocation suffix on an expression. This is
2165 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2166 arm_reloc_hsh contains no entries, so this function can only
2167 succeed if there is no () after the word. Returns -1 on error,
2168 BFD_RELOC_UNUSED if there wasn't any suffix. */
2171 parse_reloc (char **str
)
2173 struct reloc_entry
*r
;
2177 return BFD_RELOC_UNUSED
;
2182 while (*q
&& *q
!= ')' && *q
!= ',')
2187 if ((r
= (struct reloc_entry
*)
2188 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2195 /* Directives: register aliases. */
2197 static struct reg_entry
*
2198 insert_reg_alias (char *str
, unsigned number
, int type
)
2200 struct reg_entry
*new_reg
;
2203 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2205 if (new_reg
->builtin
)
2206 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2208 /* Only warn about a redefinition if it's not defined as the
2210 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2211 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2216 name
= xstrdup (str
);
2217 new_reg
= XNEW (struct reg_entry
);
2219 new_reg
->name
= name
;
2220 new_reg
->number
= number
;
2221 new_reg
->type
= type
;
2222 new_reg
->builtin
= FALSE
;
2223 new_reg
->neon
= NULL
;
2225 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2232 insert_neon_reg_alias (char *str
, int number
, int type
,
2233 struct neon_typed_alias
*atype
)
2235 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2239 first_error (_("attempt to redefine typed alias"));
2245 reg
->neon
= XNEW (struct neon_typed_alias
);
2246 *reg
->neon
= *atype
;
2250 /* Look for the .req directive. This is of the form:
2252 new_register_name .req existing_register_name
2254 If we find one, or if it looks sufficiently like one that we want to
2255 handle any error here, return TRUE. Otherwise return FALSE. */
2258 create_register_alias (char * newname
, char *p
)
2260 struct reg_entry
*old
;
2261 char *oldname
, *nbuf
;
2264 /* The input scrubber ensures that whitespace after the mnemonic is
2265 collapsed to single spaces. */
2267 if (strncmp (oldname
, " .req ", 6) != 0)
2271 if (*oldname
== '\0')
2274 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2277 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2281 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2282 the desired alias name, and p points to its end. If not, then
2283 the desired alias name is in the global original_case_string. */
2284 #ifdef TC_CASE_SENSITIVE
2287 newname
= original_case_string
;
2288 nlen
= strlen (newname
);
2291 nbuf
= xmemdup0 (newname
, nlen
);
2293 /* Create aliases under the new name as stated; an all-lowercase
2294 version of the new name; and an all-uppercase version of the new
2296 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2298 for (p
= nbuf
; *p
; p
++)
2301 if (strncmp (nbuf
, newname
, nlen
))
2303 /* If this attempt to create an additional alias fails, do not bother
2304 trying to create the all-lower case alias. We will fail and issue
2305 a second, duplicate error message. This situation arises when the
2306 programmer does something like:
2309 The second .req creates the "Foo" alias but then fails to create
2310 the artificial FOO alias because it has already been created by the
2312 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2319 for (p
= nbuf
; *p
; p
++)
2322 if (strncmp (nbuf
, newname
, nlen
))
2323 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2330 /* Create a Neon typed/indexed register alias using directives, e.g.:
2335 These typed registers can be used instead of the types specified after the
2336 Neon mnemonic, so long as all operands given have types. Types can also be
2337 specified directly, e.g.:
2338 vadd d0.s32, d1.s32, d2.s32 */
2341 create_neon_reg_alias (char *newname
, char *p
)
2343 enum arm_reg_type basetype
;
2344 struct reg_entry
*basereg
;
2345 struct reg_entry mybasereg
;
2346 struct neon_type ntype
;
2347 struct neon_typed_alias typeinfo
;
2348 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2351 typeinfo
.defined
= 0;
2352 typeinfo
.eltype
.type
= NT_invtype
;
2353 typeinfo
.eltype
.size
= -1;
2354 typeinfo
.index
= -1;
2358 if (strncmp (p
, " .dn ", 5) == 0)
2359 basetype
= REG_TYPE_VFD
;
2360 else if (strncmp (p
, " .qn ", 5) == 0)
2361 basetype
= REG_TYPE_NQ
;
2370 basereg
= arm_reg_parse_multi (&p
);
2372 if (basereg
&& basereg
->type
!= basetype
)
2374 as_bad (_("bad type for register"));
2378 if (basereg
== NULL
)
2381 /* Try parsing as an integer. */
2382 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2383 if (exp
.X_op
!= O_constant
)
2385 as_bad (_("expression must be constant"));
2388 basereg
= &mybasereg
;
2389 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2395 typeinfo
= *basereg
->neon
;
2397 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2399 /* We got a type. */
2400 if (typeinfo
.defined
& NTA_HASTYPE
)
2402 as_bad (_("can't redefine the type of a register alias"));
2406 typeinfo
.defined
|= NTA_HASTYPE
;
2407 if (ntype
.elems
!= 1)
2409 as_bad (_("you must specify a single type only"));
2412 typeinfo
.eltype
= ntype
.el
[0];
2415 if (skip_past_char (&p
, '[') == SUCCESS
)
2418 /* We got a scalar index. */
2420 if (typeinfo
.defined
& NTA_HASINDEX
)
2422 as_bad (_("can't redefine the index of a scalar alias"));
2426 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2428 if (exp
.X_op
!= O_constant
)
2430 as_bad (_("scalar index must be constant"));
2434 typeinfo
.defined
|= NTA_HASINDEX
;
2435 typeinfo
.index
= exp
.X_add_number
;
2437 if (skip_past_char (&p
, ']') == FAIL
)
2439 as_bad (_("expecting ]"));
2444 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2445 the desired alias name, and p points to its end. If not, then
2446 the desired alias name is in the global original_case_string. */
2447 #ifdef TC_CASE_SENSITIVE
2448 namelen
= nameend
- newname
;
2450 newname
= original_case_string
;
2451 namelen
= strlen (newname
);
2454 namebuf
= xmemdup0 (newname
, namelen
);
2456 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2457 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2459 /* Insert name in all uppercase. */
2460 for (p
= namebuf
; *p
; p
++)
2463 if (strncmp (namebuf
, newname
, namelen
))
2464 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2465 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2467 /* Insert name in all lowercase. */
2468 for (p
= namebuf
; *p
; p
++)
2471 if (strncmp (namebuf
, newname
, namelen
))
2472 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2473 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2479 /* Should never be called, as .req goes between the alias and the
2480 register name, not at the beginning of the line. */
2483 s_req (int a ATTRIBUTE_UNUSED
)
2485 as_bad (_("invalid syntax for .req directive"));
2489 s_dn (int a ATTRIBUTE_UNUSED
)
2491 as_bad (_("invalid syntax for .dn directive"));
2495 s_qn (int a ATTRIBUTE_UNUSED
)
2497 as_bad (_("invalid syntax for .qn directive"));
2500 /* The .unreq directive deletes an alias which was previously defined
2501 by .req. For example:
2507 s_unreq (int a ATTRIBUTE_UNUSED
)
2512 name
= input_line_pointer
;
2514 while (*input_line_pointer
!= 0
2515 && *input_line_pointer
!= ' '
2516 && *input_line_pointer
!= '\n')
2517 ++input_line_pointer
;
2519 saved_char
= *input_line_pointer
;
2520 *input_line_pointer
= 0;
2523 as_bad (_("invalid syntax for .unreq directive"));
2526 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2530 as_bad (_("unknown register alias '%s'"), name
);
2531 else if (reg
->builtin
)
2532 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2539 hash_delete (arm_reg_hsh
, name
, FALSE
);
2540 free ((char *) reg
->name
);
2545 /* Also locate the all upper case and all lower case versions.
2546 Do not complain if we cannot find one or the other as it
2547 was probably deleted above. */
2549 nbuf
= strdup (name
);
2550 for (p
= nbuf
; *p
; p
++)
2552 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2555 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2556 free ((char *) reg
->name
);
2562 for (p
= nbuf
; *p
; p
++)
2564 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2567 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2568 free ((char *) reg
->name
);
2578 *input_line_pointer
= saved_char
;
2579 demand_empty_rest_of_line ();
2582 /* Directives: Instruction set selection. */
2585 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2586 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2587 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2588 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2590 /* Create a new mapping symbol for the transition to STATE. */
2593 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2596 const char * symname
;
2603 type
= BSF_NO_FLAGS
;
2607 type
= BSF_NO_FLAGS
;
2611 type
= BSF_NO_FLAGS
;
2617 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2618 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2623 THUMB_SET_FUNC (symbolP
, 0);
2624 ARM_SET_THUMB (symbolP
, 0);
2625 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2629 THUMB_SET_FUNC (symbolP
, 1);
2630 ARM_SET_THUMB (symbolP
, 1);
2631 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2639 /* Save the mapping symbols for future reference. Also check that
2640 we do not place two mapping symbols at the same offset within a
2641 frag. We'll handle overlap between frags in
2642 check_mapping_symbols.
2644 If .fill or other data filling directive generates zero sized data,
2645 the mapping symbol for the following code will have the same value
2646 as the one generated for the data filling directive. In this case,
2647 we replace the old symbol with the new one at the same address. */
2650 if (frag
->tc_frag_data
.first_map
!= NULL
)
2652 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2653 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2655 frag
->tc_frag_data
.first_map
= symbolP
;
2657 if (frag
->tc_frag_data
.last_map
!= NULL
)
2659 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2660 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2661 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2663 frag
->tc_frag_data
.last_map
= symbolP
;
2666 /* We must sometimes convert a region marked as code to data during
2667 code alignment, if an odd number of bytes have to be padded. The
2668 code mapping symbol is pushed to an aligned address. */
2671 insert_data_mapping_symbol (enum mstate state
,
2672 valueT value
, fragS
*frag
, offsetT bytes
)
2674 /* If there was already a mapping symbol, remove it. */
2675 if (frag
->tc_frag_data
.last_map
!= NULL
2676 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2678 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2682 know (frag
->tc_frag_data
.first_map
== symp
);
2683 frag
->tc_frag_data
.first_map
= NULL
;
2685 frag
->tc_frag_data
.last_map
= NULL
;
2686 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2689 make_mapping_symbol (MAP_DATA
, value
, frag
);
2690 make_mapping_symbol (state
, value
+ bytes
, frag
);
2693 static void mapping_state_2 (enum mstate state
, int max_chars
);
2695 /* Set the mapping state to STATE. Only call this when about to
2696 emit some STATE bytes to the file. */
2698 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2700 mapping_state (enum mstate state
)
2702 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2704 if (mapstate
== state
)
2705 /* The mapping symbol has already been emitted.
2706 There is nothing else to do. */
2709 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2711 All ARM instructions require 4-byte alignment.
2712 (Almost) all Thumb instructions require 2-byte alignment.
2714 When emitting instructions into any section, mark the section
2717 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2718 but themselves require 2-byte alignment; this applies to some
2719 PC- relative forms. However, these cases will involve implicit
2720 literal pool generation or an explicit .align >=2, both of
2721 which will cause the section to me marked with sufficient
2722 alignment. Thus, we don't handle those cases here. */
2723 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2725 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2726 /* This case will be evaluated later. */
2729 mapping_state_2 (state
, 0);
2732 /* Same as mapping_state, but MAX_CHARS bytes have already been
2733 allocated. Put the mapping symbol that far back. */
2736 mapping_state_2 (enum mstate state
, int max_chars
)
2738 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2740 if (!SEG_NORMAL (now_seg
))
2743 if (mapstate
== state
)
2744 /* The mapping symbol has already been emitted.
2745 There is nothing else to do. */
2748 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2749 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2751 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2752 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2755 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2758 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2759 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2763 #define mapping_state(x) ((void)0)
2764 #define mapping_state_2(x, y) ((void)0)
2767 /* Find the real, Thumb encoded start of a Thumb function. */
2771 find_real_start (symbolS
* symbolP
)
2774 const char * name
= S_GET_NAME (symbolP
);
2775 symbolS
* new_target
;
2777 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2778 #define STUB_NAME ".real_start_of"
2783 /* The compiler may generate BL instructions to local labels because
2784 it needs to perform a branch to a far away location. These labels
2785 do not have a corresponding ".real_start_of" label. We check
2786 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2787 the ".real_start_of" convention for nonlocal branches. */
2788 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2791 real_start
= concat (STUB_NAME
, name
, NULL
);
2792 new_target
= symbol_find (real_start
);
2795 if (new_target
== NULL
)
2797 as_warn (_("Failed to find real start of function: %s\n"), name
);
2798 new_target
= symbolP
;
2806 opcode_select (int width
)
2813 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2814 as_bad (_("selected processor does not support THUMB opcodes"));
2817 /* No need to force the alignment, since we will have been
2818 coming from ARM mode, which is word-aligned. */
2819 record_alignment (now_seg
, 1);
2826 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2827 as_bad (_("selected processor does not support ARM opcodes"));
2832 frag_align (2, 0, 0);
2834 record_alignment (now_seg
, 1);
2839 as_bad (_("invalid instruction size selected (%d)"), width
);
2844 s_arm (int ignore ATTRIBUTE_UNUSED
)
2847 demand_empty_rest_of_line ();
2851 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2854 demand_empty_rest_of_line ();
2858 s_code (int unused ATTRIBUTE_UNUSED
)
2862 temp
= get_absolute_expression ();
2867 opcode_select (temp
);
2871 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2876 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2878 /* If we are not already in thumb mode go into it, EVEN if
2879 the target processor does not support thumb instructions.
2880 This is used by gcc/config/arm/lib1funcs.asm for example
2881 to compile interworking support functions even if the
2882 target processor should not support interworking. */
2886 record_alignment (now_seg
, 1);
2889 demand_empty_rest_of_line ();
2893 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2897 /* The following label is the name/address of the start of a Thumb function.
2898 We need to know this for the interworking support. */
2899 label_is_thumb_function_name
= TRUE
;
2902 /* Perform a .set directive, but also mark the alias as
2903 being a thumb function. */
2906 s_thumb_set (int equiv
)
2908 /* XXX the following is a duplicate of the code for s_set() in read.c
2909 We cannot just call that code as we need to get at the symbol that
2916 /* Especial apologies for the random logic:
2917 This just grew, and could be parsed much more simply!
2919 delim
= get_symbol_name (& name
);
2920 end_name
= input_line_pointer
;
2921 (void) restore_line_pointer (delim
);
2923 if (*input_line_pointer
!= ',')
2926 as_bad (_("expected comma after name \"%s\""), name
);
2928 ignore_rest_of_line ();
2932 input_line_pointer
++;
2935 if (name
[0] == '.' && name
[1] == '\0')
2937 /* XXX - this should not happen to .thumb_set. */
2941 if ((symbolP
= symbol_find (name
)) == NULL
2942 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2945 /* When doing symbol listings, play games with dummy fragments living
2946 outside the normal fragment chain to record the file and line info
2948 if (listing
& LISTING_SYMBOLS
)
2950 extern struct list_info_struct
* listing_tail
;
2951 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
2953 memset (dummy_frag
, 0, sizeof (fragS
));
2954 dummy_frag
->fr_type
= rs_fill
;
2955 dummy_frag
->line
= listing_tail
;
2956 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2957 dummy_frag
->fr_symbol
= symbolP
;
2961 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2964 /* "set" symbols are local unless otherwise specified. */
2965 SF_SET_LOCAL (symbolP
);
2966 #endif /* OBJ_COFF */
2967 } /* Make a new symbol. */
2969 symbol_table_insert (symbolP
);
2974 && S_IS_DEFINED (symbolP
)
2975 && S_GET_SEGMENT (symbolP
) != reg_section
)
2976 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2978 pseudo_set (symbolP
);
2980 demand_empty_rest_of_line ();
2982 /* XXX Now we come to the Thumb specific bit of code. */
2984 THUMB_SET_FUNC (symbolP
, 1);
2985 ARM_SET_THUMB (symbolP
, 1);
2986 #if defined OBJ_ELF || defined OBJ_COFF
2987 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2991 /* Directives: Mode selection. */
2993 /* .syntax [unified|divided] - choose the new unified syntax
2994 (same for Arm and Thumb encoding, modulo slight differences in what
2995 can be represented) or the old divergent syntax for each mode. */
2997 s_syntax (int unused ATTRIBUTE_UNUSED
)
3001 delim
= get_symbol_name (& name
);
3003 if (!strcasecmp (name
, "unified"))
3004 unified_syntax
= TRUE
;
3005 else if (!strcasecmp (name
, "divided"))
3006 unified_syntax
= FALSE
;
3009 as_bad (_("unrecognized syntax mode \"%s\""), name
);
3012 (void) restore_line_pointer (delim
);
3013 demand_empty_rest_of_line ();
3016 /* Directives: sectioning and alignment. */
3019 s_bss (int ignore ATTRIBUTE_UNUSED
)
3021 /* We don't support putting frags in the BSS segment, we fake it by
3022 marking in_bss, then looking at s_skip for clues. */
3023 subseg_set (bss_section
, 0);
3024 demand_empty_rest_of_line ();
3026 #ifdef md_elf_section_change_hook
3027 md_elf_section_change_hook ();
3032 s_even (int ignore ATTRIBUTE_UNUSED
)
3034 /* Never make frag if expect extra pass. */
3036 frag_align (1, 0, 0);
3038 record_alignment (now_seg
, 1);
3040 demand_empty_rest_of_line ();
3043 /* Directives: CodeComposer Studio. */
3045 /* .ref (for CodeComposer Studio syntax only). */
3047 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3049 if (codecomposer_syntax
)
3050 ignore_rest_of_line ();
3052 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3055 /* If name is not NULL, then it is used for marking the beginning of a
3056 function, whereas if it is NULL then it means the function end. */
3058 asmfunc_debug (const char * name
)
3060 static const char * last_name
= NULL
;
3064 gas_assert (last_name
== NULL
);
3067 if (debug_type
== DEBUG_STABS
)
3068 stabs_generate_asm_func (name
, name
);
3072 gas_assert (last_name
!= NULL
);
3074 if (debug_type
== DEBUG_STABS
)
3075 stabs_generate_asm_endfunc (last_name
, last_name
);
3082 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3084 if (codecomposer_syntax
)
3086 switch (asmfunc_state
)
3088 case OUTSIDE_ASMFUNC
:
3089 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3092 case WAITING_ASMFUNC_NAME
:
3093 as_bad (_(".asmfunc repeated."));
3096 case WAITING_ENDASMFUNC
:
3097 as_bad (_(".asmfunc without function."));
3100 demand_empty_rest_of_line ();
3103 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3107 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3109 if (codecomposer_syntax
)
3111 switch (asmfunc_state
)
3113 case OUTSIDE_ASMFUNC
:
3114 as_bad (_(".endasmfunc without a .asmfunc."));
3117 case WAITING_ASMFUNC_NAME
:
3118 as_bad (_(".endasmfunc without function."));
3121 case WAITING_ENDASMFUNC
:
3122 asmfunc_state
= OUTSIDE_ASMFUNC
;
3123 asmfunc_debug (NULL
);
3126 demand_empty_rest_of_line ();
3129 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3133 s_ccs_def (int name
)
3135 if (codecomposer_syntax
)
3138 as_bad (_(".def pseudo-op only available with -mccs flag."));
3141 /* Directives: Literal pools. */
3143 static literal_pool
*
3144 find_literal_pool (void)
3146 literal_pool
* pool
;
3148 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3150 if (pool
->section
== now_seg
3151 && pool
->sub_section
== now_subseg
)
3158 static literal_pool
*
3159 find_or_make_literal_pool (void)
3161 /* Next literal pool ID number. */
3162 static unsigned int latest_pool_num
= 1;
3163 literal_pool
* pool
;
3165 pool
= find_literal_pool ();
3169 /* Create a new pool. */
3170 pool
= XNEW (literal_pool
);
3174 pool
->next_free_entry
= 0;
3175 pool
->section
= now_seg
;
3176 pool
->sub_section
= now_subseg
;
3177 pool
->next
= list_of_pools
;
3178 pool
->symbol
= NULL
;
3179 pool
->alignment
= 2;
3181 /* Add it to the list. */
3182 list_of_pools
= pool
;
3185 /* New pools, and emptied pools, will have a NULL symbol. */
3186 if (pool
->symbol
== NULL
)
3188 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3189 (valueT
) 0, &zero_address_frag
);
3190 pool
->id
= latest_pool_num
++;
3197 /* Add the literal in the global 'inst'
3198 structure to the relevant literal pool. */
3201 add_to_lit_pool (unsigned int nbytes
)
3203 #define PADDING_SLOT 0x1
3204 #define LIT_ENTRY_SIZE_MASK 0xFF
3205 literal_pool
* pool
;
3206 unsigned int entry
, pool_size
= 0;
3207 bfd_boolean padding_slot_p
= FALSE
;
3213 imm1
= inst
.operands
[1].imm
;
3214 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3215 : inst
.reloc
.exp
.X_unsigned
? 0
3216 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3217 if (target_big_endian
)
3220 imm2
= inst
.operands
[1].imm
;
3224 pool
= find_or_make_literal_pool ();
3226 /* Check if this literal value is already in the pool. */
3227 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3231 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3232 && (inst
.reloc
.exp
.X_op
== O_constant
)
3233 && (pool
->literals
[entry
].X_add_number
3234 == inst
.reloc
.exp
.X_add_number
)
3235 && (pool
->literals
[entry
].X_md
== nbytes
)
3236 && (pool
->literals
[entry
].X_unsigned
3237 == inst
.reloc
.exp
.X_unsigned
))
3240 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
3241 && (inst
.reloc
.exp
.X_op
== O_symbol
)
3242 && (pool
->literals
[entry
].X_add_number
3243 == inst
.reloc
.exp
.X_add_number
)
3244 && (pool
->literals
[entry
].X_add_symbol
3245 == inst
.reloc
.exp
.X_add_symbol
)
3246 && (pool
->literals
[entry
].X_op_symbol
3247 == inst
.reloc
.exp
.X_op_symbol
)
3248 && (pool
->literals
[entry
].X_md
== nbytes
))
3251 else if ((nbytes
== 8)
3252 && !(pool_size
& 0x7)
3253 && ((entry
+ 1) != pool
->next_free_entry
)
3254 && (pool
->literals
[entry
].X_op
== O_constant
)
3255 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3256 && (pool
->literals
[entry
].X_unsigned
3257 == inst
.reloc
.exp
.X_unsigned
)
3258 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3259 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3260 && (pool
->literals
[entry
+ 1].X_unsigned
3261 == inst
.reloc
.exp
.X_unsigned
))
3264 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3265 if (padding_slot_p
&& (nbytes
== 4))
3271 /* Do we need to create a new entry? */
3272 if (entry
== pool
->next_free_entry
)
3274 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3276 inst
.error
= _("literal pool overflow");
3282 /* For 8-byte entries, we align to an 8-byte boundary,
3283 and split it into two 4-byte entries, because on 32-bit
3284 host, 8-byte constants are treated as big num, thus
3285 saved in "generic_bignum" which will be overwritten
3286 by later assignments.
3288 We also need to make sure there is enough space for
3291 We also check to make sure the literal operand is a
3293 if (!(inst
.reloc
.exp
.X_op
== O_constant
3294 || inst
.reloc
.exp
.X_op
== O_big
))
3296 inst
.error
= _("invalid type for literal pool");
3299 else if (pool_size
& 0x7)
3301 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3303 inst
.error
= _("literal pool overflow");
3307 pool
->literals
[entry
] = inst
.reloc
.exp
;
3308 pool
->literals
[entry
].X_op
= O_constant
;
3309 pool
->literals
[entry
].X_add_number
= 0;
3310 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3311 pool
->next_free_entry
+= 1;
3314 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3316 inst
.error
= _("literal pool overflow");
3320 pool
->literals
[entry
] = inst
.reloc
.exp
;
3321 pool
->literals
[entry
].X_op
= O_constant
;
3322 pool
->literals
[entry
].X_add_number
= imm1
;
3323 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3324 pool
->literals
[entry
++].X_md
= 4;
3325 pool
->literals
[entry
] = inst
.reloc
.exp
;
3326 pool
->literals
[entry
].X_op
= O_constant
;
3327 pool
->literals
[entry
].X_add_number
= imm2
;
3328 pool
->literals
[entry
].X_unsigned
= inst
.reloc
.exp
.X_unsigned
;
3329 pool
->literals
[entry
].X_md
= 4;
3330 pool
->alignment
= 3;
3331 pool
->next_free_entry
+= 1;
3335 pool
->literals
[entry
] = inst
.reloc
.exp
;
3336 pool
->literals
[entry
].X_md
= 4;
3340 /* PR ld/12974: Record the location of the first source line to reference
3341 this entry in the literal pool. If it turns out during linking that the
3342 symbol does not exist we will be able to give an accurate line number for
3343 the (first use of the) missing reference. */
3344 if (debug_type
== DEBUG_DWARF2
)
3345 dwarf2_where (pool
->locs
+ entry
);
3347 pool
->next_free_entry
+= 1;
3349 else if (padding_slot_p
)
3351 pool
->literals
[entry
] = inst
.reloc
.exp
;
3352 pool
->literals
[entry
].X_md
= nbytes
;
3355 inst
.reloc
.exp
.X_op
= O_symbol
;
3356 inst
.reloc
.exp
.X_add_number
= pool_size
;
3357 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
3363 tc_start_label_without_colon (void)
3365 bfd_boolean ret
= TRUE
;
3367 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3369 const char *label
= input_line_pointer
;
3371 while (!is_end_of_line
[(int) label
[-1]])
3376 as_bad (_("Invalid label '%s'"), label
);
3380 asmfunc_debug (label
);
3382 asmfunc_state
= WAITING_ENDASMFUNC
;
3388 /* Can't use symbol_new here, so have to create a symbol and then at
3389 a later date assign it a value. That's what these functions do. */
3392 symbol_locate (symbolS
* symbolP
,
3393 const char * name
, /* It is copied, the caller can modify. */
3394 segT segment
, /* Segment identifier (SEG_<something>). */
3395 valueT valu
, /* Symbol value. */
3396 fragS
* frag
) /* Associated fragment. */
3399 char * preserved_copy_of_name
;
3401 name_length
= strlen (name
) + 1; /* +1 for \0. */
3402 obstack_grow (¬es
, name
, name_length
);
3403 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3405 #ifdef tc_canonicalize_symbol_name
3406 preserved_copy_of_name
=
3407 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3410 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3412 S_SET_SEGMENT (symbolP
, segment
);
3413 S_SET_VALUE (symbolP
, valu
);
3414 symbol_clear_list_pointers (symbolP
);
3416 symbol_set_frag (symbolP
, frag
);
3418 /* Link to end of symbol chain. */
3420 extern int symbol_table_frozen
;
3422 if (symbol_table_frozen
)
3426 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3428 obj_symbol_new_hook (symbolP
);
3430 #ifdef tc_symbol_new_hook
3431 tc_symbol_new_hook (symbolP
);
3435 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3436 #endif /* DEBUG_SYMS */
3440 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3443 literal_pool
* pool
;
3446 pool
= find_literal_pool ();
3448 || pool
->symbol
== NULL
3449 || pool
->next_free_entry
== 0)
3452 /* Align pool as you have word accesses.
3453 Only make a frag if we have to. */
3455 frag_align (pool
->alignment
, 0, 0);
3457 record_alignment (now_seg
, 2);
3460 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3461 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3463 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3465 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3466 (valueT
) frag_now_fix (), frag_now
);
3467 symbol_table_insert (pool
->symbol
);
3469 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3471 #if defined OBJ_COFF || defined OBJ_ELF
3472 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3475 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3478 if (debug_type
== DEBUG_DWARF2
)
3479 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3481 /* First output the expression in the instruction to the pool. */
3482 emit_expr (&(pool
->literals
[entry
]),
3483 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3486 /* Mark the pool as empty. */
3487 pool
->next_free_entry
= 0;
3488 pool
->symbol
= NULL
;
3492 /* Forward declarations for functions below, in the MD interface
3494 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3495 static valueT
create_unwind_entry (int);
3496 static void start_unwind_section (const segT
, int);
3497 static void add_unwind_opcode (valueT
, int);
3498 static void flush_pending_unwind (void);
3500 /* Directives: Data. */
3503 s_arm_elf_cons (int nbytes
)
3507 #ifdef md_flush_pending_output
3508 md_flush_pending_output ();
3511 if (is_it_end_of_statement ())
3513 demand_empty_rest_of_line ();
3517 #ifdef md_cons_align
3518 md_cons_align (nbytes
);
3521 mapping_state (MAP_DATA
);
3525 char *base
= input_line_pointer
;
3529 if (exp
.X_op
!= O_symbol
)
3530 emit_expr (&exp
, (unsigned int) nbytes
);
3533 char *before_reloc
= input_line_pointer
;
3534 reloc
= parse_reloc (&input_line_pointer
);
3537 as_bad (_("unrecognized relocation suffix"));
3538 ignore_rest_of_line ();
3541 else if (reloc
== BFD_RELOC_UNUSED
)
3542 emit_expr (&exp
, (unsigned int) nbytes
);
3545 reloc_howto_type
*howto
= (reloc_howto_type
*)
3546 bfd_reloc_type_lookup (stdoutput
,
3547 (bfd_reloc_code_real_type
) reloc
);
3548 int size
= bfd_get_reloc_size (howto
);
3550 if (reloc
== BFD_RELOC_ARM_PLT32
)
3552 as_bad (_("(plt) is only valid on branch targets"));
3553 reloc
= BFD_RELOC_UNUSED
;
3558 as_bad (ngettext ("%s relocations do not fit in %d byte",
3559 "%s relocations do not fit in %d bytes",
3561 howto
->name
, nbytes
);
3564 /* We've parsed an expression stopping at O_symbol.
3565 But there may be more expression left now that we
3566 have parsed the relocation marker. Parse it again.
3567 XXX Surely there is a cleaner way to do this. */
3568 char *p
= input_line_pointer
;
3570 char *save_buf
= XNEWVEC (char, input_line_pointer
- base
);
3572 memcpy (save_buf
, base
, input_line_pointer
- base
);
3573 memmove (base
+ (input_line_pointer
- before_reloc
),
3574 base
, before_reloc
- base
);
3576 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3578 memcpy (base
, save_buf
, p
- base
);
3580 offset
= nbytes
- size
;
3581 p
= frag_more (nbytes
);
3582 memset (p
, 0, nbytes
);
3583 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3584 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3590 while (*input_line_pointer
++ == ',');
3592 /* Put terminator back into stream. */
3593 input_line_pointer
--;
3594 demand_empty_rest_of_line ();
3597 /* Emit an expression containing a 32-bit thumb instruction.
3598 Implementation based on put_thumb32_insn. */
3601 emit_thumb32_expr (expressionS
* exp
)
3603 expressionS exp_high
= *exp
;
3605 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3606 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3607 exp
->X_add_number
&= 0xffff;
3608 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
/* Guess the instruction size based on the opcode.  */

static int
thumb_insn_size (int opcode)
{
  if ((unsigned int) opcode < 0xe800u)
    return 2;
  else if ((unsigned int) opcode >= 0xe8000000u)
    return 4;
  else
    return 0;
}
3625 emit_insn (expressionS
*exp
, int nbytes
)
3629 if (exp
->X_op
== O_constant
)
3634 size
= thumb_insn_size (exp
->X_add_number
);
3638 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3640 as_bad (_(".inst.n operand too big. "\
3641 "Use .inst.w instead"));
3646 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3647 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3649 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3651 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3652 emit_thumb32_expr (exp
);
3654 emit_expr (exp
, (unsigned int) size
);
3656 it_fsm_post_encode ();
3660 as_bad (_("cannot determine Thumb instruction size. " \
3661 "Use .inst.n/.inst.w instead"));
3664 as_bad (_("constant expression required"));
3669 /* Like s_arm_elf_cons but do not use md_cons_align and
3670 set the mapping state to MAP_ARM/MAP_THUMB. */
3673 s_arm_elf_inst (int nbytes
)
3675 if (is_it_end_of_statement ())
3677 demand_empty_rest_of_line ();
3681 /* Calling mapping_state () here will not change ARM/THUMB,
3682 but will ensure not to be in DATA state. */
3685 mapping_state (MAP_THUMB
);
3690 as_bad (_("width suffixes are invalid in ARM mode"));
3691 ignore_rest_of_line ();
3697 mapping_state (MAP_ARM
);
3706 if (! emit_insn (& exp
, nbytes
))
3708 ignore_rest_of_line ();
3712 while (*input_line_pointer
++ == ',');
3714 /* Put terminator back into stream. */
3715 input_line_pointer
--;
3716 demand_empty_rest_of_line ();
3719 /* Parse a .rel31 directive. */
3722 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3729 if (*input_line_pointer
== '1')
3730 highbit
= 0x80000000;
3731 else if (*input_line_pointer
!= '0')
3732 as_bad (_("expected 0 or 1"));
3734 input_line_pointer
++;
3735 if (*input_line_pointer
!= ',')
3736 as_bad (_("missing comma"));
3737 input_line_pointer
++;
3739 #ifdef md_flush_pending_output
3740 md_flush_pending_output ();
3743 #ifdef md_cons_align
3747 mapping_state (MAP_DATA
);
3752 md_number_to_chars (p
, highbit
, 4);
3753 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3754 BFD_RELOC_ARM_PREL31
);
3756 demand_empty_rest_of_line ();
3759 /* Directives: AEABI stack-unwind tables. */
3761 /* Parse an unwind_fnstart directive. Simply records the current location. */
3764 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3766 demand_empty_rest_of_line ();
3767 if (unwind
.proc_start
)
3769 as_bad (_("duplicate .fnstart directive"));
3773 /* Mark the start of the function. */
3774 unwind
.proc_start
= expr_build_dot ();
3776 /* Reset the rest of the unwind info. */
3777 unwind
.opcode_count
= 0;
3778 unwind
.table_entry
= NULL
;
3779 unwind
.personality_routine
= NULL
;
3780 unwind
.personality_index
= -1;
3781 unwind
.frame_size
= 0;
3782 unwind
.fp_offset
= 0;
3783 unwind
.fp_reg
= REG_SP
;
3785 unwind
.sp_restored
= 0;
3789 /* Parse a handlerdata directive. Creates the exception handling table entry
3790 for the function. */
3793 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3795 demand_empty_rest_of_line ();
3796 if (!unwind
.proc_start
)
3797 as_bad (MISSING_FNSTART
);
3799 if (unwind
.table_entry
)
3800 as_bad (_("duplicate .handlerdata directive"));
3802 create_unwind_entry (1);
3805 /* Parse an unwind_fnend directive. Generates the index table entry. */
3808 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3813 unsigned int marked_pr_dependency
;
3815 demand_empty_rest_of_line ();
3817 if (!unwind
.proc_start
)
3819 as_bad (_(".fnend directive without .fnstart"));
3823 /* Add eh table entry. */
3824 if (unwind
.table_entry
== NULL
)
3825 val
= create_unwind_entry (0);
3829 /* Add index table entry. This is two words. */
3830 start_unwind_section (unwind
.saved_seg
, 1);
3831 frag_align (2, 0, 0);
3832 record_alignment (now_seg
, 2);
3834 ptr
= frag_more (8);
3836 where
= frag_now_fix () - 8;
3838 /* Self relative offset of the function start. */
3839 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3840 BFD_RELOC_ARM_PREL31
);
3842 /* Indicate dependency on EHABI-defined personality routines to the
3843 linker, if it hasn't been done already. */
3844 marked_pr_dependency
3845 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3846 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3847 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3849 static const char *const name
[] =
3851 "__aeabi_unwind_cpp_pr0",
3852 "__aeabi_unwind_cpp_pr1",
3853 "__aeabi_unwind_cpp_pr2"
3855 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3856 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3857 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3858 |= 1 << unwind
.personality_index
;
3862 /* Inline exception table entry. */
3863 md_number_to_chars (ptr
+ 4, val
, 4);
3865 /* Self relative offset of the table entry. */
3866 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3867 BFD_RELOC_ARM_PREL31
);
3869 /* Restore the original section. */
3870 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3872 unwind
.proc_start
= NULL
;
3876 /* Parse an unwind_cantunwind directive. */
3879 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3881 demand_empty_rest_of_line ();
3882 if (!unwind
.proc_start
)
3883 as_bad (MISSING_FNSTART
);
3885 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3886 as_bad (_("personality routine specified for cantunwind frame"));
3888 unwind
.personality_index
= -2;
3892 /* Parse a personalityindex directive. */
3895 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3899 if (!unwind
.proc_start
)
3900 as_bad (MISSING_FNSTART
);
3902 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3903 as_bad (_("duplicate .personalityindex directive"));
3907 if (exp
.X_op
!= O_constant
3908 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3910 as_bad (_("bad personality routine number"));
3911 ignore_rest_of_line ();
3915 unwind
.personality_index
= exp
.X_add_number
;
3917 demand_empty_rest_of_line ();
3921 /* Parse a personality directive. */
3924 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3928 if (!unwind
.proc_start
)
3929 as_bad (MISSING_FNSTART
);
3931 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3932 as_bad (_("duplicate .personality directive"));
3934 c
= get_symbol_name (& name
);
3935 p
= input_line_pointer
;
3937 ++ input_line_pointer
;
3938 unwind
.personality_routine
= symbol_find_or_make (name
);
3940 demand_empty_rest_of_line ();
3944 /* Parse a directive saving core registers. */
3947 s_arm_unwind_save_core (void)
3953 range
= parse_reg_list (&input_line_pointer
);
3956 as_bad (_("expected register list"));
3957 ignore_rest_of_line ();
3961 demand_empty_rest_of_line ();
3963 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3964 into .unwind_save {..., sp...}. We aren't bothered about the value of
3965 ip because it is clobbered by calls. */
3966 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3967 && (range
& 0x3000) == 0x1000)
3969 unwind
.opcode_count
--;
3970 unwind
.sp_restored
= 0;
3971 range
= (range
| 0x2000) & ~0x1000;
3972 unwind
.pending_offset
= 0;
3978 /* See if we can use the short opcodes. These pop a block of up to 8
3979 registers starting with r4, plus maybe r14. */
3980 for (n
= 0; n
< 8; n
++)
3982 /* Break at the first non-saved register. */
3983 if ((range
& (1 << (n
+ 4))) == 0)
3986 /* See if there are any other bits set. */
3987 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3989 /* Use the long form. */
3990 op
= 0x8000 | ((range
>> 4) & 0xfff);
3991 add_unwind_opcode (op
, 2);
3995 /* Use the short form. */
3997 op
= 0xa8; /* Pop r14. */
3999 op
= 0xa0; /* Do not pop r14. */
4001 add_unwind_opcode (op
, 1);
4008 op
= 0xb100 | (range
& 0xf);
4009 add_unwind_opcode (op
, 2);
4012 /* Record the number of bytes pushed. */
4013 for (n
= 0; n
< 16; n
++)
4015 if (range
& (1 << n
))
4016 unwind
.frame_size
+= 4;
4021 /* Parse a directive saving FPA registers. */
4024 s_arm_unwind_save_fpa (int reg
)
4030 /* Get Number of registers to transfer. */
4031 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4034 exp
.X_op
= O_illegal
;
4036 if (exp
.X_op
!= O_constant
)
4038 as_bad (_("expected , <constant>"));
4039 ignore_rest_of_line ();
4043 num_regs
= exp
.X_add_number
;
4045 if (num_regs
< 1 || num_regs
> 4)
4047 as_bad (_("number of registers must be in the range [1:4]"));
4048 ignore_rest_of_line ();
4052 demand_empty_rest_of_line ();
4057 op
= 0xb4 | (num_regs
- 1);
4058 add_unwind_opcode (op
, 1);
4063 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4064 add_unwind_opcode (op
, 2);
4066 unwind
.frame_size
+= num_regs
* 12;
4070 /* Parse a directive saving VFP registers for ARMv6 and above. */
4073 s_arm_unwind_save_vfp_armv6 (void)
4078 int num_vfpv3_regs
= 0;
4079 int num_regs_below_16
;
4081 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
4084 as_bad (_("expected register list"));
4085 ignore_rest_of_line ();
4089 demand_empty_rest_of_line ();
4091 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4092 than FSTMX/FLDMX-style ones). */
4094 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4096 num_vfpv3_regs
= count
;
4097 else if (start
+ count
> 16)
4098 num_vfpv3_regs
= start
+ count
- 16;
4100 if (num_vfpv3_regs
> 0)
4102 int start_offset
= start
> 16 ? start
- 16 : 0;
4103 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4104 add_unwind_opcode (op
, 2);
4107 /* Generate opcode for registers numbered in the range 0 .. 15. */
4108 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4109 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4110 if (num_regs_below_16
> 0)
4112 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4113 add_unwind_opcode (op
, 2);
4116 unwind
.frame_size
+= count
* 8;
4120 /* Parse a directive saving VFP registers for pre-ARMv6. */
4123 s_arm_unwind_save_vfp (void)
4129 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
4132 as_bad (_("expected register list"));
4133 ignore_rest_of_line ();
4137 demand_empty_rest_of_line ();
4142 op
= 0xb8 | (count
- 1);
4143 add_unwind_opcode (op
, 1);
4148 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4149 add_unwind_opcode (op
, 2);
4151 unwind
.frame_size
+= count
* 8 + 4;
4155 /* Parse a directive saving iWMMXt data registers. */
4158 s_arm_unwind_save_mmxwr (void)
4166 if (*input_line_pointer
== '{')
4167 input_line_pointer
++;
4171 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4175 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4180 as_tsktsk (_("register list not in ascending order"));
4183 if (*input_line_pointer
== '-')
4185 input_line_pointer
++;
4186 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4189 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4192 else if (reg
>= hi_reg
)
4194 as_bad (_("bad register range"));
4197 for (; reg
< hi_reg
; reg
++)
4201 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4203 skip_past_char (&input_line_pointer
, '}');
4205 demand_empty_rest_of_line ();
4207 /* Generate any deferred opcodes because we're going to be looking at
4209 flush_pending_unwind ();
4211 for (i
= 0; i
< 16; i
++)
4213 if (mask
& (1 << i
))
4214 unwind
.frame_size
+= 8;
4217 /* Attempt to combine with a previous opcode. We do this because gcc
4218 likes to output separate unwind directives for a single block of
4220 if (unwind
.opcode_count
> 0)
4222 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4223 if ((i
& 0xf8) == 0xc0)
4226 /* Only merge if the blocks are contiguous. */
4229 if ((mask
& 0xfe00) == (1 << 9))
4231 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4232 unwind
.opcode_count
--;
4235 else if (i
== 6 && unwind
.opcode_count
>= 2)
4237 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4241 op
= 0xffff << (reg
- 1);
4243 && ((mask
& op
) == (1u << (reg
- 1))))
4245 op
= (1 << (reg
+ i
+ 1)) - 1;
4246 op
&= ~((1 << reg
) - 1);
4248 unwind
.opcode_count
-= 2;
4255 /* We want to generate opcodes in the order the registers have been
4256 saved, ie. descending order. */
4257 for (reg
= 15; reg
>= -1; reg
--)
4259 /* Save registers in blocks. */
4261 || !(mask
& (1 << reg
)))
4263 /* We found an unsaved reg. Generate opcodes to save the
4270 op
= 0xc0 | (hi_reg
- 10);
4271 add_unwind_opcode (op
, 1);
4276 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4277 add_unwind_opcode (op
, 2);
4286 ignore_rest_of_line ();
4290 s_arm_unwind_save_mmxwcg (void)
4297 if (*input_line_pointer
== '{')
4298 input_line_pointer
++;
4300 skip_whitespace (input_line_pointer
);
4304 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4308 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4314 as_tsktsk (_("register list not in ascending order"));
4317 if (*input_line_pointer
== '-')
4319 input_line_pointer
++;
4320 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4323 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4326 else if (reg
>= hi_reg
)
4328 as_bad (_("bad register range"));
4331 for (; reg
< hi_reg
; reg
++)
4335 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4337 skip_past_char (&input_line_pointer
, '}');
4339 demand_empty_rest_of_line ();
4341 /* Generate any deferred opcodes because we're going to be looking at
4343 flush_pending_unwind ();
4345 for (reg
= 0; reg
< 16; reg
++)
4347 if (mask
& (1 << reg
))
4348 unwind
.frame_size
+= 4;
4351 add_unwind_opcode (op
, 2);
4354 ignore_rest_of_line ();
4358 /* Parse an unwind_save directive.
4359 If the argument is non-zero, this is a .vsave directive. */
4362 s_arm_unwind_save (int arch_v6
)
4365 struct reg_entry
*reg
;
4366 bfd_boolean had_brace
= FALSE
;
4368 if (!unwind
.proc_start
)
4369 as_bad (MISSING_FNSTART
);
4371 /* Figure out what sort of save we have. */
4372 peek
= input_line_pointer
;
4380 reg
= arm_reg_parse_multi (&peek
);
4384 as_bad (_("register expected"));
4385 ignore_rest_of_line ();
4394 as_bad (_("FPA .unwind_save does not take a register list"));
4395 ignore_rest_of_line ();
4398 input_line_pointer
= peek
;
4399 s_arm_unwind_save_fpa (reg
->number
);
4403 s_arm_unwind_save_core ();
4408 s_arm_unwind_save_vfp_armv6 ();
4410 s_arm_unwind_save_vfp ();
4413 case REG_TYPE_MMXWR
:
4414 s_arm_unwind_save_mmxwr ();
4417 case REG_TYPE_MMXWCG
:
4418 s_arm_unwind_save_mmxwcg ();
4422 as_bad (_(".unwind_save does not support this kind of register"));
4423 ignore_rest_of_line ();
4428 /* Parse an unwind_movsp directive. */
4431 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4437 if (!unwind
.proc_start
)
4438 as_bad (MISSING_FNSTART
);
4440 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4443 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4444 ignore_rest_of_line ();
4448 /* Optional constant. */
4449 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4451 if (immediate_for_directive (&offset
) == FAIL
)
4457 demand_empty_rest_of_line ();
4459 if (reg
== REG_SP
|| reg
== REG_PC
)
4461 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4465 if (unwind
.fp_reg
!= REG_SP
)
4466 as_bad (_("unexpected .unwind_movsp directive"));
4468 /* Generate opcode to restore the value. */
4470 add_unwind_opcode (op
, 1);
4472 /* Record the information for later. */
4473 unwind
.fp_reg
= reg
;
4474 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4475 unwind
.sp_restored
= 1;
4478 /* Parse an unwind_pad directive. */
4481 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4485 if (!unwind
.proc_start
)
4486 as_bad (MISSING_FNSTART
);
4488 if (immediate_for_directive (&offset
) == FAIL
)
4493 as_bad (_("stack increment must be multiple of 4"));
4494 ignore_rest_of_line ();
4498 /* Don't generate any opcodes, just record the details for later. */
4499 unwind
.frame_size
+= offset
;
4500 unwind
.pending_offset
+= offset
;
4502 demand_empty_rest_of_line ();
4505 /* Parse an unwind_setfp directive. */
4508 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4514 if (!unwind
.proc_start
)
4515 as_bad (MISSING_FNSTART
);
4517 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4518 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4521 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4523 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4525 as_bad (_("expected <reg>, <reg>"));
4526 ignore_rest_of_line ();
4530 /* Optional constant. */
4531 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4533 if (immediate_for_directive (&offset
) == FAIL
)
4539 demand_empty_rest_of_line ();
4541 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4543 as_bad (_("register must be either sp or set by a previous"
4544 "unwind_movsp directive"));
4548 /* Don't generate any opcodes, just record the information for later. */
4549 unwind
.fp_reg
= fp_reg
;
4551 if (sp_reg
== REG_SP
)
4552 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4554 unwind
.fp_offset
-= offset
;
4557 /* Parse an unwind_raw directive. */
4560 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4563 /* This is an arbitrary limit. */
4564 unsigned char op
[16];
4567 if (!unwind
.proc_start
)
4568 as_bad (MISSING_FNSTART
);
4571 if (exp
.X_op
== O_constant
4572 && skip_past_comma (&input_line_pointer
) != FAIL
)
4574 unwind
.frame_size
+= exp
.X_add_number
;
4578 exp
.X_op
= O_illegal
;
4580 if (exp
.X_op
!= O_constant
)
4582 as_bad (_("expected <offset>, <opcode>"));
4583 ignore_rest_of_line ();
4589 /* Parse the opcode. */
4594 as_bad (_("unwind opcode too long"));
4595 ignore_rest_of_line ();
4597 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4599 as_bad (_("invalid unwind opcode"));
4600 ignore_rest_of_line ();
4603 op
[count
++] = exp
.X_add_number
;
4605 /* Parse the next byte. */
4606 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4612 /* Add the opcode bytes in reverse order. */
4614 add_unwind_opcode (op
[count
], 1);
4616 demand_empty_rest_of_line ();
4620 /* Parse a .eabi_attribute directive. */
4623 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4625 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4627 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4628 attributes_set_explicitly
[tag
] = 1;
4631 /* Emit a tls fix for the symbol. */
4634 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4638 #ifdef md_flush_pending_output
4639 md_flush_pending_output ();
4642 #ifdef md_cons_align
4646 /* Since we're just labelling the code, there's no need to define a
4649 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4650 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4651 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4652 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4654 #endif /* OBJ_ELF */
4656 static void s_arm_arch (int);
4657 static void s_arm_object_arch (int);
4658 static void s_arm_cpu (int);
4659 static void s_arm_fpu (int);
4660 static void s_arm_arch_extension (int);
4665 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4672 if (exp
.X_op
== O_symbol
)
4673 exp
.X_op
= O_secrel
;
4675 emit_expr (&exp
, 4);
4677 while (*input_line_pointer
++ == ',');
4679 input_line_pointer
--;
4680 demand_empty_rest_of_line ();
4684 /* This table describes all the machine specific pseudo-ops the assembler
4685 has to support. The fields are:
4686 pseudo-op name without dot
4687 function to call to execute this pseudo-op
4688 Integer arg to pass to the function. */
4690 const pseudo_typeS md_pseudo_table
[] =
4692 /* Never called because '.req' does not start a line. */
4693 { "req", s_req
, 0 },
4694 /* Following two are likewise never called. */
4697 { "unreq", s_unreq
, 0 },
4698 { "bss", s_bss
, 0 },
4699 { "align", s_align_ptwo
, 2 },
4700 { "arm", s_arm
, 0 },
4701 { "thumb", s_thumb
, 0 },
4702 { "code", s_code
, 0 },
4703 { "force_thumb", s_force_thumb
, 0 },
4704 { "thumb_func", s_thumb_func
, 0 },
4705 { "thumb_set", s_thumb_set
, 0 },
4706 { "even", s_even
, 0 },
4707 { "ltorg", s_ltorg
, 0 },
4708 { "pool", s_ltorg
, 0 },
4709 { "syntax", s_syntax
, 0 },
4710 { "cpu", s_arm_cpu
, 0 },
4711 { "arch", s_arm_arch
, 0 },
4712 { "object_arch", s_arm_object_arch
, 0 },
4713 { "fpu", s_arm_fpu
, 0 },
4714 { "arch_extension", s_arm_arch_extension
, 0 },
4716 { "word", s_arm_elf_cons
, 4 },
4717 { "long", s_arm_elf_cons
, 4 },
4718 { "inst.n", s_arm_elf_inst
, 2 },
4719 { "inst.w", s_arm_elf_inst
, 4 },
4720 { "inst", s_arm_elf_inst
, 0 },
4721 { "rel31", s_arm_rel31
, 0 },
4722 { "fnstart", s_arm_unwind_fnstart
, 0 },
4723 { "fnend", s_arm_unwind_fnend
, 0 },
4724 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4725 { "personality", s_arm_unwind_personality
, 0 },
4726 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4727 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4728 { "save", s_arm_unwind_save
, 0 },
4729 { "vsave", s_arm_unwind_save
, 1 },
4730 { "movsp", s_arm_unwind_movsp
, 0 },
4731 { "pad", s_arm_unwind_pad
, 0 },
4732 { "setfp", s_arm_unwind_setfp
, 0 },
4733 { "unwind_raw", s_arm_unwind_raw
, 0 },
4734 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4735 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4739 /* These are used for dwarf. */
4743 /* These are used for dwarf2. */
4744 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
4745 { "loc", dwarf2_directive_loc
, 0 },
4746 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4748 { "extend", float_cons
, 'x' },
4749 { "ldouble", float_cons
, 'x' },
4750 { "packed", float_cons
, 'p' },
4752 {"secrel32", pe_directive_secrel
, 0},
4755 /* These are for compatibility with CodeComposer Studio. */
4756 {"ref", s_ccs_ref
, 0},
4757 {"def", s_ccs_def
, 0},
4758 {"asmfunc", s_ccs_asmfunc
, 0},
4759 {"endasmfunc", s_ccs_endasmfunc
, 0},
4764 /* Parser functions used exclusively in instruction operands. */
4766 /* Generic immediate-value read function for use in insn parsing.
4767 STR points to the beginning of the immediate (the leading #);
4768 VAL receives the value; if the value is outside [MIN, MAX]
4769 issue an error. PREFIX_OPT is true if the immediate prefix is
4773 parse_immediate (char **str
, int *val
, int min
, int max
,
4774 bfd_boolean prefix_opt
)
4778 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4779 if (exp
.X_op
!= O_constant
)
4781 inst
.error
= _("constant expression required");
4785 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4787 inst
.error
= _("immediate value out of range");
4791 *val
= exp
.X_add_number
;
4795 /* Less-generic immediate-value read function with the possibility of loading a
4796 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4797 instructions. Puts the result directly in inst.operands[i]. */
4800 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
4801 bfd_boolean allow_symbol_p
)
4804 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
4807 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
4809 if (exp_p
->X_op
== O_constant
)
4811 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
4812 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4813 O_constant. We have to be careful not to break compilation for
4814 32-bit X_add_number, though. */
4815 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
4817 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4818 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
4820 inst
.operands
[i
].regisimm
= 1;
4823 else if (exp_p
->X_op
== O_big
4824 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
4826 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4828 /* Bignums have their least significant bits in
4829 generic_bignum[0]. Make sure we put 32 bits in imm and
4830 32 bits in reg, in a (hopefully) portable way. */
4831 gas_assert (parts
!= 0);
4833 /* Make sure that the number is not too big.
4834 PR 11972: Bignums can now be sign-extended to the
4835 size of a .octa so check that the out of range bits
4836 are all zero or all one. */
4837 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
4839 LITTLENUM_TYPE m
= -1;
4841 if (generic_bignum
[parts
* 2] != 0
4842 && generic_bignum
[parts
* 2] != m
)
4845 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
4846 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4850 inst
.operands
[i
].imm
= 0;
4851 for (j
= 0; j
< parts
; j
++, idx
++)
4852 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4853 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4854 inst
.operands
[i
].reg
= 0;
4855 for (j
= 0; j
< parts
; j
++, idx
++)
4856 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4857 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4858 inst
.operands
[i
].regisimm
= 1;
4860 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
4868 /* Returns the pseudo-register number of an FPA immediate constant,
4869 or FAIL if there isn't a valid constant here. */
4872 parse_fpa_immediate (char ** str
)
4874 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4880 /* First try and match exact strings, this is to guarantee
4881 that some formats will work even for cross assembly. */
4883 for (i
= 0; fp_const
[i
]; i
++)
4885 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4889 *str
+= strlen (fp_const
[i
]);
4890 if (is_end_of_line
[(unsigned char) **str
])
4896 /* Just because we didn't get a match doesn't mean that the constant
4897 isn't valid, just that it is in a format that we don't
4898 automatically recognize. Try parsing it with the standard
4899 expression routines. */
4901 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4903 /* Look for a raw floating point number. */
4904 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4905 && is_end_of_line
[(unsigned char) *save_in
])
4907 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4909 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4911 if (words
[j
] != fp_values
[i
][j
])
4915 if (j
== MAX_LITTLENUMS
)
4923 /* Try and parse a more complex expression, this will probably fail
4924 unless the code uses a floating point prefix (eg "0f"). */
4925 save_in
= input_line_pointer
;
4926 input_line_pointer
= *str
;
4927 if (expression (&exp
) == absolute_section
4928 && exp
.X_op
== O_big
4929 && exp
.X_add_number
< 0)
4931 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4933 #define X_PRECISION 5
4934 #define E_PRECISION 15L
4935 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
4937 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4939 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4941 if (words
[j
] != fp_values
[i
][j
])
4945 if (j
== MAX_LITTLENUMS
)
4947 *str
= input_line_pointer
;
4948 input_line_pointer
= save_in
;
4955 *str
= input_line_pointer
;
4956 input_line_pointer
= save_in
;
4957 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.
   That is: bits 0-18 are zero, and bits 25-30 are 0b111111 when the
   "B" bit (bit 29) is set, or 0b100000 when it is clear.  */

static int
is_quarter_float (unsigned imm)
{
  unsigned expected;

  /* The low 19 bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Exponent-pattern bits 25-30 depend on bit 29 ("B").  */
  expected = (imm & 0x20000000) ? 0x3e000000u : 0x40000000u;
  return (imm & 0x7e000000) == expected;
}
4972 /* Detect the presence of a floating point or integer zero constant,
4976 parse_ifimm_zero (char **in
)
4980 if (!is_immediate_prefix (**in
))
4982 /* In unified syntax, all prefixes are optional. */
4983 if (!unified_syntax
)
4989 /* Accept #0x0 as a synonym for #0. */
4990 if (strncmp (*in
, "0x", 2) == 0)
4993 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
4998 error_code
= atof_generic (in
, ".", EXP_CHARS
,
4999 &generic_floating_point_number
);
5002 && generic_floating_point_number
.sign
== '+'
5003 && (generic_floating_point_number
.low
5004 > generic_floating_point_number
.leader
))
5010 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5011 0baBbbbbbc defgh000 00000000 00000000.
5012 The zero and minus-zero cases need special handling, since they can't be
5013 encoded in the "quarter-precision" float format, but can nonetheless be
5014 loaded as integer constants. */
5017 parse_qfloat_immediate (char **ccp
, int *immed
)
5021 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5022 int found_fpchar
= 0;
5024 skip_past_char (&str
, '#');
5026 /* We must not accidentally parse an integer as a floating-point number. Make
5027 sure that the value we parse is not an integer by checking for special
5028 characters '.' or 'e'.
5029 FIXME: This is a horrible hack, but doing better is tricky because type
5030 information isn't in a very usable state at parse time. */
5032 skip_whitespace (fpnum
);
5034 if (strncmp (fpnum
, "0x", 2) == 0)
5038 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
5039 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5049 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5051 unsigned fpword
= 0;
5054 /* Our FP word must be 32 bits (single-precision FP). */
5055 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5057 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5061 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5074 /* Shift operands. */
5077 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5080 struct asm_shift_name
5083 enum shift_kind kind
;
5086 /* Third argument to parse_shift. */
5087 enum parse_shift_mode
5089 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5090 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5091 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5092 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5093 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5096 /* Parse a <shift> specifier on an ARM data processing instruction.
5097 This has three forms:
5099 (LSL|LSR|ASL|ASR|ROR) Rs
5100 (LSL|LSR|ASL|ASR|ROR) #imm
5103 Note that ASL is assimilated to LSL in the instruction encoding, and
5104 RRX to ROR #0 (which cannot be written as such). */
5107 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5109 const struct asm_shift_name
*shift_name
;
5110 enum shift_kind shift
;
5115 for (p
= *str
; ISALPHA (*p
); p
++)
5120 inst
.error
= _("shift expression expected");
5124 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5127 if (shift_name
== NULL
)
5129 inst
.error
= _("shift expression expected");
5133 shift
= shift_name
->kind
;
5137 case NO_SHIFT_RESTRICT
:
5138 case SHIFT_IMMEDIATE
: break;
5140 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5141 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5143 inst
.error
= _("'LSL' or 'ASR' required");
5148 case SHIFT_LSL_IMMEDIATE
:
5149 if (shift
!= SHIFT_LSL
)
5151 inst
.error
= _("'LSL' required");
5156 case SHIFT_ASR_IMMEDIATE
:
5157 if (shift
!= SHIFT_ASR
)
5159 inst
.error
= _("'ASR' required");
5167 if (shift
!= SHIFT_RRX
)
5169 /* Whitespace can appear here if the next thing is a bare digit. */
5170 skip_whitespace (p
);
5172 if (mode
== NO_SHIFT_RESTRICT
5173 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5175 inst
.operands
[i
].imm
= reg
;
5176 inst
.operands
[i
].immisreg
= 1;
5178 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5181 inst
.operands
[i
].shift_kind
= shift
;
5182 inst
.operands
[i
].shifted
= 1;
5187 /* Parse a <shifter_operand> for an ARM data processing instruction:
5190 #<immediate>, <rotate>
5194 where <shift> is defined by parse_shift above, and <rotate> is a
5195 multiple of 2 between 0 and 30. Validation of immediate operands
5196 is deferred to md_apply_fix. */
5199 parse_shifter_operand (char **str
, int i
)
5204 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5206 inst
.operands
[i
].reg
= value
;
5207 inst
.operands
[i
].isreg
= 1;
5209 /* parse_shift will override this if appropriate */
5210 inst
.reloc
.exp
.X_op
= O_constant
;
5211 inst
.reloc
.exp
.X_add_number
= 0;
5213 if (skip_past_comma (str
) == FAIL
)
5216 /* Shift operation on register. */
5217 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5220 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
5223 if (skip_past_comma (str
) == SUCCESS
)
5225 /* #x, y -- ie explicit rotation by Y. */
5226 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5229 if (exp
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
5231 inst
.error
= _("constant expression expected");
5235 value
= exp
.X_add_number
;
5236 if (value
< 0 || value
> 30 || value
% 2 != 0)
5238 inst
.error
= _("invalid rotation");
5241 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
5243 inst
.error
= _("invalid constant");
5247 /* Encode as specified. */
5248 inst
.operands
[i
].imm
= inst
.reloc
.exp
.X_add_number
| value
<< 7;
5252 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
5253 inst
.reloc
.pc_rel
= 0;
5257 /* Group relocation information. Each entry in the table contains the
5258 textual name of the relocation as may appear in assembler source
5259 and must end with a colon.
5260 Along with this textual name are the relocation codes to be used if
5261 the corresponding instruction is an ALU instruction (ADD or SUB only),
5262 an LDR, an LDRS, or an LDC. */
5264 struct group_reloc_table_entry
5275 /* Varieties of non-ALU group relocation. */
5282 static struct group_reloc_table_entry group_reloc_table
[] =
5283 { /* Program counter relative: */
5285 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5290 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5291 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5292 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5293 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5295 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5300 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5301 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5302 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5303 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5305 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5306 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5307 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5308 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5309 /* Section base relative */
5311 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5316 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5317 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5318 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5319 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5321 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5326 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5327 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5328 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5329 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5331 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5332 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5333 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5334 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5335 /* Absolute thumb alu relocations. */
5337 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5342 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5347 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5352 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5357 /* Given the address of a pointer pointing to the textual name of a group
5358 relocation as may appear in assembler source, attempt to find its details
5359 in group_reloc_table. The pointer will be updated to the character after
5360 the trailing colon. On failure, FAIL will be returned; SUCCESS
5361 otherwise. On success, *entry will be updated to point at the relevant
5362 group_reloc_table entry. */
5365 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5368 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5370 int length
= strlen (group_reloc_table
[i
].name
);
5372 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5373 && (*str
)[length
] == ':')
5375 *out
= &group_reloc_table
[i
];
5376 *str
+= (length
+ 1);
5384 /* Parse a <shifter_operand> for an ARM data processing instruction
5385 (as for parse_shifter_operand) where group relocations are allowed:
5388 #<immediate>, <rotate>
5389 #:<group_reloc>:<expression>
5393 where <group_reloc> is one of the strings defined in group_reloc_table.
5394 The hashes are optional.
5396 Everything else is as for parse_shifter_operand. */
5398 static parse_operand_result
5399 parse_shifter_operand_group_reloc (char **str
, int i
)
5401 /* Determine if we have the sequence of characters #: or just :
5402 coming next. If we do, then we check for a group relocation.
5403 If we don't, punt the whole lot to parse_shifter_operand. */
5405 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5406 || (*str
)[0] == ':')
5408 struct group_reloc_table_entry
*entry
;
5410 if ((*str
)[0] == '#')
5415 /* Try to parse a group relocation. Anything else is an error. */
5416 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5418 inst
.error
= _("unknown group relocation");
5419 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5422 /* We now have the group relocation table entry corresponding to
5423 the name in the assembler source. Next, we parse the expression. */
5424 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
5425 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5427 /* Record the relocation type (always the ALU variant here). */
5428 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5429 gas_assert (inst
.reloc
.type
!= 0);
5431 return PARSE_OPERAND_SUCCESS
;
5434 return parse_shifter_operand (str
, i
) == SUCCESS
5435 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5437 /* Never reached. */
5440 /* Parse a Neon alignment expression. Information is written to
5441 inst.operands[i]. We assume the initial ':' has been skipped.
5443 align .imm = align << 8, .immisalign=1, .preind=0 */
5444 static parse_operand_result
5445 parse_neon_alignment (char **str
, int i
)
5450 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5452 if (exp
.X_op
!= O_constant
)
5454 inst
.error
= _("alignment must be constant");
5455 return PARSE_OPERAND_FAIL
;
5458 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5459 inst
.operands
[i
].immisalign
= 1;
5460 /* Alignments are not pre-indexes. */
5461 inst
.operands
[i
].preind
= 0;
5464 return PARSE_OPERAND_SUCCESS
;
5467 /* Parse all forms of an ARM address expression. Information is written
5468 to inst.operands[i] and/or inst.reloc.
5470 Preindexed addressing (.preind=1):
5472 [Rn, #offset] .reg=Rn .reloc.exp=offset
5473 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5474 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5475 .shift_kind=shift .reloc.exp=shift_imm
5477 These three may have a trailing ! which causes .writeback to be set also.
5479 Postindexed addressing (.postind=1, .writeback=1):
5481 [Rn], #offset .reg=Rn .reloc.exp=offset
5482 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5483 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5484 .shift_kind=shift .reloc.exp=shift_imm
5486 Unindexed addressing (.preind=0, .postind=0):
5488 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5492 [Rn]{!} shorthand for [Rn,#0]{!}
5493 =immediate .isreg=0 .reloc.exp=immediate
5494 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5496 It is the caller's responsibility to check for addressing modes not
5497 supported by the instruction, and to set inst.reloc.type. */
5499 static parse_operand_result
5500 parse_address_main (char **str
, int i
, int group_relocations
,
5501 group_reloc_type group_type
)
5506 if (skip_past_char (&p
, '[') == FAIL
)
5508 if (skip_past_char (&p
, '=') == FAIL
)
5510 /* Bare address - translate to PC-relative offset. */
5511 inst
.reloc
.pc_rel
= 1;
5512 inst
.operands
[i
].reg
= REG_PC
;
5513 inst
.operands
[i
].isreg
= 1;
5514 inst
.operands
[i
].preind
= 1;
5516 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_OPT_PREFIX_BIG
))
5517 return PARSE_OPERAND_FAIL
;
5519 else if (parse_big_immediate (&p
, i
, &inst
.reloc
.exp
,
5520 /*allow_symbol_p=*/TRUE
))
5521 return PARSE_OPERAND_FAIL
;
5524 return PARSE_OPERAND_SUCCESS
;
5527 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5528 skip_whitespace (p
);
5530 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5532 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5533 return PARSE_OPERAND_FAIL
;
5535 inst
.operands
[i
].reg
= reg
;
5536 inst
.operands
[i
].isreg
= 1;
5538 if (skip_past_comma (&p
) == SUCCESS
)
5540 inst
.operands
[i
].preind
= 1;
5543 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5545 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5547 inst
.operands
[i
].imm
= reg
;
5548 inst
.operands
[i
].immisreg
= 1;
5550 if (skip_past_comma (&p
) == SUCCESS
)
5551 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5552 return PARSE_OPERAND_FAIL
;
5554 else if (skip_past_char (&p
, ':') == SUCCESS
)
5556 /* FIXME: '@' should be used here, but it's filtered out by generic
5557 code before we get to see it here. This may be subject to
5559 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5561 if (result
!= PARSE_OPERAND_SUCCESS
)
5566 if (inst
.operands
[i
].negative
)
5568 inst
.operands
[i
].negative
= 0;
5572 if (group_relocations
5573 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5575 struct group_reloc_table_entry
*entry
;
5577 /* Skip over the #: or : sequence. */
5583 /* Try to parse a group relocation. Anything else is an
5585 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5587 inst
.error
= _("unknown group relocation");
5588 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5591 /* We now have the group relocation table entry corresponding to
5592 the name in the assembler source. Next, we parse the
5594 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5595 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5597 /* Record the relocation type. */
5601 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldr_code
;
5605 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5609 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->ldc_code
;
5616 if (inst
.reloc
.type
== 0)
5618 inst
.error
= _("this group relocation is not allowed on this instruction");
5619 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5626 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5627 return PARSE_OPERAND_FAIL
;
5628 /* If the offset is 0, find out if it's a +0 or -0. */
5629 if (inst
.reloc
.exp
.X_op
== O_constant
5630 && inst
.reloc
.exp
.X_add_number
== 0)
5632 skip_whitespace (q
);
5636 skip_whitespace (q
);
5639 inst
.operands
[i
].negative
= 1;
5644 else if (skip_past_char (&p
, ':') == SUCCESS
)
5646 /* FIXME: '@' should be used here, but it's filtered out by generic code
5647 before we get to see it here. This may be subject to change. */
5648 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5650 if (result
!= PARSE_OPERAND_SUCCESS
)
5654 if (skip_past_char (&p
, ']') == FAIL
)
5656 inst
.error
= _("']' expected");
5657 return PARSE_OPERAND_FAIL
;
5660 if (skip_past_char (&p
, '!') == SUCCESS
)
5661 inst
.operands
[i
].writeback
= 1;
5663 else if (skip_past_comma (&p
) == SUCCESS
)
5665 if (skip_past_char (&p
, '{') == SUCCESS
)
5667 /* [Rn], {expr} - unindexed, with option */
5668 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5669 0, 255, TRUE
) == FAIL
)
5670 return PARSE_OPERAND_FAIL
;
5672 if (skip_past_char (&p
, '}') == FAIL
)
5674 inst
.error
= _("'}' expected at end of 'option' field");
5675 return PARSE_OPERAND_FAIL
;
5677 if (inst
.operands
[i
].preind
)
5679 inst
.error
= _("cannot combine index with option");
5680 return PARSE_OPERAND_FAIL
;
5683 return PARSE_OPERAND_SUCCESS
;
5687 inst
.operands
[i
].postind
= 1;
5688 inst
.operands
[i
].writeback
= 1;
5690 if (inst
.operands
[i
].preind
)
5692 inst
.error
= _("cannot combine pre- and post-indexing");
5693 return PARSE_OPERAND_FAIL
;
5697 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5699 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5701 /* We might be using the immediate for alignment already. If we
5702 are, OR the register number into the low-order bits. */
5703 if (inst
.operands
[i
].immisalign
)
5704 inst
.operands
[i
].imm
|= reg
;
5706 inst
.operands
[i
].imm
= reg
;
5707 inst
.operands
[i
].immisreg
= 1;
5709 if (skip_past_comma (&p
) == SUCCESS
)
5710 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5711 return PARSE_OPERAND_FAIL
;
5717 if (inst
.operands
[i
].negative
)
5719 inst
.operands
[i
].negative
= 0;
5722 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
5723 return PARSE_OPERAND_FAIL
;
5724 /* If the offset is 0, find out if it's a +0 or -0. */
5725 if (inst
.reloc
.exp
.X_op
== O_constant
5726 && inst
.reloc
.exp
.X_add_number
== 0)
5728 skip_whitespace (q
);
5732 skip_whitespace (q
);
5735 inst
.operands
[i
].negative
= 1;
5741 /* If at this point neither .preind nor .postind is set, we have a
5742 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5743 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5745 inst
.operands
[i
].preind
= 1;
5746 inst
.reloc
.exp
.X_op
= O_constant
;
5747 inst
.reloc
.exp
.X_add_number
= 0;
5750 return PARSE_OPERAND_SUCCESS
;
5754 parse_address (char **str
, int i
)
5756 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5760 static parse_operand_result
5761 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5763 return parse_address_main (str
, i
, 1, type
);
5766 /* Parse an operand for a MOVW or MOVT instruction. */
5768 parse_half (char **str
)
5773 skip_past_char (&p
, '#');
5774 if (strncasecmp (p
, ":lower16:", 9) == 0)
5775 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
5776 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5777 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
5779 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
5782 skip_whitespace (p
);
5785 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
5788 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
5790 if (inst
.reloc
.exp
.X_op
!= O_constant
)
5792 inst
.error
= _("constant expression expected");
5795 if (inst
.reloc
.exp
.X_add_number
< 0
5796 || inst
.reloc
.exp
.X_add_number
> 0xffff)
5798 inst
.error
= _("immediate value out of range");
5806 /* Miscellaneous. */
5808 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5809 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5811 parse_psr (char **str
, bfd_boolean lhs
)
5814 unsigned long psr_field
;
5815 const struct asm_psr
*psr
;
5817 bfd_boolean is_apsr
= FALSE
;
5818 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
5820 /* PR gas/12698: If the user has specified -march=all then m_profile will
5821 be TRUE, but we want to ignore it in this case as we are building for any
5822 CPU type, including non-m variants. */
5823 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
5826 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5827 feature for ease of use and backwards compatibility. */
5829 if (strncasecmp (p
, "SPSR", 4) == 0)
5832 goto unsupported_psr
;
5834 psr_field
= SPSR_BIT
;
5836 else if (strncasecmp (p
, "CPSR", 4) == 0)
5839 goto unsupported_psr
;
5843 else if (strncasecmp (p
, "APSR", 4) == 0)
5845 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5846 and ARMv7-R architecture CPUs. */
5855 while (ISALNUM (*p
) || *p
== '_');
5857 if (strncasecmp (start
, "iapsr", 5) == 0
5858 || strncasecmp (start
, "eapsr", 5) == 0
5859 || strncasecmp (start
, "xpsr", 4) == 0
5860 || strncasecmp (start
, "psr", 3) == 0)
5861 p
= start
+ strcspn (start
, "rR") + 1;
5863 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5869 /* If APSR is being written, a bitfield may be specified. Note that
5870 APSR itself is handled above. */
5871 if (psr
->field
<= 3)
5873 psr_field
= psr
->field
;
5879 /* M-profile MSR instructions have the mask field set to "10", except
5880 *PSR variants which modify APSR, which may use a different mask (and
5881 have been handled already). Do that by setting the PSR_f field
5883 return psr
->field
| (lhs
? PSR_f
: 0);
5886 goto unsupported_psr
;
5892 /* A suffix follows. */
5898 while (ISALNUM (*p
) || *p
== '_');
5902 /* APSR uses a notation for bits, rather than fields. */
5903 unsigned int nzcvq_bits
= 0;
5904 unsigned int g_bit
= 0;
5907 for (bit
= start
; bit
!= p
; bit
++)
5909 switch (TOLOWER (*bit
))
5912 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
5916 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
5920 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
5924 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
5928 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
5932 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
5936 inst
.error
= _("unexpected bit specified after APSR");
5941 if (nzcvq_bits
== 0x1f)
5946 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
5948 inst
.error
= _("selected processor does not "
5949 "support DSP extension");
5956 if ((nzcvq_bits
& 0x20) != 0
5957 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
5958 || (g_bit
& 0x2) != 0)
5960 inst
.error
= _("bad bitmask specified after APSR");
5966 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
5971 psr_field
|= psr
->field
;
5977 goto error
; /* Garbage after "[CS]PSR". */
5979 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
5980 is deprecated, but allow it anyway. */
5984 as_tsktsk (_("writing to APSR without specifying a bitmask is "
5987 else if (!m_profile
)
5988 /* These bits are never right for M-profile devices: don't set them
5989 (only code paths which read/write APSR reach here). */
5990 psr_field
|= (PSR_c
| PSR_f
);
5996 inst
.error
= _("selected processor does not support requested special "
5997 "purpose register");
6001 inst
.error
= _("flag for {c}psr instruction expected");
6005 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6006 value suitable for splatting into the AIF field of the instruction. */
6009 parse_cps_flags (char **str
)
6018 case '\0': case ',':
6021 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
6022 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
6023 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
6026 inst
.error
= _("unrecognized CPS flag");
6031 if (saw_a_flag
== 0)
6033 inst
.error
= _("missing CPS flags");
6041 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6042 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6045 parse_endian_specifier (char **str
)
6050 if (strncasecmp (s
, "BE", 2))
6052 else if (strncasecmp (s
, "LE", 2))
6056 inst
.error
= _("valid endian specifiers are be or le");
6060 if (ISALNUM (s
[2]) || s
[2] == '_')
6062 inst
.error
= _("valid endian specifiers are be or le");
6067 return little_endian
;
6070 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6071 value suitable for poking into the rotate field of an sxt or sxta
6072 instruction, or FAIL on error. */
6075 parse_ror (char **str
)
6080 if (strncasecmp (s
, "ROR", 3) == 0)
6084 inst
.error
= _("missing rotation field after comma");
6088 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6093 case 0: *str
= s
; return 0x0;
6094 case 8: *str
= s
; return 0x1;
6095 case 16: *str
= s
; return 0x2;
6096 case 24: *str
= s
; return 0x3;
6099 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6104 /* Parse a conditional code (from conds[] below). The value returned is in the
6105 range 0 .. 14, or FAIL. */
6107 parse_cond (char **str
)
6110 const struct asm_cond
*c
;
6112 /* Condition codes are always 2 characters, so matching up to
6113 3 characters is sufficient. */
6118 while (ISALPHA (*q
) && n
< 3)
6120 cond
[n
] = TOLOWER (*q
);
6125 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6128 inst
.error
= _("condition required");
6136 /* Record a use of the given feature. */
6138 record_feature_use (const arm_feature_set
*feature
)
6141 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
6143 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
6146 /* If the given feature available in the selected CPU, mark it as used.
6147 Returns TRUE iff feature is available. */
6149 mark_feature_used (const arm_feature_set
*feature
)
6151 /* Ensure the option is valid on the current architecture. */
6152 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
6155 /* Add the appropriate architecture feature for the barrier option used.
6157 record_feature_use (feature
);
6162 /* Parse an option for a barrier instruction. Returns the encoding for the
6165 parse_barrier (char **str
)
6168 const struct asm_barrier_opt
*o
;
6171 while (ISALPHA (*q
))
6174 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6179 if (!mark_feature_used (&o
->arch
))
6186 /* Parse the operands of a table branch instruction. Similar to a memory
6189 parse_tb (char **str
)
6194 if (skip_past_char (&p
, '[') == FAIL
)
6196 inst
.error
= _("'[' expected");
6200 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6202 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6205 inst
.operands
[0].reg
= reg
;
6207 if (skip_past_comma (&p
) == FAIL
)
6209 inst
.error
= _("',' expected");
6213 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6215 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6218 inst
.operands
[0].imm
= reg
;
6220 if (skip_past_comma (&p
) == SUCCESS
)
6222 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6224 if (inst
.reloc
.exp
.X_add_number
!= 1)
6226 inst
.error
= _("invalid shift");
6229 inst
.operands
[0].shifted
= 1;
6232 if (skip_past_char (&p
, ']') == FAIL
)
6234 inst
.error
= _("']' expected");
6241 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6242 information on the types the operands can take and how they are encoded.
6243 Up to four operands may be read; this function handles setting the
6244 ".present" field for each read operand itself.
6245 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6246 else returns FAIL. */
6249 parse_neon_mov (char **str
, int *which_operand
)
6251 int i
= *which_operand
, val
;
6252 enum arm_reg_type rtype
;
6254 struct neon_type_el optype
;
6256 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6258 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6259 inst
.operands
[i
].reg
= val
;
6260 inst
.operands
[i
].isscalar
= 1;
6261 inst
.operands
[i
].vectype
= optype
;
6262 inst
.operands
[i
++].present
= 1;
6264 if (skip_past_comma (&ptr
) == FAIL
)
6267 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6270 inst
.operands
[i
].reg
= val
;
6271 inst
.operands
[i
].isreg
= 1;
6272 inst
.operands
[i
].present
= 1;
6274 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6277 /* Cases 0, 1, 2, 3, 5 (D only). */
6278 if (skip_past_comma (&ptr
) == FAIL
)
6281 inst
.operands
[i
].reg
= val
;
6282 inst
.operands
[i
].isreg
= 1;
6283 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6284 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6285 inst
.operands
[i
].isvec
= 1;
6286 inst
.operands
[i
].vectype
= optype
;
6287 inst
.operands
[i
++].present
= 1;
6289 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6291 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6292 Case 13: VMOV <Sd>, <Rm> */
6293 inst
.operands
[i
].reg
= val
;
6294 inst
.operands
[i
].isreg
= 1;
6295 inst
.operands
[i
].present
= 1;
6297 if (rtype
== REG_TYPE_NQ
)
6299 first_error (_("can't use Neon quad register here"));
6302 else if (rtype
!= REG_TYPE_VFS
)
6305 if (skip_past_comma (&ptr
) == FAIL
)
6307 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6309 inst
.operands
[i
].reg
= val
;
6310 inst
.operands
[i
].isreg
= 1;
6311 inst
.operands
[i
].present
= 1;
6314 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6317 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6318 Case 1: VMOV<c><q> <Dd>, <Dm>
6319 Case 8: VMOV.F32 <Sd>, <Sm>
6320 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6322 inst
.operands
[i
].reg
= val
;
6323 inst
.operands
[i
].isreg
= 1;
6324 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6325 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6326 inst
.operands
[i
].isvec
= 1;
6327 inst
.operands
[i
].vectype
= optype
;
6328 inst
.operands
[i
].present
= 1;
6330 if (skip_past_comma (&ptr
) == SUCCESS
)
6335 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6338 inst
.operands
[i
].reg
= val
;
6339 inst
.operands
[i
].isreg
= 1;
6340 inst
.operands
[i
++].present
= 1;
6342 if (skip_past_comma (&ptr
) == FAIL
)
6345 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6348 inst
.operands
[i
].reg
= val
;
6349 inst
.operands
[i
].isreg
= 1;
6350 inst
.operands
[i
].present
= 1;
6353 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6354 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6355 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6356 Case 10: VMOV.F32 <Sd>, #<imm>
6357 Case 11: VMOV.F64 <Dd>, #<imm> */
6358 inst
.operands
[i
].immisfloat
= 1;
6359 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6361 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6362 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6366 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6370 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6373 inst
.operands
[i
].reg
= val
;
6374 inst
.operands
[i
].isreg
= 1;
6375 inst
.operands
[i
++].present
= 1;
6377 if (skip_past_comma (&ptr
) == FAIL
)
6380 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6382 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6383 inst
.operands
[i
].reg
= val
;
6384 inst
.operands
[i
].isscalar
= 1;
6385 inst
.operands
[i
].present
= 1;
6386 inst
.operands
[i
].vectype
= optype
;
6388 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6390 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6391 inst
.operands
[i
].reg
= val
;
6392 inst
.operands
[i
].isreg
= 1;
6393 inst
.operands
[i
++].present
= 1;
6395 if (skip_past_comma (&ptr
) == FAIL
)
6398 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6401 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6405 inst
.operands
[i
].reg
= val
;
6406 inst
.operands
[i
].isreg
= 1;
6407 inst
.operands
[i
].isvec
= 1;
6408 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6409 inst
.operands
[i
].vectype
= optype
;
6410 inst
.operands
[i
].present
= 1;
6412 if (rtype
== REG_TYPE_VFS
)
6416 if (skip_past_comma (&ptr
) == FAIL
)
6418 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6421 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6424 inst
.operands
[i
].reg
= val
;
6425 inst
.operands
[i
].isreg
= 1;
6426 inst
.operands
[i
].isvec
= 1;
6427 inst
.operands
[i
].issingle
= 1;
6428 inst
.operands
[i
].vectype
= optype
;
6429 inst
.operands
[i
].present
= 1;
6432 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6436 inst
.operands
[i
].reg
= val
;
6437 inst
.operands
[i
].isreg
= 1;
6438 inst
.operands
[i
].isvec
= 1;
6439 inst
.operands
[i
].issingle
= 1;
6440 inst
.operands
[i
].vectype
= optype
;
6441 inst
.operands
[i
].present
= 1;
6446 first_error (_("parse error"));
6450 /* Successfully parsed the operands. Update args. */
6456 first_error (_("expected comma"));
6460 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM code occupies the low 16 bits
   and the Thumb code the high 16; parse_operands selects the right half
   based on the instruction set being assembled.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
6469 /* Matcher codes for parse_operands. */
6470 enum operand_parse_code
6472 OP_stop
, /* end of line */
6474 OP_RR
, /* ARM register */
6475 OP_RRnpc
, /* ARM register, not r15 */
6476 OP_RRnpcsp
, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6477 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
6478 OP_RRnpctw
, /* ARM register, not r15 in Thumb-state or with writeback,
6479 optional trailing ! */
6480 OP_RRw
, /* ARM register, not r15, optional trailing ! */
6481 OP_RCP
, /* Coprocessor number */
6482 OP_RCN
, /* Coprocessor register */
6483 OP_RF
, /* FPA register */
6484 OP_RVS
, /* VFP single precision register */
6485 OP_RVD
, /* VFP double precision register (0..15) */
6486 OP_RND
, /* Neon double precision register (0..31) */
6487 OP_RNQ
, /* Neon quad precision register */
6488 OP_RVSD
, /* VFP single or double precision register */
6489 OP_RNDQ
, /* Neon double or quad precision register */
6490 OP_RNSDQ
, /* Neon single, double or quad precision register */
6491 OP_RNSC
, /* Neon scalar D[X] */
6492 OP_RVC
, /* VFP control register */
6493 OP_RMF
, /* Maverick F register */
6494 OP_RMD
, /* Maverick D register */
6495 OP_RMFX
, /* Maverick FX register */
6496 OP_RMDX
, /* Maverick DX register */
6497 OP_RMAX
, /* Maverick AX register */
6498 OP_RMDS
, /* Maverick DSPSC register */
6499 OP_RIWR
, /* iWMMXt wR register */
6500 OP_RIWC
, /* iWMMXt wC register */
6501 OP_RIWG
, /* iWMMXt wCG register */
6502 OP_RXA
, /* XScale accumulator register */
6504 OP_REGLST
, /* ARM register list */
6505 OP_VRSLST
, /* VFP single-precision register list */
6506 OP_VRDLST
, /* VFP double-precision register list */
6507 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
6508 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
6509 OP_NSTRLST
, /* Neon element/structure list */
6511 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
6512 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
6513 OP_RSVD_FI0
, /* VFP S or D reg, or floating point immediate zero. */
6514 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
6515 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
6516 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
6517 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
6518 OP_VMOV
, /* Neon VMOV operands. */
6519 OP_RNDQ_Ibig
, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6520 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
6521 OP_RIWR_I32z
, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6523 OP_I0
, /* immediate zero */
6524 OP_I7
, /* immediate value 0 .. 7 */
6525 OP_I15
, /* 0 .. 15 */
6526 OP_I16
, /* 1 .. 16 */
6527 OP_I16z
, /* 0 .. 16 */
6528 OP_I31
, /* 0 .. 31 */
6529 OP_I31w
, /* 0 .. 31, optional trailing ! */
6530 OP_I32
, /* 1 .. 32 */
6531 OP_I32z
, /* 0 .. 32 */
6532 OP_I63
, /* 0 .. 63 */
6533 OP_I63s
, /* -64 .. 63 */
6534 OP_I64
, /* 1 .. 64 */
6535 OP_I64z
, /* 0 .. 64 */
6536 OP_I255
, /* 0 .. 255 */
6538 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
6539 OP_I7b
, /* 0 .. 7 */
6540 OP_I15b
, /* 0 .. 15 */
6541 OP_I31b
, /* 0 .. 31 */
6543 OP_SH
, /* shifter operand */
6544 OP_SHG
, /* shifter operand with possible group relocation */
6545 OP_ADDR
, /* Memory address expression (any mode) */
6546 OP_ADDRGLDR
, /* Mem addr expr (any mode) with possible LDR group reloc */
6547 OP_ADDRGLDRS
, /* Mem addr expr (any mode) with possible LDRS group reloc */
6548 OP_ADDRGLDC
, /* Mem addr expr (any mode) with possible LDC group reloc */
6549 OP_EXP
, /* arbitrary expression */
6550 OP_EXPi
, /* same, with optional immediate prefix */
6551 OP_EXPr
, /* same, with optional relocation suffix */
6552 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
6553 OP_IROT1
, /* VCADD rotate immediate: 90, 270. */
6554 OP_IROT2
, /* VCMLA rotate immediate: 0, 90, 180, 270. */
6556 OP_CPSF
, /* CPS flags */
6557 OP_ENDI
, /* Endianness specifier */
6558 OP_wPSR
, /* CPSR/SPSR/APSR mask for msr (writing). */
6559 OP_rPSR
, /* CPSR/SPSR/APSR mask for msr (reading). */
6560 OP_COND
, /* conditional code */
6561 OP_TB
, /* Table branch. */
6563 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
6565 OP_RRnpc_I0
, /* ARM register or literal 0 */
6566 OP_RR_EXr
, /* ARM register or expression with opt. reloc stuff. */
6567 OP_RR_EXi
, /* ARM register or expression with imm prefix */
6568 OP_RF_IF
, /* FPA register or immediate */
6569 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
6570 OP_RIWC_RIWG
, /* iWMMXt wC or wCG reg */
6572 /* Optional operands. */
6573 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
6574 OP_oI31b
, /* 0 .. 31 */
6575 OP_oI32b
, /* 1 .. 32 */
6576 OP_oI32z
, /* 0 .. 32 */
6577 OP_oIffffb
, /* 0 .. 65535 */
6578 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
6580 OP_oRR
, /* ARM register */
6581 OP_oRRnpc
, /* ARM register, not the PC */
6582 OP_oRRnpcsp
, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6583 OP_oRRw
, /* ARM register, not r15, optional trailing ! */
6584 OP_oRND
, /* Optional Neon double precision register */
6585 OP_oRNQ
, /* Optional Neon quad precision register */
6586 OP_oRNDQ
, /* Optional Neon double or quad precision register */
6587 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
6588 OP_oSHll
, /* LSL immediate */
6589 OP_oSHar
, /* ASR immediate */
6590 OP_oSHllar
, /* LSL or ASR immediate */
6591 OP_oROR
, /* ROR 0/8/16/24 */
6592 OP_oBARRIER_I15
, /* Option argument for a barrier instruction. */
6594 /* Some pre-defined mixed (ARM/THUMB) operands. */
6595 OP_RR_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RR
, OP_RRnpcsp
),
6596 OP_RRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RRnpc
, OP_RRnpcsp
),
6597 OP_oRRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc
, OP_oRRnpcsp
),
6599 OP_FIRST_OPTIONAL
= OP_oI7b
6602 /* Generic instruction operand parser. This does no encoding and no
6603 semantic validation; it merely squirrels values away in the inst
6604 structure. Returns SUCCESS or FAIL depending on whether the
6605 specified grammar matched. */
6607 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6609 unsigned const int *upat
= pattern
;
6610 char *backtrack_pos
= 0;
6611 const char *backtrack_error
= 0;
6612 int i
, val
= 0, backtrack_index
= 0;
6613 enum arm_reg_type rtype
;
6614 parse_operand_result result
;
6615 unsigned int op_parse_code
;
6617 #define po_char_or_fail(chr) \
6620 if (skip_past_char (&str, chr) == FAIL) \
6625 #define po_reg_or_fail(regtype) \
6628 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6629 & inst.operands[i].vectype); \
6632 first_error (_(reg_expected_msgs[regtype])); \
6635 inst.operands[i].reg = val; \
6636 inst.operands[i].isreg = 1; \
6637 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6638 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6639 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6640 || rtype == REG_TYPE_VFD \
6641 || rtype == REG_TYPE_NQ); \
6645 #define po_reg_or_goto(regtype, label) \
6648 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6649 & inst.operands[i].vectype); \
6653 inst.operands[i].reg = val; \
6654 inst.operands[i].isreg = 1; \
6655 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6656 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6657 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6658 || rtype == REG_TYPE_VFD \
6659 || rtype == REG_TYPE_NQ); \
6663 #define po_imm_or_fail(min, max, popt) \
6666 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6668 inst.operands[i].imm = val; \
6672 #define po_scalar_or_goto(elsz, label) \
6675 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6678 inst.operands[i].reg = val; \
6679 inst.operands[i].isscalar = 1; \
6683 #define po_misc_or_fail(expr) \
6691 #define po_misc_or_fail_no_backtrack(expr) \
6695 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6696 backtrack_pos = 0; \
6697 if (result != PARSE_OPERAND_SUCCESS) \
6702 #define po_barrier_or_imm(str) \
6705 val = parse_barrier (&str); \
6706 if (val == FAIL && ! ISALPHA (*str)) \
6709 /* ISB can only take SY as an option. */ \
6710 || ((inst.instruction & 0xf0) == 0x60 \
6713 inst.error = _("invalid barrier type"); \
6714 backtrack_pos = 0; \
6720 skip_whitespace (str
);
6722 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6724 op_parse_code
= upat
[i
];
6725 if (op_parse_code
>= 1<<16)
6726 op_parse_code
= thumb
? (op_parse_code
>> 16)
6727 : (op_parse_code
& ((1<<16)-1));
6729 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6731 /* Remember where we are in case we need to backtrack. */
6732 gas_assert (!backtrack_pos
);
6733 backtrack_pos
= str
;
6734 backtrack_error
= inst
.error
;
6735 backtrack_index
= i
;
6738 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6739 po_char_or_fail (',');
6741 switch (op_parse_code
)
6749 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6750 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6751 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6752 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6753 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6754 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6756 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6758 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6760 /* Also accept generic coprocessor regs for unknown registers. */
6762 po_reg_or_fail (REG_TYPE_CN
);
6764 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6765 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6766 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6767 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6768 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6769 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6770 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6771 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6772 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6773 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6775 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6777 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6778 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6780 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6782 /* Neon scalar. Using an element size of 8 means that some invalid
6783 scalars are accepted here, so deal with those in later code. */
6784 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6788 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6791 po_imm_or_fail (0, 0, TRUE
);
6796 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6801 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
6804 if (parse_ifimm_zero (&str
))
6805 inst
.operands
[i
].imm
= 0;
6809 = _("only floating point zero is allowed as immediate value");
6817 po_scalar_or_goto (8, try_rr
);
6820 po_reg_or_fail (REG_TYPE_RN
);
6826 po_scalar_or_goto (8, try_nsdq
);
6829 po_reg_or_fail (REG_TYPE_NSDQ
);
6835 po_scalar_or_goto (8, try_ndq
);
6838 po_reg_or_fail (REG_TYPE_NDQ
);
6844 po_scalar_or_goto (8, try_vfd
);
6847 po_reg_or_fail (REG_TYPE_VFD
);
6852 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6853 not careful then bad things might happen. */
6854 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
6859 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
6862 /* There's a possibility of getting a 64-bit immediate here, so
6863 we need special handling. */
6864 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6867 inst
.error
= _("immediate value is out of range");
6875 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
6878 po_imm_or_fail (0, 63, TRUE
);
6883 po_char_or_fail ('[');
6884 po_reg_or_fail (REG_TYPE_RN
);
6885 po_char_or_fail (']');
6891 po_reg_or_fail (REG_TYPE_RN
);
6892 if (skip_past_char (&str
, '!') == SUCCESS
)
6893 inst
.operands
[i
].writeback
= 1;
6897 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
6898 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
6899 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
6900 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
6901 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
6902 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
6903 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
6904 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
6905 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
6906 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
6907 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
6908 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
6910 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
6912 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
6913 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
6915 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
6916 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
6917 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
6918 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
6920 /* Immediate variants */
6922 po_char_or_fail ('{');
6923 po_imm_or_fail (0, 255, TRUE
);
6924 po_char_or_fail ('}');
6928 /* The expression parser chokes on a trailing !, so we have
6929 to find it first and zap it. */
6932 while (*s
&& *s
!= ',')
6937 inst
.operands
[i
].writeback
= 1;
6939 po_imm_or_fail (0, 31, TRUE
);
6947 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6952 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6957 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
6959 if (inst
.reloc
.exp
.X_op
== O_symbol
)
6961 val
= parse_reloc (&str
);
6964 inst
.error
= _("unrecognized relocation suffix");
6967 else if (val
!= BFD_RELOC_UNUSED
)
6969 inst
.operands
[i
].imm
= val
;
6970 inst
.operands
[i
].hasreloc
= 1;
6975 /* Operand for MOVW or MOVT. */
6977 po_misc_or_fail (parse_half (&str
));
6980 /* Register or expression. */
6981 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
6982 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
6984 /* Register or immediate. */
6985 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
6986 I0
: po_imm_or_fail (0, 0, FALSE
); break;
6988 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
6990 if (!is_immediate_prefix (*str
))
6993 val
= parse_fpa_immediate (&str
);
6996 /* FPA immediates are encoded as registers 8-15.
6997 parse_fpa_immediate has already applied the offset. */
6998 inst
.operands
[i
].reg
= val
;
6999 inst
.operands
[i
].isreg
= 1;
7002 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
7003 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
7005 /* Two kinds of register. */
7008 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7010 || (rege
->type
!= REG_TYPE_MMXWR
7011 && rege
->type
!= REG_TYPE_MMXWC
7012 && rege
->type
!= REG_TYPE_MMXWCG
))
7014 inst
.error
= _("iWMMXt data or control register expected");
7017 inst
.operands
[i
].reg
= rege
->number
;
7018 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
7024 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7026 || (rege
->type
!= REG_TYPE_MMXWC
7027 && rege
->type
!= REG_TYPE_MMXWCG
))
7029 inst
.error
= _("iWMMXt control register expected");
7032 inst
.operands
[i
].reg
= rege
->number
;
7033 inst
.operands
[i
].isreg
= 1;
7038 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
7039 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
7040 case OP_oROR
: val
= parse_ror (&str
); break;
7041 case OP_COND
: val
= parse_cond (&str
); break;
7042 case OP_oBARRIER_I15
:
7043 po_barrier_or_imm (str
); break;
7045 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
7051 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
7052 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
7054 inst
.error
= _("Banked registers are not available with this "
7060 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7064 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7067 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7069 if (strncasecmp (str
, "APSR_", 5) == 0)
7076 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7077 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7078 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7079 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7080 default: found
= 16;
7084 inst
.operands
[i
].isvec
= 1;
7085 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7086 inst
.operands
[i
].reg
= REG_PC
;
7093 po_misc_or_fail (parse_tb (&str
));
7096 /* Register lists. */
7098 val
= parse_reg_list (&str
);
7101 inst
.operands
[i
].writeback
= 1;
7107 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
7111 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
7115 /* Allow Q registers too. */
7116 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7121 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7123 inst
.operands
[i
].issingle
= 1;
7128 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7133 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7134 &inst
.operands
[i
].vectype
);
7137 /* Addressing modes */
7139 po_misc_or_fail (parse_address (&str
, i
));
7143 po_misc_or_fail_no_backtrack (
7144 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7148 po_misc_or_fail_no_backtrack (
7149 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7153 po_misc_or_fail_no_backtrack (
7154 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7158 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7162 po_misc_or_fail_no_backtrack (
7163 parse_shifter_operand_group_reloc (&str
, i
));
7167 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7171 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7175 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7179 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7182 /* Various value-based sanity checks and shared operations. We
7183 do not signal immediate failures for the register constraints;
7184 this allows a syntax error to take precedence. */
7185 switch (op_parse_code
)
7193 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7194 inst
.error
= BAD_PC
;
7199 if (inst
.operands
[i
].isreg
)
7201 if (inst
.operands
[i
].reg
== REG_PC
)
7202 inst
.error
= BAD_PC
;
7203 else if (inst
.operands
[i
].reg
== REG_SP
7204 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7205 relaxed since ARMv8-A. */
7206 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
7209 inst
.error
= BAD_SP
;
7215 if (inst
.operands
[i
].isreg
7216 && inst
.operands
[i
].reg
== REG_PC
7217 && (inst
.operands
[i
].writeback
|| thumb
))
7218 inst
.error
= BAD_PC
;
7227 case OP_oBARRIER_I15
:
7236 inst
.operands
[i
].imm
= val
;
7243 /* If we get here, this operand was successfully parsed. */
7244 inst
.operands
[i
].present
= 1;
7248 inst
.error
= BAD_ARGS
;
7253 /* The parse routine should already have set inst.error, but set a
7254 default here just in case. */
7256 inst
.error
= _("syntax error");
7260 /* Do not backtrack over a trailing optional argument that
7261 absorbed some text. We will only fail again, with the
7262 'garbage following instruction' error message, which is
7263 probably less helpful than the current one. */
7264 if (backtrack_index
== i
&& backtrack_pos
!= str
7265 && upat
[i
+1] == OP_stop
)
7268 inst
.error
= _("syntax error");
7272 /* Try again, skipping the optional argument at backtrack_pos. */
7273 str
= backtrack_pos
;
7274 inst
.error
= backtrack_error
;
7275 inst
.operands
[backtrack_index
].present
= 0;
7276 i
= backtrack_index
;
7280 /* Check that we have parsed all the arguments. */
7281 if (*str
!= '\0' && !inst
.error
)
7282 inst
.error
= _("garbage following instruction");
7284 return inst
.error
? FAIL
: SUCCESS
;
7287 #undef po_char_or_fail
7288 #undef po_reg_or_fail
7289 #undef po_reg_or_goto
7290 #undef po_imm_or_fail
7291 #undef po_scalar_or_fail
7292 #undef po_barrier_or_imm
7294 /* Shorthand macro for instruction encoding functions issuing errors. */
7295 #define constraint(expr, err) \
7306 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7307 instructions are unpredictable if these registers are used. This
7308 is the BadReg predicate in ARM's Thumb-2 documentation.
7310 Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
7311 places, while the restriction on REG_SP was relaxed since ARMv8-A. */
7312 #define reject_bad_reg(reg) \
7314 if (reg == REG_PC) \
7316 inst.error = BAD_PC; \
7319 else if (reg == REG_SP \
7320 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
7322 inst.error = BAD_SP; \
7327 /* If REG is R13 (the stack pointer), warn that its use is
7329 #define warn_deprecated_sp(reg) \
7331 if (warn_on_deprecated && reg == REG_SP) \
7332 as_tsktsk (_("use of r13 is deprecated")); \
7335 /* Functions for operand encoding. ARM, then Thumb. */
7337 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7339 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7341 The only binary encoding difference is the Coprocessor number. Coprocessor
7342 9 is used for half-precision calculations or conversions. The format of the
7343 instruction is the same as the equivalent Coprocessor 10 instruction that
7344 exists for Single-Precision operation. */
7347 do_scalar_fp16_v82_encode (void)
7349 if (inst
.cond
!= COND_ALWAYS
)
7350 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7351 " the behaviour is UNPREDICTABLE"));
7352 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
7355 inst
.instruction
= (inst
.instruction
& 0xfffff0ff) | 0x900;
7356 mark_feature_used (&arm_ext_fp16
);
7359 /* If VAL can be encoded in the immediate field of an ARM instruction,
7360 return the encoded form. Otherwise, return FAIL. */
7363 encode_arm_immediate (unsigned int val
)
7370 for (i
= 2; i
< 32; i
+= 2)
7371 if ((a
= rotate_left (val
, i
)) <= 0xff)
7372 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7377 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7378 return the encoded form. Otherwise, return FAIL. */
7380 encode_thumb32_immediate (unsigned int val
)
7387 for (i
= 1; i
<= 24; i
++)
7390 if ((val
& ~(0xff << i
)) == 0)
7391 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7395 if (val
== ((a
<< 16) | a
))
7397 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7401 if (val
== ((a
<< 16) | a
))
7402 return 0x200 | (a
>> 8);
7406 /* Encode a VFP SP or DP register number into inst.instruction. */
7409 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7411 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7414 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7417 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7420 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7425 first_error (_("D register out of range for selected VFP version"));
7433 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7437 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7441 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7445 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7449 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7453 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7461 /* Encode a <shift> in an ARM-format instruction. The immediate,
7462 if any, is handled by md_apply_fix. */
7464 encode_arm_shift (int i
)
7466 /* register-shifted register. */
7467 if (inst
.operands
[i
].immisreg
)
7470 for (op_index
= 0; op_index
<= i
; ++op_index
)
7472 /* Check the operand only when it's presented. In pre-UAL syntax,
7473 if the destination register is the same as the first operand, two
7474 register form of the instruction can be used. */
7475 if (inst
.operands
[op_index
].present
&& inst
.operands
[op_index
].isreg
7476 && inst
.operands
[op_index
].reg
== REG_PC
)
7477 as_warn (UNPRED_REG ("r15"));
7480 if (inst
.operands
[i
].imm
== REG_PC
)
7481 as_warn (UNPRED_REG ("r15"));
7484 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7485 inst
.instruction
|= SHIFT_ROR
<< 5;
7488 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7489 if (inst
.operands
[i
].immisreg
)
7491 inst
.instruction
|= SHIFT_BY_REG
;
7492 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7495 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7500 encode_arm_shifter_operand (int i
)
7502 if (inst
.operands
[i
].isreg
)
7504 inst
.instruction
|= inst
.operands
[i
].reg
;
7505 encode_arm_shift (i
);
7509 inst
.instruction
|= INST_IMMEDIATE
;
7510 if (inst
.reloc
.type
!= BFD_RELOC_ARM_IMMEDIATE
)
7511 inst
.instruction
|= inst
.operands
[i
].imm
;
7515 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7517 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7520 Generate an error if the operand is not a register. */
7521 constraint (!inst
.operands
[i
].isreg
,
7522 _("Instruction does not support =N addresses"));
7524 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7526 if (inst
.operands
[i
].preind
)
7530 inst
.error
= _("instruction does not accept preindexed addressing");
7533 inst
.instruction
|= PRE_INDEX
;
7534 if (inst
.operands
[i
].writeback
)
7535 inst
.instruction
|= WRITE_BACK
;
7538 else if (inst
.operands
[i
].postind
)
7540 gas_assert (inst
.operands
[i
].writeback
);
7542 inst
.instruction
|= WRITE_BACK
;
7544 else /* unindexed - only for coprocessor */
7546 inst
.error
= _("instruction does not accept unindexed addressing");
7550 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7551 && (((inst
.instruction
& 0x000f0000) >> 16)
7552 == ((inst
.instruction
& 0x0000f000) >> 12)))
7553 as_warn ((inst
.instruction
& LOAD_BIT
)
7554 ? _("destination register same as write-back base")
7555 : _("source register same as write-back base"));
7558 /* inst.operands[i] was set up by parse_address. Encode it into an
7559 ARM-format mode 2 load or store instruction. If is_t is true,
7560 reject forms that cannot be used with a T instruction (i.e. not
7563 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7565 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7567 encode_arm_addr_mode_common (i
, is_t
);
7569 if (inst
.operands
[i
].immisreg
)
7571 constraint ((inst
.operands
[i
].imm
== REG_PC
7572 || (is_pc
&& inst
.operands
[i
].writeback
)),
7574 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7575 inst
.instruction
|= inst
.operands
[i
].imm
;
7576 if (!inst
.operands
[i
].negative
)
7577 inst
.instruction
|= INDEX_UP
;
7578 if (inst
.operands
[i
].shifted
)
7580 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7581 inst
.instruction
|= SHIFT_ROR
<< 5;
7584 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7585 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7589 else /* immediate offset in inst.reloc */
7591 if (is_pc
&& !inst
.reloc
.pc_rel
)
7593 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7595 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7596 cannot use PC in addressing.
7597 PC cannot be used in writeback addressing, either. */
7598 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7601 /* Use of PC in str is deprecated for ARMv7. */
7602 if (warn_on_deprecated
7604 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7605 as_tsktsk (_("use of PC in this instruction is deprecated"));
7608 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7610 /* Prefer + for zero encoded value. */
7611 if (!inst
.operands
[i
].negative
)
7612 inst
.instruction
|= INDEX_UP
;
7613 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM
;
7618 /* inst.operands[i] was set up by parse_address. Encode it into an
7619 ARM-format mode 3 load or store instruction. Reject forms that
7620 cannot be used with such instructions. If is_t is true, reject
7621 forms that cannot be used with a T instruction (i.e. not
7624 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7626 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7628 inst
.error
= _("instruction does not accept scaled register index");
7632 encode_arm_addr_mode_common (i
, is_t
);
7634 if (inst
.operands
[i
].immisreg
)
7636 constraint ((inst
.operands
[i
].imm
== REG_PC
7637 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
7639 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
7641 inst
.instruction
|= inst
.operands
[i
].imm
;
7642 if (!inst
.operands
[i
].negative
)
7643 inst
.instruction
|= INDEX_UP
;
7645 else /* immediate offset in inst.reloc */
7647 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.reloc
.pc_rel
7648 && inst
.operands
[i
].writeback
),
7650 inst
.instruction
|= HWOFFSET_IMM
;
7651 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7653 /* Prefer + for zero encoded value. */
7654 if (!inst
.operands
[i
].negative
)
7655 inst
.instruction
|= INDEX_UP
;
7657 inst
.reloc
.type
= BFD_RELOC_ARM_OFFSET_IMM8
;
7662 /* Write immediate bits [7:0] to the following locations:
7664 |28/24|23 19|18 16|15 4|3 0|
7665 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7667 This function is used by VMOV/VMVN/VORR/VBIC. */
7670 neon_write_immbits (unsigned immbits
)
7672 inst
.instruction
|= immbits
& 0xf;
7673 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
7674 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case the corresponding half is treated as zero and not
   written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* Fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D: i.e. every byte of IMM is either all-zeros or all-ones.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}
/* For immediate of above form, return 0bABCD: collect bit 0 of each
   byte into a 4-bit value.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
	 | ((imm & 0x01000000) >> 21);
}
/* Compress quarter-float representation to 0b...000 abcdefgh.
   Bit 31 (sign) maps to bit 7; bits 25:19 map to bits 6:0.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
}
7743 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7744 the instruction. *OP is passed as the initial value of the op field, and
7745 may be set to a different value depending on the constant (i.e.
7746 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7747 MVN). If the immediate looks like a repeated pattern then also
7748 try smaller element sizes. */
7751 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
7752 unsigned *immbits
, int *op
, int size
,
7753 enum neon_el_type type
)
7755 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7757 if (type
== NT_float
&& !float_p
)
7760 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
7762 if (size
!= 32 || *op
== 1)
7764 *immbits
= neon_qfloat_bits (immlo
);
7770 if (neon_bits_same_in_bytes (immhi
)
7771 && neon_bits_same_in_bytes (immlo
))
7775 *immbits
= (neon_squash_bits (immhi
) << 4)
7776 | neon_squash_bits (immlo
);
7787 if (immlo
== (immlo
& 0x000000ff))
7792 else if (immlo
== (immlo
& 0x0000ff00))
7794 *immbits
= immlo
>> 8;
7797 else if (immlo
== (immlo
& 0x00ff0000))
7799 *immbits
= immlo
>> 16;
7802 else if (immlo
== (immlo
& 0xff000000))
7804 *immbits
= immlo
>> 24;
7807 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
7809 *immbits
= (immlo
>> 8) & 0xff;
7812 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
7814 *immbits
= (immlo
>> 16) & 0xff;
7818 if ((immlo
& 0xffff) != (immlo
>> 16))
7825 if (immlo
== (immlo
& 0x000000ff))
7830 else if (immlo
== (immlo
& 0x0000ff00))
7832 *immbits
= immlo
>> 8;
7836 if ((immlo
& 0xff) != (immlo
>> 8))
7841 if (immlo
== (immlo
& 0x000000ff))
7843 /* Don't allow MVN with 8-bit immediate. */
7853 #if defined BFD_HOST_64_BIT
7854 /* Returns TRUE if double precision value V may be cast
7855 to single precision without loss of accuracy. */
7858 is_double_a_single (bfd_int64_t v
)
7860 int exp
= (int)((v
>> 52) & 0x7FF);
7861 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7863 return (exp
== 0 || exp
== 0x7FF
7864 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
7865 && (mantissa
& 0x1FFFFFFFl
) == 0;
7868 /* Returns a double precision value casted to single precision
7869 (ignoring the least significant bits in exponent and mantissa). */
7872 double_to_single (bfd_int64_t v
)
7874 int sign
= (int) ((v
>> 63) & 1l);
7875 int exp
= (int) ((v
>> 52) & 0x7FF);
7876 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
7882 exp
= exp
- 1023 + 127;
7891 /* No denormalized numbers. */
7897 return (sign
<< 31) | (exp
<< 23) | mantissa
;
7899 #endif /* BFD_HOST_64_BIT */
7908 static void do_vfp_nsyn_opcode (const char *);
7910 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7911 Determine whether it can be performed with a move instruction; if
7912 it can, convert inst.instruction to that move instruction and
7913 return TRUE; if it can't, convert inst.instruction to a literal-pool
7914 load and return FALSE. If this is not a valid thing to do in the
7915 current context, set inst.error and return TRUE.
7917 inst.operands[i] describes the destination register. */
7920 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
7923 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
7924 bfd_boolean arm_p
= (t
== CONST_ARM
);
7927 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
7931 if ((inst
.instruction
& tbit
) == 0)
7933 inst
.error
= _("invalid pseudo operation");
7937 if (inst
.reloc
.exp
.X_op
!= O_constant
7938 && inst
.reloc
.exp
.X_op
!= O_symbol
7939 && inst
.reloc
.exp
.X_op
!= O_big
)
7941 inst
.error
= _("constant expression expected");
7945 if (inst
.reloc
.exp
.X_op
== O_constant
7946 || inst
.reloc
.exp
.X_op
== O_big
)
7948 #if defined BFD_HOST_64_BIT
7953 if (inst
.reloc
.exp
.X_op
== O_big
)
7955 LITTLENUM_TYPE w
[X_PRECISION
];
7958 if (inst
.reloc
.exp
.X_add_number
== -1)
7960 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
7962 /* FIXME: Should we check words w[2..5] ? */
7967 #if defined BFD_HOST_64_BIT
7969 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
7970 << LITTLENUM_NUMBER_OF_BITS
)
7971 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
7972 << LITTLENUM_NUMBER_OF_BITS
)
7973 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
7974 << LITTLENUM_NUMBER_OF_BITS
)
7975 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
7977 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
7978 | (l
[0] & LITTLENUM_MASK
);
7982 v
= inst
.reloc
.exp
.X_add_number
;
7984 if (!inst
.operands
[i
].issingle
)
7988 /* LDR should not use lead in a flag-setting instruction being
7989 chosen so we do not check whether movs can be used. */
7991 if ((ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
7992 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
7993 && inst
.operands
[i
].reg
!= 13
7994 && inst
.operands
[i
].reg
!= 15)
7996 /* Check if on thumb2 it can be done with a mov.w, mvn or
7997 movw instruction. */
7998 unsigned int newimm
;
7999 bfd_boolean isNegated
;
8001 newimm
= encode_thumb32_immediate (v
);
8002 if (newimm
!= (unsigned int) FAIL
)
8006 newimm
= encode_thumb32_immediate (~v
);
8007 if (newimm
!= (unsigned int) FAIL
)
8011 /* The number can be loaded with a mov.w or mvn
8013 if (newimm
!= (unsigned int) FAIL
8014 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
8016 inst
.instruction
= (0xf04f0000 /* MOV.W. */
8017 | (inst
.operands
[i
].reg
<< 8));
8018 /* Change to MOVN. */
8019 inst
.instruction
|= (isNegated
? 0x200000 : 0);
8020 inst
.instruction
|= (newimm
& 0x800) << 15;
8021 inst
.instruction
|= (newimm
& 0x700) << 4;
8022 inst
.instruction
|= (newimm
& 0x0ff);
8025 /* The number can be loaded with a movw instruction. */
8026 else if ((v
& ~0xFFFF) == 0
8027 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8029 int imm
= v
& 0xFFFF;
8031 inst
.instruction
= 0xf2400000; /* MOVW. */
8032 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
8033 inst
.instruction
|= (imm
& 0xf000) << 4;
8034 inst
.instruction
|= (imm
& 0x0800) << 15;
8035 inst
.instruction
|= (imm
& 0x0700) << 4;
8036 inst
.instruction
|= (imm
& 0x00ff);
8043 int value
= encode_arm_immediate (v
);
8047 /* This can be done with a mov instruction. */
8048 inst
.instruction
&= LITERAL_MASK
;
8049 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
8050 inst
.instruction
|= value
& 0xfff;
8054 value
= encode_arm_immediate (~ v
);
8057 /* This can be done with a mvn instruction. */
8058 inst
.instruction
&= LITERAL_MASK
;
8059 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
8060 inst
.instruction
|= value
& 0xfff;
8064 else if (t
== CONST_VEC
&& ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
8067 unsigned immbits
= 0;
8068 unsigned immlo
= inst
.operands
[1].imm
;
8069 unsigned immhi
= inst
.operands
[1].regisimm
8070 ? inst
.operands
[1].reg
8071 : inst
.reloc
.exp
.X_unsigned
8073 : ((bfd_int64_t
)((int) immlo
)) >> 32;
8074 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8075 &op
, 64, NT_invtype
);
8079 neon_invert_size (&immlo
, &immhi
, 64);
8081 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8082 &op
, 64, NT_invtype
);
8087 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
8093 /* Fill other bits in vmov encoding for both thumb and arm. */
8095 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
8097 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
8098 neon_write_immbits (immbits
);
8106 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8107 if (inst
.operands
[i
].issingle
8108 && is_quarter_float (inst
.operands
[1].imm
)
8109 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8111 inst
.operands
[1].imm
=
8112 neon_qfloat_bits (v
);
8113 do_vfp_nsyn_opcode ("fconsts");
8117 /* If our host does not support a 64-bit type then we cannot perform
8118 the following optimization. This mean that there will be a
8119 discrepancy between the output produced by an assembler built for
8120 a 32-bit-only host and the output produced from a 64-bit host, but
8121 this cannot be helped. */
8122 #if defined BFD_HOST_64_BIT
8123 else if (!inst
.operands
[1].issingle
8124 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8126 if (is_double_a_single (v
)
8127 && is_quarter_float (double_to_single (v
)))
8129 inst
.operands
[1].imm
=
8130 neon_qfloat_bits (double_to_single (v
));
8131 do_vfp_nsyn_opcode ("fconstd");
8139 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8140 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8143 inst
.operands
[1].reg
= REG_PC
;
8144 inst
.operands
[1].isreg
= 1;
8145 inst
.operands
[1].preind
= 1;
8146 inst
.reloc
.pc_rel
= 1;
8147 inst
.reloc
.type
= (thumb_p
8148 ? BFD_RELOC_ARM_THUMB_OFFSET
8150 ? BFD_RELOC_ARM_HWLITERAL
8151 : BFD_RELOC_ARM_LITERAL
));
8155 /* inst.operands[i] was set up by parse_address. Encode it into an
8156 ARM-format instruction. Reject all forms which cannot be encoded
8157 into a coprocessor load/store instruction. If wb_ok is false,
8158 reject use of writeback; if unind_ok is false, reject use of
8159 unindexed addressing. If reloc_override is not 0, use it instead
8160 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8161 (in which case it is preserved). */
8164 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8166 if (!inst
.operands
[i
].isreg
)
8169 if (! inst
.operands
[0].isvec
)
8171 inst
.error
= _("invalid co-processor operand");
8174 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8178 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8180 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8182 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8184 gas_assert (!inst
.operands
[i
].writeback
);
8187 inst
.error
= _("instruction does not support unindexed addressing");
8190 inst
.instruction
|= inst
.operands
[i
].imm
;
8191 inst
.instruction
|= INDEX_UP
;
8195 if (inst
.operands
[i
].preind
)
8196 inst
.instruction
|= PRE_INDEX
;
8198 if (inst
.operands
[i
].writeback
)
8200 if (inst
.operands
[i
].reg
== REG_PC
)
8202 inst
.error
= _("pc may not be used with write-back");
8207 inst
.error
= _("instruction does not support writeback");
8210 inst
.instruction
|= WRITE_BACK
;
8214 inst
.reloc
.type
= (bfd_reloc_code_real_type
) reloc_override
;
8215 else if ((inst
.reloc
.type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8216 || inst
.reloc
.type
> BFD_RELOC_ARM_LDC_SB_G2
)
8217 && inst
.reloc
.type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8220 inst
.reloc
.type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8222 inst
.reloc
.type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8225 /* Prefer + for zero encoded value. */
8226 if (!inst
.operands
[i
].negative
)
8227 inst
.instruction
|= INDEX_UP
;
8232 /* Functions for instruction encoding, sorted by sub-architecture.
8233 First some generics; their names are taken from the conventional
8234 bit positions for register arguments in ARM format instructions. */
8244 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8250 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8256 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8257 inst
.instruction
|= inst
.operands
[1].reg
;
8263 inst
.instruction
|= inst
.operands
[0].reg
;
8264 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8270 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8271 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8277 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8278 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8284 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8285 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8289 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8291 if (ARM_CPU_IS_ANY (cpu_variant
))
8293 as_tsktsk ("%s", msg
);
8296 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8308 unsigned Rn
= inst
.operands
[2].reg
;
8309 /* Enforce restrictions on SWP instruction. */
8310 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8312 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8313 _("Rn must not overlap other operands"));
8315 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8317 if (!check_obsolete (&arm_ext_v8
,
8318 _("swp{b} use is obsoleted for ARMv8 and later"))
8319 && warn_on_deprecated
8320 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8321 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8324 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8325 inst
.instruction
|= inst
.operands
[1].reg
;
8326 inst
.instruction
|= Rn
<< 16;
8332 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8333 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8334 inst
.instruction
|= inst
.operands
[2].reg
;
8340 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8341 constraint (((inst
.reloc
.exp
.X_op
!= O_constant
8342 && inst
.reloc
.exp
.X_op
!= O_illegal
)
8343 || inst
.reloc
.exp
.X_add_number
!= 0),
8345 inst
.instruction
|= inst
.operands
[0].reg
;
8346 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8347 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8353 inst
.instruction
|= inst
.operands
[0].imm
;
8359 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8360 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8363 /* ARM instructions, in alphabetical order by function name (except
8364 that wrapper functions appear immediately after the function they
8367 /* This is a pseudo-op of the form "adr rd, label" to be converted
8368 into a relative address of the form "add rd, pc, #label-.-8". */
8373 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8375 /* Frag hacking will turn this into a sub instruction if the offset turns
8376 out to be negative. */
8377 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
8378 inst
.reloc
.pc_rel
= 1;
8379 inst
.reloc
.exp
.X_add_number
-= 8;
8381 if (inst
.reloc
.exp
.X_op
== O_symbol
8382 && inst
.reloc
.exp
.X_add_symbol
!= NULL
8383 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
8384 && THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
8385 inst
.reloc
.exp
.X_add_number
+= 1;
8388 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8389 into a relative address of the form:
8390 add rd, pc, #low(label-.-8)"
8391 add rd, rd, #high(label-.-8)" */
8396 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8398 /* Frag hacking will turn this into a sub instruction if the offset turns
8399 out to be negative. */
8400 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8401 inst
.reloc
.pc_rel
= 1;
8402 inst
.size
= INSN_SIZE
* 2;
8403 inst
.reloc
.exp
.X_add_number
-= 8;
8405 if (inst
.reloc
.exp
.X_op
== O_symbol
8406 && inst
.reloc
.exp
.X_add_symbol
!= NULL
8407 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
8408 && THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
8409 inst
.reloc
.exp
.X_add_number
+= 1;
8415 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8416 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
8418 if (!inst
.operands
[1].present
)
8419 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8420 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8421 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8422 encode_arm_shifter_operand (2);
8428 if (inst
.operands
[0].present
)
8429 inst
.instruction
|= inst
.operands
[0].imm
;
8431 inst
.instruction
|= 0xf;
8437 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8438 constraint (msb
> 32, _("bit-field extends past end of register"));
8439 /* The instruction encoding stores the LSB and MSB,
8440 not the LSB and width. */
8441 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8442 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8443 inst
.instruction
|= (msb
- 1) << 16;
8451 /* #0 in second position is alternative syntax for bfc, which is
8452 the same instruction but with REG_PC in the Rm field. */
8453 if (!inst
.operands
[1].isreg
)
8454 inst
.operands
[1].reg
= REG_PC
;
8456 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8457 constraint (msb
> 32, _("bit-field extends past end of register"));
8458 /* The instruction encoding stores the LSB and MSB,
8459 not the LSB and width. */
8460 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8461 inst
.instruction
|= inst
.operands
[1].reg
;
8462 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8463 inst
.instruction
|= (msb
- 1) << 16;
8469 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8470 _("bit-field extends past end of register"));
8471 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8472 inst
.instruction
|= inst
.operands
[1].reg
;
8473 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8474 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8477 /* ARM V5 breakpoint instruction (argument parse)
8478 BKPT <16 bit unsigned immediate>
8479 Instruction is not conditional.
8480 The bit pattern given in insns[] has the COND_ALWAYS condition,
8481 and it is an error if the caller tried to override that. */
8486 /* Top 12 of 16 bits to bits 19:8. */
8487 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8489 /* Bottom 4 of 16 bits to bits 3:0. */
8490 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8494 encode_branch (int default_reloc
)
8496 if (inst
.operands
[0].hasreloc
)
8498 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8499 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8500 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8501 inst
.reloc
.type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8502 ? BFD_RELOC_ARM_PLT32
8503 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8506 inst
.reloc
.type
= (bfd_reloc_code_real_type
) default_reloc
;
8507 inst
.reloc
.pc_rel
= 1;
8514 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8515 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8518 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8525 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8527 if (inst
.cond
== COND_ALWAYS
)
8528 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8530 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8534 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8537 /* ARM V5 branch-link-exchange instruction (argument parse)
8538 BLX <target_addr> ie BLX(1)
8539 BLX{<condition>} <Rm> ie BLX(2)
8540 Unfortunately, there are two different opcodes for this mnemonic.
8541 So, the insns[].value is not used, and the code here zaps values
8542 into inst.instruction.
8543 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8548 if (inst
.operands
[0].isreg
)
8550 /* Arg is a register; the opcode provided by insns[] is correct.
8551 It is not illegal to do "blx pc", just useless. */
8552 if (inst
.operands
[0].reg
== REG_PC
)
8553 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8555 inst
.instruction
|= inst
.operands
[0].reg
;
8559 /* Arg is an address; this instruction cannot be executed
8560 conditionally, and the opcode must be adjusted.
8561 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8562 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8563 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8564 inst
.instruction
= 0xfa000000;
8565 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8572 bfd_boolean want_reloc
;
8574 if (inst
.operands
[0].reg
== REG_PC
)
8575 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8577 inst
.instruction
|= inst
.operands
[0].reg
;
8578 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8579 it is for ARMv4t or earlier. */
8580 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8581 if (object_arch
&& !ARM_CPU_HAS_FEATURE (*object_arch
, arm_ext_v5
))
8585 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8590 inst
.reloc
.type
= BFD_RELOC_ARM_V4BX
;
8594 /* ARM v5TEJ. Jump to Jazelle code. */
8599 if (inst
.operands
[0].reg
== REG_PC
)
8600 as_tsktsk (_("use of r15 in bxj is not really useful"));
8602 inst
.instruction
|= inst
.operands
[0].reg
;
8605 /* Co-processor data operation:
8606 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8607 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8611 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8612 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
8613 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8614 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8615 inst
.instruction
|= inst
.operands
[4].reg
;
8616 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8622 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8623 encode_arm_shifter_operand (1);
8626 /* Transfer between coprocessor and ARM registers.
8627 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8632 No special properties. */
8634 struct deprecated_coproc_regs_s
8641 arm_feature_set deprecated
;
8642 arm_feature_set obsoleted
;
8643 const char *dep_msg
;
8644 const char *obs_msg
;
8647 #define DEPR_ACCESS_V8 \
8648 N_("This coprocessor register access is deprecated in ARMv8")
8650 /* Table of all deprecated coprocessor registers. */
8651 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
8653 {15, 0, 7, 10, 5, /* CP15DMB. */
8654 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8655 DEPR_ACCESS_V8
, NULL
},
8656 {15, 0, 7, 10, 4, /* CP15DSB. */
8657 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8658 DEPR_ACCESS_V8
, NULL
},
8659 {15, 0, 7, 5, 4, /* CP15ISB. */
8660 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8661 DEPR_ACCESS_V8
, NULL
},
8662 {14, 6, 1, 0, 0, /* TEEHBR. */
8663 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8664 DEPR_ACCESS_V8
, NULL
},
8665 {14, 6, 0, 0, 0, /* TEECR. */
8666 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8667 DEPR_ACCESS_V8
, NULL
},
8670 #undef DEPR_ACCESS_V8
8672 static const size_t deprecated_coproc_reg_count
=
8673 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
8681 Rd
= inst
.operands
[2].reg
;
8684 if (inst
.instruction
== 0xee000010
8685 || inst
.instruction
== 0xfe000010)
8687 reject_bad_reg (Rd
);
8688 else if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
8690 constraint (Rd
== REG_SP
, BAD_SP
);
8695 if (inst
.instruction
== 0xe000010)
8696 constraint (Rd
== REG_PC
, BAD_PC
);
8699 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
8701 const struct deprecated_coproc_regs_s
*r
=
8702 deprecated_coproc_regs
+ i
;
8704 if (inst
.operands
[0].reg
== r
->cp
8705 && inst
.operands
[1].imm
== r
->opc1
8706 && inst
.operands
[3].reg
== r
->crn
8707 && inst
.operands
[4].reg
== r
->crm
8708 && inst
.operands
[5].imm
== r
->opc2
)
8710 if (! ARM_CPU_IS_ANY (cpu_variant
)
8711 && warn_on_deprecated
8712 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
8713 as_tsktsk ("%s", r
->dep_msg
);
8717 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8718 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
8719 inst
.instruction
|= Rd
<< 12;
8720 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8721 inst
.instruction
|= inst
.operands
[4].reg
;
8722 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8725 /* Transfer between coprocessor register and pair of ARM registers.
8726 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8731 Two XScale instructions are special cases of these:
8733 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8734 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8736 Result unpredictable if Rd or Rn is R15. */
8743 Rd
= inst
.operands
[2].reg
;
8744 Rn
= inst
.operands
[3].reg
;
8748 reject_bad_reg (Rd
);
8749 reject_bad_reg (Rn
);
8753 constraint (Rd
== REG_PC
, BAD_PC
);
8754 constraint (Rn
== REG_PC
, BAD_PC
);
8757 /* Only check the MRRC{2} variants. */
8758 if ((inst
.instruction
& 0x0FF00000) == 0x0C500000)
8760 /* If Rd == Rn, error that the operation is
8761 unpredictable (example MRRC p3,#1,r1,r1,c4). */
8762 constraint (Rd
== Rn
, BAD_OVERLAP
);
8765 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8766 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
8767 inst
.instruction
|= Rd
<< 12;
8768 inst
.instruction
|= Rn
<< 16;
8769 inst
.instruction
|= inst
.operands
[4].reg
;
8775 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
8776 if (inst
.operands
[1].present
)
8778 inst
.instruction
|= CPSI_MMOD
;
8779 inst
.instruction
|= inst
.operands
[1].imm
;
8786 inst
.instruction
|= inst
.operands
[0].imm
;
8792 unsigned Rd
, Rn
, Rm
;
8794 Rd
= inst
.operands
[0].reg
;
8795 Rn
= (inst
.operands
[1].present
8796 ? inst
.operands
[1].reg
: Rd
);
8797 Rm
= inst
.operands
[2].reg
;
8799 constraint ((Rd
== REG_PC
), BAD_PC
);
8800 constraint ((Rn
== REG_PC
), BAD_PC
);
8801 constraint ((Rm
== REG_PC
), BAD_PC
);
8803 inst
.instruction
|= Rd
<< 16;
8804 inst
.instruction
|= Rn
<< 0;
8805 inst
.instruction
|= Rm
<< 8;
8811 /* There is no IT instruction in ARM mode. We
8812 process it to do the validation as if in
8813 thumb mode, just in case the code gets
8814 assembled for thumb using the unified syntax. */
8819 set_it_insn_type (IT_INSN
);
8820 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
8821 now_it
.cc
= inst
.operands
[0].imm
;
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i;

  /* Reject an empty (or sign-corrupted) list up front.  The previous
     ffs()-based code computed i == -1 for range == 0 and then evaluated
     1 << -1, which is undefined behaviour.  A negative mask can never be
     a single register in 0..15 either, so -1 is the correct answer.  */
  if (range <= 0)
    return -1;

  /* Index of the lowest set bit (range is known non-zero here).  */
  for (i = 0; (range & (1 << i)) == 0; i++)
    ;

  return (i > 15 || range != (1 << i)) ? -1 : i;
}
8835 encode_ldmstm(int from_push_pop_mnem
)
8837 int base_reg
= inst
.operands
[0].reg
;
8838 int range
= inst
.operands
[1].imm
;
8841 inst
.instruction
|= base_reg
<< 16;
8842 inst
.instruction
|= range
;
8844 if (inst
.operands
[1].writeback
)
8845 inst
.instruction
|= LDM_TYPE_2_OR_3
;
8847 if (inst
.operands
[0].writeback
)
8849 inst
.instruction
|= WRITE_BACK
;
8850 /* Check for unpredictable uses of writeback. */
8851 if (inst
.instruction
& LOAD_BIT
)
8853 /* Not allowed in LDM type 2. */
8854 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
8855 && ((range
& (1 << REG_PC
)) == 0))
8856 as_warn (_("writeback of base register is UNPREDICTABLE"));
8857 /* Only allowed if base reg not in list for other types. */
8858 else if (range
& (1 << base_reg
))
8859 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8863 /* Not allowed for type 2. */
8864 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
8865 as_warn (_("writeback of base register is UNPREDICTABLE"));
8866 /* Only allowed if base reg not in list, or first in list. */
8867 else if ((range
& (1 << base_reg
))
8868 && (range
& ((1 << base_reg
) - 1)))
8869 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8873 /* If PUSH/POP has only one register, then use the A2 encoding. */
8874 one_reg
= only_one_reg_in_list (range
);
8875 if (from_push_pop_mnem
&& one_reg
>= 0)
8877 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
8879 inst
.instruction
&= A_COND_MASK
;
8880 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
8881 inst
.instruction
|= one_reg
<< 12;
8888 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
8891 /* ARMv5TE load-consecutive (argument parse)
8900 constraint (inst
.operands
[0].reg
% 2 != 0,
8901 _("first transfer register must be even"));
8902 constraint (inst
.operands
[1].present
8903 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8904 _("can only transfer two consecutive registers"));
8905 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8906 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
8908 if (!inst
.operands
[1].present
)
8909 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
8911 /* encode_arm_addr_mode_3 will diagnose overlap between the base
8912 register and the first register written; we have to diagnose
8913 overlap between the base and the second register written here. */
8915 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
8916 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
8917 as_warn (_("base register written back, and overlaps "
8918 "second transfer register"));
8920 if (!(inst
.instruction
& V4_STR_BIT
))
8922 /* For an index-register load, the index register must not overlap the
8923 destination (even if not write-back). */
8924 if (inst
.operands
[2].immisreg
8925 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
8926 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
8927 as_warn (_("index register overlaps transfer register"));
8929 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8930 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
8936 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
8937 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
8938 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
8939 || inst
.operands
[1].negative
8940 /* This can arise if the programmer has written
8942 or if they have mistakenly used a register name as the last
8945 It is very difficult to distinguish between these two cases
8946 because "rX" might actually be a label. ie the register
8947 name has been occluded by a symbol of the same name. So we
8948 just generate a general 'bad addressing mode' type error
8949 message and leave it up to the programmer to discover the
8950 true cause and fix their mistake. */
8951 || (inst
.operands
[1].reg
== REG_PC
),
8954 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8955 || inst
.reloc
.exp
.X_add_number
!= 0,
8956 _("offset must be zero in ARM encoding"));
8958 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
8960 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8961 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8962 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8968 constraint (inst
.operands
[0].reg
% 2 != 0,
8969 _("even register required"));
8970 constraint (inst
.operands
[1].present
8971 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
8972 _("can only load two consecutive registers"));
8973 /* If op 1 were present and equal to PC, this function wouldn't
8974 have been called in the first place. */
8975 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
8977 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8978 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8981 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8982 which is not a multiple of four is UNPREDICTABLE. */
8984 check_ldr_r15_aligned (void)
8986 constraint (!(inst
.operands
[1].immisreg
)
8987 && (inst
.operands
[0].reg
== REG_PC
8988 && inst
.operands
[1].reg
== REG_PC
8989 && (inst
.reloc
.exp
.X_add_number
& 0x3)),
8990 _("ldr to register 15 must be 4-byte aligned"));
8996 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8997 if (!inst
.operands
[1].isreg
)
8998 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
9000 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
9001 check_ldr_r15_aligned ();
9007 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9009 if (inst
.operands
[1].preind
)
9011 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9012 || inst
.reloc
.exp
.X_add_number
!= 0,
9013 _("this instruction requires a post-indexed address"));
9015 inst
.operands
[1].preind
= 0;
9016 inst
.operands
[1].postind
= 1;
9017 inst
.operands
[1].writeback
= 1;
9019 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9020 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
9023 /* Halfword and signed-byte load/store operations. */
9028 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9029 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9030 if (!inst
.operands
[1].isreg
)
9031 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
9033 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
9039 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9041 if (inst
.operands
[1].preind
)
9043 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9044 || inst
.reloc
.exp
.X_add_number
!= 0,
9045 _("this instruction requires a post-indexed address"));
9047 inst
.operands
[1].preind
= 0;
9048 inst
.operands
[1].postind
= 1;
9049 inst
.operands
[1].writeback
= 1;
9051 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9052 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
9055 /* Co-processor register load/store.
9056 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9060 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9061 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9062 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9068 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9069 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9070 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
9071 && !(inst
.instruction
& 0x00400000))
9072 as_tsktsk (_("Rd and Rm should be different in mla"));
9074 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9075 inst
.instruction
|= inst
.operands
[1].reg
;
9076 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9077 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9083 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9084 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
9086 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9087 encode_arm_shifter_operand (1);
9090 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9097 top
= (inst
.instruction
& 0x00400000) != 0;
9098 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
9099 _(":lower16: not allowed in this instruction"));
9100 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
9101 _(":upper16: not allowed in this instruction"));
9102 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9103 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
9105 imm
= inst
.reloc
.exp
.X_add_number
;
9106 /* The value is in two pieces: 0:11, 16:19. */
9107 inst
.instruction
|= (imm
& 0x00000fff);
9108 inst
.instruction
|= (imm
& 0x0000f000) << 4;
9113 do_vfp_nsyn_mrs (void)
9115 if (inst
.operands
[0].isvec
)
9117 if (inst
.operands
[1].reg
!= 1)
9118 first_error (_("operand 1 must be FPSCR"));
9119 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
9120 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
9121 do_vfp_nsyn_opcode ("fmstat");
9123 else if (inst
.operands
[1].isvec
)
9124 do_vfp_nsyn_opcode ("fmrx");
9132 do_vfp_nsyn_msr (void)
9134 if (inst
.operands
[0].isvec
)
9135 do_vfp_nsyn_opcode ("fmxr");
9145 unsigned Rt
= inst
.operands
[0].reg
;
9147 if (thumb_mode
&& Rt
== REG_SP
)
9149 inst
.error
= BAD_SP
;
9153 /* MVFR2 is only valid at ARMv8-A. */
9154 if (inst
.operands
[1].reg
== 5)
9155 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9158 /* APSR_ sets isvec. All other refs to PC are illegal. */
9159 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9161 inst
.error
= BAD_PC
;
9165 /* If we get through parsing the register name, we just insert the number
9166 generated into the instruction without further validation. */
9167 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9168 inst
.instruction
|= (Rt
<< 12);
9174 unsigned Rt
= inst
.operands
[1].reg
;
9177 reject_bad_reg (Rt
);
9178 else if (Rt
== REG_PC
)
9180 inst
.error
= BAD_PC
;
9184 /* MVFR2 is only valid for ARMv8-A. */
9185 if (inst
.operands
[0].reg
== 5)
9186 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9189 /* If we get through parsing the register name, we just insert the number
9190 generated into the instruction without further validation. */
9191 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9192 inst
.instruction
|= (Rt
<< 12);
9200 if (do_vfp_nsyn_mrs () == SUCCESS
)
9203 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9204 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9206 if (inst
.operands
[1].isreg
)
9208 br
= inst
.operands
[1].reg
;
9209 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf000))
9210 as_bad (_("bad register for mrs"));
9214 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9215 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9217 _("'APSR', 'CPSR' or 'SPSR' expected"));
9218 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9221 inst
.instruction
|= br
;
9224 /* Two possible forms:
9225 "{C|S}PSR_<field>, Rm",
9226 "{C|S}PSR_f, #expression". */
9231 if (do_vfp_nsyn_msr () == SUCCESS
)
9234 inst
.instruction
|= inst
.operands
[0].imm
;
9235 if (inst
.operands
[1].isreg
)
9236 inst
.instruction
|= inst
.operands
[1].reg
;
9239 inst
.instruction
|= INST_IMMEDIATE
;
9240 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
9241 inst
.reloc
.pc_rel
= 0;
9248 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9250 if (!inst
.operands
[2].present
)
9251 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9252 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9253 inst
.instruction
|= inst
.operands
[1].reg
;
9254 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9256 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9257 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9258 as_tsktsk (_("Rd and Rm should be different in mul"));
9261 /* Long Multiply Parser
9262 UMULL RdLo, RdHi, Rm, Rs
9263 SMULL RdLo, RdHi, Rm, Rs
9264 UMLAL RdLo, RdHi, Rm, Rs
9265 SMLAL RdLo, RdHi, Rm, Rs. */
9270 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9271 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9272 inst
.instruction
|= inst
.operands
[2].reg
;
9273 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9275 /* rdhi and rdlo must be different. */
9276 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9277 as_tsktsk (_("rdhi and rdlo must be different"));
9279 /* rdhi, rdlo and rm must all be different before armv6. */
9280 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9281 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9282 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9283 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9289 if (inst
.operands
[0].present
9290 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9292 /* Architectural NOP hints are CPSR sets with no bits selected. */
9293 inst
.instruction
&= 0xf0000000;
9294 inst
.instruction
|= 0x0320f000;
9295 if (inst
.operands
[0].present
)
9296 inst
.instruction
|= inst
.operands
[0].imm
;
9300 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9301 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9302 Condition defaults to COND_ALWAYS.
9303 Error if Rd, Rn or Rm are R15. */
9308 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9309 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9310 inst
.instruction
|= inst
.operands
[2].reg
;
9311 if (inst
.operands
[3].present
)
9312 encode_arm_shift (3);
9315 /* ARM V6 PKHTB (Argument Parse). */
9320 if (!inst
.operands
[3].present
)
9322 /* If the shift specifier is omitted, turn the instruction
9323 into pkhbt rd, rm, rn. */
9324 inst
.instruction
&= 0xfff00010;
9325 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9326 inst
.instruction
|= inst
.operands
[1].reg
;
9327 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9331 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9332 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9333 inst
.instruction
|= inst
.operands
[2].reg
;
9334 encode_arm_shift (3);
9338 /* ARMv5TE: Preload-Cache
9339 MP Extensions: Preload for write
9343 Syntactically, like LDR with B=1, W=0, L=1. */
9348 constraint (!inst
.operands
[0].isreg
,
9349 _("'[' expected after PLD mnemonic"));
9350 constraint (inst
.operands
[0].postind
,
9351 _("post-indexed expression used in preload instruction"));
9352 constraint (inst
.operands
[0].writeback
,
9353 _("writeback used in preload instruction"));
9354 constraint (!inst
.operands
[0].preind
,
9355 _("unindexed addressing used in preload instruction"));
9356 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9359 /* ARMv7: PLI <addr_mode> */
9363 constraint (!inst
.operands
[0].isreg
,
9364 _("'[' expected after PLI mnemonic"));
9365 constraint (inst
.operands
[0].postind
,
9366 _("post-indexed expression used in preload instruction"));
9367 constraint (inst
.operands
[0].writeback
,
9368 _("writeback used in preload instruction"));
9369 constraint (!inst
.operands
[0].preind
,
9370 _("unindexed addressing used in preload instruction"));
9371 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9372 inst
.instruction
&= ~PRE_INDEX
;
9378 constraint (inst
.operands
[0].writeback
,
9379 _("push/pop do not support {reglist}^"));
9380 inst
.operands
[1] = inst
.operands
[0];
9381 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9382 inst
.operands
[0].isreg
= 1;
9383 inst
.operands
[0].writeback
= 1;
9384 inst
.operands
[0].reg
= REG_SP
;
9385 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9388 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9389 word at the specified address and the following word
9391 Unconditionally executed.
9392 Error if Rn is R15. */
9397 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9398 if (inst
.operands
[0].writeback
)
9399 inst
.instruction
|= WRITE_BACK
;
9402 /* ARM V6 ssat (argument parse). */
9407 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9408 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9409 inst
.instruction
|= inst
.operands
[2].reg
;
9411 if (inst
.operands
[3].present
)
9412 encode_arm_shift (3);
9415 /* ARM V6 usat (argument parse). */
9420 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9421 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9422 inst
.instruction
|= inst
.operands
[2].reg
;
9424 if (inst
.operands
[3].present
)
9425 encode_arm_shift (3);
9428 /* ARM V6 ssat16 (argument parse). */
9433 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9434 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9435 inst
.instruction
|= inst
.operands
[2].reg
;
9441 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9442 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9443 inst
.instruction
|= inst
.operands
[2].reg
;
9446 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9447 preserving the other bits.
9449 setend <endian_specifier>, where <endian_specifier> is either
9455 if (warn_on_deprecated
9456 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9457 as_tsktsk (_("setend use is deprecated for ARMv8"));
9459 if (inst
.operands
[0].imm
)
9460 inst
.instruction
|= 0x200;
9466 unsigned int Rm
= (inst
.operands
[1].present
9467 ? inst
.operands
[1].reg
9468 : inst
.operands
[0].reg
);
9470 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9471 inst
.instruction
|= Rm
;
9472 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9474 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9475 inst
.instruction
|= SHIFT_BY_REG
;
9476 /* PR 12854: Error on extraneous shifts. */
9477 constraint (inst
.operands
[2].shifted
,
9478 _("extraneous shift as part of operand to shift insn"));
9481 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
9487 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
9488 inst
.reloc
.pc_rel
= 0;
9494 inst
.reloc
.type
= BFD_RELOC_ARM_HVC
;
9495 inst
.reloc
.pc_rel
= 0;
9501 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
9502 inst
.reloc
.pc_rel
= 0;
9508 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9509 _("selected processor does not support SETPAN instruction"));
9511 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9517 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9518 _("selected processor does not support SETPAN instruction"));
9520 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9523 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9524 SMLAxy{cond} Rd,Rm,Rs,Rn
9525 SMLAWy{cond} Rd,Rm,Rs,Rn
9526 Error if any register is R15. */
9531 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9532 inst
.instruction
|= inst
.operands
[1].reg
;
9533 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9534 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9537 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9538 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9539 Error if any register is R15.
9540 Warning if Rdlo == Rdhi. */
9545 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9546 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9547 inst
.instruction
|= inst
.operands
[2].reg
;
9548 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9550 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9551 as_tsktsk (_("rdhi and rdlo must be different"));
9554 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9555 SMULxy{cond} Rd,Rm,Rs
9556 Error if any register is R15. */
9561 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9562 inst
.instruction
|= inst
.operands
[1].reg
;
9563 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9566 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9567 the same for both ARM and Thumb-2. */
9574 if (inst
.operands
[0].present
)
9576 reg
= inst
.operands
[0].reg
;
9577 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9582 inst
.instruction
|= reg
<< 16;
9583 inst
.instruction
|= inst
.operands
[1].imm
;
9584 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9585 inst
.instruction
|= WRITE_BACK
;
9588 /* ARM V6 strex (argument parse). */
9593 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9594 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9595 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9596 || inst
.operands
[2].negative
9597 /* See comment in do_ldrex(). */
9598 || (inst
.operands
[2].reg
== REG_PC
),
9601 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9602 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9604 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9605 || inst
.reloc
.exp
.X_add_number
!= 0,
9606 _("offset must be zero in ARM encoding"));
9608 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9609 inst
.instruction
|= inst
.operands
[1].reg
;
9610 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9611 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9617 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9618 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9619 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9620 || inst
.operands
[2].negative
,
9623 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9624 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9632 constraint (inst
.operands
[1].reg
% 2 != 0,
9633 _("even register required"));
9634 constraint (inst
.operands
[2].present
9635 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
9636 _("can only store two consecutive registers"));
9637 /* If op 2 were present and equal to PC, this function wouldn't
9638 have been called in the first place. */
9639 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
9641 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9642 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
9643 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
9646 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9647 inst
.instruction
|= inst
.operands
[1].reg
;
9648 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9655 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9656 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9664 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9665 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9670 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9671 extends it to 32-bits, and adds the result to a value in another
9672 register. You can specify a rotation by 0, 8, 16, or 24 bits
9673 before extracting the 16-bit value.
9674 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9675 Condition defaults to COND_ALWAYS.
9676 Error if any register uses R15. */
9681 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9682 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9683 inst
.instruction
|= inst
.operands
[2].reg
;
9684 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
9689 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9690 Condition defaults to COND_ALWAYS.
9691 Error if any register uses R15. */
9696 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9697 inst
.instruction
|= inst
.operands
[1].reg
;
9698 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
9701 /* VFP instructions. In a logical order: SP variant first, monad
9702 before dyad, arithmetic then move then load/store. */
9705 do_vfp_sp_monadic (void)
9707 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9708 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9712 do_vfp_sp_dyadic (void)
9714 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9715 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9716 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9720 do_vfp_sp_compare_z (void)
9722 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9726 do_vfp_dp_sp_cvt (void)
9728 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9729 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9733 do_vfp_sp_dp_cvt (void)
9735 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9736 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9740 do_vfp_reg_from_sp (void)
9742 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9743 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9747 do_vfp_reg2_from_sp2 (void)
9749 constraint (inst
.operands
[2].imm
!= 2,
9750 _("only two consecutive VFP SP registers allowed here"));
9751 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9752 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9753 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9757 do_vfp_sp_from_reg (void)
9759 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
9760 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9764 do_vfp_sp2_from_reg2 (void)
9766 constraint (inst
.operands
[0].imm
!= 2,
9767 _("only two consecutive VFP SP registers allowed here"));
9768 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
9769 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9770 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9774 do_vfp_sp_ldst (void)
9776 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9777 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9781 do_vfp_dp_ldst (void)
9783 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9784 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
9789 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
9791 if (inst
.operands
[0].writeback
)
9792 inst
.instruction
|= WRITE_BACK
;
9794 constraint (ldstm_type
!= VFP_LDSTMIA
,
9795 _("this addressing mode requires base-register writeback"));
9796 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9797 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
9798 inst
.instruction
|= inst
.operands
[1].imm
;
9802 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
9806 if (inst
.operands
[0].writeback
)
9807 inst
.instruction
|= WRITE_BACK
;
9809 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
9810 _("this addressing mode requires base-register writeback"));
9812 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9813 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9815 count
= inst
.operands
[1].imm
<< 1;
9816 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
9819 inst
.instruction
|= count
;
9823 do_vfp_sp_ldstmia (void)
9825 vfp_sp_ldstm (VFP_LDSTMIA
);
9829 do_vfp_sp_ldstmdb (void)
9831 vfp_sp_ldstm (VFP_LDSTMDB
);
9835 do_vfp_dp_ldstmia (void)
9837 vfp_dp_ldstm (VFP_LDSTMIA
);
9841 do_vfp_dp_ldstmdb (void)
9843 vfp_dp_ldstm (VFP_LDSTMDB
);
9847 do_vfp_xp_ldstmia (void)
9849 vfp_dp_ldstm (VFP_LDSTMIAX
);
9853 do_vfp_xp_ldstmdb (void)
9855 vfp_dp_ldstm (VFP_LDSTMDBX
);
9859 do_vfp_dp_rd_rm (void)
9861 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9862 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9866 do_vfp_dp_rn_rd (void)
9868 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
9869 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9873 do_vfp_dp_rd_rn (void)
9875 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9876 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9880 do_vfp_dp_rd_rn_rm (void)
9882 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9883 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
9884 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
9890 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9894 do_vfp_dp_rm_rd_rn (void)
9896 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
9897 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
9898 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
9901 /* VFPv3 instructions. */
9903 do_vfp_sp_const (void)
9905 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9906 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9907 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9911 do_vfp_dp_const (void)
9913 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9914 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
9915 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
9919 vfp_conv (int srcsize
)
9921 int immbits
= srcsize
- inst
.operands
[1].imm
;
9923 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
9925 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9926 i.e. immbits must be in range 0 - 16. */
9927 inst
.error
= _("immediate value out of range, expected range [0, 16]");
9930 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
9932 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9933 i.e. immbits must be in range 0 - 31. */
9934 inst
.error
= _("immediate value out of range, expected range [1, 32]");
9938 inst
.instruction
|= (immbits
& 1) << 5;
9939 inst
.instruction
|= (immbits
>> 1);
9943 do_vfp_sp_conv_16 (void)
9945 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9950 do_vfp_dp_conv_16 (void)
9952 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9957 do_vfp_sp_conv_32 (void)
9959 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9964 do_vfp_dp_conv_32 (void)
9966 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9970 /* FPA instructions. Also in a logical order. */
9975 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9976 inst
.instruction
|= inst
.operands
[1].reg
;
9980 do_fpa_ldmstm (void)
9982 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9983 switch (inst
.operands
[1].imm
)
9985 case 1: inst
.instruction
|= CP_T_X
; break;
9986 case 2: inst
.instruction
|= CP_T_Y
; break;
9987 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
9992 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
9994 /* The instruction specified "ea" or "fd", so we can only accept
9995 [Rn]{!}. The instruction does not really support stacking or
9996 unstacking, so we have to emulate these by setting appropriate
9997 bits and offsets. */
9998 constraint (inst
.reloc
.exp
.X_op
!= O_constant
9999 || inst
.reloc
.exp
.X_add_number
!= 0,
10000 _("this instruction does not support indexing"));
10002 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
10003 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
10005 if (!(inst
.instruction
& INDEX_UP
))
10006 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
10008 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
10010 inst
.operands
[2].preind
= 0;
10011 inst
.operands
[2].postind
= 1;
10015 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
10018 /* iWMMXt instructions: strictly in alphabetical order. */
10021 do_iwmmxt_tandorc (void)
10023 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
10027 do_iwmmxt_textrc (void)
10029 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10030 inst
.instruction
|= inst
.operands
[1].imm
;
10034 do_iwmmxt_textrm (void)
10036 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10037 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10038 inst
.instruction
|= inst
.operands
[2].imm
;
10042 do_iwmmxt_tinsr (void)
10044 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10045 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10046 inst
.instruction
|= inst
.operands
[2].imm
;
10050 do_iwmmxt_tmia (void)
10052 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10053 inst
.instruction
|= inst
.operands
[1].reg
;
10054 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10058 do_iwmmxt_waligni (void)
10060 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10061 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10062 inst
.instruction
|= inst
.operands
[2].reg
;
10063 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
10067 do_iwmmxt_wmerge (void)
10069 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10070 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10071 inst
.instruction
|= inst
.operands
[2].reg
;
10072 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
10076 do_iwmmxt_wmov (void)
10078 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10079 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10080 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10081 inst
.instruction
|= inst
.operands
[1].reg
;
10085 do_iwmmxt_wldstbh (void)
10088 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10090 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
10092 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
10093 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
10097 do_iwmmxt_wldstw (void)
10099 /* RIWR_RIWC clears .isreg for a control register. */
10100 if (!inst
.operands
[0].isreg
)
10102 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
10103 inst
.instruction
|= 0xf0000000;
10106 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10107 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
10111 do_iwmmxt_wldstd (void)
10113 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10114 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
10115 && inst
.operands
[1].immisreg
)
10117 inst
.instruction
&= ~0x1a000ff;
10118 inst
.instruction
|= (0xfU
<< 28);
10119 if (inst
.operands
[1].preind
)
10120 inst
.instruction
|= PRE_INDEX
;
10121 if (!inst
.operands
[1].negative
)
10122 inst
.instruction
|= INDEX_UP
;
10123 if (inst
.operands
[1].writeback
)
10124 inst
.instruction
|= WRITE_BACK
;
10125 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10126 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
10127 inst
.instruction
|= inst
.operands
[1].imm
;
10130 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
10134 do_iwmmxt_wshufh (void)
10136 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10137 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10138 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
10139 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
10143 do_iwmmxt_wzero (void)
10145 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10146 inst
.instruction
|= inst
.operands
[0].reg
;
10147 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10148 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10152 do_iwmmxt_wrwrwr_or_imm5 (void)
10154 if (inst
.operands
[2].isreg
)
10157 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
10158 _("immediate operand requires iWMMXt2"));
10160 if (inst
.operands
[2].imm
== 0)
10162 switch ((inst
.instruction
>> 20) & 0xf)
10168 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10169 inst
.operands
[2].imm
= 16;
10170 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
10176 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10177 inst
.operands
[2].imm
= 32;
10178 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
10185 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10187 wrn
= (inst
.instruction
>> 16) & 0xf;
10188 inst
.instruction
&= 0xff0fff0f;
10189 inst
.instruction
|= wrn
;
10190 /* Bail out here; the instruction is now assembled. */
10195 /* Map 32 -> 0, etc. */
10196 inst
.operands
[2].imm
&= 0x1f;
10197 inst
.instruction
|= (0xfU
<< 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
10201 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10202 operations first, then control, shift, and load/store. */
10204 /* Insns like "foo X,Y,Z". */
10207 do_mav_triple (void)
10209 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10210 inst
.instruction
|= inst
.operands
[1].reg
;
10211 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10214 /* Insns like "foo W,X,Y,Z".
10215 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10220 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10221 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10222 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10223 inst
.instruction
|= inst
.operands
[3].reg
;
10226 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10228 do_mav_dspsc (void)
10230 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10233 /* Maverick shift immediate instructions.
10234 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10235 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10238 do_mav_shift (void)
10240 int imm
= inst
.operands
[2].imm
;
10242 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10243 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10245 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10246 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10247 Bit 4 should be 0. */
10248 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
10250 inst
.instruction
|= imm
;
10253 /* XScale instructions. Also sorted arithmetic before move. */
10255 /* Xscale multiply-accumulate (argument parse)
10258 MIAxycc acc0,Rm,Rs. */
10263 inst
.instruction
|= inst
.operands
[1].reg
;
10264 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10267 /* Xscale move-accumulator-register (argument parse)
10269 MARcc acc0,RdLo,RdHi. */
10274 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10275 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10278 /* Xscale move-register-accumulator (argument parse)
10280 MRAcc RdLo,RdHi,acc0. */
10285 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10286 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10287 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10290 /* Encoding functions relevant only to Thumb. */
10292 /* inst.operands[i] is a shifted-register operand; encode
10293 it into inst.instruction in the format used by Thumb32. */
10296 encode_thumb32_shifted_operand (int i
)
10298 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10299 unsigned int shift
= inst
.operands
[i
].shift_kind
;
10301 constraint (inst
.operands
[i
].immisreg
,
10302 _("shift by register not allowed in thumb mode"));
10303 inst
.instruction
|= inst
.operands
[i
].reg
;
10304 if (shift
== SHIFT_RRX
)
10305 inst
.instruction
|= SHIFT_ROR
<< 4;
10308 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10309 _("expression too complex"));
10311 constraint (value
> 32
10312 || (value
== 32 && (shift
== SHIFT_LSL
10313 || shift
== SHIFT_ROR
)),
10314 _("shift expression is too large"));
10318 else if (value
== 32)
10321 inst
.instruction
|= shift
<< 4;
10322 inst
.instruction
|= (value
& 0x1c) << 10;
10323 inst
.instruction
|= (value
& 0x03) << 6;
10328 /* inst.operands[i] was set up by parse_address. Encode it into a
10329 Thumb32 format load or store instruction. Reject forms that cannot
10330 be used with such instructions. If is_t is true, reject forms that
10331 cannot be used with a T instruction; if is_d is true, reject forms
10332 that cannot be used with a D instruction. If it is a store insn,
10333 reject PC in Rn. */
10336 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
10338 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
10340 constraint (!inst
.operands
[i
].isreg
,
10341 _("Instruction does not support =N addresses"));
10343 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
10344 if (inst
.operands
[i
].immisreg
)
10346 constraint (is_pc
, BAD_PC_ADDRESSING
);
10347 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
10348 constraint (inst
.operands
[i
].negative
,
10349 _("Thumb does not support negative register indexing"));
10350 constraint (inst
.operands
[i
].postind
,
10351 _("Thumb does not support register post-indexing"));
10352 constraint (inst
.operands
[i
].writeback
,
10353 _("Thumb does not support register indexing with writeback"));
10354 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
10355 _("Thumb supports only LSL in shifted register indexing"));
10357 inst
.instruction
|= inst
.operands
[i
].imm
;
10358 if (inst
.operands
[i
].shifted
)
10360 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10361 _("expression too complex"));
10362 constraint (inst
.reloc
.exp
.X_add_number
< 0
10363 || inst
.reloc
.exp
.X_add_number
> 3,
10364 _("shift out of range"));
10365 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
10367 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10369 else if (inst
.operands
[i
].preind
)
10371 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
10372 constraint (is_t
&& inst
.operands
[i
].writeback
,
10373 _("cannot use writeback with this instruction"));
10374 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
10375 BAD_PC_ADDRESSING
);
10379 inst
.instruction
|= 0x01000000;
10380 if (inst
.operands
[i
].writeback
)
10381 inst
.instruction
|= 0x00200000;
10385 inst
.instruction
|= 0x00000c00;
10386 if (inst
.operands
[i
].writeback
)
10387 inst
.instruction
|= 0x00000100;
10389 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10391 else if (inst
.operands
[i
].postind
)
10393 gas_assert (inst
.operands
[i
].writeback
);
10394 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
10395 constraint (is_t
, _("cannot use post-indexing with this instruction"));
10398 inst
.instruction
|= 0x00200000;
10400 inst
.instruction
|= 0x00000900;
10401 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10403 else /* unindexed - only for coprocessor */
10404 inst
.error
= _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
10513 /* Thumb instruction encoders, in alphabetical order. */
10515 /* ADDW or SUBW. */
10518 do_t_add_sub_w (void)
10522 Rd
= inst
.operands
[0].reg
;
10523 Rn
= inst
.operands
[1].reg
;
10525 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10526 is the SP-{plus,minus}-immediate form of the instruction. */
10528 constraint (Rd
== REG_PC
, BAD_PC
);
10530 reject_bad_reg (Rd
);
10532 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
10533 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10536 /* Parse an add or subtract instruction. We get here with inst.instruction
10537 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
10540 do_t_add_sub (void)
10544 Rd
= inst
.operands
[0].reg
;
10545 Rs
= (inst
.operands
[1].present
10546 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10547 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10550 set_it_insn_type_last ();
10552 if (unified_syntax
)
10555 bfd_boolean narrow
;
10558 flags
= (inst
.instruction
== T_MNEM_adds
10559 || inst
.instruction
== T_MNEM_subs
);
10561 narrow
= !in_it_block ();
10563 narrow
= in_it_block ();
10564 if (!inst
.operands
[2].isreg
)
10568 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
10569 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10571 add
= (inst
.instruction
== T_MNEM_add
10572 || inst
.instruction
== T_MNEM_adds
);
10574 if (inst
.size_req
!= 4)
10576 /* Attempt to use a narrow opcode, with relaxation if
10578 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
10579 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
10580 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
10581 opcode
= T_MNEM_add_sp
;
10582 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
10583 opcode
= T_MNEM_add_pc
;
10584 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
10587 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
10589 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
10593 inst
.instruction
= THUMB_OP16(opcode
);
10594 inst
.instruction
|= (Rd
<< 4) | Rs
;
10595 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10596 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
10598 if (inst
.size_req
== 2)
10599 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10601 inst
.relax
= opcode
;
10605 constraint (inst
.size_req
== 2, BAD_HIREG
);
10607 if (inst
.size_req
== 4
10608 || (inst
.size_req
!= 2 && !opcode
))
10610 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10611 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
10612 THUMB1_RELOC_ONLY
);
10615 constraint (add
, BAD_PC
);
10616 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
10617 _("only SUBS PC, LR, #const allowed"));
10618 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10619 _("expression too complex"));
10620 constraint (inst
.reloc
.exp
.X_add_number
< 0
10621 || inst
.reloc
.exp
.X_add_number
> 0xff,
10622 _("immediate value out of range"));
10623 inst
.instruction
= T2_SUBS_PC_LR
10624 | inst
.reloc
.exp
.X_add_number
;
10625 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10628 else if (Rs
== REG_PC
)
10630 /* Always use addw/subw. */
10631 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
10632 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
10636 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10637 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
10640 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10642 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
10644 inst
.instruction
|= Rd
<< 8;
10645 inst
.instruction
|= Rs
<< 16;
10650 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
10651 unsigned int shift
= inst
.operands
[2].shift_kind
;
10653 Rn
= inst
.operands
[2].reg
;
10654 /* See if we can do this with a 16-bit instruction. */
10655 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
10657 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10662 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
10663 || inst
.instruction
== T_MNEM_add
)
10665 : T_OPCODE_SUB_R3
);
10666 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10670 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
10672 /* Thumb-1 cores (except v6-M) require at least one high
10673 register in a narrow non flag setting add. */
10674 if (Rd
> 7 || Rn
> 7
10675 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
10676 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
10683 inst
.instruction
= T_OPCODE_ADD_HI
;
10684 inst
.instruction
|= (Rd
& 8) << 4;
10685 inst
.instruction
|= (Rd
& 7);
10686 inst
.instruction
|= Rn
<< 3;
10692 constraint (Rd
== REG_PC
, BAD_PC
);
10693 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
10694 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10695 constraint (Rs
== REG_PC
, BAD_PC
);
10696 reject_bad_reg (Rn
);
10698 /* If we get here, it can't be done in 16 bits. */
10699 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
10700 _("shift must be constant"));
10701 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10702 inst
.instruction
|= Rd
<< 8;
10703 inst
.instruction
|= Rs
<< 16;
10704 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
10705 _("shift value over 3 not allowed in thumb mode"));
10706 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
10707 _("only LSL shift allowed in thumb mode"));
10708 encode_thumb32_shifted_operand (2);
10713 constraint (inst
.instruction
== T_MNEM_adds
10714 || inst
.instruction
== T_MNEM_subs
,
10717 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
10719 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
10720 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
10723 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10724 ? 0x0000 : 0x8000);
10725 inst
.instruction
|= (Rd
<< 4) | Rs
;
10726 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10730 Rn
= inst
.operands
[2].reg
;
10731 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
10733 /* We now have Rd, Rs, and Rn set to registers. */
10734 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10736 /* Can't do this for SUB. */
10737 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
10738 inst
.instruction
= T_OPCODE_ADD_HI
;
10739 inst
.instruction
|= (Rd
& 8) << 4;
10740 inst
.instruction
|= (Rd
& 7);
10742 inst
.instruction
|= Rn
<< 3;
10744 inst
.instruction
|= Rs
<< 3;
10746 constraint (1, _("dest must overlap one source register"));
10750 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10751 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
10752 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10762 Rd
= inst
.operands
[0].reg
;
10763 reject_bad_reg (Rd
);
10765 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
10767 /* Defer to section relaxation. */
10768 inst
.relax
= inst
.instruction
;
10769 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10770 inst
.instruction
|= Rd
<< 4;
10772 else if (unified_syntax
&& inst
.size_req
!= 2)
10774 /* Generate a 32-bit opcode. */
10775 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10776 inst
.instruction
|= Rd
<< 8;
10777 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
10778 inst
.reloc
.pc_rel
= 1;
10782 /* Generate a 16-bit opcode. */
10783 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10784 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
10785 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
10786 inst
.reloc
.pc_rel
= 1;
10787 inst
.instruction
|= Rd
<< 4;
10790 if (inst
.reloc
.exp
.X_op
== O_symbol
10791 && inst
.reloc
.exp
.X_add_symbol
!= NULL
10792 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
10793 && THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
10794 inst
.reloc
.exp
.X_add_number
+= 1;
10797 /* Arithmetic instructions for which there is just one 16-bit
10798 instruction encoding, and it allows only two low registers.
10799 For maximal compatibility with ARM syntax, we allow three register
10800 operands even when Thumb-32 instructions are not available, as long
10801 as the first two are identical. For instance, both "sbc r0,r1" and
10802 "sbc r0,r0,r1" are allowed. */
10808 Rd
= inst
.operands
[0].reg
;
10809 Rs
= (inst
.operands
[1].present
10810 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10811 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10812 Rn
= inst
.operands
[2].reg
;
10814 reject_bad_reg (Rd
);
10815 reject_bad_reg (Rs
);
10816 if (inst
.operands
[2].isreg
)
10817 reject_bad_reg (Rn
);
10819 if (unified_syntax
)
10821 if (!inst
.operands
[2].isreg
)
10823 /* For an immediate, we always generate a 32-bit opcode;
10824 section relaxation will shrink it later if possible. */
10825 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10826 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10827 inst
.instruction
|= Rd
<< 8;
10828 inst
.instruction
|= Rs
<< 16;
10829 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10833 bfd_boolean narrow
;
10835 /* See if we can do this with a 16-bit instruction. */
10836 if (THUMB_SETS_FLAGS (inst
.instruction
))
10837 narrow
= !in_it_block ();
10839 narrow
= in_it_block ();
10841 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10843 if (inst
.operands
[2].shifted
)
10845 if (inst
.size_req
== 4)
10851 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10852 inst
.instruction
|= Rd
;
10853 inst
.instruction
|= Rn
<< 3;
10857 /* If we get here, it can't be done in 16 bits. */
10858 constraint (inst
.operands
[2].shifted
10859 && inst
.operands
[2].immisreg
,
10860 _("shift must be constant"));
10861 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10862 inst
.instruction
|= Rd
<< 8;
10863 inst
.instruction
|= Rs
<< 16;
10864 encode_thumb32_shifted_operand (2);
10869 /* On its face this is a lie - the instruction does set the
10870 flags. However, the only supported mnemonic in this mode
10871 says it doesn't. */
10872 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10874 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10875 _("unshifted register required"));
10876 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10877 constraint (Rd
!= Rs
,
10878 _("dest and source1 must be the same register"));
10880 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10881 inst
.instruction
|= Rd
;
10882 inst
.instruction
|= Rn
<< 3;
10886 /* Similarly, but for instructions where the arithmetic operation is
10887 commutative, so we can allow either of them to be different from
10888 the destination operand in a 16-bit instruction. For instance, all
10889 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10896 Rd
= inst
.operands
[0].reg
;
10897 Rs
= (inst
.operands
[1].present
10898 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10899 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10900 Rn
= inst
.operands
[2].reg
;
10902 reject_bad_reg (Rd
);
10903 reject_bad_reg (Rs
);
10904 if (inst
.operands
[2].isreg
)
10905 reject_bad_reg (Rn
);
10907 if (unified_syntax
)
10909 if (!inst
.operands
[2].isreg
)
10911 /* For an immediate, we always generate a 32-bit opcode;
10912 section relaxation will shrink it later if possible. */
10913 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10914 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10915 inst
.instruction
|= Rd
<< 8;
10916 inst
.instruction
|= Rs
<< 16;
10917 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10921 bfd_boolean narrow
;
10923 /* See if we can do this with a 16-bit instruction. */
10924 if (THUMB_SETS_FLAGS (inst
.instruction
))
10925 narrow
= !in_it_block ();
10927 narrow
= in_it_block ();
10929 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
10931 if (inst
.operands
[2].shifted
)
10933 if (inst
.size_req
== 4)
10940 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10941 inst
.instruction
|= Rd
;
10942 inst
.instruction
|= Rn
<< 3;
10947 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10948 inst
.instruction
|= Rd
;
10949 inst
.instruction
|= Rs
<< 3;
10954 /* If we get here, it can't be done in 16 bits. */
10955 constraint (inst
.operands
[2].shifted
10956 && inst
.operands
[2].immisreg
,
10957 _("shift must be constant"));
10958 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10959 inst
.instruction
|= Rd
<< 8;
10960 inst
.instruction
|= Rs
<< 16;
10961 encode_thumb32_shifted_operand (2);
10966 /* On its face this is a lie - the instruction does set the
10967 flags. However, the only supported mnemonic in this mode
10968 says it doesn't. */
10969 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10971 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
10972 _("unshifted register required"));
10973 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
10975 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10976 inst
.instruction
|= Rd
;
10979 inst
.instruction
|= Rn
<< 3;
10981 inst
.instruction
|= Rs
<< 3;
10983 constraint (1, _("dest must overlap one source register"));
10991 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
10992 constraint (msb
> 32, _("bit-field extends past end of register"));
10993 /* The instruction encoding stores the LSB and MSB,
10994 not the LSB and width. */
10995 Rd
= inst
.operands
[0].reg
;
10996 reject_bad_reg (Rd
);
10997 inst
.instruction
|= Rd
<< 8;
10998 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
10999 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
11000 inst
.instruction
|= msb
- 1;
11009 Rd
= inst
.operands
[0].reg
;
11010 reject_bad_reg (Rd
);
11012 /* #0 in second position is alternative syntax for bfc, which is
11013 the same instruction but with REG_PC in the Rm field. */
11014 if (!inst
.operands
[1].isreg
)
11018 Rn
= inst
.operands
[1].reg
;
11019 reject_bad_reg (Rn
);
11022 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
11023 constraint (msb
> 32, _("bit-field extends past end of register"));
11024 /* The instruction encoding stores the LSB and MSB,
11025 not the LSB and width. */
11026 inst
.instruction
|= Rd
<< 8;
11027 inst
.instruction
|= Rn
<< 16;
11028 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11029 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11030 inst
.instruction
|= msb
- 1;
11038 Rd
= inst
.operands
[0].reg
;
11039 Rn
= inst
.operands
[1].reg
;
11041 reject_bad_reg (Rd
);
11042 reject_bad_reg (Rn
);
11044 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
11045 _("bit-field extends past end of register"));
11046 inst
.instruction
|= Rd
<< 8;
11047 inst
.instruction
|= Rn
<< 16;
11048 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11049 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11050 inst
.instruction
|= inst
.operands
[3].imm
- 1;
11053 /* ARM V5 Thumb BLX (argument parse)
11054 BLX <target_addr> which is BLX(1)
11055 BLX <Rm> which is BLX(2)
11056 Unfortunately, there are two different opcodes for this mnemonic.
11057 So, the insns[].value is not used, and the code here zaps values
11058 into inst.instruction.
11060 ??? How to take advantage of the additional two bits of displacement
11061 available in Thumb32 mode? Need new relocation? */
11066 set_it_insn_type_last ();
11068 if (inst
.operands
[0].isreg
)
11070 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
11071 /* We have a register, so this is BLX(2). */
11072 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11076 /* No register. This must be BLX(1). */
11077 inst
.instruction
= 0xf000e800;
11078 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
11087 bfd_reloc_code_real_type reloc
;
11090 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
11092 if (in_it_block ())
11094 /* Conditional branches inside IT blocks are encoded as unconditional
11096 cond
= COND_ALWAYS
;
11101 if (cond
!= COND_ALWAYS
)
11102 opcode
= T_MNEM_bcond
;
11104 opcode
= inst
.instruction
;
11107 && (inst
.size_req
== 4
11108 || (inst
.size_req
!= 2
11109 && (inst
.operands
[0].hasreloc
11110 || inst
.reloc
.exp
.X_op
== O_constant
))))
11112 inst
.instruction
= THUMB_OP32(opcode
);
11113 if (cond
== COND_ALWAYS
)
11114 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
11117 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
11118 _("selected architecture does not support "
11119 "wide conditional branch instruction"));
11121 gas_assert (cond
!= 0xF);
11122 inst
.instruction
|= cond
<< 22;
11123 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
11128 inst
.instruction
= THUMB_OP16(opcode
);
11129 if (cond
== COND_ALWAYS
)
11130 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
11133 inst
.instruction
|= cond
<< 8;
11134 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
11136 /* Allow section relaxation. */
11137 if (unified_syntax
&& inst
.size_req
!= 2)
11138 inst
.relax
= opcode
;
11140 inst
.reloc
.type
= reloc
;
11141 inst
.reloc
.pc_rel
= 1;
11144 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11145 between the two is the maximum immediate allowed - which is passed in
11148 do_t_bkpt_hlt1 (int range
)
11150 constraint (inst
.cond
!= COND_ALWAYS
,
11151 _("instruction is always unconditional"));
11152 if (inst
.operands
[0].present
)
11154 constraint (inst
.operands
[0].imm
> range
,
11155 _("immediate value out of range"));
11156 inst
.instruction
|= inst
.operands
[0].imm
;
11159 set_it_insn_type (NEUTRAL_IT_INSN
);
/* Thumb HLT: 6-bit immediate.  */

static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
/* Thumb BKPT: 8-bit immediate.  */

static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11175 do_t_branch23 (void)
11177 set_it_insn_type_last ();
11178 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11180 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11181 this file. We used to simply ignore the PLT reloc type here --
11182 the branch encoding is now needed to deal with TLSCALL relocs.
11183 So if we see a PLT reloc now, put it back to how it used to be to
11184 keep the preexisting behaviour. */
11185 if (inst
.reloc
.type
== BFD_RELOC_ARM_PLT32
)
11186 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11188 #if defined(OBJ_COFF)
11189 /* If the destination of the branch is a defined symbol which does not have
11190 the THUMB_FUNC attribute, then we must be calling a function which has
11191 the (interfacearm) attribute. We look for the Thumb entry point to that
11192 function and change the branch to refer to that function instead. */
11193 if ( inst
.reloc
.exp
.X_op
== O_symbol
11194 && inst
.reloc
.exp
.X_add_symbol
!= NULL
11195 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
11196 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
11197 inst
.reloc
.exp
.X_add_symbol
=
11198 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
11205 set_it_insn_type_last ();
11206 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11207 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11208 should cause the alignment to be checked once it is known. This is
11209 because BX PC only works if the instruction is word aligned. */
11217 set_it_insn_type_last ();
11218 Rm
= inst
.operands
[0].reg
;
11219 reject_bad_reg (Rm
);
11220 inst
.instruction
|= Rm
<< 16;
11229 Rd
= inst
.operands
[0].reg
;
11230 Rm
= inst
.operands
[1].reg
;
11232 reject_bad_reg (Rd
);
11233 reject_bad_reg (Rm
);
11235 inst
.instruction
|= Rd
<< 8;
11236 inst
.instruction
|= Rm
<< 16;
11237 inst
.instruction
|= Rm
;
11243 set_it_insn_type (OUTSIDE_IT_INSN
);
11244 inst
.instruction
|= inst
.operands
[0].imm
;
11250 set_it_insn_type (OUTSIDE_IT_INSN
);
11252 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11253 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11255 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11256 inst
.instruction
= 0xf3af8000;
11257 inst
.instruction
|= imod
<< 9;
11258 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11259 if (inst
.operands
[1].present
)
11260 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11264 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11265 && (inst
.operands
[0].imm
& 4),
11266 _("selected processor does not support 'A' form "
11267 "of this instruction"));
11268 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11269 _("Thumb does not support the 2-argument "
11270 "form of this instruction"));
11271 inst
.instruction
|= inst
.operands
[0].imm
;
11275 /* THUMB CPY instruction (argument parse). */
11280 if (inst
.size_req
== 4)
11282 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11283 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11284 inst
.instruction
|= inst
.operands
[1].reg
;
11288 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11289 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11290 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11297 set_it_insn_type (OUTSIDE_IT_INSN
);
11298 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11299 inst
.instruction
|= inst
.operands
[0].reg
;
11300 inst
.reloc
.pc_rel
= 1;
11301 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11307 inst
.instruction
|= inst
.operands
[0].imm
;
11313 unsigned Rd
, Rn
, Rm
;
11315 Rd
= inst
.operands
[0].reg
;
11316 Rn
= (inst
.operands
[1].present
11317 ? inst
.operands
[1].reg
: Rd
);
11318 Rm
= inst
.operands
[2].reg
;
11320 reject_bad_reg (Rd
);
11321 reject_bad_reg (Rn
);
11322 reject_bad_reg (Rm
);
11324 inst
.instruction
|= Rd
<< 8;
11325 inst
.instruction
|= Rn
<< 16;
11326 inst
.instruction
|= Rm
;
11332 if (unified_syntax
&& inst
.size_req
== 4)
11333 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11335 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11341 unsigned int cond
= inst
.operands
[0].imm
;
11343 set_it_insn_type (IT_INSN
);
11344 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
11346 now_it
.warn_deprecated
= FALSE
;
11348 /* If the condition is a negative condition, invert the mask. */
11349 if ((cond
& 0x1) == 0x0)
11351 unsigned int mask
= inst
.instruction
& 0x000f;
11353 if ((mask
& 0x7) == 0)
11355 /* No conversion needed. */
11356 now_it
.block_length
= 1;
11358 else if ((mask
& 0x3) == 0)
11361 now_it
.block_length
= 2;
11363 else if ((mask
& 0x1) == 0)
11366 now_it
.block_length
= 3;
11371 now_it
.block_length
= 4;
11374 inst
.instruction
&= 0xfff0;
11375 inst
.instruction
|= mask
;
11378 inst
.instruction
|= cond
<< 4;
11381 /* Helper function used for both push/pop and ldm/stm. */
11383 encode_thumb2_ldmstm (int base
, unsigned mask
, bfd_boolean writeback
)
11387 load
= (inst
.instruction
& (1 << 20)) != 0;
11389 if (mask
& (1 << 13))
11390 inst
.error
= _("SP not allowed in register list");
11392 if ((mask
& (1 << base
)) != 0
11394 inst
.error
= _("having the base register in the register list when "
11395 "using write back is UNPREDICTABLE");
11399 if (mask
& (1 << 15))
11401 if (mask
& (1 << 14))
11402 inst
.error
= _("LR and PC should not both be in register list");
11404 set_it_insn_type_last ();
11409 if (mask
& (1 << 15))
11410 inst
.error
= _("PC not allowed in register list");
11413 if ((mask
& (mask
- 1)) == 0)
11415 /* Single register transfers implemented as str/ldr. */
11418 if (inst
.instruction
& (1 << 23))
11419 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11421 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11425 if (inst
.instruction
& (1 << 23))
11426 inst
.instruction
= 0x00800000; /* ia -> [base] */
11428 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11431 inst
.instruction
|= 0xf8400000;
11433 inst
.instruction
|= 0x00100000;
11435 mask
= ffs (mask
) - 1;
11438 else if (writeback
)
11439 inst
.instruction
|= WRITE_BACK
;
11441 inst
.instruction
|= mask
;
11442 inst
.instruction
|= base
<< 16;
11448 /* This really doesn't seem worth it. */
11449 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
11450 _("expression too complex"));
11451 constraint (inst
.operands
[1].writeback
,
11452 _("Thumb load/store multiple does not support {reglist}^"));
11454 if (unified_syntax
)
11456 bfd_boolean narrow
;
11460 /* See if we can use a 16-bit instruction. */
11461 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11462 && inst
.size_req
!= 4
11463 && !(inst
.operands
[1].imm
& ~0xff))
11465 mask
= 1 << inst
.operands
[0].reg
;
11467 if (inst
.operands
[0].reg
<= 7)
11469 if (inst
.instruction
== T_MNEM_stmia
11470 ? inst
.operands
[0].writeback
11471 : (inst
.operands
[0].writeback
11472 == !(inst
.operands
[1].imm
& mask
)))
11474 if (inst
.instruction
== T_MNEM_stmia
11475 && (inst
.operands
[1].imm
& mask
)
11476 && (inst
.operands
[1].imm
& (mask
- 1)))
11477 as_warn (_("value stored for r%d is UNKNOWN"),
11478 inst
.operands
[0].reg
);
11480 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11481 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11482 inst
.instruction
|= inst
.operands
[1].imm
;
11485 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11487 /* This means 1 register in reg list one of 3 situations:
11488 1. Instruction is stmia, but without writeback.
11489 2. lmdia without writeback, but with Rn not in
11491 3. ldmia with writeback, but with Rn in reglist.
11492 Case 3 is UNPREDICTABLE behaviour, so we handle
11493 case 1 and 2 which can be converted into a 16-bit
11494 str or ldr. The SP cases are handled below. */
11495 unsigned long opcode
;
11496 /* First, record an error for Case 3. */
11497 if (inst
.operands
[1].imm
& mask
11498 && inst
.operands
[0].writeback
)
11500 _("having the base register in the register list when "
11501 "using write back is UNPREDICTABLE");
11503 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11505 inst
.instruction
= THUMB_OP16 (opcode
);
11506 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11507 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11511 else if (inst
.operands
[0] .reg
== REG_SP
)
11513 if (inst
.operands
[0].writeback
)
11516 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11517 ? T_MNEM_push
: T_MNEM_pop
);
11518 inst
.instruction
|= inst
.operands
[1].imm
;
11521 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11524 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11525 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11526 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11534 if (inst
.instruction
< 0xffff)
11535 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11537 encode_thumb2_ldmstm (inst
.operands
[0].reg
, inst
.operands
[1].imm
,
11538 inst
.operands
[0].writeback
);
11543 constraint (inst
.operands
[0].reg
> 7
11544 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11545 constraint (inst
.instruction
!= T_MNEM_ldmia
11546 && inst
.instruction
!= T_MNEM_stmia
,
11547 _("Thumb-2 instruction only valid in unified syntax"));
11548 if (inst
.instruction
== T_MNEM_stmia
)
11550 if (!inst
.operands
[0].writeback
)
11551 as_warn (_("this instruction will write back the base register"));
11552 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11553 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11554 as_warn (_("value stored for r%d is UNKNOWN"),
11555 inst
.operands
[0].reg
);
11559 if (!inst
.operands
[0].writeback
11560 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11561 as_warn (_("this instruction will write back the base register"));
11562 else if (inst
.operands
[0].writeback
11563 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11564 as_warn (_("this instruction will not write back the base register"));
11567 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11568 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11569 inst
.instruction
|= inst
.operands
[1].imm
;
11576 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11577 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11578 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
11579 || inst
.operands
[1].negative
,
11582 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
11584 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11585 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11586 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11592 if (!inst
.operands
[1].present
)
11594 constraint (inst
.operands
[0].reg
== REG_LR
,
11595 _("r14 not allowed as first register "
11596 "when second register is omitted"));
11597 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11599 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11602 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11603 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11604 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11610 unsigned long opcode
;
11613 if (inst
.operands
[0].isreg
11614 && !inst
.operands
[0].preind
11615 && inst
.operands
[0].reg
== REG_PC
)
11616 set_it_insn_type_last ();
11618 opcode
= inst
.instruction
;
11619 if (unified_syntax
)
11621 if (!inst
.operands
[1].isreg
)
11623 if (opcode
<= 0xffff)
11624 inst
.instruction
= THUMB_OP32 (opcode
);
11625 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11628 if (inst
.operands
[1].isreg
11629 && !inst
.operands
[1].writeback
11630 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
11631 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
11632 && opcode
<= 0xffff
11633 && inst
.size_req
!= 4)
11635 /* Insn may have a 16-bit form. */
11636 Rn
= inst
.operands
[1].reg
;
11637 if (inst
.operands
[1].immisreg
)
11639 inst
.instruction
= THUMB_OP16 (opcode
);
11641 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
11643 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
11644 reject_bad_reg (inst
.operands
[1].imm
);
11646 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
11647 && opcode
!= T_MNEM_ldrsb
)
11648 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
11649 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
11656 if (inst
.reloc
.pc_rel
)
11657 opcode
= T_MNEM_ldr_pc2
;
11659 opcode
= T_MNEM_ldr_pc
;
11663 if (opcode
== T_MNEM_ldr
)
11664 opcode
= T_MNEM_ldr_sp
;
11666 opcode
= T_MNEM_str_sp
;
11668 inst
.instruction
= inst
.operands
[0].reg
<< 8;
11672 inst
.instruction
= inst
.operands
[0].reg
;
11673 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11675 inst
.instruction
|= THUMB_OP16 (opcode
);
11676 if (inst
.size_req
== 2)
11677 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11679 inst
.relax
= opcode
;
11683 /* Definitely a 32-bit variant. */
11685 /* Warning for Erratum 752419. */
11686 if (opcode
== T_MNEM_ldr
11687 && inst
.operands
[0].reg
== REG_SP
11688 && inst
.operands
[1].writeback
== 1
11689 && !inst
.operands
[1].immisreg
)
11691 if (no_cpu_selected ()
11692 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
11693 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
11694 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
11695 as_warn (_("This instruction may be unpredictable "
11696 "if executed on M-profile cores "
11697 "with interrupts enabled."));
11700 /* Do some validations regarding addressing modes. */
11701 if (inst
.operands
[1].immisreg
)
11702 reject_bad_reg (inst
.operands
[1].imm
);
11704 constraint (inst
.operands
[1].writeback
== 1
11705 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11708 inst
.instruction
= THUMB_OP32 (opcode
);
11709 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11710 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
11711 check_ldr_r15_aligned ();
11715 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11717 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
11719 /* Only [Rn,Rm] is acceptable. */
11720 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
11721 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
11722 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
11723 || inst
.operands
[1].negative
,
11724 _("Thumb does not support this addressing mode"));
11725 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11729 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11730 if (!inst
.operands
[1].isreg
)
11731 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11734 constraint (!inst
.operands
[1].preind
11735 || inst
.operands
[1].shifted
11736 || inst
.operands
[1].writeback
,
11737 _("Thumb does not support this addressing mode"));
11738 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
11740 constraint (inst
.instruction
& 0x0600,
11741 _("byte or halfword not valid for base register"));
11742 constraint (inst
.operands
[1].reg
== REG_PC
11743 && !(inst
.instruction
& THUMB_LOAD_BIT
),
11744 _("r15 based store not allowed"));
11745 constraint (inst
.operands
[1].immisreg
,
11746 _("invalid base register for register offset"));
11748 if (inst
.operands
[1].reg
== REG_PC
)
11749 inst
.instruction
= T_OPCODE_LDR_PC
;
11750 else if (inst
.instruction
& THUMB_LOAD_BIT
)
11751 inst
.instruction
= T_OPCODE_LDR_SP
;
11753 inst
.instruction
= T_OPCODE_STR_SP
;
11755 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11756 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11760 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
11761 if (!inst
.operands
[1].immisreg
)
11763 /* Immediate offset. */
11764 inst
.instruction
|= inst
.operands
[0].reg
;
11765 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11766 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11770 /* Register offset. */
11771 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
11772 constraint (inst
.operands
[1].negative
,
11773 _("Thumb does not support this addressing mode"));
11776 switch (inst
.instruction
)
11778 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
11779 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
11780 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
11781 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
11782 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
11783 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
11784 case 0x5600 /* ldrsb */:
11785 case 0x5e00 /* ldrsh */: break;
11789 inst
.instruction
|= inst
.operands
[0].reg
;
11790 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11791 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
11797 if (!inst
.operands
[1].present
)
11799 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11800 constraint (inst
.operands
[0].reg
== REG_LR
,
11801 _("r14 not allowed here"));
11802 constraint (inst
.operands
[0].reg
== REG_R12
,
11803 _("r12 not allowed here"));
11806 if (inst
.operands
[2].writeback
11807 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
11808 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
11809 as_warn (_("base register written back, and overlaps "
11810 "one of transfer registers"));
11812 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11813 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11814 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
11820 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11821 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
11827 unsigned Rd
, Rn
, Rm
, Ra
;
11829 Rd
= inst
.operands
[0].reg
;
11830 Rn
= inst
.operands
[1].reg
;
11831 Rm
= inst
.operands
[2].reg
;
11832 Ra
= inst
.operands
[3].reg
;
11834 reject_bad_reg (Rd
);
11835 reject_bad_reg (Rn
);
11836 reject_bad_reg (Rm
);
11837 reject_bad_reg (Ra
);
11839 inst
.instruction
|= Rd
<< 8;
11840 inst
.instruction
|= Rn
<< 16;
11841 inst
.instruction
|= Rm
;
11842 inst
.instruction
|= Ra
<< 12;
11848 unsigned RdLo
, RdHi
, Rn
, Rm
;
11850 RdLo
= inst
.operands
[0].reg
;
11851 RdHi
= inst
.operands
[1].reg
;
11852 Rn
= inst
.operands
[2].reg
;
11853 Rm
= inst
.operands
[3].reg
;
11855 reject_bad_reg (RdLo
);
11856 reject_bad_reg (RdHi
);
11857 reject_bad_reg (Rn
);
11858 reject_bad_reg (Rm
);
11860 inst
.instruction
|= RdLo
<< 12;
11861 inst
.instruction
|= RdHi
<< 8;
11862 inst
.instruction
|= Rn
<< 16;
11863 inst
.instruction
|= Rm
;
11867 do_t_mov_cmp (void)
11871 Rn
= inst
.operands
[0].reg
;
11872 Rm
= inst
.operands
[1].reg
;
11875 set_it_insn_type_last ();
11877 if (unified_syntax
)
11879 int r0off
= (inst
.instruction
== T_MNEM_mov
11880 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
11881 unsigned long opcode
;
11882 bfd_boolean narrow
;
11883 bfd_boolean low_regs
;
11885 low_regs
= (Rn
<= 7 && Rm
<= 7);
11886 opcode
= inst
.instruction
;
11887 if (in_it_block ())
11888 narrow
= opcode
!= T_MNEM_movs
;
11890 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
11891 if (inst
.size_req
== 4
11892 || inst
.operands
[1].shifted
)
11895 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
11896 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
11897 && !inst
.operands
[1].shifted
11901 inst
.instruction
= T2_SUBS_PC_LR
;
11905 if (opcode
== T_MNEM_cmp
)
11907 constraint (Rn
== REG_PC
, BAD_PC
);
11910 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11912 warn_deprecated_sp (Rm
);
11913 /* R15 was documented as a valid choice for Rm in ARMv6,
11914 but as UNPREDICTABLE in ARMv7. ARM's proprietary
11915 tools reject R15, so we do too. */
11916 constraint (Rm
== REG_PC
, BAD_PC
);
11919 reject_bad_reg (Rm
);
11921 else if (opcode
== T_MNEM_mov
11922 || opcode
== T_MNEM_movs
)
11924 if (inst
.operands
[1].isreg
)
11926 if (opcode
== T_MNEM_movs
)
11928 reject_bad_reg (Rn
);
11929 reject_bad_reg (Rm
);
11933 /* This is mov.n. */
11934 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
11935 && (Rm
== REG_SP
|| Rm
== REG_PC
))
11937 as_tsktsk (_("Use of r%u as a source register is "
11938 "deprecated when r%u is the destination "
11939 "register."), Rm
, Rn
);
11944 /* This is mov.w. */
11945 constraint (Rn
== REG_PC
, BAD_PC
);
11946 constraint (Rm
== REG_PC
, BAD_PC
);
11947 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
11948 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
11952 reject_bad_reg (Rn
);
11955 if (!inst
.operands
[1].isreg
)
11957 /* Immediate operand. */
11958 if (!in_it_block () && opcode
== T_MNEM_mov
)
11960 if (low_regs
&& narrow
)
11962 inst
.instruction
= THUMB_OP16 (opcode
);
11963 inst
.instruction
|= Rn
<< 8;
11964 if (inst
.reloc
.type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11965 || inst
.reloc
.type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
11967 if (inst
.size_req
== 2)
11968 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
11970 inst
.relax
= opcode
;
11975 constraint (inst
.reloc
.type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11976 && inst
.reloc
.type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
11977 THUMB1_RELOC_ONLY
);
11979 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11980 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11981 inst
.instruction
|= Rn
<< r0off
;
11982 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11985 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
11986 && (inst
.instruction
== T_MNEM_mov
11987 || inst
.instruction
== T_MNEM_movs
))
11989 /* Register shifts are encoded as separate shift instructions. */
11990 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
11992 if (in_it_block ())
11997 if (inst
.size_req
== 4)
12000 if (!low_regs
|| inst
.operands
[1].imm
> 7)
12006 switch (inst
.operands
[1].shift_kind
)
12009 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
12012 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
12015 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
12018 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
12024 inst
.instruction
= opcode
;
12027 inst
.instruction
|= Rn
;
12028 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
12033 inst
.instruction
|= CONDS_BIT
;
12035 inst
.instruction
|= Rn
<< 8;
12036 inst
.instruction
|= Rm
<< 16;
12037 inst
.instruction
|= inst
.operands
[1].imm
;
12042 /* Some mov with immediate shift have narrow variants.
12043 Register shifts are handled above. */
12044 if (low_regs
&& inst
.operands
[1].shifted
12045 && (inst
.instruction
== T_MNEM_mov
12046 || inst
.instruction
== T_MNEM_movs
))
12048 if (in_it_block ())
12049 narrow
= (inst
.instruction
== T_MNEM_mov
);
12051 narrow
= (inst
.instruction
== T_MNEM_movs
);
12056 switch (inst
.operands
[1].shift_kind
)
12058 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12059 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12060 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12061 default: narrow
= FALSE
; break;
12067 inst
.instruction
|= Rn
;
12068 inst
.instruction
|= Rm
<< 3;
12069 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12073 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12074 inst
.instruction
|= Rn
<< r0off
;
12075 encode_thumb32_shifted_operand (1);
12079 switch (inst
.instruction
)
12082 /* In v4t or v5t a move of two lowregs produces unpredictable
12083 results. Don't allow this. */
12086 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
12087 "MOV Rd, Rs with two low registers is not "
12088 "permitted on this architecture");
12089 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12093 inst
.instruction
= T_OPCODE_MOV_HR
;
12094 inst
.instruction
|= (Rn
& 0x8) << 4;
12095 inst
.instruction
|= (Rn
& 0x7);
12096 inst
.instruction
|= Rm
<< 3;
12100 /* We know we have low registers at this point.
12101 Generate LSLS Rd, Rs, #0. */
12102 inst
.instruction
= T_OPCODE_LSL_I
;
12103 inst
.instruction
|= Rn
;
12104 inst
.instruction
|= Rm
<< 3;
12110 inst
.instruction
= T_OPCODE_CMP_LR
;
12111 inst
.instruction
|= Rn
;
12112 inst
.instruction
|= Rm
<< 3;
12116 inst
.instruction
= T_OPCODE_CMP_HR
;
12117 inst
.instruction
|= (Rn
& 0x8) << 4;
12118 inst
.instruction
|= (Rn
& 0x7);
12119 inst
.instruction
|= Rm
<< 3;
12126 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12128 /* PR 10443: Do not silently ignore shifted operands. */
12129 constraint (inst
.operands
[1].shifted
,
12130 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12132 if (inst
.operands
[1].isreg
)
12134 if (Rn
< 8 && Rm
< 8)
12136 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12137 since a MOV instruction produces unpredictable results. */
12138 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12139 inst
.instruction
= T_OPCODE_ADD_I3
;
12141 inst
.instruction
= T_OPCODE_CMP_LR
;
12143 inst
.instruction
|= Rn
;
12144 inst
.instruction
|= Rm
<< 3;
12148 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12149 inst
.instruction
= T_OPCODE_MOV_HR
;
12151 inst
.instruction
= T_OPCODE_CMP_HR
;
12157 constraint (Rn
> 7,
12158 _("only lo regs allowed with immediate"));
12159 inst
.instruction
|= Rn
<< 8;
12160 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
12171 top
= (inst
.instruction
& 0x00800000) != 0;
12172 if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
)
12174 constraint (top
, _(":lower16: not allowed in this instruction"));
12175 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVW
;
12177 else if (inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
)
12179 constraint (!top
, _(":upper16: not allowed in this instruction"));
12180 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_MOVT
;
12183 Rd
= inst
.operands
[0].reg
;
12184 reject_bad_reg (Rd
);
12186 inst
.instruction
|= Rd
<< 8;
12187 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
12189 imm
= inst
.reloc
.exp
.X_add_number
;
12190 inst
.instruction
|= (imm
& 0xf000) << 4;
12191 inst
.instruction
|= (imm
& 0x0800) << 15;
12192 inst
.instruction
|= (imm
& 0x0700) << 4;
12193 inst
.instruction
|= (imm
& 0x00ff);
12198 do_t_mvn_tst (void)
12202 Rn
= inst
.operands
[0].reg
;
12203 Rm
= inst
.operands
[1].reg
;
12205 if (inst
.instruction
== T_MNEM_cmp
12206 || inst
.instruction
== T_MNEM_cmn
)
12207 constraint (Rn
== REG_PC
, BAD_PC
);
12209 reject_bad_reg (Rn
);
12210 reject_bad_reg (Rm
);
12212 if (unified_syntax
)
12214 int r0off
= (inst
.instruction
== T_MNEM_mvn
12215 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12216 bfd_boolean narrow
;
12218 if (inst
.size_req
== 4
12219 || inst
.instruction
> 0xffff
12220 || inst
.operands
[1].shifted
12221 || Rn
> 7 || Rm
> 7)
12223 else if (inst
.instruction
== T_MNEM_cmn
12224 || inst
.instruction
== T_MNEM_tst
)
12226 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12227 narrow
= !in_it_block ();
12229 narrow
= in_it_block ();
12231 if (!inst
.operands
[1].isreg
)
12233 /* For an immediate, we always generate a 32-bit opcode;
12234 section relaxation will shrink it later if possible. */
12235 if (inst
.instruction
< 0xffff)
12236 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12237 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12238 inst
.instruction
|= Rn
<< r0off
;
12239 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12243 /* See if we can do this with a 16-bit instruction. */
12246 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12247 inst
.instruction
|= Rn
;
12248 inst
.instruction
|= Rm
<< 3;
12252 constraint (inst
.operands
[1].shifted
12253 && inst
.operands
[1].immisreg
,
12254 _("shift must be constant"));
12255 if (inst
.instruction
< 0xffff)
12256 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12257 inst
.instruction
|= Rn
<< r0off
;
12258 encode_thumb32_shifted_operand (1);
12264 constraint (inst
.instruction
> 0xffff
12265 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12266 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12267 _("unshifted register required"));
12268 constraint (Rn
> 7 || Rm
> 7,
12271 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12272 inst
.instruction
|= Rn
;
12273 inst
.instruction
|= Rm
<< 3;
12282 if (do_vfp_nsyn_mrs () == SUCCESS
)
12285 Rd
= inst
.operands
[0].reg
;
12286 reject_bad_reg (Rd
);
12287 inst
.instruction
|= Rd
<< 8;
12289 if (inst
.operands
[1].isreg
)
12291 unsigned br
= inst
.operands
[1].reg
;
12292 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12293 as_bad (_("bad register for mrs"));
12295 inst
.instruction
|= br
& (0xf << 16);
12296 inst
.instruction
|= (br
& 0x300) >> 4;
12297 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12301 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12303 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12305 /* PR gas/12698: The constraint is only applied for m_profile.
12306 If the user has specified -march=all, we want to ignore it as
12307 we are building for any CPU type, including non-m variants. */
12308 bfd_boolean m_profile
=
12309 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12310 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12311 "not support requested special purpose register"));
12314 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12316 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12317 _("'APSR', 'CPSR' or 'SPSR' expected"));
12319 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12320 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12321 inst
.instruction
|= 0xf0000;
12331 if (do_vfp_nsyn_msr () == SUCCESS
)
12334 constraint (!inst
.operands
[1].isreg
,
12335 _("Thumb encoding does not support an immediate here"));
12337 if (inst
.operands
[0].isreg
)
12338 flags
= (int)(inst
.operands
[0].reg
);
12340 flags
= inst
.operands
[0].imm
;
12342 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12344 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12346 /* PR gas/12698: The constraint is only applied for m_profile.
12347 If the user has specified -march=all, we want to ignore it as
12348 we are building for any CPU type, including non-m variants. */
12349 bfd_boolean m_profile
=
12350 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12351 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12352 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12353 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12354 && bits
!= PSR_f
)) && m_profile
,
12355 _("selected processor does not support requested special "
12356 "purpose register"));
12359 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12360 "requested special purpose register"));
12362 Rn
= inst
.operands
[1].reg
;
12363 reject_bad_reg (Rn
);
12365 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12366 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12367 inst
.instruction
|= (flags
& 0x300) >> 4;
12368 inst
.instruction
|= (flags
& 0xff);
12369 inst
.instruction
|= Rn
<< 16;
12375 bfd_boolean narrow
;
12376 unsigned Rd
, Rn
, Rm
;
12378 if (!inst
.operands
[2].present
)
12379 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12381 Rd
= inst
.operands
[0].reg
;
12382 Rn
= inst
.operands
[1].reg
;
12383 Rm
= inst
.operands
[2].reg
;
12385 if (unified_syntax
)
12387 if (inst
.size_req
== 4
12393 else if (inst
.instruction
== T_MNEM_muls
)
12394 narrow
= !in_it_block ();
12396 narrow
= in_it_block ();
12400 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12401 constraint (Rn
> 7 || Rm
> 7,
12408 /* 16-bit MULS/Conditional MUL. */
12409 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12410 inst
.instruction
|= Rd
;
12413 inst
.instruction
|= Rm
<< 3;
12415 inst
.instruction
|= Rn
<< 3;
12417 constraint (1, _("dest must overlap one source register"));
12421 constraint (inst
.instruction
!= T_MNEM_mul
,
12422 _("Thumb-2 MUL must not set flags"));
12424 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12425 inst
.instruction
|= Rd
<< 8;
12426 inst
.instruction
|= Rn
<< 16;
12427 inst
.instruction
|= Rm
<< 0;
12429 reject_bad_reg (Rd
);
12430 reject_bad_reg (Rn
);
12431 reject_bad_reg (Rm
);
12438 unsigned RdLo
, RdHi
, Rn
, Rm
;
12440 RdLo
= inst
.operands
[0].reg
;
12441 RdHi
= inst
.operands
[1].reg
;
12442 Rn
= inst
.operands
[2].reg
;
12443 Rm
= inst
.operands
[3].reg
;
12445 reject_bad_reg (RdLo
);
12446 reject_bad_reg (RdHi
);
12447 reject_bad_reg (Rn
);
12448 reject_bad_reg (Rm
);
12450 inst
.instruction
|= RdLo
<< 12;
12451 inst
.instruction
|= RdHi
<< 8;
12452 inst
.instruction
|= Rn
<< 16;
12453 inst
.instruction
|= Rm
;
12456 as_tsktsk (_("rdhi and rdlo must be different"));
12462 set_it_insn_type (NEUTRAL_IT_INSN
);
12464 if (unified_syntax
)
12466 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12468 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12469 inst
.instruction
|= inst
.operands
[0].imm
;
12473 /* PR9722: Check for Thumb2 availability before
12474 generating a thumb2 nop instruction. */
12475 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12477 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12478 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12481 inst
.instruction
= 0x46c0;
12486 constraint (inst
.operands
[0].present
,
12487 _("Thumb does not support NOP with hints"));
12488 inst
.instruction
= 0x46c0;
12495 if (unified_syntax
)
12497 bfd_boolean narrow
;
12499 if (THUMB_SETS_FLAGS (inst
.instruction
))
12500 narrow
= !in_it_block ();
12502 narrow
= in_it_block ();
12503 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12505 if (inst
.size_req
== 4)
12510 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12511 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12512 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12516 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12517 inst
.instruction
|= inst
.operands
[0].reg
;
12518 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12523 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12525 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12527 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12528 inst
.instruction
|= inst
.operands
[0].reg
;
12529 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12538 Rd
= inst
.operands
[0].reg
;
12539 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12541 reject_bad_reg (Rd
);
12542 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12543 reject_bad_reg (Rn
);
12545 inst
.instruction
|= Rd
<< 8;
12546 inst
.instruction
|= Rn
<< 16;
12548 if (!inst
.operands
[2].isreg
)
12550 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12551 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12557 Rm
= inst
.operands
[2].reg
;
12558 reject_bad_reg (Rm
);
12560 constraint (inst
.operands
[2].shifted
12561 && inst
.operands
[2].immisreg
,
12562 _("shift must be constant"));
12563 encode_thumb32_shifted_operand (2);
12570 unsigned Rd
, Rn
, Rm
;
12572 Rd
= inst
.operands
[0].reg
;
12573 Rn
= inst
.operands
[1].reg
;
12574 Rm
= inst
.operands
[2].reg
;
12576 reject_bad_reg (Rd
);
12577 reject_bad_reg (Rn
);
12578 reject_bad_reg (Rm
);
12580 inst
.instruction
|= Rd
<< 8;
12581 inst
.instruction
|= Rn
<< 16;
12582 inst
.instruction
|= Rm
;
12583 if (inst
.operands
[3].present
)
12585 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
12586 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12587 _("expression too complex"));
12588 inst
.instruction
|= (val
& 0x1c) << 10;
12589 inst
.instruction
|= (val
& 0x03) << 6;
12596 if (!inst
.operands
[3].present
)
12600 inst
.instruction
&= ~0x00000020;
12602 /* PR 10168. Swap the Rm and Rn registers. */
12603 Rtmp
= inst
.operands
[1].reg
;
12604 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
12605 inst
.operands
[2].reg
= Rtmp
;
12613 if (inst
.operands
[0].immisreg
)
12614 reject_bad_reg (inst
.operands
[0].imm
);
12616 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12620 do_t_push_pop (void)
12624 constraint (inst
.operands
[0].writeback
,
12625 _("push/pop do not support {reglist}^"));
12626 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
12627 _("expression too complex"));
12629 mask
= inst
.operands
[0].imm
;
12630 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
12631 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
12632 else if (inst
.size_req
!= 4
12633 && (mask
& ~0xff) == (1U << (inst
.instruction
== T_MNEM_push
12634 ? REG_LR
: REG_PC
)))
12636 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12637 inst
.instruction
|= THUMB_PP_PC_LR
;
12638 inst
.instruction
|= mask
& 0xff;
12640 else if (unified_syntax
)
12642 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12643 encode_thumb2_ldmstm (13, mask
, TRUE
);
12647 inst
.error
= _("invalid register list to push/pop instruction");
12657 Rd
= inst
.operands
[0].reg
;
12658 Rm
= inst
.operands
[1].reg
;
12660 reject_bad_reg (Rd
);
12661 reject_bad_reg (Rm
);
12663 inst
.instruction
|= Rd
<< 8;
12664 inst
.instruction
|= Rm
<< 16;
12665 inst
.instruction
|= Rm
;
12673 Rd
= inst
.operands
[0].reg
;
12674 Rm
= inst
.operands
[1].reg
;
12676 reject_bad_reg (Rd
);
12677 reject_bad_reg (Rm
);
12679 if (Rd
<= 7 && Rm
<= 7
12680 && inst
.size_req
!= 4)
12682 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12683 inst
.instruction
|= Rd
;
12684 inst
.instruction
|= Rm
<< 3;
12686 else if (unified_syntax
)
12688 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12689 inst
.instruction
|= Rd
<< 8;
12690 inst
.instruction
|= Rm
<< 16;
12691 inst
.instruction
|= Rm
;
12694 inst
.error
= BAD_HIREG
;
12702 Rd
= inst
.operands
[0].reg
;
12703 Rm
= inst
.operands
[1].reg
;
12705 reject_bad_reg (Rd
);
12706 reject_bad_reg (Rm
);
12708 inst
.instruction
|= Rd
<< 8;
12709 inst
.instruction
|= Rm
;
12717 Rd
= inst
.operands
[0].reg
;
12718 Rs
= (inst
.operands
[1].present
12719 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
12720 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
12722 reject_bad_reg (Rd
);
12723 reject_bad_reg (Rs
);
12724 if (inst
.operands
[2].isreg
)
12725 reject_bad_reg (inst
.operands
[2].reg
);
12727 inst
.instruction
|= Rd
<< 8;
12728 inst
.instruction
|= Rs
<< 16;
12729 if (!inst
.operands
[2].isreg
)
12731 bfd_boolean narrow
;
12733 if ((inst
.instruction
& 0x00100000) != 0)
12734 narrow
= !in_it_block ();
12736 narrow
= in_it_block ();
12738 if (Rd
> 7 || Rs
> 7)
12741 if (inst
.size_req
== 4 || !unified_syntax
)
12744 if (inst
.reloc
.exp
.X_op
!= O_constant
12745 || inst
.reloc
.exp
.X_add_number
!= 0)
12748 /* Turn rsb #0 into 16-bit neg. We should probably do this via
12749 relaxation, but it doesn't seem worth the hassle. */
12752 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12753 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
12754 inst
.instruction
|= Rs
<< 3;
12755 inst
.instruction
|= Rd
;
12759 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12760 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12764 encode_thumb32_shifted_operand (2);
12770 if (warn_on_deprecated
12771 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12772 as_tsktsk (_("setend use is deprecated for ARMv8"));
12774 set_it_insn_type (OUTSIDE_IT_INSN
);
12775 if (inst
.operands
[0].imm
)
12776 inst
.instruction
|= 0x8;
12782 if (!inst
.operands
[1].present
)
12783 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
12785 if (unified_syntax
)
12787 bfd_boolean narrow
;
12790 switch (inst
.instruction
)
12793 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
12795 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
12797 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
12799 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
12803 if (THUMB_SETS_FLAGS (inst
.instruction
))
12804 narrow
= !in_it_block ();
12806 narrow
= in_it_block ();
12807 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12809 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
12811 if (inst
.operands
[2].isreg
12812 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
12813 || inst
.operands
[2].reg
> 7))
12815 if (inst
.size_req
== 4)
12818 reject_bad_reg (inst
.operands
[0].reg
);
12819 reject_bad_reg (inst
.operands
[1].reg
);
12823 if (inst
.operands
[2].isreg
)
12825 reject_bad_reg (inst
.operands
[2].reg
);
12826 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12827 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12828 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12829 inst
.instruction
|= inst
.operands
[2].reg
;
12831 /* PR 12854: Error on extraneous shifts. */
12832 constraint (inst
.operands
[2].shifted
,
12833 _("extraneous shift as part of operand to shift insn"));
12837 inst
.operands
[1].shifted
= 1;
12838 inst
.operands
[1].shift_kind
= shift_kind
;
12839 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
12840 ? T_MNEM_movs
: T_MNEM_mov
);
12841 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12842 encode_thumb32_shifted_operand (1);
12843 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
12844 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12849 if (inst
.operands
[2].isreg
)
12851 switch (shift_kind
)
12853 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12854 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12855 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12856 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12860 inst
.instruction
|= inst
.operands
[0].reg
;
12861 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12863 /* PR 12854: Error on extraneous shifts. */
12864 constraint (inst
.operands
[2].shifted
,
12865 _("extraneous shift as part of operand to shift insn"));
12869 switch (shift_kind
)
12871 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12872 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12873 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12876 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12877 inst
.instruction
|= inst
.operands
[0].reg
;
12878 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12884 constraint (inst
.operands
[0].reg
> 7
12885 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
12886 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12888 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
12890 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
12891 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
12892 _("source1 and dest must be same register"));
12894 switch (inst
.instruction
)
12896 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
12897 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
12898 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
12899 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
12903 inst
.instruction
|= inst
.operands
[0].reg
;
12904 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
12906 /* PR 12854: Error on extraneous shifts. */
12907 constraint (inst
.operands
[2].shifted
,
12908 _("extraneous shift as part of operand to shift insn"));
12912 switch (inst
.instruction
)
12914 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12915 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12916 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12917 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
12920 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12921 inst
.instruction
|= inst
.operands
[0].reg
;
12922 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12930 unsigned Rd
, Rn
, Rm
;
12932 Rd
= inst
.operands
[0].reg
;
12933 Rn
= inst
.operands
[1].reg
;
12934 Rm
= inst
.operands
[2].reg
;
12936 reject_bad_reg (Rd
);
12937 reject_bad_reg (Rn
);
12938 reject_bad_reg (Rm
);
12940 inst
.instruction
|= Rd
<< 8;
12941 inst
.instruction
|= Rn
<< 16;
12942 inst
.instruction
|= Rm
;
12948 unsigned Rd
, Rn
, Rm
;
12950 Rd
= inst
.operands
[0].reg
;
12951 Rm
= inst
.operands
[1].reg
;
12952 Rn
= inst
.operands
[2].reg
;
12954 reject_bad_reg (Rd
);
12955 reject_bad_reg (Rn
);
12956 reject_bad_reg (Rm
);
12958 inst
.instruction
|= Rd
<< 8;
12959 inst
.instruction
|= Rn
<< 16;
12960 inst
.instruction
|= Rm
;
12966 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12967 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
12968 _("SMC is not permitted on this architecture"));
12969 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
12970 _("expression too complex"));
12971 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12972 inst
.instruction
|= (value
& 0xf000) >> 12;
12973 inst
.instruction
|= (value
& 0x0ff0);
12974 inst
.instruction
|= (value
& 0x000f) << 16;
12975 /* PR gas/15623: SMC instructions must be last in an IT block. */
12976 set_it_insn_type_last ();
12982 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
12984 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
12985 inst
.instruction
|= (value
& 0x0fff);
12986 inst
.instruction
|= (value
& 0xf000) << 4;
12990 do_t_ssat_usat (int bias
)
12994 Rd
= inst
.operands
[0].reg
;
12995 Rn
= inst
.operands
[2].reg
;
12997 reject_bad_reg (Rd
);
12998 reject_bad_reg (Rn
);
13000 inst
.instruction
|= Rd
<< 8;
13001 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
13002 inst
.instruction
|= Rn
<< 16;
13004 if (inst
.operands
[3].present
)
13006 offsetT shift_amount
= inst
.reloc
.exp
.X_add_number
;
13008 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
13010 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
13011 _("expression too complex"));
13013 if (shift_amount
!= 0)
13015 constraint (shift_amount
> 31,
13016 _("shift expression is too large"));
13018 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
13019 inst
.instruction
|= 0x00200000; /* sh bit. */
13021 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
13022 inst
.instruction
|= (shift_amount
& 0x03) << 6;
/* Encode Thumb-2 SSAT: shared encoder with bias 1 (width is 1-based).
   NOTE(review): reconstructed from a mangled listing; verify.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
13038 Rd
= inst
.operands
[0].reg
;
13039 Rn
= inst
.operands
[2].reg
;
13041 reject_bad_reg (Rd
);
13042 reject_bad_reg (Rn
);
13044 inst
.instruction
|= Rd
<< 8;
13045 inst
.instruction
|= inst
.operands
[1].imm
- 1;
13046 inst
.instruction
|= Rn
<< 16;
13052 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
13053 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
13054 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
13055 || inst
.operands
[2].negative
,
13058 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
13060 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13061 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13062 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
13063 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
13069 if (!inst
.operands
[2].present
)
13070 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
13072 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
13073 || inst
.operands
[0].reg
== inst
.operands
[2].reg
13074 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
13077 inst
.instruction
|= inst
.operands
[0].reg
;
13078 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13079 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
13080 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
13086 unsigned Rd
, Rn
, Rm
;
13088 Rd
= inst
.operands
[0].reg
;
13089 Rn
= inst
.operands
[1].reg
;
13090 Rm
= inst
.operands
[2].reg
;
13092 reject_bad_reg (Rd
);
13093 reject_bad_reg (Rn
);
13094 reject_bad_reg (Rm
);
13096 inst
.instruction
|= Rd
<< 8;
13097 inst
.instruction
|= Rn
<< 16;
13098 inst
.instruction
|= Rm
;
13099 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
13107 Rd
= inst
.operands
[0].reg
;
13108 Rm
= inst
.operands
[1].reg
;
13110 reject_bad_reg (Rd
);
13111 reject_bad_reg (Rm
);
13113 if (inst
.instruction
<= 0xffff
13114 && inst
.size_req
!= 4
13115 && Rd
<= 7 && Rm
<= 7
13116 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
13118 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13119 inst
.instruction
|= Rd
;
13120 inst
.instruction
|= Rm
<< 3;
13122 else if (unified_syntax
)
13124 if (inst
.instruction
<= 0xffff)
13125 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13126 inst
.instruction
|= Rd
<< 8;
13127 inst
.instruction
|= Rm
;
13128 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
13132 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
13133 _("Thumb encoding does not support rotation"));
13134 constraint (1, BAD_HIREG
);
13141 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
13150 half
= (inst
.instruction
& 0x10) != 0;
13151 set_it_insn_type_last ();
13152 constraint (inst
.operands
[0].immisreg
,
13153 _("instruction requires register index"));
13155 Rn
= inst
.operands
[0].reg
;
13156 Rm
= inst
.operands
[0].imm
;
13158 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13159 constraint (Rn
== REG_SP
, BAD_SP
);
13160 reject_bad_reg (Rm
);
13162 constraint (!half
&& inst
.operands
[0].shifted
,
13163 _("instruction does not allow shifted index"));
13164 inst
.instruction
|= (Rn
<< 16) | Rm
;
13170 if (!inst
.operands
[0].present
)
13171 inst
.operands
[0].imm
= 0;
13173 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13175 constraint (inst
.size_req
== 2,
13176 _("immediate value out of range"));
13177 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13178 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13179 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13183 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13184 inst
.instruction
|= inst
.operands
[0].imm
;
13187 set_it_insn_type (NEUTRAL_IT_INSN
);
/* Encode Thumb-2 USAT: shared encoder with bias 0 (width is 0-based).
   NOTE(review): reconstructed from a mangled listing; verify.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13202 Rd
= inst
.operands
[0].reg
;
13203 Rn
= inst
.operands
[2].reg
;
13205 reject_bad_reg (Rd
);
13206 reject_bad_reg (Rn
);
13208 inst
.instruction
|= Rd
<< 8;
13209 inst
.instruction
|= inst
.operands
[1].imm
;
13210 inst
.instruction
|= Rn
<< 16;
13213 /* Neon instruction encoder helpers. */
13215 /* Encodings for the different types for various Neon opcodes. */
13217 /* An "invalid" code for the following tables. */
13220 struct neon_tab_entry
13223 unsigned float_or_poly
;
13224 unsigned scalar_or_imm
;
13227 /* Map overloaded Neon opcodes to their respective encodings. */
13228 #define NEON_ENC_TAB \
13229 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13230 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13231 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13232 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13233 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13234 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13235 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13236 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13237 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13238 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13239 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13240 /* Register variants of the following two instructions are encoded as
13241 vcge / vcgt with the operands reversed. */ \
13242 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13243 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13244 X(vfma, N_INV, 0x0000c10, N_INV), \
13245 X(vfms, N_INV, 0x0200c10, N_INV), \
13246 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13247 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13248 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13249 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13250 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13251 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13252 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13253 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13254 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13255 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13256 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13257 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13258 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13259 X(vshl, 0x0000400, N_INV, 0x0800510), \
13260 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13261 X(vand, 0x0000110, N_INV, 0x0800030), \
13262 X(vbic, 0x0100110, N_INV, 0x0800030), \
13263 X(veor, 0x1000110, N_INV, N_INV), \
13264 X(vorn, 0x0300110, N_INV, 0x0800010), \
13265 X(vorr, 0x0200110, N_INV, 0x0800010), \
13266 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13267 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13268 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13269 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13270 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13271 X(vst1, 0x0000000, 0x0800000, N_INV), \
13272 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13273 X(vst2, 0x0000100, 0x0800100, N_INV), \
13274 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13275 X(vst3, 0x0000200, 0x0800200, N_INV), \
13276 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13277 X(vst4, 0x0000300, 0x0800300, N_INV), \
13278 X(vmovn, 0x1b20200, N_INV, N_INV), \
13279 X(vtrn, 0x1b20080, N_INV, N_INV), \
13280 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13281 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13282 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13283 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13284 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13285 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13286 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13287 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13288 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13289 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13290 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13291 X(vseleq, 0xe000a00, N_INV, N_INV), \
13292 X(vselvs, 0xe100a00, N_INV, N_INV), \
13293 X(vselge, 0xe200a00, N_INV, N_INV), \
13294 X(vselgt, 0xe300a00, N_INV, N_INV), \
13295 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13296 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13297 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13298 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13299 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13300 X(aes, 0x3b00300, N_INV, N_INV), \
13301 X(sha3op, 0x2000c00, N_INV, N_INV), \
13302 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13303 X(sha2op, 0x3ba0380, N_INV, N_INV)
13307 #define X(OPC,I,F,S) N_MNEM_##OPC
13312 static const struct neon_tab_entry neon_enc_tab
[] =
13314 #define X(OPC,I,F,S) { (I), (F), (S) }
13319 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
13320 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13321 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13322 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13323 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13324 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13325 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13326 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13327 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13328 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13329 #define NEON_ENC_SINGLE_(X) \
13330 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13331 #define NEON_ENC_DOUBLE_(X) \
13332 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13333 #define NEON_ENC_FPV8_(X) \
13334 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13336 #define NEON_ENCODE(type, inst) \
13339 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13340 inst.is_neon = 1; \
13344 #define check_neon_suffixes \
13347 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13349 as_bad (_("invalid neon suffix for non neon instruction")); \
13355 /* Define shapes for instruction operands. The following mnemonic characters
13356 are used in this table:
13358 F - VFP S<n> register
13359 D - Neon D<n> register
13360 Q - Neon Q<n> register
13364 L - D<n> register list
13366 This table is used to generate various data:
13367 - enumerations of the form NS_DDR to be used as arguments to
13369 - a table classifying shapes into single, double, quad, mixed.
13370 - a table used to drive neon_select_shape. */
13372 #define NEON_SHAPE_DEF \
13373 X(3, (D, D, D), DOUBLE), \
13374 X(3, (Q, Q, Q), QUAD), \
13375 X(3, (D, D, I), DOUBLE), \
13376 X(3, (Q, Q, I), QUAD), \
13377 X(3, (D, D, S), DOUBLE), \
13378 X(3, (Q, Q, S), QUAD), \
13379 X(2, (D, D), DOUBLE), \
13380 X(2, (Q, Q), QUAD), \
13381 X(2, (D, S), DOUBLE), \
13382 X(2, (Q, S), QUAD), \
13383 X(2, (D, R), DOUBLE), \
13384 X(2, (Q, R), QUAD), \
13385 X(2, (D, I), DOUBLE), \
13386 X(2, (Q, I), QUAD), \
13387 X(3, (D, L, D), DOUBLE), \
13388 X(2, (D, Q), MIXED), \
13389 X(2, (Q, D), MIXED), \
13390 X(3, (D, Q, I), MIXED), \
13391 X(3, (Q, D, I), MIXED), \
13392 X(3, (Q, D, D), MIXED), \
13393 X(3, (D, Q, Q), MIXED), \
13394 X(3, (Q, Q, D), MIXED), \
13395 X(3, (Q, D, S), MIXED), \
13396 X(3, (D, Q, S), MIXED), \
13397 X(4, (D, D, D, I), DOUBLE), \
13398 X(4, (Q, Q, Q, I), QUAD), \
13399 X(4, (D, D, S, I), DOUBLE), \
13400 X(4, (Q, Q, S, I), QUAD), \
13401 X(2, (F, F), SINGLE), \
13402 X(3, (F, F, F), SINGLE), \
13403 X(2, (F, I), SINGLE), \
13404 X(2, (F, D), MIXED), \
13405 X(2, (D, F), MIXED), \
13406 X(3, (F, F, I), MIXED), \
13407 X(4, (R, R, F, F), SINGLE), \
13408 X(4, (F, F, R, R), SINGLE), \
13409 X(3, (D, R, R), DOUBLE), \
13410 X(3, (R, R, D), DOUBLE), \
13411 X(2, (S, R), SINGLE), \
13412 X(2, (R, S), SINGLE), \
13413 X(2, (F, R), SINGLE), \
13414 X(2, (R, F), SINGLE), \
13415 /* Half float shape supported so far. */\
13416 X (2, (H, D), MIXED), \
13417 X (2, (D, H), MIXED), \
13418 X (2, (H, F), MIXED), \
13419 X (2, (F, H), MIXED), \
13420 X (2, (H, H), HALF), \
13421 X (2, (H, R), HALF), \
13422 X (2, (R, H), HALF), \
13423 X (2, (H, I), HALF), \
13424 X (3, (H, H, H), HALF), \
13425 X (3, (H, F, I), MIXED), \
13426 X (3, (F, H, I), MIXED)
13428 #define S2(A,B) NS_##A##B
13429 #define S3(A,B,C) NS_##A##B##C
13430 #define S4(A,B,C,D) NS_##A##B##C##D
13432 #define X(N, L, C) S##N L
13445 enum neon_shape_class
13454 #define X(N, L, C) SC_##C
13456 static enum neon_shape_class neon_shape_class
[] =
13475 /* Register widths of above. */
13476 static unsigned neon_shape_el_size
[] =
13488 struct neon_shape_info
13491 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
13494 #define S2(A,B) { SE_##A, SE_##B }
13495 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13496 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13498 #define X(N, L, C) { N, S##N L }
13500 static struct neon_shape_info neon_shape_tab
[] =
13510 /* Bit masks used in type checking given instructions.
13511 'N_EQK' means the type must be the same as (or based on in some way) the key
13512 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13513 set, various other bits can be set as well in order to modify the meaning of
13514 the type constraint. */
13516 enum neon_type_mask
13540 N_KEY
= 0x1000000, /* Key element (main type specifier). */
13541 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
13542 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
13543 N_UNT
= 0x8000000, /* Must be explicitly untyped. */
13544 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
13545 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
13546 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
13547 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
13548 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
13549 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
13550 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
13552 N_MAX_NONSPECIAL
= N_P64
13555 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
13557 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
13558 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
13559 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
13560 #define N_S_32 (N_S8 | N_S16 | N_S32)
13561 #define N_F_16_32 (N_F16 | N_F32)
13562 #define N_SUF_32 (N_SU_32 | N_F_16_32)
13563 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
13564 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
13565 #define N_F_ALL (N_F16 | N_F32 | N_F64)
13567 /* Pass this as the first type argument to neon_check_type to ignore types
13569 #define N_IGNORE_TYPE (N_KEY | N_EQK)
13571 /* Select a "shape" for the current instruction (describing register types or
13572 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13573 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13574 function of operand parsing, so this function doesn't need to be called.
13575 Shapes should be listed in order of decreasing length. */
13577 static enum neon_shape
13578 neon_select_shape (enum neon_shape shape
, ...)
13581 enum neon_shape first_shape
= shape
;
13583 /* Fix missing optional operands. FIXME: we don't know at this point how
13584 many arguments we should have, so this makes the assumption that we have
13585 > 1. This is true of all current Neon opcodes, I think, but may not be
13586 true in the future. */
13587 if (!inst
.operands
[1].present
)
13588 inst
.operands
[1] = inst
.operands
[0];
13590 va_start (ap
, shape
);
13592 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
13597 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
13599 if (!inst
.operands
[j
].present
)
13605 switch (neon_shape_tab
[shape
].el
[j
])
13607 /* If a .f16, .16, .u16, .s16 type specifier is given over
13608 a VFP single precision register operand, it's essentially
13609 means only half of the register is used.
13611 If the type specifier is given after the mnemonics, the
13612 information is stored in inst.vectype. If the type specifier
13613 is given after register operand, the information is stored
13614 in inst.operands[].vectype.
13616 When there is only one type specifier, and all the register
13617 operands are the same type of hardware register, the type
13618 specifier applies to all register operands.
13620 If no type specifier is given, the shape is inferred from
13621 operand information.
13624 vadd.f16 s0, s1, s2: NS_HHH
13625 vabs.f16 s0, s1: NS_HH
13626 vmov.f16 s0, r1: NS_HR
13627 vmov.f16 r0, s1: NS_RH
13628 vcvt.f16 r0, s1: NS_RH
13629 vcvt.f16.s32 s2, s2, #29: NS_HFI
13630 vcvt.f16.s32 s2, s2: NS_HF
13633 if (!(inst
.operands
[j
].isreg
13634 && inst
.operands
[j
].isvec
13635 && inst
.operands
[j
].issingle
13636 && !inst
.operands
[j
].isquad
13637 && ((inst
.vectype
.elems
== 1
13638 && inst
.vectype
.el
[0].size
== 16)
13639 || (inst
.vectype
.elems
> 1
13640 && inst
.vectype
.el
[j
].size
== 16)
13641 || (inst
.vectype
.elems
== 0
13642 && inst
.operands
[j
].vectype
.type
!= NT_invtype
13643 && inst
.operands
[j
].vectype
.size
== 16))))
13648 if (!(inst
.operands
[j
].isreg
13649 && inst
.operands
[j
].isvec
13650 && inst
.operands
[j
].issingle
13651 && !inst
.operands
[j
].isquad
13652 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
13653 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
13654 || (inst
.vectype
.elems
== 0
13655 && (inst
.operands
[j
].vectype
.size
== 32
13656 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
13661 if (!(inst
.operands
[j
].isreg
13662 && inst
.operands
[j
].isvec
13663 && !inst
.operands
[j
].isquad
13664 && !inst
.operands
[j
].issingle
))
13669 if (!(inst
.operands
[j
].isreg
13670 && !inst
.operands
[j
].isvec
))
13675 if (!(inst
.operands
[j
].isreg
13676 && inst
.operands
[j
].isvec
13677 && inst
.operands
[j
].isquad
13678 && !inst
.operands
[j
].issingle
))
13683 if (!(!inst
.operands
[j
].isreg
13684 && !inst
.operands
[j
].isscalar
))
13689 if (!(!inst
.operands
[j
].isreg
13690 && inst
.operands
[j
].isscalar
))
13700 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
13701 /* We've matched all the entries in the shape table, and we don't
13702 have any left over operands which have not been matched. */
13708 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
13709 first_error (_("invalid instruction shape"));
13714 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13715 means the Q bit should be set). */
13718 neon_quad (enum neon_shape shape
)
13720 return neon_shape_class
[shape
] == SC_QUAD
;
13724 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
13727 /* Allow modification to be made to types which are constrained to be
13728 based on the key element, based on bits set alongside N_EQK. */
13729 if ((typebits
& N_EQK
) != 0)
13731 if ((typebits
& N_HLF
) != 0)
13733 else if ((typebits
& N_DBL
) != 0)
13735 if ((typebits
& N_SGN
) != 0)
13736 *g_type
= NT_signed
;
13737 else if ((typebits
& N_UNS
) != 0)
13738 *g_type
= NT_unsigned
;
13739 else if ((typebits
& N_INT
) != 0)
13740 *g_type
= NT_integer
;
13741 else if ((typebits
& N_FLT
) != 0)
13742 *g_type
= NT_float
;
13743 else if ((typebits
& N_SIZ
) != 0)
13744 *g_type
= NT_untyped
;
13748 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13749 operand type, i.e. the single type specified in a Neon instruction when it
13750 is the only one given. */
13752 static struct neon_type_el
13753 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
13755 struct neon_type_el dest
= *key
;
13757 gas_assert ((thisarg
& N_EQK
) != 0);
13759 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
13764 /* Convert Neon type and size into compact bitmask representation. */
13766 static enum neon_type_mask
13767 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
13774 case 8: return N_8
;
13775 case 16: return N_16
;
13776 case 32: return N_32
;
13777 case 64: return N_64
;
13785 case 8: return N_I8
;
13786 case 16: return N_I16
;
13787 case 32: return N_I32
;
13788 case 64: return N_I64
;
13796 case 16: return N_F16
;
13797 case 32: return N_F32
;
13798 case 64: return N_F64
;
13806 case 8: return N_P8
;
13807 case 16: return N_P16
;
13808 case 64: return N_P64
;
13816 case 8: return N_S8
;
13817 case 16: return N_S16
;
13818 case 32: return N_S32
;
13819 case 64: return N_S64
;
13827 case 8: return N_U8
;
13828 case 16: return N_U16
;
13829 case 32: return N_U32
;
13830 case 64: return N_U64
;
13841 /* Convert compact Neon bitmask type representation to a type and size. Only
13842 handles the case where a single bit is set in the mask. */
13845 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
13846 enum neon_type_mask mask
)
13848 if ((mask
& N_EQK
) != 0)
13851 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
13853 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
13855 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
13857 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
13862 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
13864 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
13865 *type
= NT_unsigned
;
13866 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
13867 *type
= NT_integer
;
13868 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
13869 *type
= NT_untyped
;
13870 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
13872 else if ((mask
& (N_F_ALL
)) != 0)
13880 /* Modify a bitmask of allowed types. This is only needed for type
13884 modify_types_allowed (unsigned allowed
, unsigned mods
)
13887 enum neon_el_type type
;
13893 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
13895 if (el_type_of_type_chk (&type
, &size
,
13896 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
13898 neon_modify_type_size (mods
, &type
, &size
);
13899 destmask
|= type_chk_of_el_type (type
, size
);
13906 /* Check type and return type classification.
13907 The manual states (paraphrase): If one datatype is given, it indicates the
13909 - the second operand, if there is one
13910 - the operand, if there is no second operand
13911 - the result, if there are no operands.
13912 This isn't quite good enough though, so we use a concept of a "key" datatype
13913 which is set on a per-instruction basis, which is the one which matters when
13914 only one data type is written.
13915 Note: this function has side-effects (e.g. filling in missing operands). All
13916 Neon instructions should call it before performing bit encoding. */
13918 static struct neon_type_el
13919 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
13922 unsigned i
, pass
, key_el
= 0;
13923 unsigned types
[NEON_MAX_TYPE_ELS
];
13924 enum neon_el_type k_type
= NT_invtype
;
13925 unsigned k_size
= -1u;
13926 struct neon_type_el badtype
= {NT_invtype
, -1};
13927 unsigned key_allowed
= 0;
13929 /* Optional registers in Neon instructions are always (not) in operand 1.
13930 Fill in the missing operand here, if it was omitted. */
13931 if (els
> 1 && !inst
.operands
[1].present
)
13932 inst
.operands
[1] = inst
.operands
[0];
13934 /* Suck up all the varargs. */
13936 for (i
= 0; i
< els
; i
++)
13938 unsigned thisarg
= va_arg (ap
, unsigned);
13939 if (thisarg
== N_IGNORE_TYPE
)
13944 types
[i
] = thisarg
;
13945 if ((thisarg
& N_KEY
) != 0)
13950 if (inst
.vectype
.elems
> 0)
13951 for (i
= 0; i
< els
; i
++)
13952 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
13954 first_error (_("types specified in both the mnemonic and operands"));
13958 /* Duplicate inst.vectype elements here as necessary.
13959 FIXME: No idea if this is exactly the same as the ARM assembler,
13960 particularly when an insn takes one register and one non-register
13962 if (inst
.vectype
.elems
== 1 && els
> 1)
13965 inst
.vectype
.elems
= els
;
13966 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
13967 for (j
= 0; j
< els
; j
++)
13969 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13972 else if (inst
.vectype
.elems
== 0 && els
> 0)
13975 /* No types were given after the mnemonic, so look for types specified
13976 after each operand. We allow some flexibility here; as long as the
13977 "key" operand has a type, we can infer the others. */
13978 for (j
= 0; j
< els
; j
++)
13979 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
13980 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
13982 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
13984 for (j
= 0; j
< els
; j
++)
13985 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
13986 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
13991 first_error (_("operand types can't be inferred"));
13995 else if (inst
.vectype
.elems
!= els
)
13997 first_error (_("type specifier has the wrong number of parts"));
14001 for (pass
= 0; pass
< 2; pass
++)
14003 for (i
= 0; i
< els
; i
++)
14005 unsigned thisarg
= types
[i
];
14006 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
14007 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
14008 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
14009 unsigned g_size
= inst
.vectype
.el
[i
].size
;
14011 /* Decay more-specific signed & unsigned types to sign-insensitive
14012 integer types if sign-specific variants are unavailable. */
14013 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
14014 && (types_allowed
& N_SU_ALL
) == 0)
14015 g_type
= NT_integer
;
14017 /* If only untyped args are allowed, decay any more specific types to
14018 them. Some instructions only care about signs for some element
14019 sizes, so handle that properly. */
14020 if (((types_allowed
& N_UNT
) == 0)
14021 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
14022 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
14023 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
14024 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
14025 g_type
= NT_untyped
;
14029 if ((thisarg
& N_KEY
) != 0)
14033 key_allowed
= thisarg
& ~N_KEY
;
14035 /* Check architecture constraint on FP16 extension. */
14037 && k_type
== NT_float
14038 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14040 inst
.error
= _(BAD_FP16
);
14047 if ((thisarg
& N_VFP
) != 0)
14049 enum neon_shape_el regshape
;
14050 unsigned regwidth
, match
;
14052 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
14055 first_error (_("invalid instruction shape"));
14058 regshape
= neon_shape_tab
[ns
].el
[i
];
14059 regwidth
= neon_shape_el_size
[regshape
];
14061 /* In VFP mode, operands must match register widths. If we
14062 have a key operand, use its width, else use the width of
14063 the current operand. */
14069 /* FP16 will use a single precision register. */
14070 if (regwidth
== 32 && match
== 16)
14072 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14076 inst
.error
= _(BAD_FP16
);
14081 if (regwidth
!= match
)
14083 first_error (_("operand size must match register width"));
14088 if ((thisarg
& N_EQK
) == 0)
14090 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
14092 if ((given_type
& types_allowed
) == 0)
14094 first_error (_("bad type in Neon instruction"));
14100 enum neon_el_type mod_k_type
= k_type
;
14101 unsigned mod_k_size
= k_size
;
14102 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
14103 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
14105 first_error (_("inconsistent types in Neon instruction"));
14113 return inst
.vectype
.el
[key_el
];
14116 /* Neon-style VFP instruction forwarding. */
14118 /* Thumb VFP instructions have 0xE in the condition field. */
14121 do_vfp_cond_or_thumb (void)
14126 inst
.instruction
|= 0xe0000000;
14128 inst
.instruction
|= inst
.cond
<< 28;
14131 /* Look up and encode a simple mnemonic, for use as a helper function for the
14132 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14133 etc. It is assumed that operand parsing has already been done, and that the
14134 operands are in the form expected by the given opcode (this isn't necessarily
14135 the same as the form in which they were parsed, hence some massaging must
14136 take place before this function is called).
14137 Checks current arch version against that in the looked-up opcode. */
14140 do_vfp_nsyn_opcode (const char *opname
)
14142 const struct asm_opcode
*opcode
;
14144 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
14149 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
14150 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
14157 inst
.instruction
= opcode
->tvalue
;
14158 opcode
->tencode ();
14162 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
14163 opcode
->aencode ();
14168 do_vfp_nsyn_add_sub (enum neon_shape rs
)
14170 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
14172 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14175 do_vfp_nsyn_opcode ("fadds");
14177 do_vfp_nsyn_opcode ("fsubs");
14179 /* ARMv8.2 fp16 instruction. */
14181 do_scalar_fp16_v82_encode ();
14186 do_vfp_nsyn_opcode ("faddd");
14188 do_vfp_nsyn_opcode ("fsubd");
14192 /* Check operand types to see if this is a VFP instruction, and if so call
14196 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
14198 enum neon_shape rs
;
14199 struct neon_type_el et
;
14204 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14205 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14209 rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14210 et
= neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14211 N_F_ALL
| N_KEY
| N_VFP
);
14218 if (et
.type
!= NT_invtype
)
14229 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
14231 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
14233 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14236 do_vfp_nsyn_opcode ("fmacs");
14238 do_vfp_nsyn_opcode ("fnmacs");
14240 /* ARMv8.2 fp16 instruction. */
14242 do_scalar_fp16_v82_encode ();
14247 do_vfp_nsyn_opcode ("fmacd");
14249 do_vfp_nsyn_opcode ("fnmacd");
14254 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
14256 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14258 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14261 do_vfp_nsyn_opcode ("ffmas");
14263 do_vfp_nsyn_opcode ("ffnmas");
14265 /* ARMv8.2 fp16 instruction. */
14267 do_scalar_fp16_v82_encode ();
14272 do_vfp_nsyn_opcode ("ffmad");
14274 do_vfp_nsyn_opcode ("ffnmad");
14279 do_vfp_nsyn_mul (enum neon_shape rs
)
14281 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14283 do_vfp_nsyn_opcode ("fmuls");
14285 /* ARMv8.2 fp16 instruction. */
14287 do_scalar_fp16_v82_encode ();
14290 do_vfp_nsyn_opcode ("fmuld");
14294 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14296 int is_neg
= (inst
.instruction
& 0x80) != 0;
14297 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_VFP
| N_KEY
);
14299 if (rs
== NS_FF
|| rs
== NS_HH
)
14302 do_vfp_nsyn_opcode ("fnegs");
14304 do_vfp_nsyn_opcode ("fabss");
14306 /* ARMv8.2 fp16 instruction. */
14308 do_scalar_fp16_v82_encode ();
14313 do_vfp_nsyn_opcode ("fnegd");
14315 do_vfp_nsyn_opcode ("fabsd");
14319 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14320 insns belong to Neon, and are handled elsewhere. */
14323 do_vfp_nsyn_ldm_stm (int is_dbmode
)
14325 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
14329 do_vfp_nsyn_opcode ("fldmdbs");
14331 do_vfp_nsyn_opcode ("fldmias");
14336 do_vfp_nsyn_opcode ("fstmdbs");
14338 do_vfp_nsyn_opcode ("fstmias");
14343 do_vfp_nsyn_sqrt (void)
14345 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14346 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14348 if (rs
== NS_FF
|| rs
== NS_HH
)
14350 do_vfp_nsyn_opcode ("fsqrts");
14352 /* ARMv8.2 fp16 instruction. */
14354 do_scalar_fp16_v82_encode ();
14357 do_vfp_nsyn_opcode ("fsqrtd");
14361 do_vfp_nsyn_div (void)
14363 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14364 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14365 N_F_ALL
| N_KEY
| N_VFP
);
14367 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14369 do_vfp_nsyn_opcode ("fdivs");
14371 /* ARMv8.2 fp16 instruction. */
14373 do_scalar_fp16_v82_encode ();
14376 do_vfp_nsyn_opcode ("fdivd");
14380 do_vfp_nsyn_nmul (void)
14382 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14383 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14384 N_F_ALL
| N_KEY
| N_VFP
);
14386 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14388 NEON_ENCODE (SINGLE
, inst
);
14389 do_vfp_sp_dyadic ();
14391 /* ARMv8.2 fp16 instruction. */
14393 do_scalar_fp16_v82_encode ();
14397 NEON_ENCODE (DOUBLE
, inst
);
14398 do_vfp_dp_rd_rn_rm ();
14400 do_vfp_cond_or_thumb ();
14405 do_vfp_nsyn_cmp (void)
14407 enum neon_shape rs
;
14408 if (inst
.operands
[1].isreg
)
14410 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14411 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14413 if (rs
== NS_FF
|| rs
== NS_HH
)
14415 NEON_ENCODE (SINGLE
, inst
);
14416 do_vfp_sp_monadic ();
14420 NEON_ENCODE (DOUBLE
, inst
);
14421 do_vfp_dp_rd_rm ();
14426 rs
= neon_select_shape (NS_HI
, NS_FI
, NS_DI
, NS_NULL
);
14427 neon_check_type (2, rs
, N_F_ALL
| N_KEY
| N_VFP
, N_EQK
);
14429 switch (inst
.instruction
& 0x0fffffff)
14432 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
14435 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
14441 if (rs
== NS_FI
|| rs
== NS_HI
)
14443 NEON_ENCODE (SINGLE
, inst
);
14444 do_vfp_sp_compare_z ();
14448 NEON_ENCODE (DOUBLE
, inst
);
14452 do_vfp_cond_or_thumb ();
14454 /* ARMv8.2 fp16 instruction. */
14455 if (rs
== NS_HI
|| rs
== NS_HH
)
14456 do_scalar_fp16_v82_encode ();
14460 nsyn_insert_sp (void)
14462 inst
.operands
[1] = inst
.operands
[0];
14463 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
14464 inst
.operands
[0].reg
= REG_SP
;
14465 inst
.operands
[0].isreg
= 1;
14466 inst
.operands
[0].writeback
= 1;
14467 inst
.operands
[0].present
= 1;
14471 do_vfp_nsyn_push (void)
14475 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14476 _("register list must contain at least 1 and at most 16 "
14479 if (inst
.operands
[1].issingle
)
14480 do_vfp_nsyn_opcode ("fstmdbs");
14482 do_vfp_nsyn_opcode ("fstmdbd");
14486 do_vfp_nsyn_pop (void)
14490 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
14491 _("register list must contain at least 1 and at most 16 "
14494 if (inst
.operands
[1].issingle
)
14495 do_vfp_nsyn_opcode ("fldmias");
14497 do_vfp_nsyn_opcode ("fldmiad");
14500 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14501 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14504 neon_dp_fixup (struct arm_it
* insn
)
14506 unsigned int i
= insn
->instruction
;
14511 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14522 insn
->instruction
= i
;
14525 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
14529 neon_logbits (unsigned x
)
14531 return ffs (x
) - 4;
14534 #define LOW4(R) ((R) & 0xf)
14535 #define HI1(R) (((R) >> 4) & 1)
14537 /* Encode insns with bit pattern:
14539 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14540 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14542 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14543 different meaning for some instruction. */
14546 neon_three_same (int isquad
, int ubit
, int size
)
14548 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14549 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14550 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14551 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14552 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14553 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14554 inst
.instruction
|= (isquad
!= 0) << 6;
14555 inst
.instruction
|= (ubit
!= 0) << 24;
14557 inst
.instruction
|= neon_logbits (size
) << 20;
14559 neon_dp_fixup (&inst
);
14562 /* Encode instructions of the form:
14564 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14565 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14567 Don't write size if SIZE == -1. */
14570 neon_two_same (int qbit
, int ubit
, int size
)
14572 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14573 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14574 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14575 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14576 inst
.instruction
|= (qbit
!= 0) << 6;
14577 inst
.instruction
|= (ubit
!= 0) << 24;
14580 inst
.instruction
|= neon_logbits (size
) << 18;
14582 neon_dp_fixup (&inst
);
14585 /* Neon instruction encoders, in approximate order of appearance. */
14588 do_neon_dyadic_i_su (void)
14590 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14591 struct neon_type_el et
= neon_check_type (3, rs
,
14592 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
14593 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14597 do_neon_dyadic_i64_su (void)
14599 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14600 struct neon_type_el et
= neon_check_type (3, rs
,
14601 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14602 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14606 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
14609 unsigned size
= et
.size
>> 3;
14610 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14611 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14612 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14613 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14614 inst
.instruction
|= (isquad
!= 0) << 6;
14615 inst
.instruction
|= immbits
<< 16;
14616 inst
.instruction
|= (size
>> 3) << 7;
14617 inst
.instruction
|= (size
& 0x7) << 19;
14619 inst
.instruction
|= (uval
!= 0) << 24;
14621 neon_dp_fixup (&inst
);
14625 do_neon_shl_imm (void)
14627 if (!inst
.operands
[2].isreg
)
14629 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14630 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
14631 int imm
= inst
.operands
[2].imm
;
14633 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14634 _("immediate out of range for shift"));
14635 NEON_ENCODE (IMMED
, inst
);
14636 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
14640 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14641 struct neon_type_el et
= neon_check_type (3, rs
,
14642 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14645 /* VSHL/VQSHL 3-register variants have syntax such as:
14647 whereas other 3-register operations encoded by neon_three_same have
14650 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14652 tmp
= inst
.operands
[2].reg
;
14653 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14654 inst
.operands
[1].reg
= tmp
;
14655 NEON_ENCODE (INTEGER
, inst
);
14656 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14661 do_neon_qshl_imm (void)
14663 if (!inst
.operands
[2].isreg
)
14665 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14666 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
14667 int imm
= inst
.operands
[2].imm
;
14669 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
14670 _("immediate out of range for shift"));
14671 NEON_ENCODE (IMMED
, inst
);
14672 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
14676 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14677 struct neon_type_el et
= neon_check_type (3, rs
,
14678 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
14681 /* See note in do_neon_shl_imm. */
14682 tmp
= inst
.operands
[2].reg
;
14683 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14684 inst
.operands
[1].reg
= tmp
;
14685 NEON_ENCODE (INTEGER
, inst
);
14686 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14691 do_neon_rshl (void)
14693 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14694 struct neon_type_el et
= neon_check_type (3, rs
,
14695 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
14698 tmp
= inst
.operands
[2].reg
;
14699 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
14700 inst
.operands
[1].reg
= tmp
;
14701 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
14705 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
14707 /* Handle .I8 pseudo-instructions. */
14710 /* Unfortunately, this will make everything apart from zero out-of-range.
14711 FIXME is this the intended semantics? There doesn't seem much point in
14712 accepting .I8 if so. */
14713 immediate
|= immediate
<< 8;
14719 if (immediate
== (immediate
& 0x000000ff))
14721 *immbits
= immediate
;
14724 else if (immediate
== (immediate
& 0x0000ff00))
14726 *immbits
= immediate
>> 8;
14729 else if (immediate
== (immediate
& 0x00ff0000))
14731 *immbits
= immediate
>> 16;
14734 else if (immediate
== (immediate
& 0xff000000))
14736 *immbits
= immediate
>> 24;
14739 if ((immediate
& 0xffff) != (immediate
>> 16))
14740 goto bad_immediate
;
14741 immediate
&= 0xffff;
14744 if (immediate
== (immediate
& 0x000000ff))
14746 *immbits
= immediate
;
14749 else if (immediate
== (immediate
& 0x0000ff00))
14751 *immbits
= immediate
>> 8;
14756 first_error (_("immediate value out of range"));
14761 do_neon_logic (void)
14763 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
14765 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14766 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14767 /* U bit and size field were set as part of the bitmask. */
14768 NEON_ENCODE (INTEGER
, inst
);
14769 neon_three_same (neon_quad (rs
), 0, -1);
14773 const int three_ops_form
= (inst
.operands
[2].present
14774 && !inst
.operands
[2].isreg
);
14775 const int immoperand
= (three_ops_form
? 2 : 1);
14776 enum neon_shape rs
= (three_ops_form
14777 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
14778 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
14779 struct neon_type_el et
= neon_check_type (2, rs
,
14780 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
14781 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
14785 if (et
.type
== NT_invtype
)
14788 if (three_ops_form
)
14789 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
14790 _("first and second operands shall be the same register"));
14792 NEON_ENCODE (IMMED
, inst
);
14794 immbits
= inst
.operands
[immoperand
].imm
;
14797 /* .i64 is a pseudo-op, so the immediate must be a repeating
14799 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
14800 inst
.operands
[immoperand
].reg
: 0))
14802 /* Set immbits to an invalid constant. */
14803 immbits
= 0xdeadbeef;
14810 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14814 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14818 /* Pseudo-instruction for VBIC. */
14819 neon_invert_size (&immbits
, 0, et
.size
);
14820 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14824 /* Pseudo-instruction for VORR. */
14825 neon_invert_size (&immbits
, 0, et
.size
);
14826 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
14836 inst
.instruction
|= neon_quad (rs
) << 6;
14837 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14838 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14839 inst
.instruction
|= cmode
<< 8;
14840 neon_write_immbits (immbits
);
14842 neon_dp_fixup (&inst
);
14847 do_neon_bitfield (void)
14849 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14850 neon_check_type (3, rs
, N_IGNORE_TYPE
);
14851 neon_three_same (neon_quad (rs
), 0, -1);
14855 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
14858 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
14859 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
14861 if (et
.type
== NT_float
)
14863 NEON_ENCODE (FLOAT
, inst
);
14864 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
14868 NEON_ENCODE (INTEGER
, inst
);
14869 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
14874 do_neon_dyadic_if_su (void)
14876 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14880 do_neon_dyadic_if_su_d (void)
14882 /* This version only allow D registers, but that constraint is enforced during
14883 operand parsing so we don't need to do anything extra here. */
14884 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
14888 do_neon_dyadic_if_i_d (void)
14890 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14891 affected if we specify unsigned args. */
14892 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
14895 enum vfp_or_neon_is_neon_bits
14898 NEON_CHECK_ARCH
= 2,
14899 NEON_CHECK_ARCH8
= 4
14902 /* Call this function if an instruction which may have belonged to the VFP or
14903 Neon instruction sets, but turned out to be a Neon instruction (due to the
14904 operand types involved, etc.). We have to check and/or fix-up a couple of
14907 - Make sure the user hasn't attempted to make a Neon instruction
14909 - Alter the value in the condition code field if necessary.
14910 - Make sure that the arch supports Neon instructions.
14912 Which of these operations take place depends on bits from enum
14913 vfp_or_neon_is_neon_bits.
14915 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14916 current instruction's condition is COND_ALWAYS, the condition field is
14917 changed to inst.uncond_value. This is necessary because instructions shared
14918 between VFP and Neon may be conditional for the VFP variants only, and the
14919 unconditional Neon version must have, e.g., 0xF in the condition field. */
14922 vfp_or_neon_is_neon (unsigned check
)
14924 /* Conditions are always legal in Thumb mode (IT blocks). */
14925 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
14927 if (inst
.cond
!= COND_ALWAYS
)
14929 first_error (_(BAD_COND
));
14932 if (inst
.uncond_value
!= -1)
14933 inst
.instruction
|= inst
.uncond_value
<< 28;
14936 if ((check
& NEON_CHECK_ARCH
)
14937 && !mark_feature_used (&fpu_neon_ext_v1
))
14939 first_error (_(BAD_FPU
));
14943 if ((check
& NEON_CHECK_ARCH8
)
14944 && !mark_feature_used (&fpu_neon_ext_armv8
))
14946 first_error (_(BAD_FPU
));
14954 do_neon_addsub_if_i (void)
14956 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
14959 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14962 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14963 affected if we specify unsigned args. */
14964 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
14967 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14969 V<op> A,B (A is operand 0, B is operand 2)
14974 so handle that case specially. */
14977 neon_exchange_operands (void)
14979 if (inst
.operands
[1].present
)
14981 void *scratch
= xmalloc (sizeof (inst
.operands
[0]));
14983 /* Swap operands[1] and operands[2]. */
14984 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
14985 inst
.operands
[1] = inst
.operands
[2];
14986 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
14991 inst
.operands
[1] = inst
.operands
[2];
14992 inst
.operands
[2] = inst
.operands
[0];
14997 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
14999 if (inst
.operands
[2].isreg
)
15002 neon_exchange_operands ();
15003 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
15007 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15008 struct neon_type_el et
= neon_check_type (2, rs
,
15009 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
15011 NEON_ENCODE (IMMED
, inst
);
15012 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15013 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15014 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15015 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15016 inst
.instruction
|= neon_quad (rs
) << 6;
15017 inst
.instruction
|= (et
.type
== NT_float
) << 10;
15018 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15020 neon_dp_fixup (&inst
);
15027 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, FALSE
);
15031 do_neon_cmp_inv (void)
15033 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, TRUE
);
15039 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.

   Dot Product instructions are similar to multiply instructions except elsize
   should always be 32.

   This function translates SCALAR, which is GAS's internal encoding of indexed
   scalar register, to raw encoding. There is also register and index range
   check based on ELSIZE.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
        goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
        goto bad_scalar;
      return regno | (elno << 4);

    bad_scalar:
    default:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
15081 /* Encode multiply / multiply-accumulate scalar instructions. */
15084 neon_mul_mac (struct neon_type_el et
, int ubit
)
15088 /* Give a more helpful error message if we have an invalid type. */
15089 if (et
.type
== NT_invtype
)
15092 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
15093 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15094 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15095 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15096 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15097 inst
.instruction
|= LOW4 (scalar
);
15098 inst
.instruction
|= HI1 (scalar
) << 5;
15099 inst
.instruction
|= (et
.type
== NT_float
) << 8;
15100 inst
.instruction
|= neon_logbits (et
.size
) << 20;
15101 inst
.instruction
|= (ubit
!= 0) << 24;
15103 neon_dp_fixup (&inst
);
15107 do_neon_mac_maybe_scalar (void)
15109 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
15112 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15115 if (inst
.operands
[2].isscalar
)
15117 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15118 struct neon_type_el et
= neon_check_type (3, rs
,
15119 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F_16_32
| N_KEY
);
15120 NEON_ENCODE (SCALAR
, inst
);
15121 neon_mul_mac (et
, neon_quad (rs
));
15125 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15126 affected if we specify unsigned args. */
15127 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15132 do_neon_fmac (void)
15134 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
15137 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15140 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
15146 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15147 struct neon_type_el et
= neon_check_type (3, rs
,
15148 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
15149 neon_three_same (neon_quad (rs
), 0, et
.size
);
15152 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15153 same types as the MAC equivalents. The polynomial type for this instruction
15154 is encoded the same as the integer type. */
15159 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
15162 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15165 if (inst
.operands
[2].isscalar
)
15166 do_neon_mac_maybe_scalar ();
15168 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F16
| N_F32
| N_P8
, 0);
15172 do_neon_qdmulh (void)
15174 if (inst
.operands
[2].isscalar
)
15176 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15177 struct neon_type_el et
= neon_check_type (3, rs
,
15178 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15179 NEON_ENCODE (SCALAR
, inst
);
15180 neon_mul_mac (et
, neon_quad (rs
));
15184 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15185 struct neon_type_el et
= neon_check_type (3, rs
,
15186 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15187 NEON_ENCODE (INTEGER
, inst
);
15188 /* The U bit (rounding) comes from bit mask. */
15189 neon_three_same (neon_quad (rs
), 0, et
.size
);
15194 do_neon_qrdmlah (void)
15196 /* Check we're on the correct architecture. */
15197 if (!mark_feature_used (&fpu_neon_ext_armv8
))
15199 _("instruction form not available on this architecture.");
15200 else if (!mark_feature_used (&fpu_neon_ext_v8_1
))
15202 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
15203 record_feature_use (&fpu_neon_ext_v8_1
);
15206 if (inst
.operands
[2].isscalar
)
15208 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
15209 struct neon_type_el et
= neon_check_type (3, rs
,
15210 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15211 NEON_ENCODE (SCALAR
, inst
);
15212 neon_mul_mac (et
, neon_quad (rs
));
15216 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15217 struct neon_type_el et
= neon_check_type (3, rs
,
15218 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
15219 NEON_ENCODE (INTEGER
, inst
);
15220 /* The U bit (rounding) comes from bit mask. */
15221 neon_three_same (neon_quad (rs
), 0, et
.size
);
15226 do_neon_fcmp_absolute (void)
15228 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15229 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15230 N_F_16_32
| N_KEY
);
15231 /* Size field comes from bit mask. */
15232 neon_three_same (neon_quad (rs
), 1, et
.size
== 16 ? (int) et
.size
: -1);
/* VACLE/VACLT: exchange operands, then encode as VACGE/VACGT.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
15243 do_neon_step (void)
15245 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15246 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
15247 N_F_16_32
| N_KEY
);
15248 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
15252 do_neon_abs_neg (void)
15254 enum neon_shape rs
;
15255 struct neon_type_el et
;
15257 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
15260 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15263 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
15264 et
= neon_check_type (2, rs
, N_EQK
, N_S_32
| N_F_16_32
| N_KEY
);
15266 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15267 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15268 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15269 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15270 inst
.instruction
|= neon_quad (rs
) << 6;
15271 inst
.instruction
|= (et
.type
== NT_float
) << 10;
15272 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15274 neon_dp_fixup (&inst
);
15280 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15281 struct neon_type_el et
= neon_check_type (2, rs
,
15282 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15283 int imm
= inst
.operands
[2].imm
;
15284 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15285 _("immediate out of range for insert"));
15286 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15292 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15293 struct neon_type_el et
= neon_check_type (2, rs
,
15294 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
15295 int imm
= inst
.operands
[2].imm
;
15296 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15297 _("immediate out of range for insert"));
15298 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
15302 do_neon_qshlu_imm (void)
15304 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15305 struct neon_type_el et
= neon_check_type (2, rs
,
15306 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
15307 int imm
= inst
.operands
[2].imm
;
15308 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15309 _("immediate out of range for shift"));
15310 /* Only encodes the 'U present' variant of the instruction.
15311 In this case, signed types have OP (bit 8) set to 0.
15312 Unsigned types have OP set to 1. */
15313 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
15314 /* The rest of the bits are the same as other immediate shifts. */
15315 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15319 do_neon_qmovn (void)
15321 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15322 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15323 /* Saturating move where operands can be signed or unsigned, and the
15324 destination has the same signedness. */
15325 NEON_ENCODE (INTEGER
, inst
);
15326 if (et
.type
== NT_unsigned
)
15327 inst
.instruction
|= 0xc0;
15329 inst
.instruction
|= 0x80;
15330 neon_two_same (0, 1, et
.size
/ 2);
15334 do_neon_qmovun (void)
15336 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15337 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15338 /* Saturating move with unsigned results. Operands must be signed. */
15339 NEON_ENCODE (INTEGER
, inst
);
15340 neon_two_same (0, 1, et
.size
/ 2);
15344 do_neon_rshift_sat_narrow (void)
15346 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15347 or unsigned. If operands are unsigned, results must also be unsigned. */
15348 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15349 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
15350 int imm
= inst
.operands
[2].imm
;
15351 /* This gets the bounds check, size encoding and immediate bits calculation
15355 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15356 VQMOVN.I<size> <Dd>, <Qm>. */
15359 inst
.operands
[2].present
= 0;
15360 inst
.instruction
= N_MNEM_vqmovn
;
15365 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15366 _("immediate out of range"));
15367 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
15371 do_neon_rshift_sat_narrow_u (void)
15373 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15374 or unsigned. If operands are unsigned, results must also be unsigned. */
15375 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15376 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
15377 int imm
= inst
.operands
[2].imm
;
15378 /* This gets the bounds check, size encoding and immediate bits calculation
15382 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15383 VQMOVUN.I<size> <Dd>, <Qm>. */
15386 inst
.operands
[2].present
= 0;
15387 inst
.instruction
= N_MNEM_vqmovun
;
15392 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15393 _("immediate out of range"));
15394 /* FIXME: The manual is kind of unclear about what value U should have in
15395 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15397 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
15401 do_neon_movn (void)
15403 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
15404 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15405 NEON_ENCODE (INTEGER
, inst
);
15406 neon_two_same (0, 1, et
.size
/ 2);
15410 do_neon_rshift_narrow (void)
15412 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
15413 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
15414 int imm
= inst
.operands
[2].imm
;
15415 /* This gets the bounds check, size encoding and immediate bits calculation
15419 /* If immediate is zero then we are a pseudo-instruction for
15420 VMOVN.I<size> <Dd>, <Qm> */
15423 inst
.operands
[2].present
= 0;
15424 inst
.instruction
= N_MNEM_vmovn
;
15429 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
15430 _("immediate out of range for narrowing operation"));
15431 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
15435 do_neon_shll (void)
15437 /* FIXME: Type checking when lengthening. */
15438 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
15439 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
15440 unsigned imm
= inst
.operands
[2].imm
;
15442 if (imm
== et
.size
)
15444 /* Maximum shift variant. */
15445 NEON_ENCODE (INTEGER
, inst
);
15446 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15447 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15448 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15449 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15450 inst
.instruction
|= neon_logbits (et
.size
) << 18;
15452 neon_dp_fixup (&inst
);
15456 /* A more-specific type check for non-max versions. */
15457 et
= neon_check_type (2, NS_QDI
,
15458 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
15459 NEON_ENCODE (IMMED
, inst
);
15460 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

#define CVT_FLAVOUR_VAR							      \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
     Compared with single/double precision variants, only the co-processor    \
     field is different, so the encoding flow is reused here.  */	      \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)

#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15516 static enum neon_cvt_flavour
15517 get_neon_cvt_flavour (enum neon_shape rs
)
15519 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
15520 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
15521 if (et.type != NT_invtype) \
15523 inst.error = NULL; \
15524 return (neon_cvt_flavour_##C); \
15527 struct neon_type_el et
;
15528 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
15529 || rs
== NS_FF
) ? N_VFP
: 0;
15530 /* The instruction versions which take an immediate take one register
15531 argument, which is extended to the width of the full register. Thus the
15532 "source" and "destination" registers must have the same width. Hack that
15533 here by making the size equal to the key (wider, in this case) operand. */
15534 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
15538 return neon_cvt_flavour_invalid
;
15553 /* Neon-syntax VFP conversions. */
15556 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
15558 const char *opname
= 0;
15560 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
15561 || rs
== NS_FHI
|| rs
== NS_HFI
)
15563 /* Conversions with immediate bitshift. */
15564 const char *enc
[] =
15566 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15572 if (flavour
< (int) ARRAY_SIZE (enc
))
15574 opname
= enc
[flavour
];
15575 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15576 _("operands 0 and 1 must be the same register"));
15577 inst
.operands
[1] = inst
.operands
[2];
15578 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
15583 /* Conversions without bitshift. */
15584 const char *enc
[] =
15586 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15592 if (flavour
< (int) ARRAY_SIZE (enc
))
15593 opname
= enc
[flavour
];
15597 do_vfp_nsyn_opcode (opname
);
15599 /* ARMv8.2 fp16 VCVT instruction. */
15600 if (flavour
== neon_cvt_flavour_s32_f16
15601 || flavour
== neon_cvt_flavour_u32_f16
15602 || flavour
== neon_cvt_flavour_f16_u32
15603 || flavour
== neon_cvt_flavour_f16_s32
)
15604 do_scalar_fp16_v82_encode ();
15608 do_vfp_nsyn_cvtz (void)
15610 enum neon_shape rs
= neon_select_shape (NS_FH
, NS_FF
, NS_FD
, NS_NULL
);
15611 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15612 const char *enc
[] =
15614 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15620 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
15621 do_vfp_nsyn_opcode (enc
[flavour
]);
15625 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
15626 enum neon_cvt_mode mode
)
15631 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15632 D register operands. */
15633 if (flavour
== neon_cvt_flavour_s32_f64
15634 || flavour
== neon_cvt_flavour_u32_f64
)
15635 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15638 if (flavour
== neon_cvt_flavour_s32_f16
15639 || flavour
== neon_cvt_flavour_u32_f16
)
15640 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
15643 set_it_insn_type (OUTSIDE_IT_INSN
);
15647 case neon_cvt_flavour_s32_f64
:
15651 case neon_cvt_flavour_s32_f32
:
15655 case neon_cvt_flavour_s32_f16
:
15659 case neon_cvt_flavour_u32_f64
:
15663 case neon_cvt_flavour_u32_f32
:
15667 case neon_cvt_flavour_u32_f16
:
15672 first_error (_("invalid instruction shape"));
15678 case neon_cvt_mode_a
: rm
= 0; break;
15679 case neon_cvt_mode_n
: rm
= 1; break;
15680 case neon_cvt_mode_p
: rm
= 2; break;
15681 case neon_cvt_mode_m
: rm
= 3; break;
15682 default: first_error (_("invalid rounding mode")); return;
15685 NEON_ENCODE (FPV8
, inst
);
15686 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
15687 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
15688 inst
.instruction
|= sz
<< 8;
15690 /* ARMv8.2 fp16 VCVT instruction. */
15691 if (flavour
== neon_cvt_flavour_s32_f16
15692 ||flavour
== neon_cvt_flavour_u32_f16
)
15693 do_scalar_fp16_v82_encode ();
15694 inst
.instruction
|= op
<< 7;
15695 inst
.instruction
|= rm
<< 16;
15696 inst
.instruction
|= 0xf0000000;
15697 inst
.is_neon
= TRUE
;
15701 do_neon_cvt_1 (enum neon_cvt_mode mode
)
15703 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
15704 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
,
15705 NS_FH
, NS_HF
, NS_FHI
, NS_HFI
,
15707 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
15709 if (flavour
== neon_cvt_flavour_invalid
)
15712 /* PR11109: Handle round-to-zero for VCVT conversions. */
15713 if (mode
== neon_cvt_mode_z
15714 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
15715 && (flavour
== neon_cvt_flavour_s16_f16
15716 || flavour
== neon_cvt_flavour_u16_f16
15717 || flavour
== neon_cvt_flavour_s32_f32
15718 || flavour
== neon_cvt_flavour_u32_f32
15719 || flavour
== neon_cvt_flavour_s32_f64
15720 || flavour
== neon_cvt_flavour_u32_f64
)
15721 && (rs
== NS_FD
|| rs
== NS_FF
))
15723 do_vfp_nsyn_cvtz ();
15727 /* ARMv8.2 fp16 VCVT conversions. */
15728 if (mode
== neon_cvt_mode_z
15729 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
)
15730 && (flavour
== neon_cvt_flavour_s32_f16
15731 || flavour
== neon_cvt_flavour_u32_f16
)
15734 do_vfp_nsyn_cvtz ();
15735 do_scalar_fp16_v82_encode ();
15739 /* VFP rather than Neon conversions. */
15740 if (flavour
>= neon_cvt_flavour_first_fp
)
15742 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15743 do_vfp_nsyn_cvt (rs
, flavour
);
15745 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15756 unsigned enctab
[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
15757 0x0000100, 0x1000100, 0x0, 0x1000000};
15759 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15762 /* Fixed-point conversion with #0 immediate is encoded as an
15763 integer conversion. */
15764 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
15766 NEON_ENCODE (IMMED
, inst
);
15767 if (flavour
!= neon_cvt_flavour_invalid
)
15768 inst
.instruction
|= enctab
[flavour
];
15769 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15770 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15771 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15772 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15773 inst
.instruction
|= neon_quad (rs
) << 6;
15774 inst
.instruction
|= 1 << 21;
15775 if (flavour
< neon_cvt_flavour_s16_f16
)
15777 inst
.instruction
|= 1 << 21;
15778 immbits
= 32 - inst
.operands
[2].imm
;
15779 inst
.instruction
|= immbits
<< 16;
15783 inst
.instruction
|= 3 << 20;
15784 immbits
= 16 - inst
.operands
[2].imm
;
15785 inst
.instruction
|= immbits
<< 16;
15786 inst
.instruction
&= ~(1 << 9);
15789 neon_dp_fixup (&inst
);
15795 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
15797 NEON_ENCODE (FLOAT
, inst
);
15798 set_it_insn_type (OUTSIDE_IT_INSN
);
15800 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
15803 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15804 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15805 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15806 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15807 inst
.instruction
|= neon_quad (rs
) << 6;
15808 inst
.instruction
|= (flavour
== neon_cvt_flavour_u16_f16
15809 || flavour
== neon_cvt_flavour_u32_f32
) << 7;
15810 inst
.instruction
|= mode
<< 8;
15811 if (flavour
== neon_cvt_flavour_u16_f16
15812 || flavour
== neon_cvt_flavour_s16_f16
)
15813 /* Mask off the original size bits and reencode them. */
15814 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff) | (1 << 18));
15817 inst
.instruction
|= 0xfc000000;
15819 inst
.instruction
|= 0xf0000000;
15825 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080,
15826 0x100, 0x180, 0x0, 0x080};
15828 NEON_ENCODE (INTEGER
, inst
);
15830 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
15833 if (flavour
!= neon_cvt_flavour_invalid
)
15834 inst
.instruction
|= enctab
[flavour
];
15836 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15837 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15838 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15839 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15840 inst
.instruction
|= neon_quad (rs
) << 6;
15841 if (flavour
>= neon_cvt_flavour_s16_f16
15842 && flavour
<= neon_cvt_flavour_f16_u16
)
15843 /* Half precision. */
15844 inst
.instruction
|= 1 << 18;
15846 inst
.instruction
|= 2 << 18;
15848 neon_dp_fixup (&inst
);
15853 /* Half-precision conversions for Advanced SIMD -- neon. */
15858 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
15860 as_bad (_("operand size must match register width"));
15865 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
15867 as_bad (_("operand size must match register width"));
15872 inst
.instruction
= 0x3b60600;
15874 inst
.instruction
= 0x3b60700;
15876 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15877 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15878 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15879 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15880 neon_dp_fixup (&inst
);
15884 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
15885 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
15886 do_vfp_nsyn_cvt (rs
, flavour
);
15888 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
15893 do_neon_cvtr (void)
15895 do_neon_cvt_1 (neon_cvt_mode_x
);
15901 do_neon_cvt_1 (neon_cvt_mode_z
);
15905 do_neon_cvta (void)
15907 do_neon_cvt_1 (neon_cvt_mode_a
);
15911 do_neon_cvtn (void)
15913 do_neon_cvt_1 (neon_cvt_mode_n
);
15917 do_neon_cvtp (void)
15919 do_neon_cvt_1 (neon_cvt_mode_p
);
15923 do_neon_cvtm (void)
15925 do_neon_cvt_1 (neon_cvt_mode_m
);
15929 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
15932 mark_feature_used (&fpu_vfp_ext_armv8
);
15934 encode_arm_vfp_reg (inst
.operands
[0].reg
,
15935 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
15936 encode_arm_vfp_reg (inst
.operands
[1].reg
,
15937 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
15938 inst
.instruction
|= to
? 0x10000 : 0;
15939 inst
.instruction
|= t
? 0x80 : 0;
15940 inst
.instruction
|= is_double
? 0x100 : 0;
15941 do_vfp_cond_or_thumb ();
15945 do_neon_cvttb_1 (bfd_boolean t
)
15947 enum neon_shape rs
= neon_select_shape (NS_HF
, NS_HD
, NS_FH
, NS_FF
, NS_FD
,
15948 NS_DF
, NS_DH
, NS_NULL
);
15952 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
15955 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
15957 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
15960 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
15962 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
15964 /* The VCVTB and VCVTT instructions with D-register operands
15965 don't work for SP only targets. */
15966 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15970 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
15972 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
15974 /* The VCVTB and VCVTT instructions with D-register operands
15975 don't work for SP only targets. */
15976 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
15980 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
15987 do_neon_cvtb (void)
15989 do_neon_cvttb_1 (FALSE
);
15994 do_neon_cvtt (void)
15996 do_neon_cvttb_1 (TRUE
);
16000 neon_move_immediate (void)
16002 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
16003 struct neon_type_el et
= neon_check_type (2, rs
,
16004 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
16005 unsigned immlo
, immhi
= 0, immbits
;
16006 int op
, cmode
, float_p
;
16008 constraint (et
.type
== NT_invtype
,
16009 _("operand size must be specified for immediate VMOV"));
16011 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
16012 op
= (inst
.instruction
& (1 << 5)) != 0;
16014 immlo
= inst
.operands
[1].imm
;
16015 if (inst
.operands
[1].regisimm
)
16016 immhi
= inst
.operands
[1].reg
;
16018 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
16019 _("immediate has bits set outside the operand size"));
16021 float_p
= inst
.operands
[1].immisfloat
;
16023 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
16024 et
.size
, et
.type
)) == FAIL
)
16026 /* Invert relevant bits only. */
16027 neon_invert_size (&immlo
, &immhi
, et
.size
);
16028 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
16029 with one or the other; those cases are caught by
16030 neon_cmode_for_move_imm. */
16032 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
16033 &op
, et
.size
, et
.type
)) == FAIL
)
16035 first_error (_("immediate out of range"));
16040 inst
.instruction
&= ~(1 << 5);
16041 inst
.instruction
|= op
<< 5;
16043 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16044 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16045 inst
.instruction
|= neon_quad (rs
) << 6;
16046 inst
.instruction
|= cmode
<< 8;
16048 neon_write_immbits (immbits
);
16054 if (inst
.operands
[1].isreg
)
16056 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16058 NEON_ENCODE (INTEGER
, inst
);
16059 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16060 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16061 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16062 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16063 inst
.instruction
|= neon_quad (rs
) << 6;
16067 NEON_ENCODE (IMMED
, inst
);
16068 neon_move_immediate ();
16071 neon_dp_fixup (&inst
);
16074 /* Encode instructions of form:
16076 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16077 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16080 neon_mixed_length (struct neon_type_el et
, unsigned size
)
16082 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16083 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16084 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16085 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16086 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16087 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16088 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
16089 inst
.instruction
|= neon_logbits (size
) << 20;
16091 neon_dp_fixup (&inst
);
16095 do_neon_dyadic_long (void)
16097 /* FIXME: Type checking for lengthening op. */
16098 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16099 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16100 neon_mixed_length (et
, et
.size
);
16104 do_neon_abal (void)
16106 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16107 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16108 neon_mixed_length (et
, et
.size
);
16112 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
16114 if (inst
.operands
[2].isscalar
)
16116 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
16117 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
16118 NEON_ENCODE (SCALAR
, inst
);
16119 neon_mul_mac (et
, et
.type
== NT_unsigned
);
16123 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16124 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
16125 NEON_ENCODE (INTEGER
, inst
);
16126 neon_mixed_length (et
, et
.size
);
16131 do_neon_mac_maybe_scalar_long (void)
16133 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
16137 do_neon_dyadic_wide (void)
16139 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
16140 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16141 neon_mixed_length (et
, et
.size
);
16145 do_neon_dyadic_narrow (void)
16147 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16148 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
16149 /* Operand sign is unimportant, and the U bit is part of the opcode,
16150 so force the operand type to integer. */
16151 et
.type
= NT_integer
;
16152 neon_mixed_length (et
, et
.size
/ 2);
16156 do_neon_mul_sat_scalar_long (void)
16158 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
16162 do_neon_vmull (void)
16164 if (inst
.operands
[2].isscalar
)
16165 do_neon_mac_maybe_scalar_long ();
16168 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16169 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
16171 if (et
.type
== NT_poly
)
16172 NEON_ENCODE (POLY
, inst
);
16174 NEON_ENCODE (INTEGER
, inst
);
16176 /* For polynomial encoding the U bit must be zero, and the size must
16177 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
16178 obviously, as 0b10). */
16181 /* Check we're on the correct architecture. */
16182 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
16184 _("Instruction form not available on this architecture.");
16189 neon_mixed_length (et
, et
.size
);
16196 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
16197 struct neon_type_el et
= neon_check_type (3, rs
,
16198 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16199 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
16201 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
16202 _("shift out of range"));
16203 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16204 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16205 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16206 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16207 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16208 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16209 inst
.instruction
|= neon_quad (rs
) << 6;
16210 inst
.instruction
|= imm
<< 8;
16212 neon_dp_fixup (&inst
);
16218 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16219 struct neon_type_el et
= neon_check_type (2, rs
,
16220 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16221 unsigned op
= (inst
.instruction
>> 7) & 3;
16222 /* N (width of reversed regions) is encoded as part of the bitmask. We
16223 extract it here to check the elements to be reversed are smaller.
16224 Otherwise we'd get a reserved instruction. */
16225 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
16226 gas_assert (elsize
!= 0);
16227 constraint (et
.size
>= elsize
,
16228 _("elements must be smaller than reversal region"));
16229 neon_two_same (neon_quad (rs
), 1, et
.size
);
16235 if (inst
.operands
[1].isscalar
)
16237 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
16238 struct neon_type_el et
= neon_check_type (2, rs
,
16239 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16240 unsigned sizebits
= et
.size
>> 3;
16241 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16242 int logsize
= neon_logbits (et
.size
);
16243 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
16245 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
16248 NEON_ENCODE (SCALAR
, inst
);
16249 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16250 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16251 inst
.instruction
|= LOW4 (dm
);
16252 inst
.instruction
|= HI1 (dm
) << 5;
16253 inst
.instruction
|= neon_quad (rs
) << 6;
16254 inst
.instruction
|= x
<< 17;
16255 inst
.instruction
|= sizebits
<< 16;
16257 neon_dp_fixup (&inst
);
16261 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
16262 struct neon_type_el et
= neon_check_type (2, rs
,
16263 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16264 /* Duplicate ARM register to lanes of vector. */
16265 NEON_ENCODE (ARMREG
, inst
);
16268 case 8: inst
.instruction
|= 0x400000; break;
16269 case 16: inst
.instruction
|= 0x000020; break;
16270 case 32: inst
.instruction
|= 0x000000; break;
16273 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16274 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
16275 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
16276 inst
.instruction
|= neon_quad (rs
) << 21;
16277 /* The encoding for this instruction is identical for the ARM and Thumb
16278 variants, except for the condition field. */
16279 do_vfp_cond_or_thumb ();
16283 /* VMOV has particularly many variations. It can be one of:
16284 0. VMOV<c><q> <Qd>, <Qm>
16285 1. VMOV<c><q> <Dd>, <Dm>
16286 (Register operations, which are VORR with Rm = Rn.)
16287 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16288 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16290 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16291 (ARM register to scalar.)
16292 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16293 (Two ARM registers to vector.)
16294 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16295 (Scalar to ARM register.)
16296 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16297 (Vector to two ARM registers.)
16298 8. VMOV.F32 <Sd>, <Sm>
16299 9. VMOV.F64 <Dd>, <Dm>
16300 (VFP register moves.)
16301 10. VMOV.F32 <Sd>, #imm
16302 11. VMOV.F64 <Dd>, #imm
16303 (VFP float immediate load.)
16304 12. VMOV <Rd>, <Sm>
16305 (VFP single to ARM reg.)
16306 13. VMOV <Sd>, <Rm>
16307 (ARM reg to VFP single.)
16308 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16309 (Two ARM regs to two VFP singles.)
16310 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16311 (Two VFP singles to two ARM regs.)
16313 These cases can be disambiguated using neon_select_shape, except cases 1/9
16314 and 3/11 which depend on the operand type too.
16316 All the encoded bits are hardcoded by this function.
16318 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16319 Cases 5, 7 may be used with VFPv2 and above.
16321 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16322 can specify a type where it doesn't make sense to, and is ignored). */
16327 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
16328 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
,
16329 NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
16330 NS_HR
, NS_RH
, NS_HI
, NS_NULL
);
16331 struct neon_type_el et
;
16332 const char *ldconst
= 0;
16336 case NS_DD
: /* case 1/9. */
16337 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16338 /* It is not an error here if no type is given. */
16340 if (et
.type
== NT_float
&& et
.size
== 64)
16342 do_vfp_nsyn_opcode ("fcpyd");
16345 /* fall through. */
16347 case NS_QQ
: /* case 0/1. */
16349 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16351 /* The architecture manual I have doesn't explicitly state which
16352 value the U bit should have for register->register moves, but
16353 the equivalent VORR instruction has U = 0, so do that. */
16354 inst
.instruction
= 0x0200110;
16355 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16356 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16357 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16358 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16359 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16360 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16361 inst
.instruction
|= neon_quad (rs
) << 6;
16363 neon_dp_fixup (&inst
);
16367 case NS_DI
: /* case 3/11. */
16368 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16370 if (et
.type
== NT_float
&& et
.size
== 64)
16372 /* case 11 (fconstd). */
16373 ldconst
= "fconstd";
16374 goto encode_fconstd
;
16376 /* fall through. */
16378 case NS_QI
: /* case 2/3. */
16379 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16381 inst
.instruction
= 0x0800010;
16382 neon_move_immediate ();
16383 neon_dp_fixup (&inst
);
16386 case NS_SR
: /* case 4. */
16388 unsigned bcdebits
= 0;
16390 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
16391 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
16393 /* .<size> is optional here, defaulting to .32. */
16394 if (inst
.vectype
.elems
== 0
16395 && inst
.operands
[0].vectype
.type
== NT_invtype
16396 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16398 inst
.vectype
.el
[0].type
= NT_untyped
;
16399 inst
.vectype
.el
[0].size
= 32;
16400 inst
.vectype
.elems
= 1;
16403 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16404 logsize
= neon_logbits (et
.size
);
16406 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16408 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16409 && et
.size
!= 32, _(BAD_FPU
));
16410 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16411 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16415 case 8: bcdebits
= 0x8; break;
16416 case 16: bcdebits
= 0x1; break;
16417 case 32: bcdebits
= 0x0; break;
16421 bcdebits
|= x
<< logsize
;
16423 inst
.instruction
= 0xe000b10;
16424 do_vfp_cond_or_thumb ();
16425 inst
.instruction
|= LOW4 (dn
) << 16;
16426 inst
.instruction
|= HI1 (dn
) << 7;
16427 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16428 inst
.instruction
|= (bcdebits
& 3) << 5;
16429 inst
.instruction
|= (bcdebits
>> 2) << 21;
16433 case NS_DRR
: /* case 5 (fmdrr). */
16434 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16437 inst
.instruction
= 0xc400b10;
16438 do_vfp_cond_or_thumb ();
16439 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
16440 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
16441 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
16442 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
16445 case NS_RS
: /* case 6. */
16448 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16449 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
16450 unsigned abcdebits
= 0;
16452 /* .<dt> is optional here, defaulting to .32. */
16453 if (inst
.vectype
.elems
== 0
16454 && inst
.operands
[0].vectype
.type
== NT_invtype
16455 && inst
.operands
[1].vectype
.type
== NT_invtype
)
16457 inst
.vectype
.el
[0].type
= NT_untyped
;
16458 inst
.vectype
.el
[0].size
= 32;
16459 inst
.vectype
.elems
= 1;
16462 et
= neon_check_type (2, NS_NULL
,
16463 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
16464 logsize
= neon_logbits (et
.size
);
16466 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
16468 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
16469 && et
.size
!= 32, _(BAD_FPU
));
16470 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
16471 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
16475 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
16476 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
16477 case 32: abcdebits
= 0x00; break;
16481 abcdebits
|= x
<< logsize
;
16482 inst
.instruction
= 0xe100b10;
16483 do_vfp_cond_or_thumb ();
16484 inst
.instruction
|= LOW4 (dn
) << 16;
16485 inst
.instruction
|= HI1 (dn
) << 7;
16486 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16487 inst
.instruction
|= (abcdebits
& 3) << 5;
16488 inst
.instruction
|= (abcdebits
>> 2) << 21;
16492 case NS_RRD
: /* case 7 (fmrrd). */
16493 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
16496 inst
.instruction
= 0xc500b10;
16497 do_vfp_cond_or_thumb ();
16498 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16499 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16500 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16501 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16504 case NS_FF
: /* case 8 (fcpys). */
16505 do_vfp_nsyn_opcode ("fcpys");
16509 case NS_FI
: /* case 10 (fconsts). */
16510 ldconst
= "fconsts";
16512 if (is_quarter_float (inst
.operands
[1].imm
))
16514 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
16515 do_vfp_nsyn_opcode (ldconst
);
16517 /* ARMv8.2 fp16 vmov.f16 instruction. */
16519 do_scalar_fp16_v82_encode ();
16522 first_error (_("immediate out of range"));
16526 case NS_RF
: /* case 12 (fmrs). */
16527 do_vfp_nsyn_opcode ("fmrs");
16528 /* ARMv8.2 fp16 vmov.f16 instruction. */
16530 do_scalar_fp16_v82_encode ();
16534 case NS_FR
: /* case 13 (fmsr). */
16535 do_vfp_nsyn_opcode ("fmsr");
16536 /* ARMv8.2 fp16 vmov.f16 instruction. */
16538 do_scalar_fp16_v82_encode ();
16541 /* The encoders for the fmrrs and fmsrr instructions expect three operands
16542 (one of which is a list), but we have parsed four. Do some fiddling to
16543 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
16545 case NS_RRFF
: /* case 14 (fmrrs). */
16546 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
16547 _("VFP registers must be adjacent"));
16548 inst
.operands
[2].imm
= 2;
16549 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16550 do_vfp_nsyn_opcode ("fmrrs");
16553 case NS_FFRR
: /* case 15 (fmsrr). */
16554 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
16555 _("VFP registers must be adjacent"));
16556 inst
.operands
[1] = inst
.operands
[2];
16557 inst
.operands
[2] = inst
.operands
[3];
16558 inst
.operands
[0].imm
= 2;
16559 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
16560 do_vfp_nsyn_opcode ("fmsrr");
16564 /* neon_select_shape has determined that the instruction
16565 shape is wrong and has already set the error message. */
16574 do_neon_rshift_round_imm (void)
16576 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16577 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
16578 int imm
= inst
.operands
[2].imm
;
16580 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16583 inst
.operands
[2].present
= 0;
16588 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16589 _("immediate out of range for shift"));
16590 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
16595 do_neon_movhf (void)
16597 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_NULL
);
16598 constraint (rs
!= NS_HH
, _("invalid suffix"));
16600 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16603 do_vfp_sp_monadic ();
16606 inst
.instruction
|= 0xf0000000;
16610 do_neon_movl (void)
16612 struct neon_type_el et
= neon_check_type (2, NS_QD
,
16613 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16614 unsigned sizebits
= et
.size
>> 3;
16615 inst
.instruction
|= sizebits
<< 19;
16616 neon_two_same (0, et
.type
== NT_unsigned
, -1);
16622 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16623 struct neon_type_el et
= neon_check_type (2, rs
,
16624 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16625 NEON_ENCODE (INTEGER
, inst
);
16626 neon_two_same (neon_quad (rs
), 1, et
.size
);
16630 do_neon_zip_uzp (void)
16632 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16633 struct neon_type_el et
= neon_check_type (2, rs
,
16634 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16635 if (rs
== NS_DD
&& et
.size
== 32)
16637 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
16638 inst
.instruction
= N_MNEM_vtrn
;
16642 neon_two_same (neon_quad (rs
), 1, et
.size
);
16646 do_neon_sat_abs_neg (void)
16648 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16649 struct neon_type_el et
= neon_check_type (2, rs
,
16650 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16651 neon_two_same (neon_quad (rs
), 1, et
.size
);
16655 do_neon_pair_long (void)
16657 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16658 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
16659 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16660 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
16661 neon_two_same (neon_quad (rs
), 1, et
.size
);
16665 do_neon_recip_est (void)
16667 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16668 struct neon_type_el et
= neon_check_type (2, rs
,
16669 N_EQK
| N_FLT
, N_F_16_32
| N_U32
| N_KEY
);
16670 inst
.instruction
|= (et
.type
== NT_float
) << 8;
16671 neon_two_same (neon_quad (rs
), 1, et
.size
);
16677 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16678 struct neon_type_el et
= neon_check_type (2, rs
,
16679 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
16680 neon_two_same (neon_quad (rs
), 1, et
.size
);
16686 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16687 struct neon_type_el et
= neon_check_type (2, rs
,
16688 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
16689 neon_two_same (neon_quad (rs
), 1, et
.size
);
16695 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16696 struct neon_type_el et
= neon_check_type (2, rs
,
16697 N_EQK
| N_INT
, N_8
| N_KEY
);
16698 neon_two_same (neon_quad (rs
), 1, et
.size
);
16704 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16705 neon_two_same (neon_quad (rs
), 1, -1);
16709 do_neon_tbl_tbx (void)
16711 unsigned listlenbits
;
16712 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
16714 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
16716 first_error (_("bad list length for table lookup"));
16720 listlenbits
= inst
.operands
[1].imm
- 1;
16721 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16722 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16723 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16724 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16725 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16726 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16727 inst
.instruction
|= listlenbits
<< 8;
16729 neon_dp_fixup (&inst
);
16733 do_neon_ldm_stm (void)
16735 /* P, U and L bits are part of bitmask. */
16736 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
16737 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
16739 if (inst
.operands
[1].issingle
)
16741 do_vfp_nsyn_ldm_stm (is_dbmode
);
16745 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
16746 _("writeback (!) must be used for VLDMDB and VSTMDB"));
16748 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
16749 _("register list must contain at least 1 and at most 16 "
16752 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
16753 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
16754 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16755 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
16757 inst
.instruction
|= offsetbits
;
16759 do_vfp_cond_or_thumb ();
16763 do_neon_ldr_str (void)
16765 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
16767 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16768 And is UNPREDICTABLE in thumb mode. */
16770 && inst
.operands
[1].reg
== REG_PC
16771 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
16774 inst
.error
= _("Use of PC here is UNPREDICTABLE");
16775 else if (warn_on_deprecated
)
16776 as_tsktsk (_("Use of PC here is deprecated"));
16779 if (inst
.operands
[0].issingle
)
16782 do_vfp_nsyn_opcode ("flds");
16784 do_vfp_nsyn_opcode ("fsts");
16786 /* ARMv8.2 vldr.16/vstr.16 instruction. */
16787 if (inst
.vectype
.el
[0].size
== 16)
16788 do_scalar_fp16_v82_encode ();
16793 do_vfp_nsyn_opcode ("fldd");
16795 do_vfp_nsyn_opcode ("fstd");
16799 /* "interleave" version also handles non-interleaving register VLD1/VST1
16803 do_neon_ld_st_interleave (void)
16805 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
16806 N_8
| N_16
| N_32
| N_64
);
16807 unsigned alignbits
= 0;
16809 /* The bits in this table go:
16810 0: register stride of one (0) or two (1)
16811 1,2: register list length, minus one (1, 2, 3, 4).
16812 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16813 We use -1 for invalid entries. */
16814 const int typetable
[] =
16816 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
16817 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
16818 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
16819 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
16823 if (et
.type
== NT_invtype
)
16826 if (inst
.operands
[1].immisalign
)
16827 switch (inst
.operands
[1].imm
>> 8)
16829 case 64: alignbits
= 1; break;
16831 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
16832 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16833 goto bad_alignment
;
16837 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
16838 goto bad_alignment
;
16843 first_error (_("bad alignment"));
16847 inst
.instruction
|= alignbits
<< 4;
16848 inst
.instruction
|= neon_logbits (et
.size
) << 6;
16850 /* Bits [4:6] of the immediate in a list specifier encode register stride
16851 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16852 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16853 up the right value for "type" in a table based on this value and the given
16854 list style, then stick it back. */
16855 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
16856 | (((inst
.instruction
>> 8) & 3) << 3);
16858 typebits
= typetable
[idx
];
16860 constraint (typebits
== -1, _("bad list type for instruction"));
16861 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
16862 _("bad element type for instruction"));
16864 inst
.instruction
&= ~0xf00;
16865 inst
.instruction
|= typebits
<< 8;
16868 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16869 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16870 otherwise. The variable arguments are a list of pairs of legal (size, align)
16871 values, terminated with -1. */
16874 neon_alignment_bit (int size
, int align
, int *do_alignment
, ...)
16877 int result
= FAIL
, thissize
, thisalign
;
16879 if (!inst
.operands
[1].immisalign
)
16885 va_start (ap
, do_alignment
);
16889 thissize
= va_arg (ap
, int);
16890 if (thissize
== -1)
16892 thisalign
= va_arg (ap
, int);
16894 if (size
== thissize
&& align
== thisalign
)
16897 while (result
!= SUCCESS
);
16901 if (result
== SUCCESS
)
16904 first_error (_("unsupported alignment for instruction"));
16910 do_neon_ld_st_lane (void)
16912 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
16913 int align_good
, do_alignment
= 0;
16914 int logsize
= neon_logbits (et
.size
);
16915 int align
= inst
.operands
[1].imm
>> 8;
16916 int n
= (inst
.instruction
>> 8) & 3;
16917 int max_el
= 64 / et
.size
;
16919 if (et
.type
== NT_invtype
)
16922 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
16923 _("bad list length"));
16924 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
16925 _("scalar index out of range"));
16926 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
16928 _("stride of 2 unavailable when element size is 8"));
16932 case 0: /* VLD1 / VST1. */
16933 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 16, 16,
16935 if (align_good
== FAIL
)
16939 unsigned alignbits
= 0;
16942 case 16: alignbits
= 0x1; break;
16943 case 32: alignbits
= 0x3; break;
16946 inst
.instruction
|= alignbits
<< 4;
16950 case 1: /* VLD2 / VST2. */
16951 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 16,
16952 16, 32, 32, 64, -1);
16953 if (align_good
== FAIL
)
16956 inst
.instruction
|= 1 << 4;
16959 case 2: /* VLD3 / VST3. */
16960 constraint (inst
.operands
[1].immisalign
,
16961 _("can't use alignment with this instruction"));
16964 case 3: /* VLD4 / VST4. */
16965 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
16966 16, 64, 32, 64, 32, 128, -1);
16967 if (align_good
== FAIL
)
16971 unsigned alignbits
= 0;
16974 case 8: alignbits
= 0x1; break;
16975 case 16: alignbits
= 0x1; break;
16976 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
16979 inst
.instruction
|= alignbits
<< 4;
16986 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
16987 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
16988 inst
.instruction
|= 1 << (4 + logsize
);
16990 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
16991 inst
.instruction
|= logsize
<< 10;
16994 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16997 do_neon_ld_dup (void)
16999 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
17000 int align_good
, do_alignment
= 0;
17002 if (et
.type
== NT_invtype
)
17005 switch ((inst
.instruction
>> 8) & 3)
17007 case 0: /* VLD1. */
17008 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
17009 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
17010 &do_alignment
, 16, 16, 32, 32, -1);
17011 if (align_good
== FAIL
)
17013 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
17016 case 2: inst
.instruction
|= 1 << 5; break;
17017 default: first_error (_("bad list length")); return;
17019 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17022 case 1: /* VLD2. */
17023 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
17024 &do_alignment
, 8, 16, 16, 32, 32, 64,
17026 if (align_good
== FAIL
)
17028 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
17029 _("bad list length"));
17030 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17031 inst
.instruction
|= 1 << 5;
17032 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17035 case 2: /* VLD3. */
17036 constraint (inst
.operands
[1].immisalign
,
17037 _("can't use alignment with this instruction"));
17038 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
17039 _("bad list length"));
17040 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17041 inst
.instruction
|= 1 << 5;
17042 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17045 case 3: /* VLD4. */
17047 int align
= inst
.operands
[1].imm
>> 8;
17048 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
17049 16, 64, 32, 64, 32, 128, -1);
17050 if (align_good
== FAIL
)
17052 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
17053 _("bad list length"));
17054 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17055 inst
.instruction
|= 1 << 5;
17056 if (et
.size
== 32 && align
== 128)
17057 inst
.instruction
|= 0x3 << 6;
17059 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17066 inst
.instruction
|= do_alignment
<< 4;
17069 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
17070 apart from bits [11:4]. */
17073 do_neon_ldx_stx (void)
17075 if (inst
.operands
[1].isreg
)
17076 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
17078 switch (NEON_LANE (inst
.operands
[0].imm
))
17080 case NEON_INTERLEAVE_LANES
:
17081 NEON_ENCODE (INTERLV
, inst
);
17082 do_neon_ld_st_interleave ();
17085 case NEON_ALL_LANES
:
17086 NEON_ENCODE (DUP
, inst
);
17087 if (inst
.instruction
== N_INV
)
17089 first_error ("only loads support such operands");
17096 NEON_ENCODE (LANE
, inst
);
17097 do_neon_ld_st_lane ();
17100 /* L bit comes from bit mask. */
17101 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17102 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17103 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17105 if (inst
.operands
[1].postind
)
17107 int postreg
= inst
.operands
[1].imm
& 0xf;
17108 constraint (!inst
.operands
[1].immisreg
,
17109 _("post-index must be a register"));
17110 constraint (postreg
== 0xd || postreg
== 0xf,
17111 _("bad register for post-index"));
17112 inst
.instruction
|= postreg
;
17116 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
17117 constraint (inst
.reloc
.exp
.X_op
!= O_constant
17118 || inst
.reloc
.exp
.X_add_number
!= 0,
17121 if (inst
.operands
[1].writeback
)
17123 inst
.instruction
|= 0xd;
17126 inst
.instruction
|= 0xf;
17130 inst
.instruction
|= 0xf9000000;
17132 inst
.instruction
|= 0xf4000000;
17137 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
17139 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17140 D register operands. */
17141 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17142 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17145 NEON_ENCODE (FPV8
, inst
);
17147 if (rs
== NS_FFF
|| rs
== NS_HHH
)
17149 do_vfp_sp_dyadic ();
17151 /* ARMv8.2 fp16 instruction. */
17153 do_scalar_fp16_v82_encode ();
17156 do_vfp_dp_rd_rn_rm ();
17159 inst
.instruction
|= 0x100;
17161 inst
.instruction
|= 0xf0000000;
17167 set_it_insn_type (OUTSIDE_IT_INSN
);
17169 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
17170 first_error (_("invalid instruction shape"));
17176 set_it_insn_type (OUTSIDE_IT_INSN
);
17178 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
17181 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17184 neon_dyadic_misc (NT_untyped
, N_F_16_32
, 0);
17188 do_vrint_1 (enum neon_cvt_mode mode
)
17190 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
17191 struct neon_type_el et
;
17196 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17197 D register operands. */
17198 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17199 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17202 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
17204 if (et
.type
!= NT_invtype
)
17206 /* VFP encodings. */
17207 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
17208 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
17209 set_it_insn_type (OUTSIDE_IT_INSN
);
17211 NEON_ENCODE (FPV8
, inst
);
17212 if (rs
== NS_FF
|| rs
== NS_HH
)
17213 do_vfp_sp_monadic ();
17215 do_vfp_dp_rd_rm ();
17219 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
17220 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
17221 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
17222 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
17223 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
17224 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
17225 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
17229 inst
.instruction
|= (rs
== NS_DD
) << 8;
17230 do_vfp_cond_or_thumb ();
17232 /* ARMv8.2 fp16 vrint instruction. */
17234 do_scalar_fp16_v82_encode ();
17238 /* Neon encodings (or something broken...). */
17240 et
= neon_check_type (2, rs
, N_EQK
, N_F_16_32
| N_KEY
);
17242 if (et
.type
== NT_invtype
)
17245 set_it_insn_type (OUTSIDE_IT_INSN
);
17246 NEON_ENCODE (FLOAT
, inst
);
17248 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17251 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17252 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17253 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17254 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17255 inst
.instruction
|= neon_quad (rs
) << 6;
17256 /* Mask off the original size bits and reencode them. */
17257 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff)
17258 | neon_logbits (et
.size
) << 18);
17262 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
17263 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
17264 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
17265 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
17266 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
17267 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
17268 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
17273 inst
.instruction
|= 0xfc000000;
17275 inst
.instruction
|= 0xf0000000;
17282 do_vrint_1 (neon_cvt_mode_x
);
17288 do_vrint_1 (neon_cvt_mode_z
);
17294 do_vrint_1 (neon_cvt_mode_r
);
17300 do_vrint_1 (neon_cvt_mode_a
);
17306 do_vrint_1 (neon_cvt_mode_n
);
17312 do_vrint_1 (neon_cvt_mode_p
);
17318 do_vrint_1 (neon_cvt_mode_m
);
/* Encode a VCMLA scalar operand.  OPND is GAS's internal scalar encoding
   (register + index), ELSIZE the element size in bits (16 or 32).
   Returns the instruction's M:Vm field; reports an error and returns 0 if
   the register/index combination is not encodable.  */

static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (opnd);
  unsigned elno = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16 && elno < 2 && regno < 16)
    return regno | (elno << 4);
  else if (elsize == 32 && elno == 0)
    return regno;

  first_error (_("scalar out of range"));
  return 0;
}
17339 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
17341 constraint (inst
.reloc
.exp
.X_op
!= O_constant
, _("expression too complex"));
17342 unsigned rot
= inst
.reloc
.exp
.X_add_number
;
17343 constraint (rot
!= 0 && rot
!= 90 && rot
!= 180 && rot
!= 270,
17344 _("immediate out of range"));
17346 if (inst
.operands
[2].isscalar
)
17348 enum neon_shape rs
= neon_select_shape (NS_DDSI
, NS_QQSI
, NS_NULL
);
17349 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
17350 N_KEY
| N_F16
| N_F32
).size
;
17351 unsigned m
= neon_scalar_for_vcmla (inst
.operands
[2].reg
, size
);
17353 inst
.instruction
= 0xfe000800;
17354 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17355 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17356 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17357 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17358 inst
.instruction
|= LOW4 (m
);
17359 inst
.instruction
|= HI1 (m
) << 5;
17360 inst
.instruction
|= neon_quad (rs
) << 6;
17361 inst
.instruction
|= rot
<< 20;
17362 inst
.instruction
|= (size
== 32) << 23;
17366 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
17367 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
17368 N_KEY
| N_F16
| N_F32
).size
;
17369 neon_three_same (neon_quad (rs
), 0, -1);
17370 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
17371 inst
.instruction
|= 0xfc200800;
17372 inst
.instruction
|= rot
<< 23;
17373 inst
.instruction
|= (size
== 32) << 20;
17380 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
17382 constraint (inst
.reloc
.exp
.X_op
!= O_constant
, _("expression too complex"));
17383 unsigned rot
= inst
.reloc
.exp
.X_add_number
;
17384 constraint (rot
!= 90 && rot
!= 270, _("immediate out of range"));
17385 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
17386 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
17387 N_KEY
| N_F16
| N_F32
).size
;
17388 neon_three_same (neon_quad (rs
), 0, -1);
17389 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
17390 inst
.instruction
|= 0xfc800800;
17391 inst
.instruction
|= (rot
== 270) << 24;
17392 inst
.instruction
|= (size
== 32) << 20;
17395 /* Dot Product instructions encoding support. */
17398 do_neon_dotproduct (int unsigned_p
)
17400 enum neon_shape rs
;
17401 unsigned scalar_oprd2
= 0;
17404 if (inst
.cond
!= COND_ALWAYS
)
17405 as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
17406 "is UNPREDICTABLE"));
17408 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
17411 /* Dot Product instructions are in three-same D/Q register format or the third
17412 operand can be a scalar index register. */
17413 if (inst
.operands
[2].isscalar
)
17415 scalar_oprd2
= neon_scalar_for_mul (inst
.operands
[2].reg
, 32);
17416 high8
= 0xfe000000;
17417 rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
17421 high8
= 0xfc000000;
17422 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
17426 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_U8
);
17428 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_S8
);
17430 /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
17431 Product instruction, so we pass 0 as the "ubit" parameter. And the
17432 "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter. */
17433 neon_three_same (neon_quad (rs
), 0, 32);
17435 /* Undo neon_dp_fixup. Dot Product instructions are using a slightly
17436 different NEON three-same encoding. */
17437 inst
.instruction
&= 0x00ffffff;
17438 inst
.instruction
|= high8
;
17439 /* Encode 'U' bit which indicates signedness. */
17440 inst
.instruction
|= (unsigned_p
? 1 : 0) << 4;
17441 /* Re-encode operand2 if it's indexed scalar operand. What has been encoded
17442 from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
17443 the instruction encoding. */
17444 if (inst
.operands
[2].isscalar
)
17446 inst
.instruction
&= 0xffffffd0;
17447 inst
.instruction
|= LOW4 (scalar_oprd2
);
17448 inst
.instruction
|= HI1 (scalar_oprd2
) << 5;
17452 /* Dot Product instructions for signed integer. */
/* Dot Product instructions for signed integer.  */

static void
do_neon_dotproduct_s (void)
{
  return do_neon_dotproduct (0);
}

/* Dot Product instructions for unsigned integer.  */

static void
do_neon_dotproduct_u (void)
{
  return do_neon_dotproduct (1);
}
17468 /* Crypto v1 instructions. */
17470 do_crypto_2op_1 (unsigned elttype
, int op
)
17472 set_it_insn_type (OUTSIDE_IT_INSN
);
17474 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
17480 NEON_ENCODE (INTEGER
, inst
);
17481 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17482 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17483 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17484 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17486 inst
.instruction
|= op
<< 6;
17489 inst
.instruction
|= 0xfc000000;
17491 inst
.instruction
|= 0xf0000000;
17495 do_crypto_3op_1 (int u
, int op
)
17497 set_it_insn_type (OUTSIDE_IT_INSN
);
17499 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
17500 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
17505 NEON_ENCODE (INTEGER
, inst
);
17506 neon_three_same (1, u
, 8 << op
);
17512 do_crypto_2op_1 (N_8
, 0);
17518 do_crypto_2op_1 (N_8
, 1);
17524 do_crypto_2op_1 (N_8
, 2);
17530 do_crypto_2op_1 (N_8
, 3);
17536 do_crypto_3op_1 (0, 0);
17542 do_crypto_3op_1 (0, 1);
17548 do_crypto_3op_1 (0, 2);
17554 do_crypto_3op_1 (0, 3);
17560 do_crypto_3op_1 (1, 0);
17566 do_crypto_3op_1 (1, 1);
17570 do_sha256su1 (void)
17572 do_crypto_3op_1 (1, 2);
17578 do_crypto_2op_1 (N_32
, -1);
17584 do_crypto_2op_1 (N_32
, 0);
17588 do_sha256su0 (void)
17590 do_crypto_2op_1 (N_32
, 1);
17594 do_crc32_1 (unsigned int poly
, unsigned int sz
)
17596 unsigned int Rd
= inst
.operands
[0].reg
;
17597 unsigned int Rn
= inst
.operands
[1].reg
;
17598 unsigned int Rm
= inst
.operands
[2].reg
;
17600 set_it_insn_type (OUTSIDE_IT_INSN
);
17601 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
17602 inst
.instruction
|= LOW4 (Rn
) << 16;
17603 inst
.instruction
|= LOW4 (Rm
);
17604 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
17605 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
17607 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
17608 as_warn (UNPRED_REG ("r15"));
17650 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17652 neon_check_type (2, NS_FD
, N_S32
, N_F64
);
17653 do_vfp_sp_dp_cvt ();
17654 do_vfp_cond_or_thumb ();
17658 /* Overall per-instruction processing. */
17660 /* We need to be able to fix up arbitrary expressions in some statements.
17661 This is so that we can handle symbols that are an arbitrary distance from
17662 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17663 which returns part of an address in a form which will be valid for
17664 a data instruction. We do this by pushing the expression into a symbol
17665 in the expr_section, and creating a fix for that. */
17668 fix_new_arm (fragS
* frag
,
17682 /* Create an absolute valued symbol, so we have something to
17683 refer to in the object file. Unfortunately for us, gas's
17684 generic expression parsing will already have folded out
17685 any use of .set foo/.type foo %function that may have
17686 been used to set type information of the target location,
17687 that's being specified symbolically. We have to presume
17688 the user knows what they are doing. */
17692 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
17694 symbol
= symbol_find_or_make (name
);
17695 S_SET_SEGMENT (symbol
, absolute_section
);
17696 symbol_set_frag (symbol
, &zero_address_frag
);
17697 S_SET_VALUE (symbol
, exp
->X_add_number
);
17698 exp
->X_op
= O_symbol
;
17699 exp
->X_add_symbol
= symbol
;
17700 exp
->X_add_number
= 0;
17706 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
17707 (enum bfd_reloc_code_real
) reloc
);
17711 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
17712 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
17716 /* Mark whether the fix is to a THUMB instruction, or an ARM
17718 new_fix
->tc_fix_data
= thumb_mode
;
17721 /* Create a frg for an instruction requiring relaxation. */
17723 output_relax_insn (void)
17729 /* The size of the instruction is unknown, so tie the debug info to the
17730 start of the instruction. */
17731 dwarf2_emit_insn (0);
17733 switch (inst
.reloc
.exp
.X_op
)
17736 sym
= inst
.reloc
.exp
.X_add_symbol
;
17737 offset
= inst
.reloc
.exp
.X_add_number
;
17741 offset
= inst
.reloc
.exp
.X_add_number
;
17744 sym
= make_expr_symbol (&inst
.reloc
.exp
);
17748 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
17749 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
17750 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
17753 /* Write a 32-bit thumb instruction to buf. */
17755 put_thumb32_insn (char * buf
, unsigned long insn
)
17757 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
17758 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
17762 output_inst (const char * str
)
17768 as_bad ("%s -- `%s'", inst
.error
, str
);
17773 output_relax_insn ();
17776 if (inst
.size
== 0)
17779 to
= frag_more (inst
.size
);
17780 /* PR 9814: Record the thumb mode into the current frag so that we know
17781 what type of NOP padding to use, if necessary. We override any previous
17782 setting so that if the mode has changed then the NOPS that we use will
17783 match the encoding of the last instruction in the frag. */
17784 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
17786 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
17788 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
17789 put_thumb32_insn (to
, inst
.instruction
);
17791 else if (inst
.size
> INSN_SIZE
)
17793 gas_assert (inst
.size
== (2 * INSN_SIZE
));
17794 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
17795 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
17798 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
17800 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
17801 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
17802 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
17805 dwarf2_emit_insn (inst
.size
);
17809 output_it_inst (int cond
, int mask
, char * to
)
17811 unsigned long instruction
= 0xbf00;
17814 instruction
|= mask
;
17815 instruction
|= cond
<< 4;
17819 to
= frag_more (2);
17821 dwarf2_emit_insn (2);
17825 md_number_to_chars (to
, instruction
, 2);
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17864 /* Subroutine of md_assemble, responsible for looking up the primary
17865 opcode from the mnemonic the user wrote. STR points to the
17866 beginning of the mnemonic.
17868 This is not simply a hash table lookup, because of conditional
17869 variants. Most instructions have conditional variants, which are
17870 expressed with a _conditional affix_ to the mnemonic. If we were
17871 to encode each conditional variant as a literal string in the opcode
17872 table, it would have approximately 20,000 entries.
17874 Most mnemonics take this affix as a suffix, and in unified syntax,
17875 'most' is upgraded to 'all'. However, in the divided syntax, some
17876 instructions take the affix as an infix, notably the s-variants of
17877 the arithmetic instructions. Of those instructions, all but six
17878 have the infix appear after the third character of the mnemonic.
17880 Accordingly, the algorithm for looking up primary opcodes given
17883 1. Look up the identifier in the opcode table.
17884 If we find a match, go to step U.
17886 2. Look up the last two characters of the identifier in the
17887 conditions table. If we find a match, look up the first N-2
17888 characters of the identifier in the opcode table. If we
17889 find a match, go to step CE.
17891 3. Look up the fourth and fifth characters of the identifier in
17892 the conditions table. If we find a match, extract those
17893 characters from the identifier, and look up the remaining
17894 characters in the opcode table. If we find a match, go
17899 U. Examine the tag field of the opcode structure, in case this is
17900 one of the six instructions with its conditional infix in an
17901 unusual place. If it is, the tag tells us where to find the
17902 infix; look it up in the conditions table and set inst.cond
17903 accordingly. Otherwise, this is an unconditional instruction.
17904 Again set inst.cond accordingly. Return the opcode structure.
17906 CE. Examine the tag field to make sure this is an instruction that
17907 should receive a conditional suffix. If it is not, fail.
17908 Otherwise, set inst.cond from the suffix we already looked up,
17909 and return the opcode structure.
17911 CM. Examine the tag field to make sure this is an instruction that
17912 should receive a conditional infix after the third character.
17913 If it is not, fail. Otherwise, undo the edits to the current
17914 line of input and proceed as for case CE. */
17916 static const struct asm_opcode
*
17917 opcode_lookup (char **str
)
17921 const struct asm_opcode
*opcode
;
17922 const struct asm_cond
*cond
;
17925 /* Scan up to the end of the mnemonic, which must end in white space,
17926 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17927 for (base
= end
= *str
; *end
!= '\0'; end
++)
17928 if (*end
== ' ' || *end
== '.')
17934 /* Handle a possible width suffix and/or Neon type suffix. */
17939 /* The .w and .n suffixes are only valid if the unified syntax is in
17941 if (unified_syntax
&& end
[1] == 'w')
17943 else if (unified_syntax
&& end
[1] == 'n')
17948 inst
.vectype
.elems
= 0;
17950 *str
= end
+ offset
;
17952 if (end
[offset
] == '.')
17954 /* See if we have a Neon type suffix (possible in either unified or
17955 non-unified ARM syntax mode). */
17956 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
17959 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
17965 /* Look for unaffixed or special-case affixed mnemonic. */
17966 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17971 if (opcode
->tag
< OT_odd_infix_0
)
17973 inst
.cond
= COND_ALWAYS
;
17977 if (warn_on_deprecated
&& unified_syntax
)
17978 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17979 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
17980 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17983 inst
.cond
= cond
->value
;
17987 /* Cannot have a conditional suffix on a mnemonic of less than two
17989 if (end
- base
< 3)
17992 /* Look for suffixed mnemonic. */
17994 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
17995 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
17997 if (opcode
&& cond
)
18000 switch (opcode
->tag
)
18002 case OT_cinfix3_legacy
:
18003 /* Ignore conditional suffixes matched on infix only mnemonics. */
18007 case OT_cinfix3_deprecated
:
18008 case OT_odd_infix_unc
:
18009 if (!unified_syntax
)
18011 /* Fall through. */
18015 case OT_csuf_or_in3
:
18016 inst
.cond
= cond
->value
;
18019 case OT_unconditional
:
18020 case OT_unconditionalF
:
18022 inst
.cond
= cond
->value
;
18025 /* Delayed diagnostic. */
18026 inst
.error
= BAD_COND
;
18027 inst
.cond
= COND_ALWAYS
;
18036 /* Cannot have a usual-position infix on a mnemonic of less than
18037 six characters (five would be a suffix). */
18038 if (end
- base
< 6)
18041 /* Look for infixed mnemonic in the usual position. */
18043 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
18047 memcpy (save
, affix
, 2);
18048 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
18049 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
18051 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
18052 memcpy (affix
, save
, 2);
18055 && (opcode
->tag
== OT_cinfix3
18056 || opcode
->tag
== OT_cinfix3_deprecated
18057 || opcode
->tag
== OT_csuf_or_in3
18058 || opcode
->tag
== OT_cinfix3_legacy
))
18061 if (warn_on_deprecated
&& unified_syntax
18062 && (opcode
->tag
== OT_cinfix3
18063 || opcode
->tag
== OT_cinfix3_deprecated
))
18064 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
18066 inst
.cond
= cond
->value
;
18073 /* This function generates an initial IT instruction, leaving its block
18074 virtually open for the new instructions. Eventually,
18075 the mask will be updated by now_it_add_mask () each time
18076 a new instruction needs to be included in the IT block.
18077 Finally, the block is closed with close_automatic_it_block ().
18078 The block closure can be requested either from md_assemble (),
18079 a tencode (), or due to a label hook. */
18082 new_automatic_it_block (int cond
)
18084 now_it
.state
= AUTOMATIC_IT_BLOCK
;
18085 now_it
.mask
= 0x18;
18087 now_it
.block_length
= 1;
18088 mapping_state (MAP_THUMB
);
18089 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
18090 now_it
.warn_deprecated
= FALSE
;
18091 now_it
.insn_cond
= TRUE
;
18094 /* Close an automatic IT block.
18095 See comments in new_automatic_it_block (). */
18098 close_automatic_it_block (void)
18100 now_it
.mask
= 0x10;
18101 now_it
.block_length
= 0;
18104 /* Update the mask of the current automatically-generated IT
18105 instruction. See comments in new_automatic_it_block (). */
18108 now_it_add_mask (int cond
)
18110 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
18111 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
18112 | ((bitvalue) << (nbit)))
18113 const int resulting_bit
= (cond
& 1);
18115 now_it
.mask
&= 0xf;
18116 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
18118 (5 - now_it
.block_length
));
18119 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
18121 ((5 - now_it
.block_length
) - 1) );
18122 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
18125 #undef SET_BIT_VALUE
18128 /* The IT blocks handling machinery is accessed through the these functions:
18129 it_fsm_pre_encode () from md_assemble ()
18130 set_it_insn_type () optional, from the tencode functions
18131 set_it_insn_type_last () ditto
18132 in_it_block () ditto
18133 it_fsm_post_encode () from md_assemble ()
18134 force_automatic_it_block_close () from label handling functions
18137 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
18138 initializing the IT insn type with a generic initial value depending
18139 on the inst.condition.
18140 2) During the tencode function, two things may happen:
18141 a) The tencode function overrides the IT insn type by
18142 calling either set_it_insn_type (type) or set_it_insn_type_last ().
18143 b) The tencode function queries the IT block state by
18144 calling in_it_block () (i.e. to determine narrow/not narrow mode).
18146 Both set_it_insn_type and in_it_block run the internal FSM state
18147 handling function (handle_it_state), because: a) setting the IT insn
18148 type may incur in an invalid state (exiting the function),
18149 and b) querying the state requires the FSM to be updated.
18150 Specifically we want to avoid creating an IT block for conditional
18151 branches, so it_fsm_pre_encode is actually a guess and we can't
18152 determine whether an IT block is required until the tencode () routine
18153 has decided what type of instruction this actually it.
18154 Because of this, if set_it_insn_type and in_it_block have to be used,
18155 set_it_insn_type has to be called first.
18157 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
18158 determines the insn IT type depending on the inst.cond code.
18159 When a tencode () routine encodes an instruction that can be
18160 either outside an IT block, or, in the case of being inside, has to be
18161 the last one, set_it_insn_type_last () will determine the proper
18162 IT instruction type based on the inst.cond code. Otherwise,
18163 set_it_insn_type can be called for overriding that logic or
18164 for covering other cases.
18166 Calling handle_it_state () may not transition the IT block state to
18167 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
18168 still queried. Instead, if the FSM determines that the state should
18169 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
18170 after the tencode () function: that's what it_fsm_post_encode () does.
18172 Since in_it_block () calls the state handling function to get an
18173 updated state, an error may occur (due to invalid insns combination).
18174 In that case, inst.error is set.
18175 Therefore, inst.error has to be checked after the execution of
18176 the tencode () routine.
18178 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
18179 any pending state change (if any) that didn't take place in
18180 handle_it_state () as explained above. */
18183 it_fsm_pre_encode (void)
18185 if (inst
.cond
!= COND_ALWAYS
)
18186 inst
.it_insn_type
= INSIDE_IT_INSN
;
18188 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
18190 now_it
.state_handled
= 0;
18193 /* IT state FSM handling function. */
18196 handle_it_state (void)
18198 now_it
.state_handled
= 1;
18199 now_it
.insn_cond
= FALSE
;
18201 switch (now_it
.state
)
18203 case OUTSIDE_IT_BLOCK
:
18204 switch (inst
.it_insn_type
)
18206 case OUTSIDE_IT_INSN
:
18209 case INSIDE_IT_INSN
:
18210 case INSIDE_IT_LAST_INSN
:
18211 if (thumb_mode
== 0)
18214 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
18215 as_tsktsk (_("Warning: conditional outside an IT block"\
18220 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
18221 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
18223 /* Automatically generate the IT instruction. */
18224 new_automatic_it_block (inst
.cond
);
18225 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
18226 close_automatic_it_block ();
18230 inst
.error
= BAD_OUT_IT
;
18236 case IF_INSIDE_IT_LAST_INSN
:
18237 case NEUTRAL_IT_INSN
:
18241 now_it
.state
= MANUAL_IT_BLOCK
;
18242 now_it
.block_length
= 0;
18247 case AUTOMATIC_IT_BLOCK
:
18248 /* Three things may happen now:
18249 a) We should increment current it block size;
18250 b) We should close current it block (closing insn or 4 insns);
18251 c) We should close current it block and start a new one (due
18252 to incompatible conditions or
18253 4 insns-length block reached). */
18255 switch (inst
.it_insn_type
)
18257 case OUTSIDE_IT_INSN
:
18258 /* The closure of the block shall happen immediately,
18259 so any in_it_block () call reports the block as closed. */
18260 force_automatic_it_block_close ();
18263 case INSIDE_IT_INSN
:
18264 case INSIDE_IT_LAST_INSN
:
18265 case IF_INSIDE_IT_LAST_INSN
:
18266 now_it
.block_length
++;
18268 if (now_it
.block_length
> 4
18269 || !now_it_compatible (inst
.cond
))
18271 force_automatic_it_block_close ();
18272 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
18273 new_automatic_it_block (inst
.cond
);
18277 now_it
.insn_cond
= TRUE
;
18278 now_it_add_mask (inst
.cond
);
18281 if (now_it
.state
== AUTOMATIC_IT_BLOCK
18282 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
18283 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
18284 close_automatic_it_block ();
18287 case NEUTRAL_IT_INSN
:
18288 now_it
.block_length
++;
18289 now_it
.insn_cond
= TRUE
;
18291 if (now_it
.block_length
> 4)
18292 force_automatic_it_block_close ();
18294 now_it_add_mask (now_it
.cc
& 1);
18298 close_automatic_it_block ();
18299 now_it
.state
= MANUAL_IT_BLOCK
;
18304 case MANUAL_IT_BLOCK
:
18306 /* Check conditional suffixes. */
18307 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
18310 now_it
.mask
&= 0x1f;
18311 is_last
= (now_it
.mask
== 0x10);
18312 now_it
.insn_cond
= TRUE
;
18314 switch (inst
.it_insn_type
)
18316 case OUTSIDE_IT_INSN
:
18317 inst
.error
= BAD_NOT_IT
;
18320 case INSIDE_IT_INSN
:
18321 if (cond
!= inst
.cond
)
18323 inst
.error
= BAD_IT_COND
;
18328 case INSIDE_IT_LAST_INSN
:
18329 case IF_INSIDE_IT_LAST_INSN
:
18330 if (cond
!= inst
.cond
)
18332 inst
.error
= BAD_IT_COND
;
18337 inst
.error
= BAD_BRANCH
;
18342 case NEUTRAL_IT_INSN
:
18343 /* The BKPT instruction is unconditional even in an IT block. */
18347 inst
.error
= BAD_IT_IT
;
18357 struct depr_insn_mask
18359 unsigned long pattern
;
18360 unsigned long mask
;
18361 const char* description
;
18364 /* List of 16-bit instruction patterns deprecated in an IT block in
18366 static const struct depr_insn_mask depr_it_insns
[] = {
18367 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
18368 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
18369 { 0xa000, 0xb800, N_("ADR") },
18370 { 0x4800, 0xf800, N_("Literal loads") },
18371 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
18372 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
18373 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
18374 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
18375 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
18380 it_fsm_post_encode (void)
18384 if (!now_it
.state_handled
)
18385 handle_it_state ();
18387 if (now_it
.insn_cond
18388 && !now_it
.warn_deprecated
18389 && warn_on_deprecated
18390 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
18392 if (inst
.instruction
>= 0x10000)
18394 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
18395 "deprecated in ARMv8"));
18396 now_it
.warn_deprecated
= TRUE
;
18400 const struct depr_insn_mask
*p
= depr_it_insns
;
18402 while (p
->mask
!= 0)
18404 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
18406 as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
18407 "of the following class are deprecated in ARMv8: "
18408 "%s"), p
->description
);
18409 now_it
.warn_deprecated
= TRUE
;
18417 if (now_it
.block_length
> 1)
18419 as_tsktsk (_("IT blocks containing more than one conditional "
18420 "instruction are deprecated in ARMv8"));
18421 now_it
.warn_deprecated
= TRUE
;
18425 is_last
= (now_it
.mask
== 0x10);
18428 now_it
.state
= OUTSIDE_IT_BLOCK
;
18434 force_automatic_it_block_close (void)
18436 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
18438 close_automatic_it_block ();
18439 now_it
.state
= OUTSIDE_IT_BLOCK
;
18447 if (!now_it
.state_handled
)
18448 handle_it_state ();
18450 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
18453 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18454 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18455 here, hence the "known" in the function name. */
18458 known_t32_only_insn (const struct asm_opcode
*opcode
)
18460 /* Original Thumb-1 wide instruction. */
18461 if (opcode
->tencode
== do_t_blx
18462 || opcode
->tencode
== do_t_branch23
18463 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
18464 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
18467 /* Wide-only instruction added to ARMv8-M Baseline. */
18468 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m_m_only
)
18469 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
18470 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
18471 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
18477 /* Whether wide instruction variant can be used if available for a valid OPCODE
18481 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
18483 if (known_t32_only_insn (opcode
))
18486 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
18487 of variant T3 of B.W is checked in do_t_branch. */
18488 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
18489 && opcode
->tencode
== do_t_branch
)
18492 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
18493 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
18494 && opcode
->tencode
== do_t_mov_cmp
18495 /* Make sure CMP instruction is not affected. */
18496 && opcode
->aencode
== do_mov
)
18499 /* Wide instruction variants of all instructions with narrow *and* wide
18500 variants become available with ARMv6t2. Other opcodes are either
18501 narrow-only or wide-only and are thus available if OPCODE is valid. */
18502 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
))
18505 /* OPCODE with narrow only instruction variant or wide variant not
18511 md_assemble (char *str
)
18514 const struct asm_opcode
* opcode
;
18516 /* Align the previous label if needed. */
18517 if (last_label_seen
!= NULL
)
18519 symbol_set_frag (last_label_seen
, frag_now
);
18520 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
18521 S_SET_SEGMENT (last_label_seen
, now_seg
);
18524 memset (&inst
, '\0', sizeof (inst
));
18525 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
18527 opcode
= opcode_lookup (&p
);
18530 /* It wasn't an instruction, but it might be a register alias of
18531 the form alias .req reg, or a Neon .dn/.qn directive. */
18532 if (! create_register_alias (str
, p
)
18533 && ! create_neon_reg_alias (str
, p
))
18534 as_bad (_("bad instruction `%s'"), str
);
18539 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
18540 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
18542 /* The value which unconditional instructions should have in place of the
18543 condition field. */
18544 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
18548 arm_feature_set variant
;
18550 variant
= cpu_variant
;
18551 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
18552 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
18553 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
18554 /* Check that this instruction is supported for this CPU. */
18555 if (!opcode
->tvariant
18556 || (thumb_mode
== 1
18557 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
18559 if (opcode
->tencode
== do_t_swi
)
18560 as_bad (_("SVC is not permitted on this architecture"));
18562 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
18565 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
18566 && opcode
->tencode
!= do_t_branch
)
18568 as_bad (_("Thumb does not support conditional execution"));
18572 /* Two things are addressed here:
18573 1) Implicit require narrow instructions on Thumb-1.
18574 This avoids relaxation accidentally introducing Thumb-2
18576 2) Reject wide instructions in non Thumb-2 cores.
18578 Only instructions with narrow and wide variants need to be handled
18579 but selecting all non wide-only instructions is easier. */
18580 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
18581 && !t32_insn_ok (variant
, opcode
))
18583 if (inst
.size_req
== 0)
18585 else if (inst
.size_req
== 4)
18587 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
18588 as_bad (_("selected processor does not support 32bit wide "
18589 "variant of instruction `%s'"), str
);
18591 as_bad (_("selected processor does not support `%s' in "
18592 "Thumb-2 mode"), str
);
18597 inst
.instruction
= opcode
->tvalue
;
18599 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
18601 /* Prepare the it_insn_type for those encodings that don't set
18603 it_fsm_pre_encode ();
18605 opcode
->tencode ();
18607 it_fsm_post_encode ();
18610 if (!(inst
.error
|| inst
.relax
))
18612 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
18613 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
18614 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
18616 as_bad (_("cannot honor width suffix -- `%s'"), str
);
18621 /* Something has gone badly wrong if we try to relax a fixed size
18623 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
18625 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
18626 *opcode
->tvariant
);
18627 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
18628 set those bits when Thumb-2 32-bit instructions are seen. The impact
18629 of relaxable instructions will be considered later after we finish all
18631 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
18632 variant
= arm_arch_none
;
18634 variant
= cpu_variant
;
18635 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
18636 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
18639 check_neon_suffixes
;
18643 mapping_state (MAP_THUMB
);
18646 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
18650 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
18651 is_bx
= (opcode
->aencode
== do_bx
);
18653 /* Check that this instruction is supported for this CPU. */
18654 if (!(is_bx
&& fix_v4bx
)
18655 && !(opcode
->avariant
&&
18656 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
18658 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
18663 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
18667 inst
.instruction
= opcode
->avalue
;
18668 if (opcode
->tag
== OT_unconditionalF
)
18669 inst
.instruction
|= 0xFU
<< 28;
18671 inst
.instruction
|= inst
.cond
<< 28;
18672 inst
.size
= INSN_SIZE
;
18673 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
18675 it_fsm_pre_encode ();
18676 opcode
->aencode ();
18677 it_fsm_post_encode ();
18679 /* Arm mode bx is marked as both v4T and v5 because it's still required
18680 on a hypothetical non-thumb v5 core. */
18682 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
18684 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
18685 *opcode
->avariant
);
18687 check_neon_suffixes
;
18691 mapping_state (MAP_ARM
);
18696 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
18704 check_it_blocks_finished (void)
18709 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
18710 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
18711 == MANUAL_IT_BLOCK
)
18713 as_warn (_("section '%s' finished with an open IT block."),
18717 if (now_it
.state
== MANUAL_IT_BLOCK
)
18718 as_warn (_("file finished with an open IT block."));
18722 /* Various frobbings of labels and their addresses. */
18725 arm_start_line_hook (void)
18727 last_label_seen
= NULL
;
18731 arm_frob_label (symbolS
* sym
)
18733 last_label_seen
= sym
;
18735 ARM_SET_THUMB (sym
, thumb_mode
);
18737 #if defined OBJ_COFF || defined OBJ_ELF
18738 ARM_SET_INTERWORK (sym
, support_interwork
);
18741 force_automatic_it_block_close ();
18743 /* Note - do not allow local symbols (.Lxxx) to be labelled
18744 as Thumb functions. This is because these labels, whilst
18745 they exist inside Thumb code, are not the entry points for
18746 possible ARM->Thumb calls. Also, these labels can be used
18747 as part of a computed goto or switch statement. eg gcc
18748 can generate code that looks like this:
18750 ldr r2, [pc, .Laaa]
18760 The first instruction loads the address of the jump table.
18761 The second instruction converts a table index into a byte offset.
18762 The third instruction gets the jump address out of the table.
18763 The fourth instruction performs the jump.
18765 If the address stored at .Laaa is that of a symbol which has the
18766 Thumb_Func bit set, then the linker will arrange for this address
18767 to have the bottom bit set, which in turn would mean that the
18768 address computation performed by the third instruction would end
18769 up with the bottom bit set. Since the ARM is capable of unaligned
18770 word loads, the instruction would then load the incorrect address
18771 out of the jump table, and chaos would ensue. */
18772 if (label_is_thumb_function_name
18773 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
18774 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
18776 /* When the address of a Thumb function is taken the bottom
18777 bit of that address should be set. This will allow
18778 interworking between Arm and Thumb functions to work
18781 THUMB_SET_FUNC (sym
, 1);
18783 label_is_thumb_function_name
= FALSE
;
18786 dwarf2_emit_label (sym
);
18790 arm_data_in_code (void)
18792 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
18794 *input_line_pointer
= '/';
18795 input_line_pointer
+= 5;
18796 *input_line_pointer
= 0;
18804 arm_canonicalize_symbol_name (char * name
)
18808 if (thumb_mode
&& (len
= strlen (name
)) > 5
18809 && streq (name
+ len
- 5, "/data"))
18810 *(name
+ len
- 5) = 0;
18815 /* Table of all register names defined by default. The user can
18816 define additional names with .req. Note that all register names
18817 should appear in both upper and lowercase variants. Some registers
18818 also have mixed-case names. */
18820 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
18821 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
18822 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
18823 #define REGSET(p,t) \
18824 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
18825 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
18826 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
18827 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
18828 #define REGSETH(p,t) \
18829 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
18830 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
18831 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
18832 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
18833 #define REGSET2(p,t) \
18834 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
18835 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
18836 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
18837 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
18838 #define SPLRBANK(base,bank,t) \
18839 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
18840 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
18841 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
18842 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
18843 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
18844 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18846 static const struct reg_entry reg_names
[] =
18848 /* ARM integer registers. */
18849 REGSET(r
, RN
), REGSET(R
, RN
),
18851 /* ATPCS synonyms. */
18852 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
18853 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
18854 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
18856 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
18857 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
18858 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
18860 /* Well-known aliases. */
18861 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
18862 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
18864 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
18865 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
18867 /* Coprocessor numbers. */
18868 REGSET(p
, CP
), REGSET(P
, CP
),
18870 /* Coprocessor register numbers. The "cr" variants are for backward
18872 REGSET(c
, CN
), REGSET(C
, CN
),
18873 REGSET(cr
, CN
), REGSET(CR
, CN
),
18875 /* ARM banked registers. */
18876 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
18877 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
18878 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
18879 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
18880 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
18881 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
18882 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
18884 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
18885 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
18886 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
18887 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
18888 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
18889 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
18890 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
18891 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
18893 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
18894 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
18895 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
18896 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
18897 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
18898 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
18899 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
18900 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18901 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
18903 /* FPA registers. */
18904 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
18905 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
18907 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
18908 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
18910 /* VFP SP registers. */
18911 REGSET(s
,VFS
), REGSET(S
,VFS
),
18912 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
18914 /* VFP DP Registers. */
18915 REGSET(d
,VFD
), REGSET(D
,VFD
),
18916 /* Extra Neon DP registers. */
18917 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
18919 /* Neon QP registers. */
18920 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
18922 /* VFP control registers. */
18923 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
18924 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
18925 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
18926 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
18927 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
18928 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
18929 REGDEF(mvfr2
,5,VFC
), REGDEF(MVFR2
,5,VFC
),
18931 /* Maverick DSP coprocessor registers. */
18932 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
18933 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
18935 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
18936 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
18937 REGDEF(dspsc
,0,DSPSC
),
18939 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
18940 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
18941 REGDEF(DSPSC
,0,DSPSC
),
18943 /* iWMMXt data registers - p0, c0-15. */
18944 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
18946 /* iWMMXt control registers - p1, c0-3. */
18947 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
18948 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
18949 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
18950 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
18952 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
18953 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
18954 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
18955 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
18956 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
18958 /* XScale accumulator registers. */
18959 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
18965 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18966 within psr_required_here. */
18967 static const struct asm_psr psrs
[] =
18969 /* Backward compatibility notation. Note that "all" is no longer
18970 truly all possible PSR bits. */
18971 {"all", PSR_c
| PSR_f
},
18975 /* Individual flags. */
18981 /* Combinations of flags. */
18982 {"fs", PSR_f
| PSR_s
},
18983 {"fx", PSR_f
| PSR_x
},
18984 {"fc", PSR_f
| PSR_c
},
18985 {"sf", PSR_s
| PSR_f
},
18986 {"sx", PSR_s
| PSR_x
},
18987 {"sc", PSR_s
| PSR_c
},
18988 {"xf", PSR_x
| PSR_f
},
18989 {"xs", PSR_x
| PSR_s
},
18990 {"xc", PSR_x
| PSR_c
},
18991 {"cf", PSR_c
| PSR_f
},
18992 {"cs", PSR_c
| PSR_s
},
18993 {"cx", PSR_c
| PSR_x
},
18994 {"fsx", PSR_f
| PSR_s
| PSR_x
},
18995 {"fsc", PSR_f
| PSR_s
| PSR_c
},
18996 {"fxs", PSR_f
| PSR_x
| PSR_s
},
18997 {"fxc", PSR_f
| PSR_x
| PSR_c
},
18998 {"fcs", PSR_f
| PSR_c
| PSR_s
},
18999 {"fcx", PSR_f
| PSR_c
| PSR_x
},
19000 {"sfx", PSR_s
| PSR_f
| PSR_x
},
19001 {"sfc", PSR_s
| PSR_f
| PSR_c
},
19002 {"sxf", PSR_s
| PSR_x
| PSR_f
},
19003 {"sxc", PSR_s
| PSR_x
| PSR_c
},
19004 {"scf", PSR_s
| PSR_c
| PSR_f
},
19005 {"scx", PSR_s
| PSR_c
| PSR_x
},
19006 {"xfs", PSR_x
| PSR_f
| PSR_s
},
19007 {"xfc", PSR_x
| PSR_f
| PSR_c
},
19008 {"xsf", PSR_x
| PSR_s
| PSR_f
},
19009 {"xsc", PSR_x
| PSR_s
| PSR_c
},
19010 {"xcf", PSR_x
| PSR_c
| PSR_f
},
19011 {"xcs", PSR_x
| PSR_c
| PSR_s
},
19012 {"cfs", PSR_c
| PSR_f
| PSR_s
},
19013 {"cfx", PSR_c
| PSR_f
| PSR_x
},
19014 {"csf", PSR_c
| PSR_s
| PSR_f
},
19015 {"csx", PSR_c
| PSR_s
| PSR_x
},
19016 {"cxf", PSR_c
| PSR_x
| PSR_f
},
19017 {"cxs", PSR_c
| PSR_x
| PSR_s
},
19018 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
19019 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
19020 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
19021 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
19022 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
19023 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
19024 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
19025 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
19026 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
19027 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
19028 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
19029 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
19030 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
19031 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
19032 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
19033 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
19034 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
19035 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
19036 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
19037 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
19038 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
19039 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
19040 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
19041 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
19044 /* Table of V7M psr names. */
19045 static const struct asm_psr v7m_psrs
[] =
19047 {"apsr", 0x0 }, {"APSR", 0x0 },
19048 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
19049 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
19050 {"psr", 0x3 }, {"PSR", 0x3 },
19051 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
19052 {"ipsr", 0x5 }, {"IPSR", 0x5 },
19053 {"epsr", 0x6 }, {"EPSR", 0x6 },
19054 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
19055 {"msp", 0x8 }, {"MSP", 0x8 },
19056 {"psp", 0x9 }, {"PSP", 0x9 },
19057 {"msplim", 0xa }, {"MSPLIM", 0xa },
19058 {"psplim", 0xb }, {"PSPLIM", 0xb },
19059 {"primask", 0x10}, {"PRIMASK", 0x10},
19060 {"basepri", 0x11}, {"BASEPRI", 0x11},
19061 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
19062 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
19063 {"control", 0x14}, {"CONTROL", 0x14},
19064 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
19065 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
19066 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
19067 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
19068 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
19069 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
19070 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
19071 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
19072 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
19075 /* Table of all shift-in-operand names. */
19076 static const struct asm_shift_name shift_names
[] =
19078 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
19079 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
19080 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
19081 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
19082 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
19083 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
19086 /* Table of all explicit relocation names. */
19088 static struct reloc_entry reloc_names
[] =
19090 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
19091 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
19092 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
19093 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
19094 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
19095 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
19096 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
19097 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
19098 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
19099 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
19100 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
19101 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
19102 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
19103 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
19104 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
19105 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
19106 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
19107 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
}
19111 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
19112 static const struct asm_cond conds
[] =
19116 {"cs", 0x2}, {"hs", 0x2},
19117 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
19131 #define UL_BARRIER(L,U,CODE,FEAT) \
19132 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
19133 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
19135 static struct asm_barrier_opt barrier_opt_names
[] =
19137 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
19138 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
19139 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
19140 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
19141 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
19142 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
19143 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
19144 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
19145 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
19146 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
19147 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
19148 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
19149 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
19150 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
19151 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
19152 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
19157 /* Table of ARM-format instructions. */
19159 /* Macros for gluing together operand strings. N.B. In all cases
19160 other than OPS0, the trailing OP_stop comes from default
19161 zero-initialization of the unspecified elements of the array. */
19162 #define OPS0() { OP_stop, }
19163 #define OPS1(a) { OP_##a, }
19164 #define OPS2(a,b) { OP_##a,OP_##b, }
19165 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
19166 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
19167 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
19168 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
19170 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
19171 This is useful when mixing operands for ARM and THUMB, i.e. using the
19172 MIX_ARM_THUMB_OPERANDS macro.
19173 In order to use these macros, prefix the number of operands with _
19175 #define OPS_1(a) { a, }
19176 #define OPS_2(a,b) { a,b, }
19177 #define OPS_3(a,b,c) { a,b,c, }
19178 #define OPS_4(a,b,c,d) { a,b,c,d, }
19179 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
19180 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
19182 /* These macros abstract out the exact format of the mnemonic table and
19183 save some repeated characters. */
19185 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
19186 #define TxCE(mnem, op, top, nops, ops, ae, te) \
19187 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
19188 THUMB_VARIANT, do_##ae, do_##te }
19190 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
19191 a T_MNEM_xyz enumerator. */
19192 #define TCE(mnem, aop, top, nops, ops, ae, te) \
19193 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
19194 #define tCE(mnem, aop, top, nops, ops, ae, te) \
19195 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19197 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
19198 infix after the third character. */
19199 #define TxC3(mnem, op, top, nops, ops, ae, te) \
19200 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
19201 THUMB_VARIANT, do_##ae, do_##te }
19202 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
19203 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
19204 THUMB_VARIANT, do_##ae, do_##te }
19205 #define TC3(mnem, aop, top, nops, ops, ae, te) \
19206 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
19207 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
19208 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
19209 #define tC3(mnem, aop, top, nops, ops, ae, te) \
19210 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19211 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
19212 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19214 /* Mnemonic that cannot be conditionalized. The ARM condition-code
19215 field is still 0xE. Many of the Thumb variants can be executed
19216 conditionally, so this is checked separately. */
19217 #define TUE(mnem, op, top, nops, ops, ae, te) \
19218 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
19219 THUMB_VARIANT, do_##ae, do_##te }
19221 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
19222 Used by mnemonics that have very minimal differences in the encoding for
19223 ARM and Thumb variants and can be handled in a common function. */
19224 #define TUEc(mnem, op, top, nops, ops, en) \
19225 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
19226 THUMB_VARIANT, do_##en, do_##en }
19228 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
19229 condition code field. */
19230 #define TUF(mnem, op, top, nops, ops, ae, te) \
19231 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
19232 THUMB_VARIANT, do_##ae, do_##te }
19234 /* ARM-only variants of all the above. */
19235 #define CE(mnem, op, nops, ops, ae) \
19236 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19238 #define C3(mnem, op, nops, ops, ae) \
19239 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19241 /* Legacy mnemonics that always have conditional infix after the third
19243 #define CL(mnem, op, nops, ops, ae) \
19244 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
19245 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19247 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
19248 #define cCE(mnem, op, nops, ops, ae) \
19249 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19251 /* Legacy coprocessor instructions where conditional infix and conditional
19252 suffix are ambiguous. For consistency this includes all FPA instructions,
19253 not just the potentially ambiguous ones. */
19254 #define cCL(mnem, op, nops, ops, ae) \
19255 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
19256 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19258 /* Coprocessor, takes either a suffix or a position-3 infix
19259 (for an FPA corner case). */
19260 #define C3E(mnem, op, nops, ops, ae) \
19261 { mnem, OPS##nops ops, OT_csuf_or_in3, \
19262 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19264 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
19265 { m1 #m2 m3, OPS##nops ops, \
19266 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
19267 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19269 #define CM(m1, m2, op, nops, ops, ae) \
19270 xCM_ (m1, , m2, op, nops, ops, ae), \
19271 xCM_ (m1, eq, m2, op, nops, ops, ae), \
19272 xCM_ (m1, ne, m2, op, nops, ops, ae), \
19273 xCM_ (m1, cs, m2, op, nops, ops, ae), \
19274 xCM_ (m1, hs, m2, op, nops, ops, ae), \
19275 xCM_ (m1, cc, m2, op, nops, ops, ae), \
19276 xCM_ (m1, ul, m2, op, nops, ops, ae), \
19277 xCM_ (m1, lo, m2, op, nops, ops, ae), \
19278 xCM_ (m1, mi, m2, op, nops, ops, ae), \
19279 xCM_ (m1, pl, m2, op, nops, ops, ae), \
19280 xCM_ (m1, vs, m2, op, nops, ops, ae), \
19281 xCM_ (m1, vc, m2, op, nops, ops, ae), \
19282 xCM_ (m1, hi, m2, op, nops, ops, ae), \
19283 xCM_ (m1, ls, m2, op, nops, ops, ae), \
19284 xCM_ (m1, ge, m2, op, nops, ops, ae), \
19285 xCM_ (m1, lt, m2, op, nops, ops, ae), \
19286 xCM_ (m1, gt, m2, op, nops, ops, ae), \
19287 xCM_ (m1, le, m2, op, nops, ops, ae), \
19288 xCM_ (m1, al, m2, op, nops, ops, ae)
19290 #define UE(mnem, op, nops, ops, ae) \
19291 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19293 #define UF(mnem, op, nops, ops, ae) \
19294 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19296 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
19297 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
19298 use the same encoding function for each. */
19299 #define NUF(mnem, op, nops, ops, enc) \
19300 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
19301 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19303 /* Neon data processing, version which indirects through neon_enc_tab for
19304 the various overloaded versions of opcodes. */
19305 #define nUF(mnem, op, nops, ops, enc) \
19306 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
19307 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19309 /* Neon insn with conditional suffix for the ARM version, non-overloaded
19311 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
19312 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
19313 THUMB_VARIANT, do_##enc, do_##enc }
19315 #define NCE(mnem, op, nops, ops, enc) \
19316 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
19318 #define NCEF(mnem, op, nops, ops, enc) \
19319 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19321 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
19322 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
19323 { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
19324 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19326 #define nCE(mnem, op, nops, ops, enc) \
19327 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
19329 #define nCEF(mnem, op, nops, ops, enc) \
19330 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19334 static const struct asm_opcode insns
[] =
19336 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19337 #define THUMB_VARIANT & arm_ext_v4t
19338 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19339 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19340 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19341 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19342 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
19343 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
19344 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
19345 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
19346 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19347 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19348 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19349 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19350 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19351 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
19352 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19353 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
19355 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19356 for setting PSR flag bits. They are obsolete in V6 and do not
19357 have Thumb equivalents. */
19358 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19359 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19360 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
19361 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
19362 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
19363 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
19364 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19365 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19366 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
19368 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
19369 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
19370 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
19371 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
19373 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
19374 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
19375 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
19377 OP_ADDRGLDR
),ldst
, t_ldst
),
19378 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
19380 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19381 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19382 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19383 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19384 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19385 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19387 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
19388 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
19391 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
19392 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
19393 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
19394 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
19396 /* Thumb-compatibility pseudo ops. */
19397 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19398 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19399 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19400 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19401 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19402 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19403 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19404 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
19405 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
19406 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
19407 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
19408 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
19410 /* These may simplify to neg. */
19411 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
19412 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
19414 #undef THUMB_VARIANT
19415 #define THUMB_VARIANT & arm_ext_os
19417 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
19418 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
19420 #undef THUMB_VARIANT
19421 #define THUMB_VARIANT & arm_ext_v6
19423 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
19425 /* V1 instructions with no Thumb analogue prior to V6T2. */
19426 #undef THUMB_VARIANT
19427 #define THUMB_VARIANT & arm_ext_v6t2
19429 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19430 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
19431 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
19433 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19434 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19435 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
19436 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
19438 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19439 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19441 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19442 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
19444 /* V1 instructions with no Thumb analogue at all. */
19445 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
19446 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
19448 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
19449 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
19450 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
19451 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
19452 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
19453 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
19454 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
19455 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
19458 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19459 #undef THUMB_VARIANT
19460 #define THUMB_VARIANT & arm_ext_v4t
19462 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
19463 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
19465 #undef THUMB_VARIANT
19466 #define THUMB_VARIANT & arm_ext_v6t2
19468 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
19469 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
19471 /* Generic coprocessor instructions. */
19472 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
19473 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19474 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19475 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19476 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19477 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19478 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19481 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19483 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
19484 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
19487 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19488 #undef THUMB_VARIANT
19489 #define THUMB_VARIANT & arm_ext_msr
19491 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
19492 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
19495 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19496 #undef THUMB_VARIANT
19497 #define THUMB_VARIANT & arm_ext_v6t2
19499 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19500 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19501 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19502 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19503 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19504 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19505 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
19506 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
19509 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19510 #undef THUMB_VARIANT
19511 #define THUMB_VARIANT & arm_ext_v4t
19513 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19514 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19515 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19516 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19517 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19518 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
19521 #define ARM_VARIANT & arm_ext_v4t_5
19523 /* ARM Architecture 4T. */
19524 /* Note: bx (and blx) are required on V5, even if the processor does
19525 not support Thumb. */
19526 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
19529 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19530 #undef THUMB_VARIANT
19531 #define THUMB_VARIANT & arm_ext_v5t
19533 /* Note: blx has 2 variants; the .value coded here is for
19534 BLX(2). Only this variant has conditional execution. */
19535 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
19536 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
19538 #undef THUMB_VARIANT
19539 #define THUMB_VARIANT & arm_ext_v6t2
19541 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
19542 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19543 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19544 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19545 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
19546 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
19547 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19548 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
19551 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19552 #undef THUMB_VARIANT
19553 #define THUMB_VARIANT & arm_ext_v5exp
19555 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19556 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19557 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19558 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19560 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19561 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
19563 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19564 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19565 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19566 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
19568 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19569 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19570 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19571 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19573 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19574 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19576 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19577 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19578 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19579 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
19582 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19583 #undef THUMB_VARIANT
19584 #define THUMB_VARIANT & arm_ext_v6t2
19586 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
19587 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
19589 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
19590 ADDRGLDRS
), ldrd
, t_ldstd
),
19592 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19593 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19596 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19598 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
19601 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19602 #undef THUMB_VARIANT
19603 #define THUMB_VARIANT & arm_ext_v6
19605 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
19606 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
19607 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19608 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19609 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
19610 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19611 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19612 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19613 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19614 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
19616 #undef THUMB_VARIANT
19617 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19619 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
19620 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19622 #undef THUMB_VARIANT
19623 #define THUMB_VARIANT & arm_ext_v6t2
19625 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19626 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
19628 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
19629 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
19631 /* ARM V6 not included in V7M. */
19632 #undef THUMB_VARIANT
19633 #define THUMB_VARIANT & arm_ext_v6_notm
19634 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19635 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19636 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
19637 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
19638 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
19639 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
19640 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
19641 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
19642 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
19643 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19644 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19645 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
19646 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
19647 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
19648 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
19649 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
19650 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19651 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
19652 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
19654 /* ARM V6 not included in V7M (eg. integer SIMD). */
19655 #undef THUMB_VARIANT
19656 #define THUMB_VARIANT & arm_ext_v6_dsp
19657 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
19658 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
19659 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19660 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19661 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19662 /* Old name for QASX. */
19663 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19664 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19665 /* Old name for QSAX. */
19666 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19667 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19668 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19669 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19670 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19671 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19672 /* Old name for SASX. */
19673 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19674 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19675 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19676 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19677 /* Old name for SHASX. */
19678 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19679 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19680 /* Old name for SHSAX. */
19681 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19682 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19683 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19684 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19685 /* Old name for SSAX. */
19686 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19687 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19688 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19689 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19690 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19691 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19692 /* Old name for UASX. */
19693 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19694 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19695 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19696 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19697 /* Old name for UHASX. */
19698 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19699 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19700 /* Old name for UHSAX. */
19701 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19702 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19703 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19704 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19705 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19706 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19707 /* Old name for UQASX. */
19708 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19709 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19710 /* Old name for UQSAX. */
19711 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19712 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19713 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19714 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19715 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19716 /* Old name for USAX. */
19717 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19718 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19719 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19720 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19721 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19722 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19723 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19724 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19725 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
19726 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
19727 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
19728 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19729 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19730 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19731 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19732 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19733 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19734 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19735 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
19736 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19737 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19738 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19739 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19740 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19741 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19742 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19743 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19744 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19745 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19746 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
19747 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
19748 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
19749 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
19750 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
19753 #define ARM_VARIANT & arm_ext_v6k
19754 #undef THUMB_VARIANT
19755 #define THUMB_VARIANT & arm_ext_v6k
19757 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
19758 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
19759 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
19760 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
19762 #undef THUMB_VARIANT
19763 #define THUMB_VARIANT & arm_ext_v6_notm
19764 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
19766 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
19767 RRnpcb
), strexd
, t_strexd
),
19769 #undef THUMB_VARIANT
19770 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19771 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
19773 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
19775 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19777 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
19779 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
19782 #define ARM_VARIANT & arm_ext_sec
19783 #undef THUMB_VARIANT
19784 #define THUMB_VARIANT & arm_ext_sec
19786 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
19789 #define ARM_VARIANT & arm_ext_virt
19790 #undef THUMB_VARIANT
19791 #define THUMB_VARIANT & arm_ext_virt
19793 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
19794 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
19797 #define ARM_VARIANT & arm_ext_pan
19798 #undef THUMB_VARIANT
19799 #define THUMB_VARIANT & arm_ext_pan
19801 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
19804 #define ARM_VARIANT & arm_ext_v6t2
19805 #undef THUMB_VARIANT
19806 #define THUMB_VARIANT & arm_ext_v6t2
19808 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
19809 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
19810 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19811 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
19813 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
19814 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
19816 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19817 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19818 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19819 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
19821 #undef THUMB_VARIANT
19822 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19823 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19824 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
19826 /* Thumb-only instructions. */
19828 #define ARM_VARIANT NULL
19829 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
19830 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
19832 /* ARM does not really have an IT instruction, so always allow it.
19833 The opcode is copied from Thumb in order to allow warnings in
19834 -mimplicit-it=[never | arm] modes. */
19836 #define ARM_VARIANT & arm_ext_v1
19837 #undef THUMB_VARIANT
19838 #define THUMB_VARIANT & arm_ext_v6t2
19840 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
19841 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
19842 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
19843 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
19844 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
19845 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
19846 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
19847 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
19848 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
19849 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
19850 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
19851 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
19852 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
19853 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
19854 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
19855 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19856 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19857 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
19859 /* Thumb2 only instructions. */
19861 #define ARM_VARIANT NULL
19863 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19864 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
19865 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19866 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
19867 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
19868 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
19870 /* Hardware division instructions. */
19872 #define ARM_VARIANT & arm_ext_adiv
19873 #undef THUMB_VARIANT
19874 #define THUMB_VARIANT & arm_ext_div
19876 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19877 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
19879 /* ARM V6M/V7 instructions. */
19881 #define ARM_VARIANT & arm_ext_barrier
19882 #undef THUMB_VARIANT
19883 #define THUMB_VARIANT & arm_ext_barrier
19885 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
19886 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
19887 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
19889 /* ARM V7 instructions. */
19891 #define ARM_VARIANT & arm_ext_v7
19892 #undef THUMB_VARIANT
19893 #define THUMB_VARIANT & arm_ext_v7
19895 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
19896 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
19899 #define ARM_VARIANT & arm_ext_mp
19900 #undef THUMB_VARIANT
19901 #define THUMB_VARIANT & arm_ext_mp
19903 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
19905 /* AArchv8 instructions. */
19907 #define ARM_VARIANT & arm_ext_v8
19909 /* Instructions shared between armv8-a and armv8-m. */
19910 #undef THUMB_VARIANT
19911 #define THUMB_VARIANT & arm_ext_atomics
19913 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19914 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19915 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19916 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19917 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19918 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
19919 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19920 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
19921 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
19922 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19924 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19926 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
19928 #undef THUMB_VARIANT
19929 #define THUMB_VARIANT & arm_ext_v8
19931 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
19932 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
19933 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
19935 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
19937 /* ARMv8 T32 only. */
19939 #define ARM_VARIANT NULL
19940 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
19941 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
19942 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
19944 /* FP for ARMv8. */
19946 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19947 #undef THUMB_VARIANT
19948 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19950 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19951 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19952 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19953 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
19954 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19955 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
19956 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
19957 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
19958 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
19959 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
19960 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
19961 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
19962 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
19963 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
19964 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
19965 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
19966 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
19968 /* Crypto v1 extensions. */
19970 #define ARM_VARIANT & fpu_crypto_ext_armv8
19971 #undef THUMB_VARIANT
19972 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19974 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
19975 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
19976 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
19977 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
19978 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
19979 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
19980 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
19981 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
19982 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
19983 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
19984 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
19985 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
19986 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
19987 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
19990 #define ARM_VARIANT & crc_ext_armv8
19991 #undef THUMB_VARIANT
19992 #define THUMB_VARIANT & crc_ext_armv8
19993 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
19994 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
19995 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
19996 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
19997 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
19998 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
20000 /* ARMv8.2 RAS extension. */
20002 #define ARM_VARIANT & arm_ext_ras
20003 #undef THUMB_VARIANT
20004 #define THUMB_VARIANT & arm_ext_ras
20005 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
20008 #define ARM_VARIANT & arm_ext_v8_3
20009 #undef THUMB_VARIANT
20010 #define THUMB_VARIANT & arm_ext_v8_3
20011 NCE (vjcvt
, eb90bc0
, 2, (RVS
, RVD
), vjcvt
),
20012 NUF (vcmla
, 0, 4, (RNDQ
, RNDQ
, RNDQ_RNSC
, EXPi
), vcmla
),
20013 NUF (vcadd
, 0, 4, (RNDQ
, RNDQ
, RNDQ
, EXPi
), vcadd
),
20016 #define ARM_VARIANT & fpu_neon_ext_dotprod
20017 #undef THUMB_VARIANT
20018 #define THUMB_VARIANT & fpu_neon_ext_dotprod
20019 NUF (vsdot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_s
),
20020 NUF (vudot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_u
),
20023 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
20024 #undef THUMB_VARIANT
20025 #define THUMB_VARIANT NULL
20027 cCE("wfs", e200110
, 1, (RR
), rd
),
20028 cCE("rfs", e300110
, 1, (RR
), rd
),
20029 cCE("wfc", e400110
, 1, (RR
), rd
),
20030 cCE("rfc", e500110
, 1, (RR
), rd
),
20032 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20033 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20034 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20035 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20037 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20038 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20039 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20040 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20042 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
20043 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
20044 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
20045 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
20046 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
20047 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
20048 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
20049 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
20050 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
20051 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
20052 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
20053 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
20055 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
20056 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
20057 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
20058 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
20059 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
20060 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
20061 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
20062 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
20063 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
20064 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
20065 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
20066 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
20068 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
20069 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
20070 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
20071 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
20072 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
20073 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
20074 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
20075 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
20076 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
20077 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
20078 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
20079 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
20081 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
20082 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
20083 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
20084 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
20085 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
20086 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
20087 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
20088 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
20089 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
20090 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
20091 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
20092 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
20094 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
20095 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
20096 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
20097 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
20098 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
20099 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
20100 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
20101 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
20102 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
20103 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
20104 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
20105 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
20107 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
20108 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
20109 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
20110 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
20111 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
20112 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
20113 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
20114 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
20115 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
20116 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
20117 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
20118 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
20120 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
20121 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
20122 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
20123 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
20124 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
20125 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
20126 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
20127 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
20128 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
20129 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
20130 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
20131 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
20133 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
20134 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
20135 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
20136 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
20137 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
20138 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
20139 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
20140 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
20141 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
20142 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
20143 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
20144 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
20146 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
20147 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
20148 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
20149 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
20150 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
20151 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
20152 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
20153 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
20154 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
20155 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
20156 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
20157 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
20159 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
20160 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
20161 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
20162 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
20163 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
20164 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
20165 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
20166 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
20167 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
20168 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
20169 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
20170 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
20172 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
20173 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
20174 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
20175 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
20176 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
20177 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
20178 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
20179 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
20180 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
20181 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
20182 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
20183 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
20185 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
20186 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
20187 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
20188 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
20189 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
20190 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
20191 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
20192 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
20193 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
20194 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
20195 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
20196 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
20198 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
20199 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
20200 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
20201 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
20202 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
20203 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
20204 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
20205 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
20206 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
20207 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
20208 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
20209 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
20211 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
20212 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
20213 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
20214 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
20215 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
20216 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
20217 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
20218 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
20219 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
20220 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
20221 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
20222 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
20224 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
20225 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
20226 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
20227 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
20228 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
20229 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
20230 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
20231 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
20232 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
20233 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
20234 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
20235 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
20237 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
20238 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
20239 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
20240 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
20241 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
20242 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
20243 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
20244 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
20245 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
20246 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
20247 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
20248 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
20250 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20251 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20252 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20253 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20254 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20255 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20256 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20257 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20258 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20259 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20260 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20261 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20263 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20264 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20265 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20266 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20267 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20268 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20269 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20270 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20271 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20272 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20273 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20274 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20276 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20277 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20278 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20279 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20280 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20281 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20282 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20283 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20284 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20285 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20286 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20287 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20289 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20290 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20291 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20292 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20293 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20294 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20295 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20296 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20297 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20298 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20299 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20300 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20302 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20303 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20304 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20305 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20306 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20307 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20308 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20309 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20310 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20311 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20312 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20313 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20315 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20316 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20317 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20318 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20319 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20320 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20321 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20322 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20323 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20324 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20325 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20326 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20328 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20329 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20330 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20331 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20332 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20333 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20334 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20335 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20336 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20337 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20338 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20339 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20341 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20342 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20343 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20344 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20345 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20346 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20347 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20348 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20349 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20350 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20351 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20352 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20354 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20355 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20356 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20357 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20358 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20359 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20360 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20361 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20362 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20363 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20364 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20365 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20367 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20368 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20369 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20370 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20371 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20372 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20373 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20374 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20375 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20376 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20377 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20378 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20380 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20381 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20382 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20383 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20384 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20385 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20386 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20387 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20388 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20389 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20390 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20391 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20393 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20394 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20395 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20396 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20397 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20398 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20399 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20400 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20401 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20402 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20403 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20404 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20406 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20407 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20408 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20409 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20410 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20411 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20412 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20413 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20414 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20415 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20416 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20417 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
20419 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20420 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20421 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20422 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
20424 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
20425 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
20426 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
20427 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
20428 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
20429 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
20430 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
20431 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
20432 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
20433 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
20434 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
20435 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
20437 /* The implementation of the FIX instruction is broken on some
20438 assemblers, in that it accepts a precision specifier as well as a
20439 rounding specifier, despite the fact that this is meaningless.
20440 To be more compatible, we accept it as well, though of course it
20441 does not set any bits. */
20442 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
20443 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
20444 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
20445 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
20446 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
20447 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
20448 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
20449 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
20450 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
20451 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
20452 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
20453 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
20454 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
20456 /* Instructions that were new with the real FPA, call them V2. */
20458 #define ARM_VARIANT & fpu_fpa_ext_v2
20460 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20461 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20462 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20463 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20464 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20465 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
20468 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20470 /* Moves and type conversions. */
20471 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20472 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
20473 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
20474 cCE("fmstat", ef1fa10
, 0, (), noargs
),
20475 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
20476 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
20477 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20478 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20479 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20480 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20481 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20482 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20483 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
20484 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
20486 /* Memory operations. */
20487 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
20488 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
20489 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20490 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20491 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20492 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20493 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20494 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20495 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20496 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20497 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20498 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
20499 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20500 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
20501 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20502 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
20503 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20504 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
20506 /* Monadic operations. */
20507 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20508 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20509 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20511 /* Dyadic operations. */
20512 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20513 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20514 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20515 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20516 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20517 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20518 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20519 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20520 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20523 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20524 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
20525 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
20526 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
20528 /* Double precision load/store are still present on single precision
20529 implementations. */
20530 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
20531 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
20532 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20533 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20534 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20535 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20536 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20537 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
20538 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20539 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
20542 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20544 /* Moves and type conversions. */
20545 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20546 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20547 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20548 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
20549 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
20550 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
20551 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
20552 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20553 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
20554 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20555 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20556 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20557 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
20559 /* Monadic operations. */
20560 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20561 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20562 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20564 /* Dyadic operations. */
20565 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20566 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20567 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20568 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20569 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20570 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20571 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20572 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20573 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20576 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20577 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
20578 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
20579 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
20582 #define ARM_VARIANT & fpu_vfp_ext_v2
20584 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
20585 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
20586 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
20587 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
20589 /* Instructions which may belong to either the Neon or VFP instruction sets.
20590 Individual encoder functions perform additional architecture checks. */
20592 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20593 #undef THUMB_VARIANT
20594 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20596 /* These mnemonics are unique to VFP. */
20597 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
20598 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
20599 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20600 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20601 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20602 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
20603 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
20604 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
20605 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
20606 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
20608 /* Mnemonics shared by Neon and VFP. */
20609 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
20610 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
20611 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
20613 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
20614 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
20616 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
20617 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
20619 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20620 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20621 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20622 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20623 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20624 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
20625 NCE(vldr
, d100b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
20626 NCE(vstr
, d000b00
, 2, (RVSD
, ADDRGLDC
), neon_ldr_str
),
20628 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
20629 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
20630 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
20631 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
20634 /* NOTE: All VMOV encoding is special-cased! */
20635 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
20636 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
20639 #define ARM_VARIANT & arm_ext_fp16
20640 #undef THUMB_VARIANT
20641 #define THUMB_VARIANT & arm_ext_fp16
20642 /* New instructions added from v8.2, allowing the extraction and insertion of
20643 the upper 16 bits of a 32-bit vector register. */
20644 NCE (vmovx
, eb00a40
, 2, (RVS
, RVS
), neon_movhf
),
20645 NCE (vins
, eb00ac0
, 2, (RVS
, RVS
), neon_movhf
),
20647 #undef THUMB_VARIANT
20648 #define THUMB_VARIANT & fpu_neon_ext_v1
20650 #define ARM_VARIANT & fpu_neon_ext_v1
20652 /* Data processing with three registers of the same length. */
20653 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20654 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
20655 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
20656 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20657 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20658 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20659 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20660 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
20661 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
20662 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20663 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
20664 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
20665 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
20666 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
20667 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
20668 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
20669 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
20670 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
20671 /* If not immediate, fall back to neon_dyadic_i64_su.
20672 shl_imm should accept I8 I16 I32 I64,
20673 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20674 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
20675 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
20676 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
20677 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
20678 /* Logic ops, types optional & ignored. */
20679 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20680 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20681 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20682 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20683 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20684 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20685 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
20686 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
20687 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
20688 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
20689 /* Bitfield ops, untyped. */
20690 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20691 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20692 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20693 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20694 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
20695 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
20696 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
20697 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20698 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20699 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20700 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20701 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
20702 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
20703 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20704 back to neon_dyadic_if_su. */
20705 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20706 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20707 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
20708 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
20709 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20710 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20711 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
20712 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
20713 /* Comparison. Type I8 I16 I32 F32. */
20714 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
20715 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
20716 /* As above, D registers only. */
20717 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20718 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
20719 /* Int and float variants, signedness unimportant. */
20720 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20721 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
20722 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
20723 /* Add/sub take types I8 I16 I32 I64 F32. */
20724 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20725 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
20726 /* vtst takes sizes 8, 16, 32. */
20727 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
20728 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
20729 /* VMUL takes I8 I16 I32 F32 P8. */
20730 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
20731 /* VQD{R}MULH takes S16 S32. */
20732 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20733 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20734 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
20735 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
20736 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20737 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20738 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
20739 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
20740 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20741 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20742 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
20743 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
20744 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20745 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20746 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
20747 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
20748 /* ARM v8.1 extension. */
20749 nUF (vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
20750 nUF (vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
20751 nUF (vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
20752 nUF (vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
20754 /* Two address, int/float. Types S8 S16 S32 F32. */
20755 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20756 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
20758 /* Data processing with two registers and a shift amount. */
20759 /* Right shifts, and variants with rounding.
20760 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20761 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20762 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20763 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
20764 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
20765 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20766 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20767 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
20768 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
20769 /* Shift and insert. Sizes accepted 8 16 32 64. */
20770 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
20771 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
20772 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
20773 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
20774 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20775 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
20776 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
20777 /* Right shift immediate, saturating & narrowing, with rounding variants.
20778 Types accepted S16 S32 S64 U16 U32 U64. */
20779 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20780 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
20781 /* As above, unsigned. Types accepted S16 S32 S64. */
20782 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20783 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
20784 /* Right shift narrowing. Types accepted I16 I32 I64. */
20785 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20786 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
20787 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20788 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
20789 /* CVT with optional immediate for fixed-point variant. */
20790 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
20792 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
20793 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
20795 /* Data processing, three registers of different lengths. */
20796 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20797 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
20798 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20799 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20800 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
20801 /* If not scalar, fall back to neon_dyadic_long.
20802 Vector types as above, scalar types S16 S32 U16 U32. */
20803 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20804 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
20805 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20806 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20807 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
20808 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20809 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20810 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20811 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20812 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
20813 /* Saturating doubling multiplies. Types S16 S32. */
20814 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20815 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20816 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
20817 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20818 S16 S32 U16 U32. */
20819 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
20821 /* Extract. Size 8. */
20822 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
20823 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
20825 /* Two registers, miscellaneous. */
20826 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20827 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
20828 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
20829 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
20830 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
20831 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
20832 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
20833 /* Vector replicate. Sizes 8 16 32. */
20834 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
20835 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
20836 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20837 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
20838 /* VMOVN. Types I16 I32 I64. */
20839 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
20840 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20841 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
20842 /* VQMOVUN. Types S16 S32 S64. */
20843 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
20844 /* VZIP / VUZP. Sizes 8 16 32. */
20845 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20846 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20847 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
20848 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
20849 /* VQABS / VQNEG. Types S8 S16 S32. */
20850 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20851 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20852 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
20853 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
20854 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20855 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20856 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
20857 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
20858 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
20859 /* Reciprocal estimates. Types U32 F16 F32. */
20860 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20861 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
20862 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
20863 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
20864 /* VCLS. Types S8 S16 S32. */
20865 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
20866 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
20867 /* VCLZ. Types I8 I16 I32. */
20868 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
20869 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
20870 /* VCNT. Size 8. */
20871 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
20872 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
20873 /* Two address, untyped. */
20874 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
20875 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
20876 /* VTRN. Sizes 8 16 32. */
20877 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
20878 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
20880 /* Table lookup. Size 8. */
20881 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20882 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
20884 #undef THUMB_VARIANT
20885 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20887 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20889 /* Neon element/structure load/store. */
20890 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20891 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20892 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20893 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20894 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20895 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20896 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20897 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
20899 #undef THUMB_VARIANT
20900 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20902 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20903 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
20904 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20905 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20906 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20907 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20908 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20909 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20910 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
20911 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
20913 #undef THUMB_VARIANT
20914 #define THUMB_VARIANT & fpu_vfp_ext_v3
20916 #define ARM_VARIANT & fpu_vfp_ext_v3
20918 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
20919 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20920 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20921 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20922 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20923 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20924 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20925 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
20926 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
20929 #define ARM_VARIANT & fpu_vfp_ext_fma
20930 #undef THUMB_VARIANT
20931 #define THUMB_VARIANT & fpu_vfp_ext_fma
20932 /* Mnemonics shared by Neon and VFP. These are included in the
20933 VFP FMA variant; NEON and VFP FMA always includes the NEON
20934 FMA instructions. */
20935 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20936 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
20937 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20938 the v form should always be used. */
20939 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20940 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
20941 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20942 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
20943 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20944 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
20946 #undef THUMB_VARIANT
20948 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20950 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20951 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20952 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20953 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20954 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20955 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
20956 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
20957 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
20960 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20962 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
20963 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
20964 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
20965 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
20966 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
20967 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
20968 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
20969 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
20970 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
20971 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20972 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20973 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20974 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20975 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20976 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
20977 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20978 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20979 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
20980 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
20981 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
20982 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20983 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20984 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20985 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20986 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20987 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
20988 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
20989 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
20990 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
20991 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
20992 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
20993 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
20994 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
20995 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
20996 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20997 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20998 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
20999 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21000 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21001 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21002 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21003 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21004 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21005 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21006 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21007 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21008 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
21009 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21010 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21011 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21012 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21013 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21014 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21015 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21016 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21017 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21018 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21019 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21020 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21021 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21022 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21023 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21024 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21025 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21026 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21027 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21028 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21029 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21030 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
21031 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
21032 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21033 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21034 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21035 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21036 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21037 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21038 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21039 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21040 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21041 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21042 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21043 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21044 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21045 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21046 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21047 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21048 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21049 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21050 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
21051 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21052 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21053 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21054 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21055 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21056 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21057 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21058 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21059 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21060 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21061 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21062 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21063 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21064 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21065 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21066 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21067 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21068 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21069 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21070 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21071 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21072 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
21073 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21074 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21075 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21076 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21077 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21078 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21079 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21080 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21081 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21082 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21083 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21084 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21085 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21086 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21087 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21088 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21089 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21090 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21091 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21092 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21093 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
21094 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
21095 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21096 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21097 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21098 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21099 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21100 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21101 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21102 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21103 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21104 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21105 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21106 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21107 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21108 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21109 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21110 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21111 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21112 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21113 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21114 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21115 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21116 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21117 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21118 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21119 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21120 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21121 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21122 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21123 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
21126 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
21128 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
21129 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
21130 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
21131 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21132 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21133 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21134 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21135 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21136 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21137 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21138 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21139 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21140 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21141 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21142 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21143 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21144 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21145 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21146 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21147 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21148 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
21149 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21150 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21151 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21152 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21153 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21154 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21155 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21156 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21157 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21158 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21159 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21160 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21161 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21162 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21163 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21164 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21165 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21166 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21167 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21168 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21169 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21170 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21171 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21172 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21173 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21174 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21175 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21176 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21177 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21178 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21179 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21180 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21181 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21182 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21183 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21184 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21187 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
21189 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
21190 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
21191 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
21192 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
21193 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
21194 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
21195 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
21196 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
21197 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
21198 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
21199 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
21200 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
21201 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
21202 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
21203 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
21204 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
21205 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
21206 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
21207 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
21208 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
21209 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
21210 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
21211 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
21212 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
21213 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
21214 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
21215 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
21216 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
21217 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
21218 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
21219 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
21220 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
21221 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
21222 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
21223 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
21224 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
21225 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
21226 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
21227 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
21228 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
21229 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
21230 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
21231 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
21232 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
21233 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
21234 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
21235 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
21236 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
21237 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
21238 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
21239 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
21240 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
21241 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
21242 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
21243 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
21244 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
21245 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
21246 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
21247 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
21248 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
21249 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
21250 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
21251 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
21252 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
21253 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21254 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
21255 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21256 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
21257 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21258 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
21259 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21260 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
21261 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
21262 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
21263 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
21264 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
21266 /* ARMv8-M instructions. */
21268 #define ARM_VARIANT NULL
21269 #undef THUMB_VARIANT
21270 #define THUMB_VARIANT & arm_ext_v8m
21271 TUE("sg", 0, e97fe97f
, 0, (), 0, noargs
),
21272 TUE("blxns", 0, 4784, 1, (RRnpc
), 0, t_blx
),
21273 TUE("bxns", 0, 4704, 1, (RRnpc
), 0, t_bx
),
21274 TUE("tt", 0, e840f000
, 2, (RRnpc
, RRnpc
), 0, tt
),
21275 TUE("ttt", 0, e840f040
, 2, (RRnpc
, RRnpc
), 0, tt
),
21276 TUE("tta", 0, e840f080
, 2, (RRnpc
, RRnpc
), 0, tt
),
21277 TUE("ttat", 0, e840f0c0
, 2, (RRnpc
, RRnpc
), 0, tt
),
21279 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21280 instructions behave as nop if no VFP is present. */
21281 #undef THUMB_VARIANT
21282 #define THUMB_VARIANT & arm_ext_v8m_main
21283 TUEc("vlldm", 0, ec300a00
, 1, (RRnpc
), rn
),
21284 TUEc("vlstm", 0, ec200a00
, 1, (RRnpc
), rn
),
21287 #undef THUMB_VARIANT
21313 /* MD interface: bits in the object file. */
21315 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21316 for use in the a.out file, and stores them in the array pointed to by buf.
21317 This knows about the endian-ness of the target machine and does
21318 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
21319 2 (short) and 4 (long) Floating numbers are put out as a series of
21320 LITTLENUMS (shorts, here at least). */
21323 md_number_to_chars (char * buf
, valueT val
, int n
)
21325 if (target_big_endian
)
21326 number_to_chars_bigendian (buf
, val
, n
);
21328 number_to_chars_littleendian (buf
, val
, n
);
21332 md_chars_to_number (char * buf
, int n
)
21335 unsigned char * where
= (unsigned char *) buf
;
21337 if (target_big_endian
)
21342 result
|= (*where
++ & 255);
21350 result
|= (where
[n
] & 255);
21357 /* MD interface: Sections. */
21359 /* Calculate the maximum variable size (i.e., excluding fr_fix)
21360 that an rs_machine_dependent frag may reach. */
21363 arm_frag_max_var (fragS
*fragp
)
21365 /* We only use rs_machine_dependent for variable-size Thumb instructions,
21366 which are either THUMB_SIZE (2) or INSN_SIZE (4).
21368 Note that we generate relaxable instructions even for cases that don't
21369 really need it, like an immediate that's a trivial constant. So we're
21370 overestimating the instruction size for some of those cases. Rather
21371 than putting more intelligence here, it would probably be better to
21372 avoid generating a relaxation frag in the first place when it can be
21373 determined up front that a short instruction will suffice. */
21375 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
21379 /* Estimate the size of a frag before relaxing. Assume everything fits in
21383 md_estimate_size_before_relax (fragS
* fragp
,
21384 segT segtype ATTRIBUTE_UNUSED
)
21390 /* Convert a machine dependent frag. */
21393 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
21395 unsigned long insn
;
21396 unsigned long old_op
;
21404 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
21406 old_op
= bfd_get_16(abfd
, buf
);
21407 if (fragp
->fr_symbol
)
21409 exp
.X_op
= O_symbol
;
21410 exp
.X_add_symbol
= fragp
->fr_symbol
;
21414 exp
.X_op
= O_constant
;
21416 exp
.X_add_number
= fragp
->fr_offset
;
21417 opcode
= fragp
->fr_subtype
;
21420 case T_MNEM_ldr_pc
:
21421 case T_MNEM_ldr_pc2
:
21422 case T_MNEM_ldr_sp
:
21423 case T_MNEM_str_sp
:
21430 if (fragp
->fr_var
== 4)
21432 insn
= THUMB_OP32 (opcode
);
21433 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
21435 insn
|= (old_op
& 0x700) << 4;
21439 insn
|= (old_op
& 7) << 12;
21440 insn
|= (old_op
& 0x38) << 13;
21442 insn
|= 0x00000c00;
21443 put_thumb32_insn (buf
, insn
);
21444 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
21448 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
21450 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
21453 if (fragp
->fr_var
== 4)
21455 insn
= THUMB_OP32 (opcode
);
21456 insn
|= (old_op
& 0xf0) << 4;
21457 put_thumb32_insn (buf
, insn
);
21458 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
21462 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21463 exp
.X_add_number
-= 4;
21471 if (fragp
->fr_var
== 4)
21473 int r0off
= (opcode
== T_MNEM_mov
21474 || opcode
== T_MNEM_movs
) ? 0 : 8;
21475 insn
= THUMB_OP32 (opcode
);
21476 insn
= (insn
& 0xe1ffffff) | 0x10000000;
21477 insn
|= (old_op
& 0x700) << r0off
;
21478 put_thumb32_insn (buf
, insn
);
21479 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
21483 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
21488 if (fragp
->fr_var
== 4)
21490 insn
= THUMB_OP32(opcode
);
21491 put_thumb32_insn (buf
, insn
);
21492 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
21495 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
21499 if (fragp
->fr_var
== 4)
21501 insn
= THUMB_OP32(opcode
);
21502 insn
|= (old_op
& 0xf00) << 14;
21503 put_thumb32_insn (buf
, insn
);
21504 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
21507 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
21510 case T_MNEM_add_sp
:
21511 case T_MNEM_add_pc
:
21512 case T_MNEM_inc_sp
:
21513 case T_MNEM_dec_sp
:
21514 if (fragp
->fr_var
== 4)
21516 /* ??? Choose between add and addw. */
21517 insn
= THUMB_OP32 (opcode
);
21518 insn
|= (old_op
& 0xf0) << 4;
21519 put_thumb32_insn (buf
, insn
);
21520 if (opcode
== T_MNEM_add_pc
)
21521 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
21523 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
21526 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21534 if (fragp
->fr_var
== 4)
21536 insn
= THUMB_OP32 (opcode
);
21537 insn
|= (old_op
& 0xf0) << 4;
21538 insn
|= (old_op
& 0xf) << 16;
21539 put_thumb32_insn (buf
, insn
);
21540 if (insn
& (1 << 20))
21541 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
21543 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
21546 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
21552 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
21553 (enum bfd_reloc_code_real
) reloc_type
);
21554 fixp
->fx_file
= fragp
->fr_file
;
21555 fixp
->fx_line
= fragp
->fr_line
;
21556 fragp
->fr_fix
+= fragp
->fr_var
;
21558 /* Set whether we use thumb-2 ISA based on final relaxation results. */
21559 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
21560 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
21561 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
21564 /* Return the size of a relaxable immediate operand instruction.
21565 SHIFT and SIZE specify the form of the allowable immediate. */
21567 relax_immediate (fragS
*fragp
, int size
, int shift
)
21573 /* ??? Should be able to do better than this. */
21574 if (fragp
->fr_symbol
)
21577 low
= (1 << shift
) - 1;
21578 mask
= (1 << (shift
+ size
)) - (1 << shift
);
21579 offset
= fragp
->fr_offset
;
21580 /* Force misaligned offsets to 32-bit variant. */
21583 if (offset
& ~mask
)
21588 /* Get the address of a symbol during relaxation. */
21590 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
21596 sym
= fragp
->fr_symbol
;
21597 sym_frag
= symbol_get_frag (sym
);
21598 know (S_GET_SEGMENT (sym
) != absolute_section
21599 || sym_frag
== &zero_address_frag
);
21600 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
21602 /* If frag has yet to be reached on this pass, assume it will
21603 move by STRETCH just as we did. If this is not so, it will
21604 be because some frag between grows, and that will force
21608 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
21612 /* Adjust stretch for any alignment frag. Note that if have
21613 been expanding the earlier code, the symbol may be
21614 defined in what appears to be an earlier frag. FIXME:
21615 This doesn't handle the fr_subtype field, which specifies
21616 a maximum number of bytes to skip when doing an
21618 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
21620 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
21623 stretch
= - ((- stretch
)
21624 & ~ ((1 << (int) f
->fr_offset
) - 1));
21626 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
21638 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
21641 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
21646 /* Assume worst case for symbols not known to be in the same section. */
21647 if (fragp
->fr_symbol
== NULL
21648 || !S_IS_DEFINED (fragp
->fr_symbol
)
21649 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21650 || S_IS_WEAK (fragp
->fr_symbol
))
21653 val
= relaxed_symbol_addr (fragp
, stretch
);
21654 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
21655 addr
= (addr
+ 4) & ~3;
21656 /* Force misaligned targets to 32-bit variant. */
21660 if (val
< 0 || val
> 1020)
21665 /* Return the size of a relaxable add/sub immediate instruction. */
21667 relax_addsub (fragS
*fragp
, asection
*sec
)
21672 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
21673 op
= bfd_get_16(sec
->owner
, buf
);
21674 if ((op
& 0xf) == ((op
>> 4) & 0xf))
21675 return relax_immediate (fragp
, 8, 0);
21677 return relax_immediate (fragp
, 3, 0);
21680 /* Return TRUE iff the definition of symbol S could be pre-empted
21681 (overridden) at link or load time. */
21683 symbol_preemptible (symbolS
*s
)
21685 /* Weak symbols can always be pre-empted. */
21689 /* Non-global symbols cannot be pre-empted. */
21690 if (! S_IS_EXTERNAL (s
))
21694 /* In ELF, a global symbol can be marked protected, or private. In that
21695 case it can't be pre-empted (other definitions in the same link unit
21696 would violate the ODR). */
21697 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
21701 /* Other global symbols might be pre-empted. */
21705 /* Return the size of a relaxable branch instruction. BITS is the
21706 size of the offset field in the narrow instruction. */
21709 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
21715 /* Assume worst case for symbols not known to be in the same section. */
21716 if (!S_IS_DEFINED (fragp
->fr_symbol
)
21717 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
21718 || S_IS_WEAK (fragp
->fr_symbol
))
21722 /* A branch to a function in ARM state will require interworking. */
21723 if (S_IS_DEFINED (fragp
->fr_symbol
)
21724 && ARM_IS_FUNC (fragp
->fr_symbol
))
21728 if (symbol_preemptible (fragp
->fr_symbol
))
21731 val
= relaxed_symbol_addr (fragp
, stretch
);
21732 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
21735 /* Offset is a signed value *2 */
21737 if (val
>= limit
|| val
< -limit
)
21743 /* Relax a machine dependent frag. This returns the amount by which
21744 the current size of the frag should change. */
21747 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
21752 oldsize
= fragp
->fr_var
;
21753 switch (fragp
->fr_subtype
)
21755 case T_MNEM_ldr_pc2
:
21756 newsize
= relax_adr (fragp
, sec
, stretch
);
21758 case T_MNEM_ldr_pc
:
21759 case T_MNEM_ldr_sp
:
21760 case T_MNEM_str_sp
:
21761 newsize
= relax_immediate (fragp
, 8, 2);
21765 newsize
= relax_immediate (fragp
, 5, 2);
21769 newsize
= relax_immediate (fragp
, 5, 1);
21773 newsize
= relax_immediate (fragp
, 5, 0);
21776 newsize
= relax_adr (fragp
, sec
, stretch
);
21782 newsize
= relax_immediate (fragp
, 8, 0);
21785 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
21788 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
21790 case T_MNEM_add_sp
:
21791 case T_MNEM_add_pc
:
21792 newsize
= relax_immediate (fragp
, 8, 2);
21794 case T_MNEM_inc_sp
:
21795 case T_MNEM_dec_sp
:
21796 newsize
= relax_immediate (fragp
, 7, 2);
21802 newsize
= relax_addsub (fragp
, sec
);
21808 fragp
->fr_var
= newsize
;
21809 /* Freeze wide instructions that are at or before the same location as
21810 in the previous pass. This avoids infinite loops.
21811 Don't freeze them unconditionally because targets may be artificially
21812 misaligned by the expansion of preceding frags. */
21813 if (stretch
<= 0 && newsize
> 2)
21815 md_convert_frag (sec
->owner
, sec
, fragp
);
21819 return newsize
- oldsize
;
21822 /* Round up a section size to the appropriate boundary. */
21825 md_section_align (segT segment ATTRIBUTE_UNUSED
,
21828 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21829 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
21831 /* For a.out, force the section size to be aligned. If we don't do
21832 this, BFD will align it for us, but it will not write out the
21833 final bytes of the section. This may be a bug in BFD, but it is
21834 easier to fix it here since that is how the other a.out targets
21838 align
= bfd_get_section_alignment (stdoutput
, segment
);
21839 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
21846 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
21847 of an rs_align_code fragment. */
21850 arm_handle_align (fragS
* fragP
)
21852 static unsigned char const arm_noop
[2][2][4] =
21855 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
21856 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
21859 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
21860 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
21863 static unsigned char const thumb_noop
[2][2][2] =
21866 {0xc0, 0x46}, /* LE */
21867 {0x46, 0xc0}, /* BE */
21870 {0x00, 0xbf}, /* LE */
21871 {0xbf, 0x00} /* BE */
21874 static unsigned char const wide_thumb_noop
[2][4] =
21875 { /* Wide Thumb-2 */
21876 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
21877 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
21880 unsigned bytes
, fix
, noop_size
;
21882 const unsigned char * noop
;
21883 const unsigned char *narrow_noop
= NULL
;
21888 if (fragP
->fr_type
!= rs_align_code
)
21891 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
21892 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
21895 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21896 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
21898 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
21900 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
21902 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21903 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
21905 narrow_noop
= thumb_noop
[1][target_big_endian
];
21906 noop
= wide_thumb_noop
[target_big_endian
];
21909 noop
= thumb_noop
[0][target_big_endian
];
21917 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
21918 ? selected_cpu
: arm_arch_none
,
21920 [target_big_endian
];
21927 fragP
->fr_var
= noop_size
;
21929 if (bytes
& (noop_size
- 1))
21931 fix
= bytes
& (noop_size
- 1);
21933 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
21935 memset (p
, 0, fix
);
21942 if (bytes
& noop_size
)
21944 /* Insert a narrow noop. */
21945 memcpy (p
, narrow_noop
, noop_size
);
21947 bytes
-= noop_size
;
21951 /* Use wide noops for the remainder */
21955 while (bytes
>= noop_size
)
21957 memcpy (p
, noop
, noop_size
);
21959 bytes
-= noop_size
;
21963 fragP
->fr_fix
+= fix
;
21966 /* Called from md_do_align. Used to create an alignment
21967 frag in a code section. */
21970 arm_frag_align_code (int n
, int max
)
21974 /* We assume that there will never be a requirement
21975 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21976 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
21981 _("alignments greater than %d bytes not supported in .text sections."),
21982 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
21983 as_fatal ("%s", err_msg
);
21986 p
= frag_var (rs_align_code
,
21987 MAX_MEM_FOR_RS_ALIGN_CODE
,
21989 (relax_substateT
) max
,
21996 /* Perform target specific initialisation of a frag.
21997 Note - despite the name this initialisation is not done when the frag
21998 is created, but only when its type is assigned. A frag can be created
21999 and used a long time before its type is set, so beware of assuming that
22000 this initialisation is performed first. */
22004 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
22006 /* Record whether this frag is in an ARM or a THUMB area. */
22007 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
22010 #else /* OBJ_ELF is defined. */
22012 arm_init_frag (fragS
* fragP
, int max_chars
)
22014 bfd_boolean frag_thumb_mode
;
22016 /* If the current ARM vs THUMB mode has not already
22017 been recorded into this frag then do so now. */
22018 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
22019 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
22021 /* PR 21809: Do not set a mapping state for debug sections
22022 - it just confuses other tools. */
22023 if (bfd_get_section_flags (NULL
, now_seg
) & SEC_DEBUGGING
)
22026 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
22028 /* Record a mapping symbol for alignment frags. We will delete this
22029 later if the alignment ends up empty. */
22030 switch (fragP
->fr_type
)
22033 case rs_align_test
:
22035 mapping_state_2 (MAP_DATA
, max_chars
);
22037 case rs_align_code
:
22038 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
22045 /* When we change sections we need to issue a new mapping symbol. */
22048 arm_elf_change_section (void)
22050 /* Link an unlinked unwind index table section to the .text section. */
22051 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
22052 && elf_linked_to_section (now_seg
) == NULL
)
22053 elf_linked_to_section (now_seg
) = text_section
;
22057 arm_elf_section_type (const char * str
, size_t len
)
22059 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
22060 return SHT_ARM_EXIDX
;
22065 /* Code to deal with unwinding tables. */
22067 static void add_unwind_adjustsp (offsetT
);
22069 /* Generate any deferred unwind frame offset. */
22072 flush_pending_unwind (void)
22076 offset
= unwind
.pending_offset
;
22077 unwind
.pending_offset
= 0;
22079 add_unwind_adjustsp (offset
);
22082 /* Add an opcode to this list for this function. Two-byte opcodes should
22083 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
22087 add_unwind_opcode (valueT op
, int length
)
22089 /* Add any deferred stack adjustment. */
22090 if (unwind
.pending_offset
)
22091 flush_pending_unwind ();
22093 unwind
.sp_restored
= 0;
22095 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
22097 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
22098 if (unwind
.opcodes
)
22099 unwind
.opcodes
= XRESIZEVEC (unsigned char, unwind
.opcodes
,
22100 unwind
.opcode_alloc
);
22102 unwind
.opcodes
= XNEWVEC (unsigned char, unwind
.opcode_alloc
);
22107 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
22109 unwind
.opcode_count
++;
22113 /* Add unwind opcodes to adjust the stack pointer. */
22116 add_unwind_adjustsp (offsetT offset
)
22120 if (offset
> 0x200)
22122 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
22127 /* Long form: 0xb2, uleb128. */
22128 /* This might not fit in a word so add the individual bytes,
22129 remembering the list is built in reverse order. */
22130 o
= (valueT
) ((offset
- 0x204) >> 2);
22132 add_unwind_opcode (0, 1);
22134 /* Calculate the uleb128 encoding of the offset. */
22138 bytes
[n
] = o
& 0x7f;
22144 /* Add the insn. */
22146 add_unwind_opcode (bytes
[n
- 1], 1);
22147 add_unwind_opcode (0xb2, 1);
22149 else if (offset
> 0x100)
22151 /* Two short opcodes. */
22152 add_unwind_opcode (0x3f, 1);
22153 op
= (offset
- 0x104) >> 2;
22154 add_unwind_opcode (op
, 1);
22156 else if (offset
> 0)
22158 /* Short opcode. */
22159 op
= (offset
- 4) >> 2;
22160 add_unwind_opcode (op
, 1);
22162 else if (offset
< 0)
22165 while (offset
> 0x100)
22167 add_unwind_opcode (0x7f, 1);
22170 op
= ((offset
- 4) >> 2) | 0x40;
22171 add_unwind_opcode (op
, 1);
22175 /* Finish the list of unwind opcodes for this function. */
22178 finish_unwind_opcodes (void)
22182 if (unwind
.fp_used
)
22184 /* Adjust sp as necessary. */
22185 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
22186 flush_pending_unwind ();
22188 /* After restoring sp from the frame pointer. */
22189 op
= 0x90 | unwind
.fp_reg
;
22190 add_unwind_opcode (op
, 1);
22193 flush_pending_unwind ();
22197 /* Start an exception table entry. If idx is nonzero this is an index table
22201 start_unwind_section (const segT text_seg
, int idx
)
22203 const char * text_name
;
22204 const char * prefix
;
22205 const char * prefix_once
;
22206 const char * group_name
;
22214 prefix
= ELF_STRING_ARM_unwind
;
22215 prefix_once
= ELF_STRING_ARM_unwind_once
;
22216 type
= SHT_ARM_EXIDX
;
22220 prefix
= ELF_STRING_ARM_unwind_info
;
22221 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
22222 type
= SHT_PROGBITS
;
22225 text_name
= segment_name (text_seg
);
22226 if (streq (text_name
, ".text"))
22229 if (strncmp (text_name
, ".gnu.linkonce.t.",
22230 strlen (".gnu.linkonce.t.")) == 0)
22232 prefix
= prefix_once
;
22233 text_name
+= strlen (".gnu.linkonce.t.");
22236 sec_name
= concat (prefix
, text_name
, (char *) NULL
);
22242 /* Handle COMDAT group. */
22243 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
22245 group_name
= elf_group_name (text_seg
);
22246 if (group_name
== NULL
)
22248 as_bad (_("Group section `%s' has no group signature"),
22249 segment_name (text_seg
));
22250 ignore_rest_of_line ();
22253 flags
|= SHF_GROUP
;
22257 obj_elf_change_section (sec_name
, type
, 0, flags
, 0, group_name
,
22260 /* Set the section link for index tables. */
22262 elf_linked_to_section (now_seg
) = text_seg
;
22266 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
22267 personality routine data. Returns zero, or the index table value for
22268 an inline entry. */
22271 create_unwind_entry (int have_data
)
22276 /* The current word of data. */
22278 /* The number of bytes left in this word. */
22281 finish_unwind_opcodes ();
22283 /* Remember the current text section. */
22284 unwind
.saved_seg
= now_seg
;
22285 unwind
.saved_subseg
= now_subseg
;
22287 start_unwind_section (now_seg
, 0);
22289 if (unwind
.personality_routine
== NULL
)
22291 if (unwind
.personality_index
== -2)
22294 as_bad (_("handlerdata in cantunwind frame"));
22295 return 1; /* EXIDX_CANTUNWIND. */
22298 /* Use a default personality routine if none is specified. */
22299 if (unwind
.personality_index
== -1)
22301 if (unwind
.opcode_count
> 3)
22302 unwind
.personality_index
= 1;
22304 unwind
.personality_index
= 0;
22307 /* Space for the personality routine entry. */
22308 if (unwind
.personality_index
== 0)
22310 if (unwind
.opcode_count
> 3)
22311 as_bad (_("too many unwind opcodes for personality routine 0"));
22315 /* All the data is inline in the index table. */
22318 while (unwind
.opcode_count
> 0)
22320 unwind
.opcode_count
--;
22321 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
22325 /* Pad with "finish" opcodes. */
22327 data
= (data
<< 8) | 0xb0;
22334 /* We get two opcodes "free" in the first word. */
22335 size
= unwind
.opcode_count
- 2;
22339 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
22340 if (unwind
.personality_index
!= -1)
22342 as_bad (_("attempt to recreate an unwind entry"));
22346 /* An extra byte is required for the opcode count. */
22347 size
= unwind
.opcode_count
+ 1;
22350 size
= (size
+ 3) >> 2;
22352 as_bad (_("too many unwind opcodes"));
22354 frag_align (2, 0, 0);
22355 record_alignment (now_seg
, 2);
22356 unwind
.table_entry
= expr_build_dot ();
22358 /* Allocate the table entry. */
22359 ptr
= frag_more ((size
<< 2) + 4);
22360 /* PR 13449: Zero the table entries in case some of them are not used. */
22361 memset (ptr
, 0, (size
<< 2) + 4);
22362 where
= frag_now_fix () - ((size
<< 2) + 4);
22364 switch (unwind
.personality_index
)
22367 /* ??? Should this be a PLT generating relocation? */
22368 /* Custom personality routine. */
22369 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
22370 BFD_RELOC_ARM_PREL31
);
22375 /* Set the first byte to the number of additional words. */
22376 data
= size
> 0 ? size
- 1 : 0;
22380 /* ABI defined personality routines. */
22382 /* Three opcodes bytes are packed into the first word. */
22389 /* The size and first two opcode bytes go in the first word. */
22390 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
22395 /* Should never happen. */
22399 /* Pack the opcodes into words (MSB first), reversing the list at the same
22401 while (unwind
.opcode_count
> 0)
22405 md_number_to_chars (ptr
, data
, 4);
22410 unwind
.opcode_count
--;
22412 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
22415 /* Finish off the last word. */
22418 /* Pad with "finish" opcodes. */
22420 data
= (data
<< 8) | 0xb0;
22422 md_number_to_chars (ptr
, data
, 4);
22427 /* Add an empty descriptor if there is no user-specified data. */
22428 ptr
= frag_more (4);
22429 md_number_to_chars (ptr
, 0, 4);
22436 /* Initialize the DWARF-2 unwind information for this procedure. */
22439 tc_arm_frame_initial_instructions (void)
22441 cfi_add_CFA_def_cfa (REG_SP
, 0);
22443 #endif /* OBJ_ELF */
22445 /* Convert REGNAME to a DWARF-2 register number. */
22448 tc_arm_regname_to_dw2regnum (char *regname
)
22450 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
22454 /* PR 16694: Allow VFP registers as well. */
22455 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
22459 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
22468 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
22472 exp
.X_op
= O_secrel
;
22473 exp
.X_add_symbol
= symbol
;
22474 exp
.X_add_number
= 0;
22475 emit_expr (&exp
, size
);
22479 /* MD interface: Symbol and relocation handling. */
22481 /* Return the address within the segment that a PC-relative fixup is
22482 relative to. For ARM, PC-relative fixups applied to instructions
22483 are generally relative to the location of the fixup plus 8 bytes.
22484 Thumb branches are offset by 4, and Thumb loads relative to PC
22485 require special handling. */
22488 md_pcrel_from_section (fixS
* fixP
, segT seg
)
22490 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22492 /* If this is pc-relative and we are going to emit a relocation
22493 then we just want to put out any pipeline compensation that the linker
22494 will need. Otherwise we want to use the calculated base.
22495 For WinCE we skip the bias for externals as well, since this
22496 is how the MS ARM-CE assembler behaves and we want to be compatible. */
22498 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
22499 || (arm_force_relocation (fixP
)
22501 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
22507 switch (fixP
->fx_r_type
)
22509 /* PC relative addressing on the Thumb is slightly odd as the
22510 bottom two bits of the PC are forced to zero for the
22511 calculation. This happens *after* application of the
22512 pipeline offset. However, Thumb adrl already adjusts for
22513 this, so we need not do it again. */
22514 case BFD_RELOC_ARM_THUMB_ADD
:
22517 case BFD_RELOC_ARM_THUMB_OFFSET
:
22518 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
22519 case BFD_RELOC_ARM_T32_ADD_PC12
:
22520 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
22521 return (base
+ 4) & ~3;
22523 /* Thumb branches are simply offset by +4. */
22524 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
22525 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
22526 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
22527 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
22528 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
22531 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
22533 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22534 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22535 && ARM_IS_FUNC (fixP
->fx_addsy
)
22536 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22537 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22540 /* BLX is like branches above, but forces the low two bits of PC to
22542 case BFD_RELOC_THUMB_PCREL_BLX
:
22544 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22545 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22546 && THUMB_IS_FUNC (fixP
->fx_addsy
)
22547 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22548 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22549 return (base
+ 4) & ~3;
22551 /* ARM mode branches are offset by +8. However, the Windows CE
22552 loader expects the relocation not to take this into account. */
22553 case BFD_RELOC_ARM_PCREL_BLX
:
22555 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22556 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22557 && ARM_IS_FUNC (fixP
->fx_addsy
)
22558 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22559 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22562 case BFD_RELOC_ARM_PCREL_CALL
:
22564 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22565 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
22566 && THUMB_IS_FUNC (fixP
->fx_addsy
)
22567 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
22568 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
22571 case BFD_RELOC_ARM_PCREL_BRANCH
:
22572 case BFD_RELOC_ARM_PCREL_JUMP
:
22573 case BFD_RELOC_ARM_PLT32
:
22575 /* When handling fixups immediately, because we have already
22576 discovered the value of a symbol, or the address of the frag involved
22577 we must account for the offset by +8, as the OS loader will never see the reloc.
22578 see fixup_segment() in write.c
22579 The S_IS_EXTERNAL test handles the case of global symbols.
22580 Those need the calculated base, not just the pipe compensation the linker will need. */
22582 && fixP
->fx_addsy
!= NULL
22583 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
22584 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
22592 /* ARM mode loads relative to PC are also offset by +8. Unlike
22593 branches, the Windows CE loader *does* expect the relocation
22594 to take this into account. */
22595 case BFD_RELOC_ARM_OFFSET_IMM
:
22596 case BFD_RELOC_ARM_OFFSET_IMM8
:
22597 case BFD_RELOC_ARM_HWLITERAL
:
22598 case BFD_RELOC_ARM_LITERAL
:
22599 case BFD_RELOC_ARM_CP_OFF_IMM
:
22603 /* Other PC-relative relocations are un-offset. */
22609 static bfd_boolean flag_warn_syms
= TRUE
;
22612 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
22614 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22615 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22616 does mean that the resulting code might be very confusing to the reader.
22617 Also this warning can be triggered if the user omits an operand before
22618 an immediate address, eg:
22622 GAS treats this as an assignment of the value of the symbol foo to a
22623 symbol LDR, and so (without this code) it will not issue any kind of
22624 warning or error message.
22626 Note - ARM instructions are case-insensitive but the strings in the hash
22627 table are all stored in lower case, so we must first ensure that name is
22629 if (flag_warn_syms
&& arm_ops_hsh
)
22631 char * nbuf
= strdup (name
);
22634 for (p
= nbuf
; *p
; p
++)
22636 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
22638 static struct hash_control
* already_warned
= NULL
;
22640 if (already_warned
== NULL
)
22641 already_warned
= hash_new ();
22642 /* Only warn about the symbol once. To keep the code
22643 simple we let hash_insert do the lookup for us. */
22644 if (hash_insert (already_warned
, name
, NULL
) == NULL
)
22645 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
22654 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22655 Otherwise we have no need to default values of symbols. */
22658 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
22661 if (name
[0] == '_' && name
[1] == 'G'
22662 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
22666 if (symbol_find (name
))
22667 as_bad (_("GOT already in the symbol table"));
22669 GOT_symbol
= symbol_new (name
, undefined_section
,
22670 (valueT
) 0, & zero_address_frag
);
22680 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22681 computed as two separate immediate values, added together. We
22682 already know that this value cannot be computed by just one ARM
22685 static unsigned int
22686 validate_immediate_twopart (unsigned int val
,
22687 unsigned int * highpart
)
22692 for (i
= 0; i
< 32; i
+= 2)
22693 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
22699 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
22701 else if (a
& 0xff0000)
22703 if (a
& 0xff000000)
22705 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
22709 gas_assert (a
& 0xff000000);
22710 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
22713 return (a
& 0xff) | (i
<< 7);
22720 validate_offset_imm (unsigned int val
, int hwse
)
22722 if ((hwse
&& val
> 255) || val
> 4095)
22727 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22728 negative immediate constant by altering the instruction. A bit of
22733 by inverting the second operand, and
22736 by negating the second operand. */
22739 negate_data_op (unsigned long * instruction
,
22740 unsigned long value
)
22743 unsigned long negated
, inverted
;
22745 negated
= encode_arm_immediate (-value
);
22746 inverted
= encode_arm_immediate (~value
);
22748 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
22751 /* First negates. */
22752 case OPCODE_SUB
: /* ADD <-> SUB */
22753 new_inst
= OPCODE_ADD
;
22758 new_inst
= OPCODE_SUB
;
22762 case OPCODE_CMP
: /* CMP <-> CMN */
22763 new_inst
= OPCODE_CMN
;
22768 new_inst
= OPCODE_CMP
;
22772 /* Now Inverted ops. */
22773 case OPCODE_MOV
: /* MOV <-> MVN */
22774 new_inst
= OPCODE_MVN
;
22779 new_inst
= OPCODE_MOV
;
22783 case OPCODE_AND
: /* AND <-> BIC */
22784 new_inst
= OPCODE_BIC
;
22789 new_inst
= OPCODE_AND
;
22793 case OPCODE_ADC
: /* ADC <-> SBC */
22794 new_inst
= OPCODE_SBC
;
22799 new_inst
= OPCODE_ADC
;
22803 /* We cannot do anything. */
22808 if (value
== (unsigned) FAIL
)
22811 *instruction
&= OPCODE_MASK
;
22812 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
22816 /* Like negate_data_op, but for Thumb-2. */
22818 static unsigned int
22819 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
22823 unsigned int negated
, inverted
;
22825 negated
= encode_thumb32_immediate (-value
);
22826 inverted
= encode_thumb32_immediate (~value
);
22828 rd
= (*instruction
>> 8) & 0xf;
22829 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
22832 /* ADD <-> SUB. Includes CMP <-> CMN. */
22833 case T2_OPCODE_SUB
:
22834 new_inst
= T2_OPCODE_ADD
;
22838 case T2_OPCODE_ADD
:
22839 new_inst
= T2_OPCODE_SUB
;
22843 /* ORR <-> ORN. Includes MOV <-> MVN. */
22844 case T2_OPCODE_ORR
:
22845 new_inst
= T2_OPCODE_ORN
;
22849 case T2_OPCODE_ORN
:
22850 new_inst
= T2_OPCODE_ORR
;
22854 /* AND <-> BIC. TST has no inverted equivalent. */
22855 case T2_OPCODE_AND
:
22856 new_inst
= T2_OPCODE_BIC
;
22863 case T2_OPCODE_BIC
:
22864 new_inst
= T2_OPCODE_AND
;
22869 case T2_OPCODE_ADC
:
22870 new_inst
= T2_OPCODE_SBC
;
22874 case T2_OPCODE_SBC
:
22875 new_inst
= T2_OPCODE_ADC
;
22879 /* We cannot do anything. */
22884 if (value
== (unsigned int)FAIL
)
22887 *instruction
&= T2_OPCODE_MASK
;
22888 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
22892 /* Read a 32-bit thumb instruction from buf. */
22894 static unsigned long
22895 get_thumb32_insn (char * buf
)
22897 unsigned long insn
;
22898 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
22899 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22904 /* We usually want to set the low bit on the address of thumb function
22905 symbols. In particular .word foo - . should have the low bit set.
22906 Generic code tries to fold the difference of two symbols to
22907 a constant. Prevent this and force a relocation when the first symbols
22908 is a thumb function. */
22911 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
22913 if (op
== O_subtract
22914 && l
->X_op
== O_symbol
22915 && r
->X_op
== O_symbol
22916 && THUMB_IS_FUNC (l
->X_add_symbol
))
22918 l
->X_op
= O_subtract
;
22919 l
->X_op_symbol
= r
->X_add_symbol
;
22920 l
->X_add_number
-= r
->X_add_number
;
22924 /* Process as normal. */
22928 /* Encode Thumb2 unconditional branches and calls. The encoding
22929 for the 2 are identical for the immediate values. */
22932 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
22934 #define T2I1I2MASK ((1 << 13) | (1 << 11))
22937 addressT S
, I1
, I2
, lo
, hi
;
22939 S
= (value
>> 24) & 0x01;
22940 I1
= (value
>> 23) & 0x01;
22941 I2
= (value
>> 22) & 0x01;
22942 hi
= (value
>> 12) & 0x3ff;
22943 lo
= (value
>> 1) & 0x7ff;
22944 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
22945 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
22946 newval
|= (S
<< 10) | hi
;
22947 newval2
&= ~T2I1I2MASK
;
22948 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
22949 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
22950 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
22954 md_apply_fix (fixS
* fixP
,
22958 offsetT value
= * valP
;
22960 unsigned int newimm
;
22961 unsigned long temp
;
22963 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
22965 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
22967 /* Note whether this will delete the relocation. */
22969 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
22972 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22973 consistency with the behaviour on 32-bit hosts. Remember value
22975 value
&= 0xffffffff;
22976 value
^= 0x80000000;
22977 value
-= 0x80000000;
22980 fixP
->fx_addnumber
= value
;
22982 /* Same treatment for fixP->fx_offset. */
22983 fixP
->fx_offset
&= 0xffffffff;
22984 fixP
->fx_offset
^= 0x80000000;
22985 fixP
->fx_offset
-= 0x80000000;
22987 switch (fixP
->fx_r_type
)
22989 case BFD_RELOC_NONE
:
22990 /* This will need to go in the object file. */
22994 case BFD_RELOC_ARM_IMMEDIATE
:
22995 /* We claim that this fixup has been processed here,
22996 even if in fact we generate an error because we do
22997 not have a reloc for it, so tc_gen_reloc will reject it. */
23000 if (fixP
->fx_addsy
)
23002 const char *msg
= 0;
23004 if (! S_IS_DEFINED (fixP
->fx_addsy
))
23005 msg
= _("undefined symbol %s used as an immediate value");
23006 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
23007 msg
= _("symbol %s is in a different section");
23008 else if (S_IS_WEAK (fixP
->fx_addsy
))
23009 msg
= _("symbol %s is weak and may be overridden later");
23013 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23014 msg
, S_GET_NAME (fixP
->fx_addsy
));
23019 temp
= md_chars_to_number (buf
, INSN_SIZE
);
23021 /* If the offset is negative, we should use encoding A2 for ADR. */
23022 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
23023 newimm
= negate_data_op (&temp
, value
);
23026 newimm
= encode_arm_immediate (value
);
23028 /* If the instruction will fail, see if we can fix things up by
23029 changing the opcode. */
23030 if (newimm
== (unsigned int) FAIL
)
23031 newimm
= negate_data_op (&temp
, value
);
23032 /* MOV accepts both ARM modified immediate (A1 encoding) and
23033 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
23034 When disassembling, MOV is preferred when there is no encoding
23036 if (newimm
== (unsigned int) FAIL
23037 && ((temp
>> DATA_OP_SHIFT
) & 0xf) == OPCODE_MOV
23038 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
23039 && !((temp
>> SBIT_SHIFT
) & 0x1)
23040 && value
>= 0 && value
<= 0xffff)
23042 /* Clear bits[23:20] to change encoding from A1 to A2. */
23043 temp
&= 0xff0fffff;
23044 /* Encoding high 4bits imm. Code below will encode the remaining
23046 temp
|= (value
& 0x0000f000) << 4;
23047 newimm
= value
& 0x00000fff;
23051 if (newimm
== (unsigned int) FAIL
)
23053 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23054 _("invalid constant (%lx) after fixup"),
23055 (unsigned long) value
);
23059 newimm
|= (temp
& 0xfffff000);
23060 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
23063 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
23065 unsigned int highpart
= 0;
23066 unsigned int newinsn
= 0xe1a00000; /* nop. */
23068 if (fixP
->fx_addsy
)
23070 const char *msg
= 0;
23072 if (! S_IS_DEFINED (fixP
->fx_addsy
))
23073 msg
= _("undefined symbol %s used as an immediate value");
23074 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
23075 msg
= _("symbol %s is in a different section");
23076 else if (S_IS_WEAK (fixP
->fx_addsy
))
23077 msg
= _("symbol %s is weak and may be overridden later");
23081 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23082 msg
, S_GET_NAME (fixP
->fx_addsy
));
23087 newimm
= encode_arm_immediate (value
);
23088 temp
= md_chars_to_number (buf
, INSN_SIZE
);
23090 /* If the instruction will fail, see if we can fix things up by
23091 changing the opcode. */
23092 if (newimm
== (unsigned int) FAIL
23093 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
23095 /* No ? OK - try using two ADD instructions to generate
23097 newimm
= validate_immediate_twopart (value
, & highpart
);
23099 /* Yes - then make sure that the second instruction is
23101 if (newimm
!= (unsigned int) FAIL
)
23103 /* Still No ? Try using a negated value. */
23104 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
23105 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
23106 /* Otherwise - give up. */
23109 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23110 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
23115 /* Replace the first operand in the 2nd instruction (which
23116 is the PC) with the destination register. We have
23117 already added in the PC in the first instruction and we
23118 do not want to do it again. */
23119 newinsn
&= ~ 0xf0000;
23120 newinsn
|= ((newinsn
& 0x0f000) << 4);
23123 newimm
|= (temp
& 0xfffff000);
23124 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
23126 highpart
|= (newinsn
& 0xfffff000);
23127 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
23131 case BFD_RELOC_ARM_OFFSET_IMM
:
23132 if (!fixP
->fx_done
&& seg
->use_rela_p
)
23134 /* Fall through. */
23136 case BFD_RELOC_ARM_LITERAL
:
23142 if (validate_offset_imm (value
, 0) == FAIL
)
23144 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
23145 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23146 _("invalid literal constant: pool needs to be closer"));
23148 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23149 _("bad immediate value for offset (%ld)"),
23154 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23156 newval
&= 0xfffff000;
23159 newval
&= 0xff7ff000;
23160 newval
|= value
| (sign
? INDEX_UP
: 0);
23162 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23165 case BFD_RELOC_ARM_OFFSET_IMM8
:
23166 case BFD_RELOC_ARM_HWLITERAL
:
23172 if (validate_offset_imm (value
, 1) == FAIL
)
23174 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
23175 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23176 _("invalid literal constant: pool needs to be closer"));
23178 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23179 _("bad immediate value for 8-bit offset (%ld)"),
23184 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23186 newval
&= 0xfffff0f0;
23189 newval
&= 0xff7ff0f0;
23190 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
23192 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23195 case BFD_RELOC_ARM_T32_OFFSET_U8
:
23196 if (value
< 0 || value
> 1020 || value
% 4 != 0)
23197 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23198 _("bad immediate value for offset (%ld)"), (long) value
);
23201 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
23203 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
23206 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
23207 /* This is a complicated relocation used for all varieties of Thumb32
23208 load/store instruction with immediate offset:
23210 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
23211 *4, optional writeback(W)
23212 (doubleword load/store)
23214 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
23215 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
23216 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
23217 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
23218 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
23220 Uppercase letters indicate bits that are already encoded at
23221 this point. Lowercase letters are our problem. For the
23222 second block of instructions, the secondary opcode nybble
23223 (bits 8..11) is present, and bit 23 is zero, even if this is
23224 a PC-relative operation. */
23225 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23227 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
23229 if ((newval
& 0xf0000000) == 0xe0000000)
23231 /* Doubleword load/store: 8-bit offset, scaled by 4. */
23233 newval
|= (1 << 23);
23236 if (value
% 4 != 0)
23238 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23239 _("offset not a multiple of 4"));
23245 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23246 _("offset out of range"));
23251 else if ((newval
& 0x000f0000) == 0x000f0000)
23253 /* PC-relative, 12-bit offset. */
23255 newval
|= (1 << 23);
23260 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23261 _("offset out of range"));
23266 else if ((newval
& 0x00000100) == 0x00000100)
23268 /* Writeback: 8-bit, +/- offset. */
23270 newval
|= (1 << 9);
23275 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23276 _("offset out of range"));
23281 else if ((newval
& 0x00000f00) == 0x00000e00)
23283 /* T-instruction: positive 8-bit offset. */
23284 if (value
< 0 || value
> 0xff)
23286 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23287 _("offset out of range"));
23295 /* Positive 12-bit or negative 8-bit offset. */
23299 newval
|= (1 << 23);
23309 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23310 _("offset out of range"));
23317 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
23318 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
23321 case BFD_RELOC_ARM_SHIFT_IMM
:
23322 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23323 if (((unsigned long) value
) > 32
23325 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
23327 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23328 _("shift expression is too large"));
23333 /* Shifts of zero must be done as lsl. */
23335 else if (value
== 32)
23337 newval
&= 0xfffff07f;
23338 newval
|= (value
& 0x1f) << 7;
23339 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23342 case BFD_RELOC_ARM_T32_IMMEDIATE
:
23343 case BFD_RELOC_ARM_T32_ADD_IMM
:
23344 case BFD_RELOC_ARM_T32_IMM12
:
23345 case BFD_RELOC_ARM_T32_ADD_PC12
:
23346 /* We claim that this fixup has been processed here,
23347 even if in fact we generate an error because we do
23348 not have a reloc for it, so tc_gen_reloc will reject it. */
23352 && ! S_IS_DEFINED (fixP
->fx_addsy
))
23354 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23355 _("undefined symbol %s used as an immediate value"),
23356 S_GET_NAME (fixP
->fx_addsy
));
23360 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23362 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
23365 if ((fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
23366 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
23367 Thumb2 modified immediate encoding (T2). */
23368 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
23369 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
23371 newimm
= encode_thumb32_immediate (value
);
23372 if (newimm
== (unsigned int) FAIL
)
23373 newimm
= thumb32_negate_data_op (&newval
, value
);
23375 if (newimm
== (unsigned int) FAIL
)
23377 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
)
23379 /* Turn add/sum into addw/subw. */
23380 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
23381 newval
= (newval
& 0xfeffffff) | 0x02000000;
23382 /* No flat 12-bit imm encoding for addsw/subsw. */
23383 if ((newval
& 0x00100000) == 0)
23385 /* 12 bit immediate for addw/subw. */
23389 newval
^= 0x00a00000;
23392 newimm
= (unsigned int) FAIL
;
23399 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
23400 UINT16 (T3 encoding), MOVW only accepts UINT16. When
23401 disassembling, MOV is preferred when there is no encoding
23403 NOTE: MOV is using ORR opcode under Thumb 2 mode. */
23404 if (((newval
>> T2_DATA_OP_SHIFT
) & 0xf) == T2_OPCODE_ORR
23405 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
)
23406 && !((newval
>> T2_SBIT_SHIFT
) & 0x1)
23407 && value
>= 0 && value
<=0xffff)
23409 /* Toggle bit[25] to change encoding from T2 to T3. */
23411 /* Clear bits[19:16]. */
23412 newval
&= 0xfff0ffff;
23413 /* Encoding high 4bits imm. Code below will encode the
23414 remaining low 12bits. */
23415 newval
|= (value
& 0x0000f000) << 4;
23416 newimm
= value
& 0x00000fff;
23421 if (newimm
== (unsigned int)FAIL
)
23423 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23424 _("invalid constant (%lx) after fixup"),
23425 (unsigned long) value
);
23429 newval
|= (newimm
& 0x800) << 15;
23430 newval
|= (newimm
& 0x700) << 4;
23431 newval
|= (newimm
& 0x0ff);
23433 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
23434 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
23437 case BFD_RELOC_ARM_SMC
:
23438 if (((unsigned long) value
) > 0xffff)
23439 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23440 _("invalid smc expression"));
23441 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23442 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
23443 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23446 case BFD_RELOC_ARM_HVC
:
23447 if (((unsigned long) value
) > 0xffff)
23448 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23449 _("invalid hvc expression"));
23450 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23451 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
23452 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23455 case BFD_RELOC_ARM_SWI
:
23456 if (fixP
->tc_fix_data
!= 0)
23458 if (((unsigned long) value
) > 0xff)
23459 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23460 _("invalid swi expression"));
23461 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23463 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23467 if (((unsigned long) value
) > 0x00ffffff)
23468 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23469 _("invalid swi expression"));
23470 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23472 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23476 case BFD_RELOC_ARM_MULTI
:
23477 if (((unsigned long) value
) > 0xffff)
23478 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23479 _("invalid expression in load/store multiple"));
23480 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
23481 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23485 case BFD_RELOC_ARM_PCREL_CALL
:
23487 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23489 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23490 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23491 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23492 /* Flip the bl to blx. This is a simple flip
23493 bit here because we generate PCREL_CALL for
23494 unconditional bls. */
23496 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23497 newval
= newval
| 0x10000000;
23498 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23504 goto arm_branch_common
;
23506 case BFD_RELOC_ARM_PCREL_JUMP
:
23507 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23509 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23510 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23511 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23513 /* This would map to a bl<cond>, b<cond>,
23514 b<always> to a Thumb function. We
23515 need to force a relocation for this particular
23517 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23520 /* Fall through. */
23522 case BFD_RELOC_ARM_PLT32
:
23524 case BFD_RELOC_ARM_PCREL_BRANCH
:
23526 goto arm_branch_common
;
23528 case BFD_RELOC_ARM_PCREL_BLX
:
23531 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
23533 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23534 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23535 && ARM_IS_FUNC (fixP
->fx_addsy
))
23537 /* Flip the blx to a bl and warn. */
23538 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
23539 newval
= 0xeb000000;
23540 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
23541 _("blx to '%s' an ARM ISA state function changed to bl"),
23543 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23549 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
23550 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
23554 /* We are going to store value (shifted right by two) in the
23555 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23556 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
23559 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23560 _("misaligned branch destination"));
23561 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
23562 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
23563 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23565 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23567 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23568 newval
|= (value
>> 2) & 0x00ffffff;
23569 /* Set the H bit on BLX instructions. */
23573 newval
|= 0x01000000;
23575 newval
&= ~0x01000000;
23577 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23581 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
23582 /* CBZ can only branch forward. */
23584 /* Attempts to use CBZ to branch to the next instruction
23585 (which, strictly speaking, are prohibited) will be turned into
23588 FIXME: It may be better to remove the instruction completely and
23589 perform relaxation. */
23592 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23593 newval
= 0xbf00; /* NOP encoding T1 */
23594 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23599 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23601 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23603 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23604 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
23605 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23610 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
23611 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
23612 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23614 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23616 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23617 newval
|= (value
& 0x1ff) >> 1;
23618 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23622 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
23623 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
23624 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23626 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23628 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23629 newval
|= (value
& 0xfff) >> 1;
23630 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23634 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23636 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23637 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23638 && ARM_IS_FUNC (fixP
->fx_addsy
)
23639 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23641 /* Force a relocation for a branch 20 bits wide. */
23644 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
23645 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23646 _("conditional branch out of range"));
23648 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23651 addressT S
, J1
, J2
, lo
, hi
;
23653 S
= (value
& 0x00100000) >> 20;
23654 J2
= (value
& 0x00080000) >> 19;
23655 J1
= (value
& 0x00040000) >> 18;
23656 hi
= (value
& 0x0003f000) >> 12;
23657 lo
= (value
& 0x00000ffe) >> 1;
23659 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23660 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23661 newval
|= (S
<< 10) | hi
;
23662 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
23663 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23664 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
23668 case BFD_RELOC_THUMB_PCREL_BLX
:
23669 /* If there is a blx from a thumb state function to
23670 another thumb function flip this to a bl and warn
23674 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23675 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23676 && THUMB_IS_FUNC (fixP
->fx_addsy
))
23678 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
23679 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
23680 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23682 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23683 newval
= newval
| 0x1000;
23684 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
23685 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23690 goto thumb_bl_common
;
23692 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23693 /* A bl from Thumb state ISA to an internal ARM state function
23694 is converted to a blx. */
23696 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23697 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23698 && ARM_IS_FUNC (fixP
->fx_addsy
)
23699 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23701 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23702 newval
= newval
& ~0x1000;
23703 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
23704 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
23710 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
23711 /* For a BLX instruction, make sure that the relocation is rounded up
23712 to a word boundary. This follows the semantics of the instruction
23713 which specifies that bit 1 of the target address will come from bit
23714 1 of the base address. */
23715 value
= (value
+ 3) & ~ 3;
23718 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
23719 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
23720 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
23723 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
23725 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
23726 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23727 else if ((value
& ~0x1ffffff)
23728 && ((value
& ~0x1ffffff) != ~0x1ffffff))
23729 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23730 _("Thumb2 branch out of range"));
23733 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23734 encode_thumb2_b_bl_offset (buf
, value
);
23738 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23739 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
23740 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
23742 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23743 encode_thumb2_b_bl_offset (buf
, value
);
23748 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23753 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23754 md_number_to_chars (buf
, value
, 2);
23758 case BFD_RELOC_ARM_TLS_CALL
:
23759 case BFD_RELOC_ARM_THM_TLS_CALL
:
23760 case BFD_RELOC_ARM_TLS_DESCSEQ
:
23761 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
23762 case BFD_RELOC_ARM_TLS_GOTDESC
:
23763 case BFD_RELOC_ARM_TLS_GD32
:
23764 case BFD_RELOC_ARM_TLS_LE32
:
23765 case BFD_RELOC_ARM_TLS_IE32
:
23766 case BFD_RELOC_ARM_TLS_LDM32
:
23767 case BFD_RELOC_ARM_TLS_LDO32
:
23768 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
23771 case BFD_RELOC_ARM_GOT32
:
23772 case BFD_RELOC_ARM_GOTOFF
:
23775 case BFD_RELOC_ARM_GOT_PREL
:
23776 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23777 md_number_to_chars (buf
, value
, 4);
23780 case BFD_RELOC_ARM_TARGET2
:
23781 /* TARGET2 is not partial-inplace, so we need to write the
23782 addend here for REL targets, because it won't be written out
23783 during reloc processing later. */
23784 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23785 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
23789 case BFD_RELOC_RVA
:
23791 case BFD_RELOC_ARM_TARGET1
:
23792 case BFD_RELOC_ARM_ROSEGREL32
:
23793 case BFD_RELOC_ARM_SBREL32
:
23794 case BFD_RELOC_32_PCREL
:
23796 case BFD_RELOC_32_SECREL
:
23798 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23800 /* For WinCE we only do this for pcrel fixups. */
23801 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
23803 md_number_to_chars (buf
, value
, 4);
23807 case BFD_RELOC_ARM_PREL31
:
23808 if (fixP
->fx_done
|| !seg
->use_rela_p
)
23810 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
23811 if ((value
^ (value
>> 1)) & 0x40000000)
23813 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23814 _("rel31 relocation overflow"));
23816 newval
|= value
& 0x7fffffff;
23817 md_number_to_chars (buf
, newval
, 4);
23822 case BFD_RELOC_ARM_CP_OFF_IMM
:
23823 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
23824 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
)
23825 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23827 newval
= get_thumb32_insn (buf
);
23828 if ((newval
& 0x0f200f00) == 0x0d000900)
23830 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
23831 has permitted values that are multiples of 2, in the range 0
23833 if (value
< -510 || value
> 510 || (value
& 1))
23834 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23835 _("co-processor offset out of range"));
23837 else if (value
< -1023 || value
> 1023 || (value
& 3))
23838 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23839 _("co-processor offset out of range"));
23844 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23845 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23846 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23848 newval
= get_thumb32_insn (buf
);
23850 newval
&= 0xffffff00;
23853 newval
&= 0xff7fff00;
23854 if ((newval
& 0x0f200f00) == 0x0d000900)
23856 /* This is a fp16 vstr/vldr.
23858 It requires the immediate offset in the instruction is shifted
23859 left by 1 to be a half-word offset.
23861 Here, left shift by 1 first, and later right shift by 2
23862 should get the right offset. */
23865 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
23867 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
23868 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
23869 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23871 put_thumb32_insn (buf
, newval
);
23874 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
23875 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
23876 if (value
< -255 || value
> 255)
23877 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23878 _("co-processor offset out of range"));
23880 goto cp_off_common
;
23882 case BFD_RELOC_ARM_THUMB_OFFSET
:
23883 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23884 /* Exactly what ranges, and where the offset is inserted depends
23885 on the type of instruction, we can establish this from the
23887 switch (newval
>> 12)
23889 case 4: /* PC load. */
23890 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23891 forced to zero for these loads; md_pcrel_from has already
23892 compensated for this. */
23894 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23895 _("invalid offset, target not word aligned (0x%08lX)"),
23896 (((unsigned long) fixP
->fx_frag
->fr_address
23897 + (unsigned long) fixP
->fx_where
) & ~3)
23898 + (unsigned long) value
);
23900 if (value
& ~0x3fc)
23901 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23902 _("invalid offset, value too big (0x%08lX)"),
23905 newval
|= value
>> 2;
23908 case 9: /* SP load/store. */
23909 if (value
& ~0x3fc)
23910 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23911 _("invalid offset, value too big (0x%08lX)"),
23913 newval
|= value
>> 2;
23916 case 6: /* Word load/store. */
23918 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23919 _("invalid offset, value too big (0x%08lX)"),
23921 newval
|= value
<< 4; /* 6 - 2. */
23924 case 7: /* Byte load/store. */
23926 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23927 _("invalid offset, value too big (0x%08lX)"),
23929 newval
|= value
<< 6;
23932 case 8: /* Halfword load/store. */
23934 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23935 _("invalid offset, value too big (0x%08lX)"),
23937 newval
|= value
<< 5; /* 6 - 1. */
23941 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23942 "Unable to process relocation for thumb opcode: %lx",
23943 (unsigned long) newval
);
23946 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23949 case BFD_RELOC_ARM_THUMB_ADD
:
23950 /* This is a complicated relocation, since we use it for all of
23951 the following immediate relocations:
23955 9bit ADD/SUB SP word-aligned
23956 10bit ADD PC/SP word-aligned
23958 The type of instruction being processed is encoded in the
23965 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23967 int rd
= (newval
>> 4) & 0xf;
23968 int rs
= newval
& 0xf;
23969 int subtract
= !!(newval
& 0x8000);
23971 /* Check for HI regs, only very restricted cases allowed:
23972 Adjusting SP, and using PC or SP to get an address. */
23973 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
23974 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
23975 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23976 _("invalid Hi register with immediate"));
23978 /* If value is negative, choose the opposite instruction. */
23982 subtract
= !subtract
;
23984 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23985 _("immediate value out of range"));
23990 if (value
& ~0x1fc)
23991 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23992 _("invalid immediate for stack address calculation"));
23993 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
23994 newval
|= value
>> 2;
23996 else if (rs
== REG_PC
|| rs
== REG_SP
)
23998 /* PR gas/18541. If the addition is for a defined symbol
23999 within range of an ADR instruction then accept it. */
24002 && fixP
->fx_addsy
!= NULL
)
24006 if (! S_IS_DEFINED (fixP
->fx_addsy
)
24007 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
24008 || S_IS_WEAK (fixP
->fx_addsy
))
24010 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24011 _("address calculation needs a strongly defined nearby symbol"));
24015 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24017 /* Round up to the next 4-byte boundary. */
24022 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
24026 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24027 _("symbol too far away"));
24037 if (subtract
|| value
& ~0x3fc)
24038 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24039 _("invalid immediate for address calculation (value = 0x%08lX)"),
24040 (unsigned long) (subtract
? - value
: value
));
24041 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
24043 newval
|= value
>> 2;
24048 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24049 _("immediate value out of range"));
24050 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
24051 newval
|= (rd
<< 8) | value
;
24056 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24057 _("immediate value out of range"));
24058 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
24059 newval
|= rd
| (rs
<< 3) | (value
<< 6);
24062 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24065 case BFD_RELOC_ARM_THUMB_IMM
:
24066 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24067 if (value
< 0 || value
> 255)
24068 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24069 _("invalid immediate: %ld is out of range"),
24072 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24075 case BFD_RELOC_ARM_THUMB_SHIFT
:
24076 /* 5bit shift value (0..32). LSL cannot take 32. */
24077 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
24078 temp
= newval
& 0xf800;
24079 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
24080 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24081 _("invalid shift value: %ld"), (long) value
);
24082 /* Shifts of zero must be encoded as LSL. */
24084 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
24085 /* Shifts of 32 are encoded as zero. */
24086 else if (value
== 32)
24088 newval
|= value
<< 6;
24089 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24092 case BFD_RELOC_VTABLE_INHERIT
:
24093 case BFD_RELOC_VTABLE_ENTRY
:
24097 case BFD_RELOC_ARM_MOVW
:
24098 case BFD_RELOC_ARM_MOVT
:
24099 case BFD_RELOC_ARM_THUMB_MOVW
:
24100 case BFD_RELOC_ARM_THUMB_MOVT
:
24101 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24103 /* REL format relocations are limited to a 16-bit addend. */
24104 if (!fixP
->fx_done
)
24106 if (value
< -0x8000 || value
> 0x7fff)
24107 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24108 _("offset out of range"));
24110 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
24111 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
24116 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
24117 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
24119 newval
= get_thumb32_insn (buf
);
24120 newval
&= 0xfbf08f00;
24121 newval
|= (value
& 0xf000) << 4;
24122 newval
|= (value
& 0x0800) << 15;
24123 newval
|= (value
& 0x0700) << 4;
24124 newval
|= (value
& 0x00ff);
24125 put_thumb32_insn (buf
, newval
);
24129 newval
= md_chars_to_number (buf
, 4);
24130 newval
&= 0xfff0f000;
24131 newval
|= value
& 0x0fff;
24132 newval
|= (value
& 0xf000) << 4;
24133 md_number_to_chars (buf
, newval
, 4);
24138 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
24139 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
24140 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
24141 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
24142 gas_assert (!fixP
->fx_done
);
24145 bfd_boolean is_mov
;
24146 bfd_vma encoded_addend
= value
;
24148 /* Check that addend can be encoded in instruction. */
24149 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
24150 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24151 _("the offset 0x%08lX is not representable"),
24152 (unsigned long) encoded_addend
);
24154 /* Extract the instruction. */
24155 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
24156 is_mov
= (insn
& 0xf800) == 0x2000;
24161 if (!seg
->use_rela_p
)
24162 insn
|= encoded_addend
;
24168 /* Extract the instruction. */
24169 /* Encoding is the following
24174 /* The following conditions must be true :
24179 rd
= (insn
>> 4) & 0xf;
24181 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
24182 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24183 _("Unable to process relocation for thumb opcode: %lx"),
24184 (unsigned long) insn
);
24186 /* Encode as ADD immediate8 thumb 1 code. */
24187 insn
= 0x3000 | (rd
<< 8);
24189 /* Place the encoded addend into the first 8 bits of the
24191 if (!seg
->use_rela_p
)
24192 insn
|= encoded_addend
;
24195 /* Update the instruction. */
24196 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
24200 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
24201 case BFD_RELOC_ARM_ALU_PC_G0
:
24202 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
24203 case BFD_RELOC_ARM_ALU_PC_G1
:
24204 case BFD_RELOC_ARM_ALU_PC_G2
:
24205 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
24206 case BFD_RELOC_ARM_ALU_SB_G0
:
24207 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
24208 case BFD_RELOC_ARM_ALU_SB_G1
:
24209 case BFD_RELOC_ARM_ALU_SB_G2
:
24210 gas_assert (!fixP
->fx_done
);
24211 if (!seg
->use_rela_p
)
24214 bfd_vma encoded_addend
;
24215 bfd_vma addend_abs
= abs (value
);
24217 /* Check that the absolute value of the addend can be
24218 expressed as an 8-bit constant plus a rotation. */
24219 encoded_addend
= encode_arm_immediate (addend_abs
);
24220 if (encoded_addend
== (unsigned int) FAIL
)
24221 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24222 _("the offset 0x%08lX is not representable"),
24223 (unsigned long) addend_abs
);
24225 /* Extract the instruction. */
24226 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24228 /* If the addend is positive, use an ADD instruction.
24229 Otherwise use a SUB. Take care not to destroy the S bit. */
24230 insn
&= 0xff1fffff;
24236 /* Place the encoded addend into the first 12 bits of the
24238 insn
&= 0xfffff000;
24239 insn
|= encoded_addend
;
24241 /* Update the instruction. */
24242 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24246 case BFD_RELOC_ARM_LDR_PC_G0
:
24247 case BFD_RELOC_ARM_LDR_PC_G1
:
24248 case BFD_RELOC_ARM_LDR_PC_G2
:
24249 case BFD_RELOC_ARM_LDR_SB_G0
:
24250 case BFD_RELOC_ARM_LDR_SB_G1
:
24251 case BFD_RELOC_ARM_LDR_SB_G2
:
24252 gas_assert (!fixP
->fx_done
);
24253 if (!seg
->use_rela_p
)
24256 bfd_vma addend_abs
= abs (value
);
24258 /* Check that the absolute value of the addend can be
24259 encoded in 12 bits. */
24260 if (addend_abs
>= 0x1000)
24261 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24262 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
24263 (unsigned long) addend_abs
);
24265 /* Extract the instruction. */
24266 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24268 /* If the addend is negative, clear bit 23 of the instruction.
24269 Otherwise set it. */
24271 insn
&= ~(1 << 23);
24275 /* Place the absolute value of the addend into the first 12 bits
24276 of the instruction. */
24277 insn
&= 0xfffff000;
24278 insn
|= addend_abs
;
24280 /* Update the instruction. */
24281 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24285 case BFD_RELOC_ARM_LDRS_PC_G0
:
24286 case BFD_RELOC_ARM_LDRS_PC_G1
:
24287 case BFD_RELOC_ARM_LDRS_PC_G2
:
24288 case BFD_RELOC_ARM_LDRS_SB_G0
:
24289 case BFD_RELOC_ARM_LDRS_SB_G1
:
24290 case BFD_RELOC_ARM_LDRS_SB_G2
:
24291 gas_assert (!fixP
->fx_done
);
24292 if (!seg
->use_rela_p
)
24295 bfd_vma addend_abs
= abs (value
);
24297 /* Check that the absolute value of the addend can be
24298 encoded in 8 bits. */
24299 if (addend_abs
>= 0x100)
24300 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24301 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
24302 (unsigned long) addend_abs
);
24304 /* Extract the instruction. */
24305 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24307 /* If the addend is negative, clear bit 23 of the instruction.
24308 Otherwise set it. */
24310 insn
&= ~(1 << 23);
24314 /* Place the first four bits of the absolute value of the addend
24315 into the first 4 bits of the instruction, and the remaining
24316 four into bits 8 .. 11. */
24317 insn
&= 0xfffff0f0;
24318 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
24320 /* Update the instruction. */
24321 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24325 case BFD_RELOC_ARM_LDC_PC_G0
:
24326 case BFD_RELOC_ARM_LDC_PC_G1
:
24327 case BFD_RELOC_ARM_LDC_PC_G2
:
24328 case BFD_RELOC_ARM_LDC_SB_G0
:
24329 case BFD_RELOC_ARM_LDC_SB_G1
:
24330 case BFD_RELOC_ARM_LDC_SB_G2
:
24331 gas_assert (!fixP
->fx_done
);
24332 if (!seg
->use_rela_p
)
24335 bfd_vma addend_abs
= abs (value
);
24337 /* Check that the absolute value of the addend is a multiple of
24338 four and, when divided by four, fits in 8 bits. */
24339 if (addend_abs
& 0x3)
24340 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24341 _("bad offset 0x%08lX (must be word-aligned)"),
24342 (unsigned long) addend_abs
);
24344 if ((addend_abs
>> 2) > 0xff)
24345 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24346 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24347 (unsigned long) addend_abs
);
24349 /* Extract the instruction. */
24350 insn
= md_chars_to_number (buf
, INSN_SIZE
);
24352 /* If the addend is negative, clear bit 23 of the instruction.
24353 Otherwise set it. */
24355 insn
&= ~(1 << 23);
24359 /* Place the addend (divided by four) into the first eight
24360 bits of the instruction. */
24361 insn
&= 0xfffffff0;
24362 insn
|= addend_abs
>> 2;
24364 /* Update the instruction. */
24365 md_number_to_chars (buf
, insn
, INSN_SIZE
);
24369 case BFD_RELOC_ARM_V4BX
:
24370 /* This will need to go in the object file. */
24374 case BFD_RELOC_UNUSED
:
24376 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24377 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
24381 /* Translate internal representation of relocation info to BFD target
24385 tc_gen_reloc (asection
*section
, fixS
*fixp
)
24388 bfd_reloc_code_real_type code
;
24390 reloc
= XNEW (arelent
);
24392 reloc
->sym_ptr_ptr
= XNEW (asymbol
*);
24393 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
24394 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
24396 if (fixp
->fx_pcrel
)
24398 if (section
->use_rela_p
)
24399 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
24401 fixp
->fx_offset
= reloc
->address
;
24403 reloc
->addend
= fixp
->fx_offset
;
24405 switch (fixp
->fx_r_type
)
24408 if (fixp
->fx_pcrel
)
24410 code
= BFD_RELOC_8_PCREL
;
24413 /* Fall through. */
24416 if (fixp
->fx_pcrel
)
24418 code
= BFD_RELOC_16_PCREL
;
24421 /* Fall through. */
24424 if (fixp
->fx_pcrel
)
24426 code
= BFD_RELOC_32_PCREL
;
24429 /* Fall through. */
24431 case BFD_RELOC_ARM_MOVW
:
24432 if (fixp
->fx_pcrel
)
24434 code
= BFD_RELOC_ARM_MOVW_PCREL
;
24437 /* Fall through. */
24439 case BFD_RELOC_ARM_MOVT
:
24440 if (fixp
->fx_pcrel
)
24442 code
= BFD_RELOC_ARM_MOVT_PCREL
;
24445 /* Fall through. */
24447 case BFD_RELOC_ARM_THUMB_MOVW
:
24448 if (fixp
->fx_pcrel
)
24450 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
24453 /* Fall through. */
24455 case BFD_RELOC_ARM_THUMB_MOVT
:
24456 if (fixp
->fx_pcrel
)
24458 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
24461 /* Fall through. */
24463 case BFD_RELOC_NONE
:
24464 case BFD_RELOC_ARM_PCREL_BRANCH
:
24465 case BFD_RELOC_ARM_PCREL_BLX
:
24466 case BFD_RELOC_RVA
:
24467 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
24468 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
24469 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
24470 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24471 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24472 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24473 case BFD_RELOC_VTABLE_ENTRY
:
24474 case BFD_RELOC_VTABLE_INHERIT
:
24476 case BFD_RELOC_32_SECREL
:
24478 code
= fixp
->fx_r_type
;
24481 case BFD_RELOC_THUMB_PCREL_BLX
:
24483 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
24484 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
24487 code
= BFD_RELOC_THUMB_PCREL_BLX
;
24490 case BFD_RELOC_ARM_LITERAL
:
24491 case BFD_RELOC_ARM_HWLITERAL
:
24492 /* If this is called then the a literal has
24493 been referenced across a section boundary. */
24494 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24495 _("literal referenced across section boundary"));
24499 case BFD_RELOC_ARM_TLS_CALL
:
24500 case BFD_RELOC_ARM_THM_TLS_CALL
:
24501 case BFD_RELOC_ARM_TLS_DESCSEQ
:
24502 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
24503 case BFD_RELOC_ARM_GOT32
:
24504 case BFD_RELOC_ARM_GOTOFF
:
24505 case BFD_RELOC_ARM_GOT_PREL
:
24506 case BFD_RELOC_ARM_PLT32
:
24507 case BFD_RELOC_ARM_TARGET1
:
24508 case BFD_RELOC_ARM_ROSEGREL32
:
24509 case BFD_RELOC_ARM_SBREL32
:
24510 case BFD_RELOC_ARM_PREL31
:
24511 case BFD_RELOC_ARM_TARGET2
:
24512 case BFD_RELOC_ARM_TLS_LDO32
:
24513 case BFD_RELOC_ARM_PCREL_CALL
:
24514 case BFD_RELOC_ARM_PCREL_JUMP
:
24515 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
24516 case BFD_RELOC_ARM_ALU_PC_G0
:
24517 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
24518 case BFD_RELOC_ARM_ALU_PC_G1
:
24519 case BFD_RELOC_ARM_ALU_PC_G2
:
24520 case BFD_RELOC_ARM_LDR_PC_G0
:
24521 case BFD_RELOC_ARM_LDR_PC_G1
:
24522 case BFD_RELOC_ARM_LDR_PC_G2
:
24523 case BFD_RELOC_ARM_LDRS_PC_G0
:
24524 case BFD_RELOC_ARM_LDRS_PC_G1
:
24525 case BFD_RELOC_ARM_LDRS_PC_G2
:
24526 case BFD_RELOC_ARM_LDC_PC_G0
:
24527 case BFD_RELOC_ARM_LDC_PC_G1
:
24528 case BFD_RELOC_ARM_LDC_PC_G2
:
24529 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
24530 case BFD_RELOC_ARM_ALU_SB_G0
:
24531 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
24532 case BFD_RELOC_ARM_ALU_SB_G1
:
24533 case BFD_RELOC_ARM_ALU_SB_G2
:
24534 case BFD_RELOC_ARM_LDR_SB_G0
:
24535 case BFD_RELOC_ARM_LDR_SB_G1
:
24536 case BFD_RELOC_ARM_LDR_SB_G2
:
24537 case BFD_RELOC_ARM_LDRS_SB_G0
:
24538 case BFD_RELOC_ARM_LDRS_SB_G1
:
24539 case BFD_RELOC_ARM_LDRS_SB_G2
:
24540 case BFD_RELOC_ARM_LDC_SB_G0
:
24541 case BFD_RELOC_ARM_LDC_SB_G1
:
24542 case BFD_RELOC_ARM_LDC_SB_G2
:
24543 case BFD_RELOC_ARM_V4BX
:
24544 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
24545 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
24546 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
24547 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
24548 code
= fixp
->fx_r_type
;
24551 case BFD_RELOC_ARM_TLS_GOTDESC
:
24552 case BFD_RELOC_ARM_TLS_GD32
:
24553 case BFD_RELOC_ARM_TLS_LE32
:
24554 case BFD_RELOC_ARM_TLS_IE32
:
24555 case BFD_RELOC_ARM_TLS_LDM32
:
24556 /* BFD will include the symbol's address in the addend.
24557 But we don't want that, so subtract it out again here. */
24558 if (!S_IS_COMMON (fixp
->fx_addsy
))
24559 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
24560 code
= fixp
->fx_r_type
;
24564 case BFD_RELOC_ARM_IMMEDIATE
:
24565 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24566 _("internal relocation (type: IMMEDIATE) not fixed up"));
24569 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
24570 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24571 _("ADRL used for a symbol not defined in the same file"));
24574 case BFD_RELOC_ARM_OFFSET_IMM
:
24575 if (section
->use_rela_p
)
24577 code
= fixp
->fx_r_type
;
24581 if (fixp
->fx_addsy
!= NULL
24582 && !S_IS_DEFINED (fixp
->fx_addsy
)
24583 && S_IS_LOCAL (fixp
->fx_addsy
))
24585 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24586 _("undefined local label `%s'"),
24587 S_GET_NAME (fixp
->fx_addsy
));
24591 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24592 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
24599 switch (fixp
->fx_r_type
)
24601 case BFD_RELOC_NONE
: type
= "NONE"; break;
24602 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
24603 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
24604 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
24605 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
24606 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
24607 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
24608 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
24609 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
24610 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
24611 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
24612 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
24613 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
24614 default: type
= _("<unknown>"); break;
24616 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24617 _("cannot represent %s relocation in this object file format"),
24624 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
24626 && fixp
->fx_addsy
== GOT_symbol
)
24628 code
= BFD_RELOC_ARM_GOTPC
;
24629 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
24633 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
24635 if (reloc
->howto
== NULL
)
24637 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
24638 _("cannot represent %s relocation in this object file format"),
24639 bfd_get_reloc_code_name (code
));
24643 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
24644 vtable entry to be used in the relocation's section offset. */
24645 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
24646 reloc
->address
= fixp
->fx_offset
;
24651 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24654 cons_fix_new_arm (fragS
* frag
,
24658 bfd_reloc_code_real_type reloc
)
24663 FIXME: @@ Should look at CPU word size. */
24667 reloc
= BFD_RELOC_8
;
24670 reloc
= BFD_RELOC_16
;
24674 reloc
= BFD_RELOC_32
;
24677 reloc
= BFD_RELOC_64
;
24682 if (exp
->X_op
== O_secrel
)
24684 exp
->X_op
= O_symbol
;
24685 reloc
= BFD_RELOC_32_SECREL
;
24689 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
24692 #if defined (OBJ_COFF)
24694 arm_validate_fix (fixS
* fixP
)
24696 /* If the destination of the branch is a defined symbol which does not have
24697 the THUMB_FUNC attribute, then we must be calling a function which has
24698 the (interfacearm) attribute. We look for the Thumb entry point to that
24699 function and change the branch to refer to that function instead. */
24700 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
24701 && fixP
->fx_addsy
!= NULL
24702 && S_IS_DEFINED (fixP
->fx_addsy
)
24703 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
24705 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
24712 arm_force_relocation (struct fix
* fixp
)
24714 #if defined (OBJ_COFF) && defined (TE_PE)
24715 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
24719 /* In case we have a call or a branch to a function in ARM ISA mode from
24720 a thumb function or vice-versa force the relocation. These relocations
24721 are cleared off for some cores that might have blx and simple transformations
24725 switch (fixp
->fx_r_type
)
24727 case BFD_RELOC_ARM_PCREL_JUMP
:
24728 case BFD_RELOC_ARM_PCREL_CALL
:
24729 case BFD_RELOC_THUMB_PCREL_BLX
:
24730 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
24734 case BFD_RELOC_ARM_PCREL_BLX
:
24735 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24736 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24737 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24738 if (ARM_IS_FUNC (fixp
->fx_addsy
))
24747 /* Resolve these relocations even if the symbol is extern or weak.
24748 Technically this is probably wrong due to symbol preemption.
24749 In practice these relocations do not have enough range to be useful
24750 at dynamic link time, and some code (e.g. in the Linux kernel)
24751 expects these references to be resolved. */
24752 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
24753 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
24754 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
24755 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
24756 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
24757 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
24758 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
24759 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
24760 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
24761 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
24762 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
24763 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
24764 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
24765 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
24768 /* Always leave these relocations for the linker. */
24769 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24770 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24771 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24774 /* Always generate relocations against function symbols. */
24775 if (fixp
->fx_r_type
== BFD_RELOC_32
24777 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
24780 return generic_force_reloc (fixp
);
24783 #if defined (OBJ_ELF) || defined (OBJ_COFF)
24784 /* Relocations against function names must be left unadjusted,
24785 so that the linker can use this information to generate interworking
24786 stubs. The MIPS version of this function
24787 also prevents relocations that are mips-16 specific, but I do not
24788 know why it does this.
24791 There is one other problem that ought to be addressed here, but
24792 which currently is not: Taking the address of a label (rather
24793 than a function) and then later jumping to that address. Such
24794 addresses also ought to have their bottom bit set (assuming that
24795 they reside in Thumb code), but at the moment they will not. */
24798 arm_fix_adjustable (fixS
* fixP
)
24800 if (fixP
->fx_addsy
== NULL
)
24803 /* Preserve relocations against symbols with function type. */
24804 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
24807 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
24808 && fixP
->fx_subsy
== NULL
)
24811 /* We need the symbol name for the VTABLE entries. */
24812 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
24813 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
24816 /* Don't allow symbols to be discarded on GOT related relocs. */
24817 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
24818 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
24819 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
24820 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
24821 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
24822 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
24823 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
24824 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
24825 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GOTDESC
24826 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_CALL
24827 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_CALL
24828 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_DESCSEQ
24829 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_DESCSEQ
24830 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
24833 /* Similarly for group relocations. */
24834 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
24835 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
24836 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
24839 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
24840 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
24841 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
24842 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
24843 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
24844 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
24845 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
24846 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
24847 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
24850 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
24851 offsets, so keep these symbols. */
24852 if (fixP
->fx_r_type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
24853 && fixP
->fx_r_type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
24858 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24862 elf32_arm_target_format (void)
24865 return (target_big_endian
24866 ? "elf32-bigarm-symbian"
24867 : "elf32-littlearm-symbian");
24868 #elif defined (TE_VXWORKS)
24869 return (target_big_endian
24870 ? "elf32-bigarm-vxworks"
24871 : "elf32-littlearm-vxworks");
24872 #elif defined (TE_NACL)
24873 return (target_big_endian
24874 ? "elf32-bigarm-nacl"
24875 : "elf32-littlearm-nacl");
24877 if (target_big_endian
)
24878 return "elf32-bigarm";
24880 return "elf32-littlearm";
24885 armelf_frob_symbol (symbolS
* symp
,
24888 elf_frob_symbol (symp
, puntp
);
24892 /* MD interface: Finalization. */
24897 literal_pool
* pool
;
24899 /* Ensure that all the IT blocks are properly closed. */
24900 check_it_blocks_finished ();
24902 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
24904 /* Put it at the end of the relevant section. */
24905 subseg_set (pool
->section
, pool
->sub_section
);
24907 arm_elf_change_section ();
24914 /* Remove any excess mapping symbols generated for alignment frags in
24915 SEC. We may have created a mapping symbol before a zero byte
24916 alignment; remove it if there's a mapping symbol after the
24919 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
24920 void *dummy ATTRIBUTE_UNUSED
)
24922 segment_info_type
*seginfo
= seg_info (sec
);
24925 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
24928 for (fragp
= seginfo
->frchainP
->frch_root
;
24930 fragp
= fragp
->fr_next
)
24932 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
24933 fragS
*next
= fragp
->fr_next
;
24935 /* Variable-sized frags have been converted to fixed size by
24936 this point. But if this was variable-sized to start with,
24937 there will be a fixed-size frag after it. So don't handle
24939 if (sym
== NULL
|| next
== NULL
)
24942 if (S_GET_VALUE (sym
) < next
->fr_address
)
24943 /* Not at the end of this frag. */
24945 know (S_GET_VALUE (sym
) == next
->fr_address
);
24949 if (next
->tc_frag_data
.first_map
!= NULL
)
24951 /* Next frag starts with a mapping symbol. Discard this
24953 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24957 if (next
->fr_next
== NULL
)
24959 /* This mapping symbol is at the end of the section. Discard
24961 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
24962 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
24966 /* As long as we have empty frags without any mapping symbols,
24968 /* If the next frag is non-empty and does not start with a
24969 mapping symbol, then this mapping symbol is required. */
24970 if (next
->fr_address
!= next
->fr_next
->fr_address
)
24973 next
= next
->fr_next
;
24975 while (next
!= NULL
);
24980 /* Adjust the symbol table. This marks Thumb symbols as distinct from
24984 arm_adjust_symtab (void)
24989 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
24991 if (ARM_IS_THUMB (sym
))
24993 if (THUMB_IS_FUNC (sym
))
24995 /* Mark the symbol as a Thumb function. */
24996 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
24997 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
24998 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
25000 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
25001 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
25003 as_bad (_("%s: unexpected function type: %d"),
25004 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
25006 else switch (S_GET_STORAGE_CLASS (sym
))
25009 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
25012 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
25015 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
25023 if (ARM_IS_INTERWORK (sym
))
25024 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
25031 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
25033 if (ARM_IS_THUMB (sym
))
25035 elf_symbol_type
* elf_sym
;
25037 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
25038 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
25040 if (! bfd_is_arm_special_symbol_name (elf_sym
->symbol
.name
,
25041 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
25043 /* If it's a .thumb_func, declare it as so,
25044 otherwise tag label as .code 16. */
25045 if (THUMB_IS_FUNC (sym
))
25046 ARM_SET_SYM_BRANCH_TYPE (elf_sym
->internal_elf_sym
.st_target_internal
,
25047 ST_BRANCH_TO_THUMB
);
25048 else if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
25049 elf_sym
->internal_elf_sym
.st_info
=
25050 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
25055 /* Remove any overlapping mapping symbols generated by alignment frags. */
25056 bfd_map_over_sections (stdoutput
, check_mapping_symbols
, (char *) 0);
25057 /* Now do generic ELF adjustments. */
25058 elf_adjust_symtab ();
25062 /* MD interface: Initialization. */
25065 set_constant_flonums (void)
25069 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
25070 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
25074 /* Auto-select Thumb mode if it's the only available instruction set for the
25075 given architecture. */
25078 autoselect_thumb_from_cpu_variant (void)
25080 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
25081 opcode_select (16);
25090 if ( (arm_ops_hsh
= hash_new ()) == NULL
25091 || (arm_cond_hsh
= hash_new ()) == NULL
25092 || (arm_shift_hsh
= hash_new ()) == NULL
25093 || (arm_psr_hsh
= hash_new ()) == NULL
25094 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
25095 || (arm_reg_hsh
= hash_new ()) == NULL
25096 || (arm_reloc_hsh
= hash_new ()) == NULL
25097 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
25098 as_fatal (_("virtual memory exhausted"));
25100 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
25101 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
25102 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
25103 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
25104 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
25105 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
25106 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
25107 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
25108 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
25109 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
25110 (void *) (v7m_psrs
+ i
));
25111 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
25112 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
25114 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
25116 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
25117 (void *) (barrier_opt_names
+ i
));
25119 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
25121 struct reloc_entry
* entry
= reloc_names
+ i
;
25123 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
25124 /* This makes encode_branch() use the EABI versions of this relocation. */
25125 entry
->reloc
= BFD_RELOC_UNUSED
;
25127 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
25131 set_constant_flonums ();
25133 /* Set the cpu variant based on the command-line options. We prefer
25134 -mcpu= over -march= if both are set (as for GCC); and we prefer
25135 -mfpu= over any other way of setting the floating point unit.
25136 Use of legacy options with new options are faulted. */
25139 if (mcpu_cpu_opt
|| march_cpu_opt
)
25140 as_bad (_("use of old and new-style options to set CPU type"));
25142 mcpu_cpu_opt
= legacy_cpu
;
25144 else if (!mcpu_cpu_opt
)
25146 mcpu_cpu_opt
= march_cpu_opt
;
25147 dyn_mcpu_ext_opt
= dyn_march_ext_opt
;
25148 /* Avoid double free in arm_md_end. */
25149 dyn_march_ext_opt
= NULL
;
25155 as_bad (_("use of old and new-style options to set FPU type"));
25157 mfpu_opt
= legacy_fpu
;
25159 else if (!mfpu_opt
)
25161 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
25162 || defined (TE_NetBSD) || defined (TE_VXWORKS))
25163 /* Some environments specify a default FPU. If they don't, infer it
25164 from the processor. */
25166 mfpu_opt
= mcpu_fpu_opt
;
25168 mfpu_opt
= march_fpu_opt
;
25170 mfpu_opt
= &fpu_default
;
25176 if (mcpu_cpu_opt
!= NULL
)
25177 mfpu_opt
= &fpu_default
;
25178 else if (mcpu_fpu_opt
!= NULL
&& ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
25179 mfpu_opt
= &fpu_arch_vfp_v2
;
25181 mfpu_opt
= &fpu_arch_fpa
;
25187 mcpu_cpu_opt
= &cpu_default
;
25188 selected_cpu
= cpu_default
;
25190 else if (dyn_mcpu_ext_opt
)
25191 ARM_MERGE_FEATURE_SETS (selected_cpu
, *mcpu_cpu_opt
, *dyn_mcpu_ext_opt
);
25193 selected_cpu
= *mcpu_cpu_opt
;
25195 if (mcpu_cpu_opt
&& dyn_mcpu_ext_opt
)
25196 ARM_MERGE_FEATURE_SETS (selected_cpu
, *mcpu_cpu_opt
, *dyn_mcpu_ext_opt
);
25197 else if (mcpu_cpu_opt
)
25198 selected_cpu
= *mcpu_cpu_opt
;
25200 mcpu_cpu_opt
= &arm_arch_any
;
25203 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
25204 if (dyn_mcpu_ext_opt
)
25205 ARM_MERGE_FEATURE_SETS (cpu_variant
, cpu_variant
, *dyn_mcpu_ext_opt
);
25207 autoselect_thumb_from_cpu_variant ();
25209 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
25211 #if defined OBJ_COFF || defined OBJ_ELF
25213 unsigned int flags
= 0;
25215 #if defined OBJ_ELF
25216 flags
= meabi_flags
;
25218 switch (meabi_flags
)
25220 case EF_ARM_EABI_UNKNOWN
:
25222 /* Set the flags in the private structure. */
25223 if (uses_apcs_26
) flags
|= F_APCS26
;
25224 if (support_interwork
) flags
|= F_INTERWORK
;
25225 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
25226 if (pic_code
) flags
|= F_PIC
;
25227 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
25228 flags
|= F_SOFT_FLOAT
;
25230 switch (mfloat_abi_opt
)
25232 case ARM_FLOAT_ABI_SOFT
:
25233 case ARM_FLOAT_ABI_SOFTFP
:
25234 flags
|= F_SOFT_FLOAT
;
25237 case ARM_FLOAT_ABI_HARD
:
25238 if (flags
& F_SOFT_FLOAT
)
25239 as_bad (_("hard-float conflicts with specified fpu"));
25243 /* Using pure-endian doubles (even if soft-float). */
25244 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
25245 flags
|= F_VFP_FLOAT
;
25247 #if defined OBJ_ELF
25248 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
25249 flags
|= EF_ARM_MAVERICK_FLOAT
;
25252 case EF_ARM_EABI_VER4
:
25253 case EF_ARM_EABI_VER5
:
25254 /* No additional flags to set. */
25261 bfd_set_private_flags (stdoutput
, flags
);
25263 /* We have run out flags in the COFF header to encode the
25264 status of ATPCS support, so instead we create a dummy,
25265 empty, debug section called .arm.atpcs. */
25270 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
25274 bfd_set_section_flags
25275 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
25276 bfd_set_section_size (stdoutput
, sec
, 0);
25277 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
25283 /* Record the CPU type as well. */
25284 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
25285 mach
= bfd_mach_arm_iWMMXt2
;
25286 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
25287 mach
= bfd_mach_arm_iWMMXt
;
25288 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
25289 mach
= bfd_mach_arm_XScale
;
25290 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
25291 mach
= bfd_mach_arm_ep9312
;
25292 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
25293 mach
= bfd_mach_arm_5TE
;
25294 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
25296 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
25297 mach
= bfd_mach_arm_5T
;
25299 mach
= bfd_mach_arm_5
;
25301 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
25303 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
25304 mach
= bfd_mach_arm_4T
;
25306 mach
= bfd_mach_arm_4
;
25308 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
25309 mach
= bfd_mach_arm_3M
;
25310 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
25311 mach
= bfd_mach_arm_3
;
25312 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
25313 mach
= bfd_mach_arm_2a
;
25314 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
25315 mach
= bfd_mach_arm_2
;
25317 mach
= bfd_mach_arm_unknown
;
25319 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
25322 /* Command line processing. */
25325 Invocation line includes a switch not recognized by the base assembler.
25326 See if it's a processor-specific option.
25328 This routine is somewhat complicated by the need for backwards
25329 compatibility (since older releases of gcc can't be changed).
25330 The new options try to make the interface as compatible as
25333 New options (supported) are:
25335 -mcpu=<cpu name> Assemble for selected processor
25336 -march=<architecture name> Assemble for selected architecture
25337 -mfpu=<fpu architecture> Assemble for selected FPU.
25338 -EB/-mbig-endian Big-endian
25339 -EL/-mlittle-endian Little-endian
25340 -k Generate PIC code
25341 -mthumb Start in Thumb mode
25342 -mthumb-interwork Code supports ARM/Thumb interworking
25344 -m[no-]warn-deprecated Warn about deprecated features
25345 -m[no-]warn-syms Warn when symbols match instructions
25347 For now we will also provide support for:
25349 -mapcs-32 32-bit Program counter
25350 -mapcs-26 26-bit Program counter
25351 -macps-float Floats passed in FP registers
25352 -mapcs-reentrant Reentrant code
25354 (sometime these will probably be replaced with -mapcs=<list of options>
25355 and -matpcs=<list of options>)
25357 The remaining options are only supported for back-wards compatibility.
25358 Cpu variants, the arm part is optional:
25359 -m[arm]1 Currently not supported.
25360 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
25361 -m[arm]3 Arm 3 processor
25362 -m[arm]6[xx], Arm 6 processors
25363 -m[arm]7[xx][t][[d]m] Arm 7 processors
25364 -m[arm]8[10] Arm 8 processors
25365 -m[arm]9[20][tdmi] Arm 9 processors
25366 -mstrongarm[110[0]] StrongARM processors
25367 -mxscale XScale processors
25368 -m[arm]v[2345[t[e]]] Arm architectures
25369 -mall All (except the ARM1)
25371 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
25372 -mfpe-old (No float load/store multiples)
25373 -mvfpxd VFP Single precision
25375 -mno-fpu Disable all floating point instructions
25377 The following CPU names are recognized:
25378 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25379 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
25380 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
25381 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25382 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
25383 arm10t arm10e, arm1020t, arm1020e, arm10200e,
25384 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
25388 const char * md_shortopts
= "m:k";
25390 #ifdef ARM_BI_ENDIAN
25391 #define OPTION_EB (OPTION_MD_BASE + 0)
25392 #define OPTION_EL (OPTION_MD_BASE + 1)
25394 #if TARGET_BYTES_BIG_ENDIAN
25395 #define OPTION_EB (OPTION_MD_BASE + 0)
25397 #define OPTION_EL (OPTION_MD_BASE + 1)
25400 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
25402 struct option md_longopts
[] =
25405 {"EB", no_argument
, NULL
, OPTION_EB
},
25408 {"EL", no_argument
, NULL
, OPTION_EL
},
25410 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
25411 {NULL
, no_argument
, NULL
, 0}
25414 size_t md_longopts_size
= sizeof (md_longopts
);
25416 struct arm_option_table
25418 const char * option
; /* Option name to match. */
25419 const char * help
; /* Help information. */
25420 int * var
; /* Variable to change. */
25421 int value
; /* What to change it to. */
25422 const char * deprecated
; /* If non-null, print this message. */
25425 struct arm_option_table arm_opts
[] =
25427 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
25428 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
25429 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25430 &support_interwork
, 1, NULL
},
25431 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
25432 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
25433 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
25435 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
25436 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
25437 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
25438 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
25441 /* These are recognized by the assembler, but have no affect on code. */
25442 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
25443 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
25445 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
25446 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25447 &warn_on_deprecated
, 0, NULL
},
25448 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
25449 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
25450 {NULL
, NULL
, NULL
, 0, NULL
}
25453 struct arm_legacy_option_table
25455 const char * option
; /* Option name to match. */
25456 const arm_feature_set
** var
; /* Variable to change. */
25457 const arm_feature_set value
; /* What to change it to. */
25458 const char * deprecated
; /* If non-null, print this message. */
25461 const struct arm_legacy_option_table arm_legacy_opts
[] =
25463 /* DON'T add any new processors to this list -- we want the whole list
25464 to go away... Add them to the processors table instead. */
25465 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
25466 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
25467 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
25468 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
25469 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
25470 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
25471 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
25472 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
25473 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
25474 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
25475 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
25476 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
25477 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
25478 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
25479 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
25480 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
25481 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
25482 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
25483 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
25484 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
25485 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
25486 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
25487 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
25488 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
25489 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
25490 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
25491 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
25492 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
25493 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
25494 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
25495 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
25496 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
25497 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
25498 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
25499 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
25500 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
25501 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
25502 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
25503 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
25504 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
25505 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
25506 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
25507 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
25508 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
25509 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
25510 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
25511 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25512 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25513 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25514 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
25515 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
25516 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
25517 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
25518 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
25519 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
25520 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
25521 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
25522 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
25523 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
25524 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
25525 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
25526 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
25527 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
25528 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
25529 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
25530 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
25531 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
25532 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
25533 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
25534 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
25535 N_("use -mcpu=strongarm110")},
25536 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
25537 N_("use -mcpu=strongarm1100")},
25538 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
25539 N_("use -mcpu=strongarm1110")},
25540 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
25541 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
25542 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
25544 /* Architecture variants -- don't add any more to this list either. */
25545 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
25546 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
25547 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
25548 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
25549 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
25550 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
25551 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
25552 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
25553 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
25554 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
25555 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
25556 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
25557 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
25558 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
25559 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
25560 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
25561 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
25562 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
25564 /* Floating point variants -- don't add any more to this list either. */
25565 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
25566 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
25567 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
25568 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
25569 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
25571 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
25574 struct arm_cpu_option_table
25578 const arm_feature_set value
;
25579 const arm_feature_set ext
;
25580 /* For some CPUs we assume an FPU unless the user explicitly sets
25582 const arm_feature_set default_fpu
;
25583 /* The canonical name of the CPU, or NULL to use NAME converted to upper
25585 const char * canonical_name
;
25588 /* This list should, at a minimum, contain all the cpu names
25589 recognized by GCC. */
25590 #define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
25592 static const struct arm_cpu_option_table arm_cpus
[] =
25594 ARM_CPU_OPT ("all", NULL
, ARM_ANY
,
25597 ARM_CPU_OPT ("arm1", NULL
, ARM_ARCH_V1
,
25600 ARM_CPU_OPT ("arm2", NULL
, ARM_ARCH_V2
,
25603 ARM_CPU_OPT ("arm250", NULL
, ARM_ARCH_V2S
,
25606 ARM_CPU_OPT ("arm3", NULL
, ARM_ARCH_V2S
,
25609 ARM_CPU_OPT ("arm6", NULL
, ARM_ARCH_V3
,
25612 ARM_CPU_OPT ("arm60", NULL
, ARM_ARCH_V3
,
25615 ARM_CPU_OPT ("arm600", NULL
, ARM_ARCH_V3
,
25618 ARM_CPU_OPT ("arm610", NULL
, ARM_ARCH_V3
,
25621 ARM_CPU_OPT ("arm620", NULL
, ARM_ARCH_V3
,
25624 ARM_CPU_OPT ("arm7", NULL
, ARM_ARCH_V3
,
25627 ARM_CPU_OPT ("arm7m", NULL
, ARM_ARCH_V3M
,
25630 ARM_CPU_OPT ("arm7d", NULL
, ARM_ARCH_V3
,
25633 ARM_CPU_OPT ("arm7dm", NULL
, ARM_ARCH_V3M
,
25636 ARM_CPU_OPT ("arm7di", NULL
, ARM_ARCH_V3
,
25639 ARM_CPU_OPT ("arm7dmi", NULL
, ARM_ARCH_V3M
,
25642 ARM_CPU_OPT ("arm70", NULL
, ARM_ARCH_V3
,
25645 ARM_CPU_OPT ("arm700", NULL
, ARM_ARCH_V3
,
25648 ARM_CPU_OPT ("arm700i", NULL
, ARM_ARCH_V3
,
25651 ARM_CPU_OPT ("arm710", NULL
, ARM_ARCH_V3
,
25654 ARM_CPU_OPT ("arm710t", NULL
, ARM_ARCH_V4T
,
25657 ARM_CPU_OPT ("arm720", NULL
, ARM_ARCH_V3
,
25660 ARM_CPU_OPT ("arm720t", NULL
, ARM_ARCH_V4T
,
25663 ARM_CPU_OPT ("arm740t", NULL
, ARM_ARCH_V4T
,
25666 ARM_CPU_OPT ("arm710c", NULL
, ARM_ARCH_V3
,
25669 ARM_CPU_OPT ("arm7100", NULL
, ARM_ARCH_V3
,
25672 ARM_CPU_OPT ("arm7500", NULL
, ARM_ARCH_V3
,
25675 ARM_CPU_OPT ("arm7500fe", NULL
, ARM_ARCH_V3
,
25678 ARM_CPU_OPT ("arm7t", NULL
, ARM_ARCH_V4T
,
25681 ARM_CPU_OPT ("arm7tdmi", NULL
, ARM_ARCH_V4T
,
25684 ARM_CPU_OPT ("arm7tdmi-s", NULL
, ARM_ARCH_V4T
,
25687 ARM_CPU_OPT ("arm8", NULL
, ARM_ARCH_V4
,
25690 ARM_CPU_OPT ("arm810", NULL
, ARM_ARCH_V4
,
25693 ARM_CPU_OPT ("strongarm", NULL
, ARM_ARCH_V4
,
25696 ARM_CPU_OPT ("strongarm1", NULL
, ARM_ARCH_V4
,
25699 ARM_CPU_OPT ("strongarm110", NULL
, ARM_ARCH_V4
,
25702 ARM_CPU_OPT ("strongarm1100", NULL
, ARM_ARCH_V4
,
25705 ARM_CPU_OPT ("strongarm1110", NULL
, ARM_ARCH_V4
,
25708 ARM_CPU_OPT ("arm9", NULL
, ARM_ARCH_V4T
,
25711 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T
,
25714 ARM_CPU_OPT ("arm920t", NULL
, ARM_ARCH_V4T
,
25717 ARM_CPU_OPT ("arm922t", NULL
, ARM_ARCH_V4T
,
25720 ARM_CPU_OPT ("arm940t", NULL
, ARM_ARCH_V4T
,
25723 ARM_CPU_OPT ("arm9tdmi", NULL
, ARM_ARCH_V4T
,
25726 ARM_CPU_OPT ("fa526", NULL
, ARM_ARCH_V4
,
25729 ARM_CPU_OPT ("fa626", NULL
, ARM_ARCH_V4
,
25733 /* For V5 or later processors we default to using VFP; but the user
25734 should really set the FPU type explicitly. */
25735 ARM_CPU_OPT ("arm9e-r0", NULL
, ARM_ARCH_V5TExP
,
25738 ARM_CPU_OPT ("arm9e", NULL
, ARM_ARCH_V5TE
,
25741 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
25744 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
25747 ARM_CPU_OPT ("arm926ej-s", NULL
, ARM_ARCH_V5TEJ
,
25750 ARM_CPU_OPT ("arm946e-r0", NULL
, ARM_ARCH_V5TExP
,
25753 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE
,
25756 ARM_CPU_OPT ("arm946e-s", NULL
, ARM_ARCH_V5TE
,
25759 ARM_CPU_OPT ("arm966e-r0", NULL
, ARM_ARCH_V5TExP
,
25762 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE
,
25765 ARM_CPU_OPT ("arm966e-s", NULL
, ARM_ARCH_V5TE
,
25768 ARM_CPU_OPT ("arm968e-s", NULL
, ARM_ARCH_V5TE
,
25771 ARM_CPU_OPT ("arm10t", NULL
, ARM_ARCH_V5T
,
25774 ARM_CPU_OPT ("arm10tdmi", NULL
, ARM_ARCH_V5T
,
25777 ARM_CPU_OPT ("arm10e", NULL
, ARM_ARCH_V5TE
,
25780 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE
,
25783 ARM_CPU_OPT ("arm1020t", NULL
, ARM_ARCH_V5T
,
25786 ARM_CPU_OPT ("arm1020e", NULL
, ARM_ARCH_V5TE
,
25789 ARM_CPU_OPT ("arm1022e", NULL
, ARM_ARCH_V5TE
,
25792 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ
,
25795 ARM_CPU_OPT ("arm1026ej-s", NULL
, ARM_ARCH_V5TEJ
,
25798 ARM_CPU_OPT ("fa606te", NULL
, ARM_ARCH_V5TE
,
25801 ARM_CPU_OPT ("fa616te", NULL
, ARM_ARCH_V5TE
,
25804 ARM_CPU_OPT ("fa626te", NULL
, ARM_ARCH_V5TE
,
25807 ARM_CPU_OPT ("fmp626", NULL
, ARM_ARCH_V5TE
,
25810 ARM_CPU_OPT ("fa726te", NULL
, ARM_ARCH_V5TE
,
25813 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6
,
25816 ARM_CPU_OPT ("arm1136j-s", NULL
, ARM_ARCH_V6
,
25819 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6
,
25822 ARM_CPU_OPT ("arm1136jf-s", NULL
, ARM_ARCH_V6
,
25825 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K
,
25828 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K
,
25831 ARM_CPU_OPT ("arm1156t2-s", NULL
, ARM_ARCH_V6T2
,
25834 ARM_CPU_OPT ("arm1156t2f-s", NULL
, ARM_ARCH_V6T2
,
25837 ARM_CPU_OPT ("arm1176jz-s", NULL
, ARM_ARCH_V6KZ
,
25840 ARM_CPU_OPT ("arm1176jzf-s", NULL
, ARM_ARCH_V6KZ
,
25843 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A
,
25844 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
25846 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE
,
25848 FPU_ARCH_NEON_VFP_V4
),
25849 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A
,
25850 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
25851 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
25852 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A
,
25853 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
25854 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
25855 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE
,
25857 FPU_ARCH_NEON_VFP_V4
),
25858 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE
,
25860 FPU_ARCH_NEON_VFP_V4
),
25861 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE
,
25863 FPU_ARCH_NEON_VFP_V4
),
25864 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A
,
25865 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25866 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25867 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A
,
25868 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25869 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25870 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A
,
25871 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25872 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25873 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A
,
25874 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
25875 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
25876 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A
,
25877 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25878 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25879 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A
,
25880 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25881 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25882 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A
,
25883 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25884 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25885 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A
,
25886 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
25887 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
25888 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R
,
25891 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R
,
25893 FPU_ARCH_VFP_V3D16
),
25894 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R
,
25895 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
25897 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R
,
25898 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
25899 FPU_ARCH_VFP_V3D16
),
25900 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R
,
25901 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
25902 FPU_ARCH_VFP_V3D16
),
25903 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R
,
25904 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25905 FPU_ARCH_NEON_VFP_ARMV8
),
25906 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN
,
25907 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
25909 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE
,
25912 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM
,
25915 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM
,
25918 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M
,
25921 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM
,
25924 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM
,
25927 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM
,
25930 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A
,
25931 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25932 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25934 /* ??? XSCALE is really an architecture. */
25935 ARM_CPU_OPT ("xscale", NULL
, ARM_ARCH_XSCALE
,
25939 /* ??? iwmmxt is not a processor. */
25940 ARM_CPU_OPT ("iwmmxt", NULL
, ARM_ARCH_IWMMXT
,
25943 ARM_CPU_OPT ("iwmmxt2", NULL
, ARM_ARCH_IWMMXT2
,
25946 ARM_CPU_OPT ("i80200", NULL
, ARM_ARCH_XSCALE
,
25951 ARM_CPU_OPT ("ep9312", "ARM920T",
25952 ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
25953 ARM_ARCH_NONE
, FPU_ARCH_MAVERICK
),
25955 /* Marvell processors. */
25956 ARM_CPU_OPT ("marvell-pj4", NULL
, ARM_ARCH_V7A
,
25957 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
25958 FPU_ARCH_VFP_V3D16
),
25959 ARM_CPU_OPT ("marvell-whitney", NULL
, ARM_ARCH_V7A
,
25960 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
25961 FPU_ARCH_NEON_VFP_V4
),
25963 /* APM X-Gene family. */
25964 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A
,
25966 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25967 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A
,
25968 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
25969 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
25971 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
25975 struct arm_arch_option_table
25979 const arm_feature_set value
;
25980 const arm_feature_set default_fpu
;
25983 /* This list should, at a minimum, contain all the architecture names
25984 recognized by GCC. */
25985 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
25987 static const struct arm_arch_option_table arm_archs
[] =
25989 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
25990 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
25991 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
25992 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
25993 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
25994 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
25995 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
25996 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
25997 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
25998 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
25999 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
26000 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
26001 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
26002 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
26003 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
),
26004 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
),
26005 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
),
26006 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
),
26007 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
),
26008 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
),
26009 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
),
26010 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
26011 kept to preserve existing behaviour. */
26012 ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
26013 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
),
26014 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
),
26015 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
),
26016 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
),
26017 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
26018 kept to preserve existing behaviour. */
26019 ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
26020 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
),
26021 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
26022 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
26023 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
),
26024 /* The official spelling of the ARMv7 profile variants is the dashed form.
26025 Accept the non-dashed form for compatibility with old toolchains. */
26026 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
26027 ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
),
26028 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
26029 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
26030 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
),
26031 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
),
26032 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
26033 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
),
26034 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
26035 ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
),
26036 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
),
26037 ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
),
26038 ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
),
26039 ARM_ARCH_OPT ("armv8.3-a", ARM_ARCH_V8_3A
, FPU_ARCH_VFP
),
26040 ARM_ARCH_OPT ("armv8-r", ARM_ARCH_V8R
, FPU_ARCH_VFP
),
26041 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
26042 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
26043 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP
),
26044 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
26046 #undef ARM_ARCH_OPT
26048 /* ISA extensions in the co-processor and main instruction set space. */
26050 struct arm_option_extension_value_table
26054 const arm_feature_set merge_value
;
26055 const arm_feature_set clear_value
;
26056 /* List of architectures for which an extension is available. ARM_ARCH_NONE
26057 indicates that an extension is available for all architectures while
26058 ARM_ANY marks an empty entry. */
26059 const arm_feature_set allowed_archs
[2];
26062 /* The following table must be in alphabetical order with a NULL last entry. */
26064 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
26065 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
26067 static const struct arm_option_extension_value_table arm_extensions
[] =
26069 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26070 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
26071 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
26072 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
26073 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
26074 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
,
26075 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
),
26077 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
26078 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
26079 ARM_FEATURE_CORE (ARM_EXT_V7M
, ARM_EXT2_V8M
)),
26080 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
26081 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
26082 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
26083 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
26085 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
26086 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
26087 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
26088 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
26089 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
26090 Thumb divide instruction. Due to this having the same name as the
26091 previous entry, this will be ignored when doing command-line parsing and
26092 only considered by build attribute selection code. */
26093 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
26094 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
26095 ARM_FEATURE_CORE_LOW (ARM_EXT_V7
)),
26096 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
26097 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ARCH_NONE
),
26098 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
26099 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ARCH_NONE
),
26100 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
26101 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ARCH_NONE
),
26102 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
26103 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
26104 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
26105 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
26106 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
26107 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
26108 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
26109 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
26110 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
26111 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
26112 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
),
26113 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_RAS
, 0),
26114 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
26115 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1
,
26116 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
26117 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
26118 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
26119 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
26120 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
),
26121 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
26122 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
26123 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
26124 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
26125 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
26127 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
26128 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
26129 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
26130 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ARCH_NONE
),
26131 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, { ARM_ARCH_NONE
, ARM_ARCH_NONE
} }
26135 /* ISA floating-point and Advanced SIMD extensions. */
26136 struct arm_option_fpu_value_table
26139 const arm_feature_set value
;
26142 /* This list should, at a minimum, contain all the fpu names
26143 recognized by GCC. */
26144 static const struct arm_option_fpu_value_table arm_fpus
[] =
26146 {"softfpa", FPU_NONE
},
26147 {"fpe", FPU_ARCH_FPE
},
26148 {"fpe2", FPU_ARCH_FPE
},
26149 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
26150 {"fpa", FPU_ARCH_FPA
},
26151 {"fpa10", FPU_ARCH_FPA
},
26152 {"fpa11", FPU_ARCH_FPA
},
26153 {"arm7500fe", FPU_ARCH_FPA
},
26154 {"softvfp", FPU_ARCH_VFP
},
26155 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
26156 {"vfp", FPU_ARCH_VFP_V2
},
26157 {"vfp9", FPU_ARCH_VFP_V2
},
26158 {"vfp3", FPU_ARCH_VFP_V3
}, /* Undocumented, use vfpv3. */
26159 {"vfp10", FPU_ARCH_VFP_V2
},
26160 {"vfp10-r0", FPU_ARCH_VFP_V1
},
26161 {"vfpxd", FPU_ARCH_VFP_V1xD
},
26162 {"vfpv2", FPU_ARCH_VFP_V2
},
26163 {"vfpv3", FPU_ARCH_VFP_V3
},
26164 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
26165 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
26166 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
26167 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
26168 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
26169 {"arm1020t", FPU_ARCH_VFP_V1
},
26170 {"arm1020e", FPU_ARCH_VFP_V2
},
26171 {"arm1136jfs", FPU_ARCH_VFP_V2
}, /* Undocumented, use arm1136jf-s. */
26172 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
26173 {"maverick", FPU_ARCH_MAVERICK
},
26174 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
26175 {"neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
26176 {"neon-fp16", FPU_ARCH_NEON_FP16
},
26177 {"vfpv4", FPU_ARCH_VFP_V4
},
26178 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
26179 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
26180 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
26181 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
26182 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
26183 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
26184 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
26185 {"crypto-neon-fp-armv8",
26186 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
26187 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
26188 {"crypto-neon-fp-armv8.1",
26189 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
26190 {NULL
, ARM_ARCH_NONE
}
26193 struct arm_option_value_table
26199 static const struct arm_option_value_table arm_float_abis
[] =
26201 {"hard", ARM_FLOAT_ABI_HARD
},
26202 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
26203 {"soft", ARM_FLOAT_ABI_SOFT
},
26208 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
26209 static const struct arm_option_value_table arm_eabis
[] =
26211 {"gnu", EF_ARM_EABI_UNKNOWN
},
26212 {"4", EF_ARM_EABI_VER4
},
26213 {"5", EF_ARM_EABI_VER5
},
26218 struct arm_long_option_table
26220 const char * option
; /* Substring to match. */
26221 const char * help
; /* Help information. */
26222 int (* func
) (const char * subopt
); /* Function to decode sub-option. */
26223 const char * deprecated
; /* If non-null, print this message. */
26227 arm_parse_extension (const char *str
, const arm_feature_set
*opt_set
,
26228 arm_feature_set
**ext_set_p
)
26230 /* We insist on extensions being specified in alphabetical order, and with
26231 extensions being added before being removed. We achieve this by having
26232 the global ARM_EXTENSIONS table in alphabetical order, and using the
26233 ADDING_VALUE variable to indicate whether we are adding an extension (1)
26234 or removing it (0) and only allowing it to change in the order
26236 const struct arm_option_extension_value_table
* opt
= NULL
;
26237 const arm_feature_set arm_any
= ARM_ANY
;
26238 int adding_value
= -1;
26242 *ext_set_p
= XNEW (arm_feature_set
);
26243 **ext_set_p
= arm_arch_none
;
26246 while (str
!= NULL
&& *str
!= 0)
26253 as_bad (_("invalid architectural extension"));
26258 ext
= strchr (str
, '+');
26263 len
= strlen (str
);
26265 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
26267 if (adding_value
!= 0)
26270 opt
= arm_extensions
;
26278 if (adding_value
== -1)
26281 opt
= arm_extensions
;
26283 else if (adding_value
!= 1)
26285 as_bad (_("must specify extensions to add before specifying "
26286 "those to remove"));
26293 as_bad (_("missing architectural extension"));
26297 gas_assert (adding_value
!= -1);
26298 gas_assert (opt
!= NULL
);
26300 /* Scan over the options table trying to find an exact match. */
26301 for (; opt
->name
!= NULL
; opt
++)
26302 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
26304 int i
, nb_allowed_archs
=
26305 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
26306 /* Check we can apply the extension to this architecture. */
26307 for (i
= 0; i
< nb_allowed_archs
; i
++)
26310 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
26312 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *opt_set
))
26315 if (i
== nb_allowed_archs
)
26317 as_bad (_("extension does not apply to the base architecture"));
26321 /* Add or remove the extension. */
26323 ARM_MERGE_FEATURE_SETS (**ext_set_p
, **ext_set_p
,
26326 ARM_CLEAR_FEATURE (**ext_set_p
, **ext_set_p
, opt
->clear_value
);
26328 /* Allowing Thumb division instructions for ARMv7 in autodetection
26329 rely on this break so that duplicate extensions (extensions
26330 with the same name as a previous extension in the list) are not
26331 considered for command-line parsing. */
26335 if (opt
->name
== NULL
)
26337 /* Did we fail to find an extension because it wasn't specified in
26338 alphabetical order, or because it does not exist? */
26340 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
26341 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
26344 if (opt
->name
== NULL
)
26345 as_bad (_("unknown architectural extension `%s'"), str
);
26347 as_bad (_("architectural extensions must be specified in "
26348 "alphabetical order"));
26354 /* We should skip the extension we've just matched the next time
26366 arm_parse_cpu (const char *str
)
26368 const struct arm_cpu_option_table
*opt
;
26369 const char *ext
= strchr (str
, '+');
26375 len
= strlen (str
);
26379 as_bad (_("missing cpu name `%s'"), str
);
26383 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
26384 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
26386 mcpu_cpu_opt
= &opt
->value
;
26387 if (!dyn_mcpu_ext_opt
)
26388 dyn_mcpu_ext_opt
= XNEW (arm_feature_set
);
26389 *dyn_mcpu_ext_opt
= opt
->ext
;
26390 mcpu_fpu_opt
= &opt
->default_fpu
;
26391 if (opt
->canonical_name
)
26393 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
26394 strcpy (selected_cpu_name
, opt
->canonical_name
);
26400 if (len
>= sizeof selected_cpu_name
)
26401 len
= (sizeof selected_cpu_name
) - 1;
26403 for (i
= 0; i
< len
; i
++)
26404 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
26405 selected_cpu_name
[i
] = 0;
26409 return arm_parse_extension (ext
, mcpu_cpu_opt
, &dyn_mcpu_ext_opt
);
26414 as_bad (_("unknown cpu `%s'"), str
);
26419 arm_parse_arch (const char *str
)
26421 const struct arm_arch_option_table
*opt
;
26422 const char *ext
= strchr (str
, '+');
26428 len
= strlen (str
);
26432 as_bad (_("missing architecture name `%s'"), str
);
26436 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
26437 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
26439 march_cpu_opt
= &opt
->value
;
26440 march_fpu_opt
= &opt
->default_fpu
;
26441 strcpy (selected_cpu_name
, opt
->name
);
26444 return arm_parse_extension (ext
, march_cpu_opt
, &dyn_march_ext_opt
);
26449 as_bad (_("unknown architecture `%s'\n"), str
);
26454 arm_parse_fpu (const char * str
)
26456 const struct arm_option_fpu_value_table
* opt
;
26458 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
26459 if (streq (opt
->name
, str
))
26461 mfpu_opt
= &opt
->value
;
26465 as_bad (_("unknown floating point format `%s'\n"), str
);
26470 arm_parse_float_abi (const char * str
)
26472 const struct arm_option_value_table
* opt
;
26474 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
26475 if (streq (opt
->name
, str
))
26477 mfloat_abi_opt
= opt
->value
;
26481 as_bad (_("unknown floating point abi `%s'\n"), str
);
26487 arm_parse_eabi (const char * str
)
26489 const struct arm_option_value_table
*opt
;
26491 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
26492 if (streq (opt
->name
, str
))
26494 meabi_flags
= opt
->value
;
26497 as_bad (_("unknown EABI `%s'\n"), str
);
26503 arm_parse_it_mode (const char * str
)
26505 bfd_boolean ret
= TRUE
;
26507 if (streq ("arm", str
))
26508 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
26509 else if (streq ("thumb", str
))
26510 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
26511 else if (streq ("always", str
))
26512 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
26513 else if (streq ("never", str
))
26514 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
26517 as_bad (_("unknown implicit IT mode `%s', should be "\
26518 "arm, thumb, always, or never."), str
);
26526 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED
)
26528 codecomposer_syntax
= TRUE
;
26529 arm_comment_chars
[0] = ';';
26530 arm_line_separator_chars
[0] = 0;
26534 struct arm_long_option_table arm_long_opts
[] =
26536 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
26537 arm_parse_cpu
, NULL
},
26538 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
26539 arm_parse_arch
, NULL
},
26540 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
26541 arm_parse_fpu
, NULL
},
26542 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
26543 arm_parse_float_abi
, NULL
},
26545 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
26546 arm_parse_eabi
, NULL
},
26548 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
26549 arm_parse_it_mode
, NULL
},
26550 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
26551 arm_ccs_mode
, NULL
},
26552 {NULL
, NULL
, 0, NULL
}
26556 md_parse_option (int c
, const char * arg
)
26558 struct arm_option_table
*opt
;
26559 const struct arm_legacy_option_table
*fopt
;
26560 struct arm_long_option_table
*lopt
;
26566 target_big_endian
= 1;
26572 target_big_endian
= 0;
26576 case OPTION_FIX_V4BX
:
26581 /* Listing option. Just ignore these, we don't support additional
26586 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
26588 if (c
== opt
->option
[0]
26589 && ((arg
== NULL
&& opt
->option
[1] == 0)
26590 || streq (arg
, opt
->option
+ 1)))
26592 /* If the option is deprecated, tell the user. */
26593 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
26594 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
26595 arg
? arg
: "", _(opt
->deprecated
));
26597 if (opt
->var
!= NULL
)
26598 *opt
->var
= opt
->value
;
26604 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
26606 if (c
== fopt
->option
[0]
26607 && ((arg
== NULL
&& fopt
->option
[1] == 0)
26608 || streq (arg
, fopt
->option
+ 1)))
26610 /* If the option is deprecated, tell the user. */
26611 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
26612 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
26613 arg
? arg
: "", _(fopt
->deprecated
));
26615 if (fopt
->var
!= NULL
)
26616 *fopt
->var
= &fopt
->value
;
26622 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
26624 /* These options are expected to have an argument. */
26625 if (c
== lopt
->option
[0]
26627 && strncmp (arg
, lopt
->option
+ 1,
26628 strlen (lopt
->option
+ 1)) == 0)
26630 /* If the option is deprecated, tell the user. */
26631 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
26632 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
26633 _(lopt
->deprecated
));
26635 /* Call the sup-option parser. */
26636 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
26647 md_show_usage (FILE * fp
)
26649 struct arm_option_table
*opt
;
26650 struct arm_long_option_table
*lopt
;
26652 fprintf (fp
, _(" ARM-specific assembler options:\n"));
26654 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
26655 if (opt
->help
!= NULL
)
26656 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
26658 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
26659 if (lopt
->help
!= NULL
)
26660 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
26664 -EB assemble code for a big-endian cpu\n"));
26669 -EL assemble code for a little-endian cpu\n"));
26673 --fix-v4bx Allow BX in ARMv4 code\n"));
26681 arm_feature_set flags
;
26682 } cpu_arch_ver_table
;
26684 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
26685 chronologically for architectures, with an exception for ARMv6-M and
26686 ARMv6S-M due to legacy reasons. No new architecture should have a
26687 special case. This allows for build attribute selection results to be
26688 stable when new architectures are added. */
26689 static const cpu_arch_ver_table cpu_arch_ver
[] =
26696 {1, ARM_ARCH_V4xM
},
26698 {2, ARM_ARCH_V4TxM
},
26700 {3, ARM_ARCH_V5xM
},
26702 {3, ARM_ARCH_V5TxM
},
26704 {4, ARM_ARCH_V5TExP
},
26705 {4, ARM_ARCH_V5TE
},
26706 {5, ARM_ARCH_V5TEJ
},
26709 {7, ARM_ARCH_V6KZ
},
26711 {8, ARM_ARCH_V6T2
},
26712 {8, ARM_ARCH_V6KT2
},
26713 {8, ARM_ARCH_V6ZT2
},
26714 {8, ARM_ARCH_V6KZT2
},
26716 /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
26717 always selected build attributes to match those of ARMv6-M
26718 (resp. ARMv6S-M). However, due to these architectures being a strict
26719 subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
26720 would be selected when fully respecting chronology of architectures.
26721 It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
26722 move them before ARMv7 architectures. */
26723 {11, ARM_ARCH_V6M
},
26724 {12, ARM_ARCH_V6SM
},
26727 {10, ARM_ARCH_V7A
},
26728 {10, ARM_ARCH_V7R
},
26729 {10, ARM_ARCH_V7M
},
26730 {10, ARM_ARCH_V7VE
},
26731 {13, ARM_ARCH_V7EM
},
26732 {14, ARM_ARCH_V8A
},
26733 {14, ARM_ARCH_V8_1A
},
26734 {14, ARM_ARCH_V8_2A
},
26735 {14, ARM_ARCH_V8_3A
},
26736 {16, ARM_ARCH_V8M_BASE
},
26737 {17, ARM_ARCH_V8M_MAIN
},
26738 {15, ARM_ARCH_V8R
},
26739 {-1, ARM_ARCH_NONE
}
26742 /* Set an attribute if it has not already been set by the user. */
26745 aeabi_set_attribute_int (int tag
, int value
)
26748 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
26749 || !attributes_set_explicitly
[tag
])
26750 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
26754 aeabi_set_attribute_string (int tag
, const char *value
)
26757 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
26758 || !attributes_set_explicitly
[tag
])
26759 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
26762 /* Return whether features in the *NEEDED feature set are available via
26763 extensions for the architecture whose feature set is *ARCH_FSET. */
26766 have_ext_for_needed_feat_p (const arm_feature_set
*arch_fset
,
26767 const arm_feature_set
*needed
)
26769 int i
, nb_allowed_archs
;
26770 arm_feature_set ext_fset
;
26771 const struct arm_option_extension_value_table
*opt
;
26773 ext_fset
= arm_arch_none
;
26774 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
26776 /* Extension does not provide any feature we need. */
26777 if (!ARM_CPU_HAS_FEATURE (*needed
, opt
->merge_value
))
26781 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
26782 for (i
= 0; i
< nb_allowed_archs
; i
++)
26785 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_arch_any
))
26788 /* Extension is available, add it. */
26789 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *arch_fset
))
26790 ARM_MERGE_FEATURE_SETS (ext_fset
, ext_fset
, opt
->merge_value
);
26794 /* Can we enable all features in *needed? */
26795 return ARM_FSET_CPU_SUBSET (*needed
, ext_fset
);
26798 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
26799 a given architecture feature set *ARCH_EXT_FSET including extension feature
26800 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
26801 - if true, check for an exact match of the architecture modulo extensions;
26802 - otherwise, select build attribute value of the first superset
26803 architecture released so that results remains stable when new architectures
26805 For -march/-mcpu=all the build attribute value of the most featureful
26806 architecture is returned. Tag_CPU_arch_profile result is returned in
26810 get_aeabi_cpu_arch_from_fset (const arm_feature_set
*arch_ext_fset
,
26811 const arm_feature_set
*ext_fset
,
26812 char *profile
, int exact_match
)
26814 arm_feature_set arch_fset
;
26815 const cpu_arch_ver_table
*p_ver
, *p_ver_ret
= NULL
;
26817 /* Select most featureful architecture with all its extensions if building
26818 for -march=all as the feature sets used to set build attributes. */
26819 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, arm_arch_any
))
26821 /* Force revisiting of decision for each new architecture. */
26822 gas_assert (MAX_TAG_CPU_ARCH
<= TAG_CPU_ARCH_V8M_MAIN
);
26824 return TAG_CPU_ARCH_V8
;
26827 ARM_CLEAR_FEATURE (arch_fset
, *arch_ext_fset
, *ext_fset
);
26829 for (p_ver
= cpu_arch_ver
; p_ver
->val
!= -1; p_ver
++)
26831 arm_feature_set known_arch_fset
;
26833 ARM_CLEAR_FEATURE (known_arch_fset
, p_ver
->flags
, fpu_any
);
26836 /* Base architecture match user-specified architecture and
26837 extensions, eg. ARMv6S-M matching -march=armv6-m+os. */
26838 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, known_arch_fset
))
26843 /* Base architecture match user-specified architecture only
26844 (eg. ARMv6-M in the same case as above). Record it in case we
26845 find a match with above condition. */
26846 else if (p_ver_ret
== NULL
26847 && ARM_FEATURE_EQUAL (arch_fset
, known_arch_fset
))
26853 /* Architecture has all features wanted. */
26854 if (ARM_FSET_CPU_SUBSET (arch_fset
, known_arch_fset
))
26856 arm_feature_set added_fset
;
26858 /* Compute features added by this architecture over the one
26859 recorded in p_ver_ret. */
26860 if (p_ver_ret
!= NULL
)
26861 ARM_CLEAR_FEATURE (added_fset
, known_arch_fset
,
26863 /* First architecture that match incl. with extensions, or the
26864 only difference in features over the recorded match is
26865 features that were optional and are now mandatory. */
26866 if (p_ver_ret
== NULL
26867 || ARM_FSET_CPU_SUBSET (added_fset
, arch_fset
))
26873 else if (p_ver_ret
== NULL
)
26875 arm_feature_set needed_ext_fset
;
26877 ARM_CLEAR_FEATURE (needed_ext_fset
, arch_fset
, known_arch_fset
);
26879 /* Architecture has all features needed when using some
26880 extensions. Record it and continue searching in case there
26881 exist an architecture providing all needed features without
26882 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
26884 if (have_ext_for_needed_feat_p (&known_arch_fset
,
26891 if (p_ver_ret
== NULL
)
26895 /* Tag_CPU_arch_profile. */
26896 if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7a
)
26897 || ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8
)
26898 || (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_atomics
)
26899 && !ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8m_m_only
)))
26901 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7r
))
26903 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_m
))
26907 return p_ver_ret
->val
;
26910 /* Set the public EABI object attributes. */
/* NOTE(review): this region of the listing is mangled -- upstream gas line
   numbers (26910 etc.) are fused into the text and several physical source
   lines (local declarations, braces, else-arms, some if-bodies) are missing.
   The code below is kept byte-identical; all comments added here are hedged
   accordingly and should be confirmed against the pristine file.  */
26913 aeabi_set_public_attributes (void)
26918 int fp16_optional
= 0;
26919 int skip_exact_match
= 0;
26920 arm_feature_set flags
, flags_arch
, flags_ext
;
26922 /* Autodetection mode, choose the architecture based the instructions
26924 if (no_cpu_selected ())
26926 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
26928 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
26929 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
26931 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
26932 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
26934 /* Code run during relaxation relies on selected_cpu being set. */
26935 selected_cpu
= flags
;
26937 /* Otherwise, choose the architecture based on the capabilities of the
26940 flags
= selected_cpu
;
26941 ARM_MERGE_FEATURE_SETS (flags
, flags
, *mfpu_opt
);
26943 /* Allow the user to override the reported architecture. */
26946 ARM_CLEAR_FEATURE (flags_arch
, *object_arch
, fpu_any
);
26947 flags_ext
= arm_arch_none
;
26951 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
26952 flags_ext
= dyn_mcpu_ext_opt
? *dyn_mcpu_ext_opt
: arm_arch_none
;
26953 skip_exact_match
= ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_any
);
26956 /* When this function is run again after relaxation has happened there is no
26957 way to determine whether an architecture or CPU was specified by the user:
26958 - selected_cpu is set above for relaxation to work;
26959 - march_cpu_opt is not set if only -mcpu or .cpu is used;
26960 - mcpu_cpu_opt is set to arm_arch_any for autodetection.
26961 Therefore, if not in -march=all case we first try an exact match and fall
26962 back to autodetection. */
26963 if (!skip_exact_match
)
26964 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 1);
26966 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 0);
26968 as_bad (_("no architecture contains all the instructions used\n"));
/* From here on, each paragraph derives one EABI build attribute from the
   computed feature set FLAGS; tag numbers/semantics come from the ARM ABI
   build-attributes addendum (ARM IHI 0045).  */
26970 /* Tag_CPU_name. */
26971 if (selected_cpu_name
[0])
26975 q
= selected_cpu_name
;
26976 if (strncmp (q
, "armv", 4) == 0)
/* "armv" prefixed names are upper-cased before being emitted.  */
26981 for (i
= 0; q
[i
]; i
++)
26982 q
[i
] = TOUPPER (q
[i
]);
26984 aeabi_set_attribute_string (Tag_CPU_name
, q
);
26987 /* Tag_CPU_arch. */
26988 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
26990 /* Tag_CPU_arch_profile. */
26991 if (profile
!= '\0')
26992 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
26994 /* Tag_DSP_extension. */
26995 if (dyn_mcpu_ext_opt
&& ARM_CPU_HAS_FEATURE (*dyn_mcpu_ext_opt
, arm_ext_dsp
))
26996 aeabi_set_attribute_int (Tag_DSP_extension
, 1);
/* Strip FPU bits so the ISA-use tests below look at core features only.  */
26998 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
26999 /* Tag_ARM_ISA_use. */
27000 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
27001 || ARM_FEATURE_ZERO (flags_arch
))
27002 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
27004 /* Tag_THUMB_ISA_use. */
27005 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
27006 || ARM_FEATURE_ZERO (flags_arch
))
27010 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
27011 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
))
27013 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
27017 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
)
;
27020 /* Tag_VFP_arch. */
27021 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
27022 aeabi_set_attribute_int (Tag_VFP_arch
,
27023 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
27025 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
27026 aeabi_set_attribute_int (Tag_VFP_arch
,
27027 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
27029 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
27032 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
27034 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
27036 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
27039 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
27040 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
27041 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
27042 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
27043 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
27045 /* Tag_ABI_HardFP_use. */
/* Single-precision-only VFP (v1xd without full v1) gets HardFP_use = 1.  */
27046 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
27047 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
27048 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
27050 /* Tag_WMMX_arch. */
27051 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
27052 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
27053 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
27054 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
27056 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
27057 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v8_1
))
27058 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 4);
27059 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
27060 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
27061 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
27063 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
27065 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
27069 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
27074 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
/* fp16_optional is presumably set in one of the missing branches above --
   TODO confirm against the original file.  */
27075 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
27076 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
27080 We set Tag_DIV_use to two when integer divide instructions have been used
27081 in ARM state, or when Thumb integer divide instructions have been used,
27082 but we have no architecture profile set, nor have we any ARM instructions.
27084 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
27085 by the base architecture.
27087 For new architectures we will have to check these tests. */
27088 gas_assert (arch
<= TAG_CPU_ARCH_V8M_MAIN
);
27089 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
27090 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
27091 aeabi_set_attribute_int (Tag_DIV_use
, 0);
27092 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
27093 || (profile
== '\0'
27094 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
27095 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
27096 aeabi_set_attribute_int (Tag_DIV_use
, 2);
27098 /* Tag_MP_extension_use. */
27099 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
27100 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
27102 /* Tag Virtualization_use. */
/* virt_sec appears to accumulate 1 for Security Extensions and more for
   Virtualization Extensions in the missing if-bodies -- verify upstream.  */
27103 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
27105 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
27108 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
27111 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
27112 finished and free extension feature bits which will not be used anymore. */
27115 arm_md_post_relax (void)
27117 aeabi_set_public_attributes ();
27118 XDELETE (dyn_mcpu_ext_opt
);
27119 dyn_mcpu_ext_opt
= NULL
;
27120 XDELETE (dyn_march_ext_opt
);
27121 dyn_march_ext_opt
= NULL
;
27124 /* Add the default contents for the .ARM.attributes section. */
/* NOTE(review): this listing is mangled -- the function header (upstream this
   is `void arm_md_end (void)') and the body of the `if' (presumably an early
   `return') are missing.  Apparently objects using an EABI version older than
   VER4 skip attribute emission entirely; confirm against the original file.  */
27129 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
27132 aeabi_set_public_attributes ();
27134 #endif /* OBJ_ELF */
27136 /* Parse a .cpu directive. */
27139 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
27141 const struct arm_cpu_option_table
*opt
;
27145 name
= input_line_pointer
;
27146 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
27147 input_line_pointer
++;
27148 saved_char
= *input_line_pointer
;
27149 *input_line_pointer
= 0;
27151 /* Skip the first "all" entry. */
27152 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
27153 if (streq (opt
->name
, name
))
27155 mcpu_cpu_opt
= &opt
->value
;
27156 if (!dyn_mcpu_ext_opt
)
27157 dyn_mcpu_ext_opt
= XNEW (arm_feature_set
);
27158 *dyn_mcpu_ext_opt
= opt
->ext
;
27159 ARM_MERGE_FEATURE_SETS (selected_cpu
, *mcpu_cpu_opt
, *dyn_mcpu_ext_opt
);
27160 if (opt
->canonical_name
)
27161 strcpy (selected_cpu_name
, opt
->canonical_name
);
27165 for (i
= 0; opt
->name
[i
]; i
++)
27166 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
27168 selected_cpu_name
[i
] = 0;
27170 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
27171 if (dyn_mcpu_ext_opt
)
27172 ARM_MERGE_FEATURE_SETS (cpu_variant
, cpu_variant
, *dyn_mcpu_ext_opt
);
27173 *input_line_pointer
= saved_char
;
27174 demand_empty_rest_of_line ();
27177 as_bad (_("unknown cpu `%s'"), name
);
27178 *input_line_pointer
= saved_char
;
27179 ignore_rest_of_line ();
27182 /* Parse a .arch directive. */
27185 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
27187 const struct arm_arch_option_table
*opt
;
27191 name
= input_line_pointer
;
27192 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
27193 input_line_pointer
++;
27194 saved_char
= *input_line_pointer
;
27195 *input_line_pointer
= 0;
27197 /* Skip the first "all" entry. */
27198 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
27199 if (streq (opt
->name
, name
))
27201 mcpu_cpu_opt
= &opt
->value
;
27202 XDELETE (dyn_mcpu_ext_opt
);
27203 dyn_mcpu_ext_opt
= NULL
;
27204 selected_cpu
= *mcpu_cpu_opt
;
27205 strcpy (selected_cpu_name
, opt
->name
);
27206 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, *mfpu_opt
);
27207 *input_line_pointer
= saved_char
;
27208 demand_empty_rest_of_line ();
27212 as_bad (_("unknown architecture `%s'\n"), name
);
27213 *input_line_pointer
= saved_char
;
27214 ignore_rest_of_line ();
27217 /* Parse a .object_arch directive. */
27220 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
27222 const struct arm_arch_option_table
*opt
;
27226 name
= input_line_pointer
;
27227 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
27228 input_line_pointer
++;
27229 saved_char
= *input_line_pointer
;
27230 *input_line_pointer
= 0;
27232 /* Skip the first "all" entry. */
27233 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
27234 if (streq (opt
->name
, name
))
27236 object_arch
= &opt
->value
;
27237 *input_line_pointer
= saved_char
;
27238 demand_empty_rest_of_line ();
27242 as_bad (_("unknown architecture `%s'\n"), name
);
27243 *input_line_pointer
= saved_char
;
27244 ignore_rest_of_line ();
27247 /* Parse a .arch_extension directive. */
27250 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
27252 const struct arm_option_extension_value_table
*opt
;
27253 const arm_feature_set arm_any
= ARM_ANY
;
27256 int adding_value
= 1;
27258 name
= input_line_pointer
;
27259 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
27260 input_line_pointer
++;
27261 saved_char
= *input_line_pointer
;
27262 *input_line_pointer
= 0;
27264 if (strlen (name
) >= 2
27265 && strncmp (name
, "no", 2) == 0)
27271 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
27272 if (streq (opt
->name
, name
))
27274 int i
, nb_allowed_archs
=
27275 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[i
]);
27276 for (i
= 0; i
< nb_allowed_archs
; i
++)
27279 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
27281 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *mcpu_cpu_opt
))
27285 if (i
== nb_allowed_archs
)
27287 as_bad (_("architectural extension `%s' is not allowed for the "
27288 "current base architecture"), name
);
27292 if (!dyn_mcpu_ext_opt
)
27294 dyn_mcpu_ext_opt
= XNEW (arm_feature_set
);
27295 *dyn_mcpu_ext_opt
= arm_arch_none
;
27298 ARM_MERGE_FEATURE_SETS (*dyn_mcpu_ext_opt
, *dyn_mcpu_ext_opt
,
27301 ARM_CLEAR_FEATURE (*dyn_mcpu_ext_opt
, *dyn_mcpu_ext_opt
,
27304 ARM_MERGE_FEATURE_SETS (selected_cpu
, *mcpu_cpu_opt
, *dyn_mcpu_ext_opt
);
27305 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, *mfpu_opt
);
27306 *input_line_pointer
= saved_char
;
27307 demand_empty_rest_of_line ();
27308 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
27309 on this return so that duplicate extensions (extensions with the
27310 same name as a previous extension in the list) are not considered
27311 for command-line parsing. */
27315 if (opt
->name
== NULL
)
27316 as_bad (_("unknown architecture extension `%s'\n"), name
);
27318 *input_line_pointer
= saved_char
;
27319 ignore_rest_of_line ();
27322 /* Parse a .fpu directive. */
27325 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
27327 const struct arm_option_fpu_value_table
*opt
;
27331 name
= input_line_pointer
;
27332 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
27333 input_line_pointer
++;
27334 saved_char
= *input_line_pointer
;
27335 *input_line_pointer
= 0;
27337 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
27338 if (streq (opt
->name
, name
))
27340 mfpu_opt
= &opt
->value
;
27341 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
27342 if (dyn_mcpu_ext_opt
)
27343 ARM_MERGE_FEATURE_SETS (cpu_variant
, cpu_variant
, *dyn_mcpu_ext_opt
);
27344 *input_line_pointer
= saved_char
;
27345 demand_empty_rest_of_line ();
27349 as_bad (_("unknown floating point format `%s'\n"), name
);
27350 *input_line_pointer
= saved_char
;
27351 ignore_rest_of_line ();
27354 /* Copy symbol information. */
27357 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
27359 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
27363 /* Given a symbolic attribute NAME, return the proper integer value.
27364 Returns -1 if the attribute is not known. */
/* NOTE(review): mangled listing -- the struct member declarations, some table
   entries, the final `return -1;' and various braces are missing from view.
   Code kept byte-identical.  */
27367 arm_convert_symbolic_attribute (const char *name
)
27369 static const struct
27374 attribute_table
[] =
27376 /* When you modify this table you should
27377 also modify the list in doc/c-arm.texi. */
27378 #define T(tag) {#tag, tag}
27379 T (Tag_CPU_raw_name
),
27382 T (Tag_CPU_arch_profile
),
27383 T (Tag_ARM_ISA_use
),
27384 T (Tag_THUMB_ISA_use
),
27388 T (Tag_Advanced_SIMD_arch
),
27389 T (Tag_PCS_config
),
27390 T (Tag_ABI_PCS_R9_use
),
27391 T (Tag_ABI_PCS_RW_data
),
27392 T (Tag_ABI_PCS_RO_data
),
27393 T (Tag_ABI_PCS_GOT_use
),
27394 T (Tag_ABI_PCS_wchar_t
),
27395 T (Tag_ABI_FP_rounding
),
27396 T (Tag_ABI_FP_denormal
),
27397 T (Tag_ABI_FP_exceptions
),
27398 T (Tag_ABI_FP_user_exceptions
),
27399 T (Tag_ABI_FP_number_model
),
27400 T (Tag_ABI_align_needed
),
27401 T (Tag_ABI_align8_needed
),
27402 T (Tag_ABI_align_preserved
),
27403 T (Tag_ABI_align8_preserved
),
27404 T (Tag_ABI_enum_size
),
27405 T (Tag_ABI_HardFP_use
),
27406 T (Tag_ABI_VFP_args
),
27407 T (Tag_ABI_WMMX_args
),
27408 T (Tag_ABI_optimization_goals
),
27409 T (Tag_ABI_FP_optimization_goals
),
27410 T (Tag_compatibility
),
27411 T (Tag_CPU_unaligned_access
),
27412 T (Tag_FP_HP_extension
),
27413 T (Tag_VFP_HP_extension
),
27414 T (Tag_ABI_FP_16bit_format
),
27415 T (Tag_MPextension_use
),
27417 T (Tag_nodefaults
),
27418 T (Tag_also_compatible_with
),
27419 T (Tag_conformance
),
27421 T (Tag_Virtualization_use
),
27422 T (Tag_DSP_extension
),
27423 /* We deliberately do not include Tag_MPextension_use_legacy. */
/* Linear scan: the table is small, so O(n) lookup per directive is fine.  */
27431 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
27432 if (streq (name
, attribute_table
[i
].name
))
27433 return attribute_table
[i
].tag
;
27438 /* Apply sym value for relocations only in the case that they are for
27439 local symbols in the same segment as the fixup and you have the
27440 respective architectural feature for blx and simple switches. */
/* NOTE(review): mangled listing -- the return type/result values, the start
   of the big `if' condition, the switch's return statements and default case
   are missing from view.  Code kept byte-identical.  */
27443 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
27446 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
27447 /* PR 17444: If the local symbol is in a different section then a reloc
27448 will always be generated for it, so applying the symbol value now
27449 will result in a double offset being stored in the relocation. */
27450 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
27451 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
/* Dispatch on the relocation type to decide whether the symbol's value
   may be folded into the fixup now.  */
27453 switch (fixP
->fx_r_type
)
27455 case BFD_RELOC_ARM_PCREL_BLX
:
27456 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
27457 if (ARM_IS_FUNC (fixP
->fx_addsy
))
27461 case BFD_RELOC_ARM_PCREL_CALL
:
27462 case BFD_RELOC_THUMB_PCREL_BLX
:
27463 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
27474 #endif /* OBJ_ELF */