1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2022 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
39 #include "dw2gencfi.h"
42 #include "dwarf2dbg.h"
45 /* Must be at least the size of the largest unwind opcode (currently two). */
46 #define ARM_OPCODE_CHUNK_SIZE 8
48 /* This structure holds the unwinding state. */
53 symbolS
* table_entry
;
54 symbolS
* personality_routine
;
55 int personality_index
;
56 /* The segment containing the function. */
59 /* Opcodes generated from this function. */
60 unsigned char * opcodes
;
63 /* The number of bytes pushed to the stack. */
65 /* We don't add stack adjustment opcodes immediately so that we can merge
66 multiple adjustments. We can also omit the final adjustment
67 when using a frame pointer. */
68 offsetT pending_offset
;
69 /* These two fields are set by both unwind_movsp and unwind_setfp. They
70 hold the reg+offset to use when restoring sp from a frame pointer. */
73 /* Nonzero if an unwind_setfp directive has been seen. */
75 /* Nonzero if the last opcode restores sp from fp_reg. */
76 unsigned sp_restored
:1;
79 /* Whether --fdpic was given. */
84 /* Results from operand parsing worker functions. */
88 PARSE_OPERAND_SUCCESS
,
90 PARSE_OPERAND_FAIL_NO_BACKTRACK
91 } parse_operand_result
;
100 /* Types of processor to assemble for. */
102 /* The code that was here used to select a default CPU depending on compiler
103 pre-defines which were only present when doing native builds, thus
104 changing gas' default behaviour depending upon the build host.
106 If you have a target that requires a default CPU option then the you
107 should define CPU_DEFAULT here. */
110 /* Perform range checks on positive and negative overflows by checking if the
111 VALUE given fits within the range of an BITS sized immediate. */
112 static bool out_of_range_p (offsetT value
, offsetT bits
)
114 gas_assert (bits
< (offsetT
)(sizeof (value
) * 8));
115 return (value
& ~((1 << bits
)-1))
116 && ((value
& ~((1 << bits
)-1)) != ~((1 << bits
)-1));
121 # define FPU_DEFAULT FPU_ARCH_FPA
122 # elif defined (TE_NetBSD)
124 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
126 /* Legacy a.out format. */
127 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
129 # elif defined (TE_VXWORKS)
130 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
132 /* For backwards compatibility, default to FPA. */
133 # define FPU_DEFAULT FPU_ARCH_FPA
135 #endif /* ifndef FPU_DEFAULT */
137 #define streq(a, b) (strcmp (a, b) == 0)
139 /* Current set of feature bits available (CPU+FPU). Different from
140 selected_cpu + selected_fpu in case of autodetection since the CPU
141 feature bits are then all set. */
142 static arm_feature_set cpu_variant
;
143 /* Feature bits used in each execution state. Used to set build attribute
144 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
145 static arm_feature_set arm_arch_used
;
146 static arm_feature_set thumb_arch_used
;
148 /* Flags stored in private area of BFD structure. */
149 static int uses_apcs_26
= false;
150 static int atpcs
= false;
151 static int support_interwork
= false;
152 static int uses_apcs_float
= false;
153 static int pic_code
= false;
154 static int fix_v4bx
= false;
155 /* Warn on using deprecated features. */
156 static int warn_on_deprecated
= true;
157 static int warn_on_restrict_it
= false;
159 /* Understand CodeComposer Studio assembly syntax. */
160 bool codecomposer_syntax
= false;
162 /* Variables that we set while parsing command-line options. Once all
163 options have been read we re-process these values to set the real
166 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
167 instead of -mcpu=arm1). */
168 static const arm_feature_set
*legacy_cpu
= NULL
;
169 static const arm_feature_set
*legacy_fpu
= NULL
;
171 /* CPU, extension and FPU feature bits selected by -mcpu. */
172 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
173 static arm_feature_set
*mcpu_ext_opt
= NULL
;
174 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
176 /* CPU, extension and FPU feature bits selected by -march. */
177 static const arm_feature_set
*march_cpu_opt
= NULL
;
178 static arm_feature_set
*march_ext_opt
= NULL
;
179 static const arm_feature_set
*march_fpu_opt
= NULL
;
181 /* Feature bits selected by -mfpu. */
182 static const arm_feature_set
*mfpu_opt
= NULL
;
184 /* Constants for known architecture features. */
185 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
186 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V1
;
187 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
188 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V3
;
189 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_NEON_V1
;
190 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
191 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
193 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
195 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
198 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
201 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
202 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2
);
203 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
204 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
205 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
206 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
207 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
208 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
209 static const arm_feature_set arm_ext_v4t_5
=
210 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
211 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
212 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
213 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
214 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
215 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
216 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
217 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
218 /* Only for compatibility of hint instructions. */
219 static const arm_feature_set arm_ext_v6k_v6t2
=
220 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
| ARM_EXT_V6T2
);
221 static const arm_feature_set arm_ext_v6_notm
=
222 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
223 static const arm_feature_set arm_ext_v6_dsp
=
224 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
225 static const arm_feature_set arm_ext_barrier
=
226 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
227 static const arm_feature_set arm_ext_msr
=
228 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
229 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
230 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
231 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
232 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
233 static const arm_feature_set arm_ext_v8r
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8R
);
235 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
237 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
238 static const arm_feature_set arm_ext_m
=
239 ARM_FEATURE_CORE (ARM_EXT_V6M
| ARM_EXT_V7M
,
240 ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
241 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
242 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
243 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
244 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
245 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
246 static const arm_feature_set arm_ext_pan
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
);
247 static const arm_feature_set arm_ext_v8m
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
);
248 static const arm_feature_set arm_ext_v8m_main
=
249 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN
);
250 static const arm_feature_set arm_ext_v8_1m_main
=
251 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN
);
252 /* Instructions in ARMv8-M only found in M profile architectures. */
253 static const arm_feature_set arm_ext_v8m_m_only
=
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
255 static const arm_feature_set arm_ext_v6t2_v8m
=
256 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M
);
257 /* Instructions shared between ARMv8-A and ARMv8-M. */
258 static const arm_feature_set arm_ext_atomics
=
259 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS
);
261 /* DSP instructions Tag_DSP_extension refers to. */
262 static const arm_feature_set arm_ext_dsp
=
263 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
| ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
);
265 static const arm_feature_set arm_ext_ras
=
266 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
);
267 /* FP16 instructions. */
268 static const arm_feature_set arm_ext_fp16
=
269 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
);
270 static const arm_feature_set arm_ext_fp16_fml
=
271 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML
);
272 static const arm_feature_set arm_ext_v8_2
=
273 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A
);
274 static const arm_feature_set arm_ext_v8_3
=
275 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A
);
276 static const arm_feature_set arm_ext_sb
=
277 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
);
278 static const arm_feature_set arm_ext_predres
=
279 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
);
280 static const arm_feature_set arm_ext_bf16
=
281 ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16
);
282 static const arm_feature_set arm_ext_i8mm
=
283 ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM
);
284 static const arm_feature_set arm_ext_crc
=
285 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC
);
286 static const arm_feature_set arm_ext_cde
=
287 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE
);
288 static const arm_feature_set arm_ext_cde0
=
289 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE0
);
290 static const arm_feature_set arm_ext_cde1
=
291 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE1
);
292 static const arm_feature_set arm_ext_cde2
=
293 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE2
);
294 static const arm_feature_set arm_ext_cde3
=
295 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE3
);
296 static const arm_feature_set arm_ext_cde4
=
297 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE4
);
298 static const arm_feature_set arm_ext_cde5
=
299 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE5
);
300 static const arm_feature_set arm_ext_cde6
=
301 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE6
);
302 static const arm_feature_set arm_ext_cde7
=
303 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE7
);
305 static const arm_feature_set arm_arch_any
= ARM_ANY
;
306 static const arm_feature_set fpu_any
= FPU_ANY
;
307 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED
= ARM_FEATURE (-1, -1, -1);
308 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
309 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
311 static const arm_feature_set arm_cext_iwmmxt2
=
312 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
313 static const arm_feature_set arm_cext_iwmmxt
=
314 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
315 static const arm_feature_set arm_cext_xscale
=
316 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
317 static const arm_feature_set arm_cext_maverick
=
318 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
319 static const arm_feature_set fpu_fpa_ext_v1
=
320 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
321 static const arm_feature_set fpu_fpa_ext_v2
=
322 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
323 static const arm_feature_set fpu_vfp_ext_v1xd
=
324 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
325 static const arm_feature_set fpu_vfp_ext_v1
=
326 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
327 static const arm_feature_set fpu_vfp_ext_v2
=
328 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
329 static const arm_feature_set fpu_vfp_ext_v3xd
=
330 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
331 static const arm_feature_set fpu_vfp_ext_v3
=
332 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
333 static const arm_feature_set fpu_vfp_ext_d32
=
334 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
335 static const arm_feature_set fpu_neon_ext_v1
=
336 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
337 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
338 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
339 static const arm_feature_set mve_ext
=
340 ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE
);
341 static const arm_feature_set mve_fp_ext
=
342 ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP
);
343 /* Note: This has more than one bit set, which means using it with
344 mark_feature_used (which returns if *any* of the bits are set in the current
345 cpu variant) can give surprising results. */
346 static const arm_feature_set armv8m_fp
=
347 ARM_FEATURE_COPROC (FPU_VFP_V5_SP_D16
);
349 static const arm_feature_set fpu_vfp_fp16
=
350 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
351 static const arm_feature_set fpu_neon_ext_fma
=
352 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
354 static const arm_feature_set fpu_vfp_ext_fma
=
355 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
356 static const arm_feature_set fpu_vfp_ext_armv8
=
357 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
358 static const arm_feature_set fpu_vfp_ext_armv8xd
=
359 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
360 static const arm_feature_set fpu_neon_ext_armv8
=
361 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
362 static const arm_feature_set fpu_crypto_ext_armv8
=
363 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
364 static const arm_feature_set fpu_neon_ext_v8_1
=
365 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA
);
366 static const arm_feature_set fpu_neon_ext_dotprod
=
367 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
);
368 static const arm_feature_set pacbti_ext
=
369 ARM_FEATURE_CORE_HIGH_HIGH (ARM_EXT3_PACBTI
);
371 static int mfloat_abi_opt
= -1;
372 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
374 static arm_feature_set selected_arch
= ARM_ARCH_NONE
;
375 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
377 static arm_feature_set selected_ext
= ARM_ARCH_NONE
;
378 /* Feature bits selected by the last -mcpu/-march or by the combination of the
379 last .cpu/.arch directive .arch_extension directives since that
381 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
382 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
383 static arm_feature_set selected_fpu
= FPU_NONE
;
384 /* Feature bits selected by the last .object_arch directive. */
385 static arm_feature_set selected_object_arch
= ARM_ARCH_NONE
;
386 /* Must be long enough to hold any of the names in arm_cpus. */
387 static const struct arm_ext_table
* selected_ctx_ext_table
= NULL
;
388 static char selected_cpu_name
[20];
390 extern FLONUM_TYPE generic_floating_point_number
;
392 /* Return if no cpu was selected on command-line. */
394 no_cpu_selected (void)
396 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
401 static int meabi_flags
= EABI_DEFAULT
;
403 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
406 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
411 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
416 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
417 symbolS
* GOT_symbol
;
420 /* 0: assemble for ARM,
421 1: assemble for Thumb,
422 2: assemble for Thumb even though target CPU does not support thumb
424 static int thumb_mode
= 0;
425 /* A value distinct from the possible values for thumb_mode that we
426 can use to record whether thumb_mode has been copied into the
427 tc_frag_data field of a frag. */
428 #define MODE_RECORDED (1 << 4)
430 /* Specifies the intrinsic IT insn behavior mode. */
431 enum implicit_it_mode
433 IMPLICIT_IT_MODE_NEVER
= 0x00,
434 IMPLICIT_IT_MODE_ARM
= 0x01,
435 IMPLICIT_IT_MODE_THUMB
= 0x02,
436 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
438 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
440 /* If unified_syntax is true, we are processing the new unified
441 ARM/Thumb syntax. Important differences from the old ARM mode:
443 - Immediate operands do not require a # prefix.
444 - Conditional affixes always appear at the end of the
445 instruction. (For backward compatibility, those instructions
446 that formerly had them in the middle, continue to accept them
448 - The IT instruction may appear, and if it does is validated
449 against subsequent conditional affixes. It does not generate
452 Important differences from the old Thumb mode:
454 - Immediate operands do not require a # prefix.
455 - Most of the V6T2 instructions are only available in unified mode.
456 - The .N and .W suffixes are recognized and honored (it is an error
457 if they cannot be honored).
458 - All instructions set the flags if and only if they have an 's' affix.
459 - Conditional affixes may be used. They are validated against
460 preceding IT instructions. Unlike ARM mode, you cannot use a
461 conditional affix except in the scope of an IT instruction. */
463 static bool unified_syntax
= false;
465 /* An immediate operand can start with #, and ld*, st*, pld operands
466 can contain [ and ]. We need to tell APP not to elide whitespace
467 before a [, which can appear as the first operand for pld.
468 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
469 const char arm_symbol_chars
[] = "#[]{}";
485 enum neon_el_type type
;
489 #define NEON_MAX_TYPE_ELS 5
493 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
497 enum pred_instruction_type
503 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
504 if inside, should be the last one. */
505 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
506 i.e. BKPT and NOP. */
507 IT_INSN
, /* The IT insn has been parsed. */
508 VPT_INSN
, /* The VPT/VPST insn has been parsed. */
509 MVE_OUTSIDE_PRED_INSN
, /* Instruction to indicate a MVE instruction without
510 a predication code. */
511 MVE_UNPREDICABLE_INSN
, /* MVE instruction that is non-predicable. */
514 /* The maximum number of operands we need. */
515 #define ARM_IT_MAX_OPERANDS 6
516 #define ARM_IT_MAX_RELOCS 3
521 unsigned long instruction
;
523 unsigned int size_req
;
525 /* "uncond_value" is set to the value in place of the conditional field in
526 unconditional versions of the instruction, or -1u if nothing is
528 unsigned int uncond_value
;
529 struct neon_type vectype
;
530 /* This does not indicate an actual NEON instruction, only that
531 the mnemonic accepts neon-style type suffixes. */
533 /* Set to the opcode if the instruction needs relaxation.
534 Zero if the instruction is not relaxed. */
538 bfd_reloc_code_real_type type
;
541 } relocs
[ARM_IT_MAX_RELOCS
];
543 enum pred_instruction_type pred_insn_type
;
549 struct neon_type_el vectype
;
550 unsigned present
: 1; /* Operand present. */
551 unsigned isreg
: 1; /* Operand was a register. */
552 unsigned immisreg
: 2; /* .imm field is a second register.
553 0: imm, 1: gpr, 2: MVE Q-register. */
554 unsigned isscalar
: 2; /* Operand is a (SIMD) scalar:
558 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
559 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
560 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
561 instructions. This allows us to disambiguate ARM <-> vector insns. */
562 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
563 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
564 unsigned isquad
: 1; /* Operand is SIMD quad register. */
565 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
566 unsigned iszr
: 1; /* Operand is ZR register. */
567 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
568 unsigned writeback
: 1; /* Operand has trailing ! */
569 unsigned preind
: 1; /* Preindexed address. */
570 unsigned postind
: 1; /* Postindexed address. */
571 unsigned negative
: 1; /* Index register was negated. */
572 unsigned shifted
: 1; /* Shift applied to operation. */
573 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
574 } operands
[ARM_IT_MAX_OPERANDS
];
577 static struct arm_it inst
;
579 #define NUM_FLOAT_VALS 8
581 const char * fp_const
[] =
583 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
586 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
596 #define CP_T_X 0x00008000
597 #define CP_T_Y 0x00400000
599 #define CONDS_BIT 0x00100000
600 #define LOAD_BIT 0x00100000
602 #define DOUBLE_LOAD_FLAG 0x00000001
606 const char * template_name
;
610 #define COND_ALWAYS 0xE
614 const char * template_name
;
618 struct asm_barrier_opt
620 const char * template_name
;
622 const arm_feature_set arch
;
625 /* The bit that distinguishes CPSR and SPSR. */
626 #define SPSR_BIT (1 << 22)
628 /* The individual PSR flag bits. */
629 #define PSR_c (1 << 16)
630 #define PSR_x (1 << 17)
631 #define PSR_s (1 << 18)
632 #define PSR_f (1 << 19)
637 bfd_reloc_code_real_type reloc
;
642 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
643 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
648 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
651 /* Bits for DEFINED field in neon_typed_alias. */
652 #define NTA_HASTYPE 1
653 #define NTA_HASINDEX 2
655 struct neon_typed_alias
657 unsigned char defined
;
659 struct neon_type_el eltype
;
662 /* ARM register categories. This includes coprocessor numbers and various
663 architecture extensions' registers. Each entry should have an error message
664 in reg_expected_msgs below. */
695 /* Structure for a hash table entry for a register.
696 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
697 information which states whether a vector type or index is specified (for a
698 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
704 unsigned char builtin
;
705 struct neon_typed_alias
* neon
;
708 /* Diagnostics used when we don't get a register of the expected type. */
709 const char * const reg_expected_msgs
[] =
711 [REG_TYPE_RN
] = N_("ARM register expected"),
712 [REG_TYPE_CP
] = N_("bad or missing co-processor number"),
713 [REG_TYPE_CN
] = N_("co-processor register expected"),
714 [REG_TYPE_FN
] = N_("FPA register expected"),
715 [REG_TYPE_VFS
] = N_("VFP single precision register expected"),
716 [REG_TYPE_VFD
] = N_("VFP/Neon double precision register expected"),
717 [REG_TYPE_NQ
] = N_("Neon quad precision register expected"),
718 [REG_TYPE_VFSD
] = N_("VFP single or double precision register expected"),
719 [REG_TYPE_NDQ
] = N_("Neon double or quad precision register expected"),
720 [REG_TYPE_NSD
] = N_("Neon single or double precision register expected"),
721 [REG_TYPE_NSDQ
] = N_("VFP single, double or Neon quad precision register"
723 [REG_TYPE_VFC
] = N_("VFP system register expected"),
724 [REG_TYPE_MVF
] = N_("Maverick MVF register expected"),
725 [REG_TYPE_MVD
] = N_("Maverick MVD register expected"),
726 [REG_TYPE_MVFX
] = N_("Maverick MVFX register expected"),
727 [REG_TYPE_MVDX
] = N_("Maverick MVDX register expected"),
728 [REG_TYPE_MVAX
] = N_("Maverick MVAX register expected"),
729 [REG_TYPE_DSPSC
] = N_("Maverick DSPSC register expected"),
730 [REG_TYPE_MMXWR
] = N_("iWMMXt data register expected"),
731 [REG_TYPE_MMXWC
] = N_("iWMMXt control register expected"),
732 [REG_TYPE_MMXWCG
] = N_("iWMMXt scalar register expected"),
733 [REG_TYPE_XSCALE
] = N_("XScale accumulator register expected"),
734 [REG_TYPE_MQ
] = N_("MVE vector register expected"),
736 [REG_TYPE_ZR
] = N_("ZR register expected"),
737 [REG_TYPE_PSEUDO
] = N_("Pseudo register expected"),
740 /* Some well known registers that we refer to directly elsewhere. */
746 /* ARM instructions take 4bytes in the object file, Thumb instructions
752 /* Basic string to match. */
753 const char * template_name
;
755 /* Parameters to instruction. */
756 unsigned int operands
[8];
758 /* Conditional tag - see opcode_lookup. */
759 unsigned int tag
: 4;
761 /* Basic instruction code. */
764 /* Thumb-format instruction code. */
767 /* Which architecture variant provides this instruction. */
768 const arm_feature_set
* avariant
;
769 const arm_feature_set
* tvariant
;
771 /* Function to call to encode instruction in ARM format. */
772 void (* aencode
) (void);
774 /* Function to call to encode instruction in Thumb format. */
775 void (* tencode
) (void);
777 /* Indicates whether this instruction may be vector predicated. */
778 unsigned int mayBeVecPred
: 1;
781 /* Defines for various bits that we will want to toggle. */
782 #define INST_IMMEDIATE 0x02000000
783 #define OFFSET_REG 0x02000000
784 #define HWOFFSET_IMM 0x00400000
785 #define SHIFT_BY_REG 0x00000010
786 #define PRE_INDEX 0x01000000
787 #define INDEX_UP 0x00800000
788 #define WRITE_BACK 0x00200000
789 #define LDM_TYPE_2_OR_3 0x00400000
790 #define CPSI_MMOD 0x00020000
792 #define LITERAL_MASK 0xf000f000
793 #define OPCODE_MASK 0xfe1fffff
794 #define V4_STR_BIT 0x00000020
795 #define VLDR_VMOV_SAME 0x0040f000
797 #define T2_SUBS_PC_LR 0xf3de8f00
799 #define DATA_OP_SHIFT 21
800 #define SBIT_SHIFT 20
802 #define T2_OPCODE_MASK 0xfe1fffff
803 #define T2_DATA_OP_SHIFT 21
804 #define T2_SBIT_SHIFT 20
806 #define A_COND_MASK 0xf0000000
807 #define A_PUSH_POP_OP_MASK 0x0fff0000
809 /* Opcodes for pushing/popping registers to/from the stack. */
810 #define A1_OPCODE_PUSH 0x092d0000
811 #define A2_OPCODE_PUSH 0x052d0004
812 #define A2_OPCODE_POP 0x049d0004
814 /* Codes to distinguish the arithmetic instructions. */
825 #define OPCODE_CMP 10
826 #define OPCODE_CMN 11
827 #define OPCODE_ORR 12
828 #define OPCODE_MOV 13
829 #define OPCODE_BIC 14
830 #define OPCODE_MVN 15
832 #define T2_OPCODE_AND 0
833 #define T2_OPCODE_BIC 1
834 #define T2_OPCODE_ORR 2
835 #define T2_OPCODE_ORN 3
836 #define T2_OPCODE_EOR 4
837 #define T2_OPCODE_ADD 8
838 #define T2_OPCODE_ADC 10
839 #define T2_OPCODE_SBC 11
840 #define T2_OPCODE_SUB 13
841 #define T2_OPCODE_RSB 14
843 #define T_OPCODE_MUL 0x4340
844 #define T_OPCODE_TST 0x4200
845 #define T_OPCODE_CMN 0x42c0
846 #define T_OPCODE_NEG 0x4240
847 #define T_OPCODE_MVN 0x43c0
849 #define T_OPCODE_ADD_R3 0x1800
850 #define T_OPCODE_SUB_R3 0x1a00
851 #define T_OPCODE_ADD_HI 0x4400
852 #define T_OPCODE_ADD_ST 0xb000
853 #define T_OPCODE_SUB_ST 0xb080
854 #define T_OPCODE_ADD_SP 0xa800
855 #define T_OPCODE_ADD_PC 0xa000
856 #define T_OPCODE_ADD_I8 0x3000
857 #define T_OPCODE_SUB_I8 0x3800
858 #define T_OPCODE_ADD_I3 0x1c00
859 #define T_OPCODE_SUB_I3 0x1e00
861 #define T_OPCODE_ASR_R 0x4100
862 #define T_OPCODE_LSL_R 0x4080
863 #define T_OPCODE_LSR_R 0x40c0
864 #define T_OPCODE_ROR_R 0x41c0
865 #define T_OPCODE_ASR_I 0x1000
866 #define T_OPCODE_LSL_I 0x0000
867 #define T_OPCODE_LSR_I 0x0800
869 #define T_OPCODE_MOV_I8 0x2000
870 #define T_OPCODE_CMP_I8 0x2800
871 #define T_OPCODE_CMP_LR 0x4280
872 #define T_OPCODE_MOV_HR 0x4600
873 #define T_OPCODE_CMP_HR 0x4500
875 #define T_OPCODE_LDR_PC 0x4800
876 #define T_OPCODE_LDR_SP 0x9800
877 #define T_OPCODE_STR_SP 0x9000
878 #define T_OPCODE_LDR_IW 0x6800
879 #define T_OPCODE_STR_IW 0x6000
880 #define T_OPCODE_LDR_IH 0x8800
881 #define T_OPCODE_STR_IH 0x8000
882 #define T_OPCODE_LDR_IB 0x7800
883 #define T_OPCODE_STR_IB 0x7000
884 #define T_OPCODE_LDR_RW 0x5800
885 #define T_OPCODE_STR_RW 0x5000
886 #define T_OPCODE_LDR_RH 0x5a00
887 #define T_OPCODE_STR_RH 0x5200
888 #define T_OPCODE_LDR_RB 0x5c00
889 #define T_OPCODE_STR_RB 0x5400
891 #define T_OPCODE_PUSH 0xb400
892 #define T_OPCODE_POP 0xbc00
894 #define T_OPCODE_BRANCH 0xe000
896 #define THUMB_SIZE 2 /* Size of thumb instruction. */
897 #define THUMB_PP_PC_LR 0x0100
898 #define THUMB_LOAD_BIT 0x0800
899 #define THUMB2_LOAD_BIT 0x00100000
901 #define BAD_SYNTAX _("syntax error")
902 #define BAD_ARGS _("bad arguments to instruction")
903 #define BAD_SP _("r13 not allowed here")
904 #define BAD_PC _("r15 not allowed here")
905 #define BAD_ODD _("Odd register not allowed here")
906 #define BAD_EVEN _("Even register not allowed here")
907 #define BAD_COND _("instruction cannot be conditional")
908 #define BAD_OVERLAP _("registers may not be the same")
909 #define BAD_HIREG _("lo register required")
910 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
911 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
912 #define BAD_BRANCH _("branch must be last instruction in IT block")
913 #define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
914 #define BAD_NO_VPT _("instruction not allowed in VPT block")
915 #define BAD_NOT_IT _("instruction not allowed in IT block")
/* Diagnostic strings shared by the ARM/Thumb/MVE encoders.  Kept as
   macros so each message is translated (via gettext's _()) exactly once
   and reused consistently by every encoder that needs it.  */
#define BAD_NOT_VPT	_("instruction missing MVE vector predication code")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_OUT_VPT \
	_("vector predicated instruction should be in VPT/VPST block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_VPT_COND	_("incorrect condition in VPT/VPST block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define BAD_BF16	_("selected processor does not support bf16 instruction")
#define BAD_CDE		_("selected processor does not support cde instruction")
#define BAD_CDE_COPROC	_("coprocessor for insn is not enabled for cde")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
#define MVE_NOT_IT	_("Warning: instruction is UNPREDICTABLE in an IT " \
			  "block")
#define MVE_NOT_VPT	_("Warning: instruction is UNPREDICTABLE in a VPT " \
			  "block")
#define MVE_BAD_PC	_("Warning: instruction is UNPREDICTABLE with PC" \
			  " operand")
#define MVE_BAD_SP	_("Warning: instruction is UNPREDICTABLE with SP" \
			  " operand")
#define BAD_SIMD_TYPE	_("bad type in SIMD instruction")
#define BAD_MVE_AUTO \
  _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
    " use a valid -march or -mcpu option.")
#define BAD_MVE_SRCDEST	_("Warning: 32-bit element size and same destination "\
			  "and source operands makes instruction UNPREDICTABLE")
#define BAD_EL_TYPE	_("bad element type for instruction")
#define MVE_BAD_QREG	_("MVE vector register Q[0..7] expected")
/* Fix user-visible typo: "extention" -> "extension".  */
#define BAD_PACBTI	_("selected processor does not support PACBTI extension")
954 static htab_t arm_ops_hsh
;
955 static htab_t arm_cond_hsh
;
956 static htab_t arm_vcond_hsh
;
957 static htab_t arm_shift_hsh
;
958 static htab_t arm_psr_hsh
;
959 static htab_t arm_v7m_psr_hsh
;
960 static htab_t arm_reg_hsh
;
961 static htab_t arm_reloc_hsh
;
962 static htab_t arm_barrier_opt_hsh
;
964 /* Stuff needed to resolve the label ambiguity
973 symbolS
* last_label_seen
;
974 static int label_is_thumb_function_name
= false;
976 /* Literal pool structure. Held on a per-section
977 and per-sub-section basis. */
979 #define MAX_LITERAL_POOL_SIZE 1024
980 typedef struct literal_pool
982 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
983 unsigned int next_free_entry
;
989 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
991 struct literal_pool
* next
;
992 unsigned int alignment
;
995 /* Pointer to a linked list of literal pools. */
996 literal_pool
* list_of_pools
= NULL
;
998 typedef enum asmfunc_states
1001 WAITING_ASMFUNC_NAME
,
1005 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
1008 # define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
1010 static struct current_pred now_pred
;
1014 now_pred_compatible (int cond
)
1016 return (cond
& ~1) == (now_pred
.cc
& ~1);
1020 conditional_insn (void)
1022 return inst
.cond
!= COND_ALWAYS
;
1025 static int in_pred_block (void);
1027 static int handle_pred_state (void);
1029 static void force_automatic_it_block_close (void);
1031 static void it_fsm_post_encode (void);
1033 #define set_pred_insn_type(type) \
1036 inst.pred_insn_type = type; \
1037 if (handle_pred_state () == FAIL) \
1042 #define set_pred_insn_type_nonvoid(type, failret) \
1045 inst.pred_insn_type = type; \
1046 if (handle_pred_state () == FAIL) \
1051 #define set_pred_insn_type_last() \
1054 if (inst.cond == COND_ALWAYS) \
1055 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN); \
1057 set_pred_insn_type (INSIDE_IT_LAST_INSN); \
1061 /* Toggle value[pos]. */
1062 #define TOGGLE_BIT(value, pos) (value ^ (1 << pos))
1066 /* This array holds the chars that always start a comment. If the
1067 pre-processor is disabled, these aren't very useful. */
1068 char arm_comment_chars
[] = "@";
1070 /* This array holds the chars that only start a comment at the beginning of
1071 a line. If the line seems to have the form '# 123 filename'
1072 .line and .file directives will appear in the pre-processed output. */
1073 /* Note that input_file.c hand checks for '#' at the beginning of the
1074 first line of the input file. This is because the compiler outputs
1075 #NO_APP at the beginning of its output. */
1076 /* Also note that comments like this one will always work. */
1077 const char line_comment_chars
[] = "#";
1079 char arm_line_separator_chars
[] = ";";
1081 /* Chars that can be used to separate mant
1082 from exp in floating point numbers. */
1083 const char EXP_CHARS
[] = "eE";
1085 /* Chars that mean this number is a floating point constant. */
1086 /* As in 0f12.456 */
1087 /* or 0d1.2345e12 */
1089 const char FLT_CHARS
[] = "rRsSfFdDxXeEpPHh";
1091 /* Prefix characters that indicate the start of an immediate
1093 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
1095 /* Separator character handling. */
1097 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
1099 enum fp_16bit_format
1101 ARM_FP16_FORMAT_IEEE
= 0x1,
1102 ARM_FP16_FORMAT_ALTERNATIVE
= 0x2,
1103 ARM_FP16_FORMAT_DEFAULT
= 0x3
1106 static enum fp_16bit_format fp16_format
= ARM_FP16_FORMAT_DEFAULT
;
1110 skip_past_char (char ** str
, char c
)
1112 /* PR gas/14987: Allow for whitespace before the expected character. */
1113 skip_whitespace (*str
);
1124 #define skip_past_comma(str) skip_past_char (str, ',')
1126 /* Arithmetic expressions (possibly involving symbols). */
1128 /* Return TRUE if anything in the expression is a bignum. */
1131 walk_no_bignums (symbolS
* sp
)
1133 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
1136 if (symbol_get_value_expression (sp
)->X_add_symbol
)
1138 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
1139 || (symbol_get_value_expression (sp
)->X_op_symbol
1140 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
1146 static bool in_my_get_expression
= false;
1148 /* Third argument to my_get_expression. */
1149 #define GE_NO_PREFIX 0
1150 #define GE_IMM_PREFIX 1
1151 #define GE_OPT_PREFIX 2
1152 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1153 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1154 #define GE_OPT_PREFIX_BIG 3
1157 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
1161 /* In unified syntax, all prefixes are optional. */
1163 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
1166 switch (prefix_mode
)
1168 case GE_NO_PREFIX
: break;
1170 if (!is_immediate_prefix (**str
))
1172 inst
.error
= _("immediate expression requires a # prefix");
1178 case GE_OPT_PREFIX_BIG
:
1179 if (is_immediate_prefix (**str
))
1186 memset (ep
, 0, sizeof (expressionS
));
1188 save_in
= input_line_pointer
;
1189 input_line_pointer
= *str
;
1190 in_my_get_expression
= true;
1192 in_my_get_expression
= false;
1194 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1196 /* We found a bad or missing expression in md_operand(). */
1197 *str
= input_line_pointer
;
1198 input_line_pointer
= save_in
;
1199 if (inst
.error
== NULL
)
1200 inst
.error
= (ep
->X_op
== O_absent
1201 ? _("missing expression") :_("bad expression"));
1205 /* Get rid of any bignums now, so that we don't generate an error for which
1206 we can't establish a line number later on. Big numbers are never valid
1207 in instructions, which is where this routine is always called. */
1208 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1209 && (ep
->X_op
== O_big
1210 || (ep
->X_add_symbol
1211 && (walk_no_bignums (ep
->X_add_symbol
)
1213 && walk_no_bignums (ep
->X_op_symbol
))))))
1215 inst
.error
= _("invalid constant");
1216 *str
= input_line_pointer
;
1217 input_line_pointer
= save_in
;
1221 *str
= input_line_pointer
;
1222 input_line_pointer
= save_in
;
1226 /* Turn a string in input_line_pointer into a floating point constant
1227 of type TYPE, and store the appropriate bytes in *LITP. The number
1228 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1229 returned, or NULL on OK.
1231 Note that fp constants aren't represent in the normal way on the ARM.
1232 In big endian mode, things are as expected. However, in little endian
1233 mode fp constants are big-endian word-wise, and little-endian byte-wise
1234 within the words. For example, (double) 1.1 in big endian mode is
1235 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1236 the byte sequence 99 99 f1 3f 9a 99 99 99.
1238 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1241 md_atof (int type
, char * litP
, int * sizeP
)
1244 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1252 /* bfloat16, despite not being part of the IEEE specification, can also
1253 be handled by atof_ieee(). */
1284 return _("Unrecognized or unsupported floating point constant");
1287 t
= atof_ieee (input_line_pointer
, type
, words
);
1289 input_line_pointer
= t
;
1290 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1292 if (target_big_endian
|| prec
== 1)
1293 for (i
= 0; i
< prec
; i
++)
1295 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1296 litP
+= sizeof (LITTLENUM_TYPE
);
1298 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1299 for (i
= prec
- 1; i
>= 0; i
--)
1301 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1302 litP
+= sizeof (LITTLENUM_TYPE
);
1305 /* For a 4 byte float the order of elements in `words' is 1 0.
1306 For an 8 byte float the order is 1 0 3 2. */
1307 for (i
= 0; i
< prec
; i
+= 2)
1309 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1310 sizeof (LITTLENUM_TYPE
));
1311 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1312 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1313 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1319 /* We handle all bad expressions here, so that we can report the faulty
1320 instruction in the error message. */
1323 md_operand (expressionS
* exp
)
1325 if (in_my_get_expression
)
1326 exp
->X_op
= O_illegal
;
1329 /* Immediate values. */
1332 /* Generic immediate-value read function for use in directives.
1333 Accepts anything that 'expression' can fold to a constant.
1334 *val receives the number. */
1337 immediate_for_directive (int *val
)
1340 exp
.X_op
= O_illegal
;
1342 if (is_immediate_prefix (*input_line_pointer
))
1344 input_line_pointer
++;
1348 if (exp
.X_op
!= O_constant
)
1350 as_bad (_("expected #constant"));
1351 ignore_rest_of_line ();
1354 *val
= exp
.X_add_number
;
1359 /* Register parsing. */
1361 /* Generic register parser. CCP points to what should be the
1362 beginning of a register name. If it is indeed a valid register
1363 name, advance CCP over it and return the reg_entry structure;
1364 otherwise return NULL. Does not issue diagnostics. */
1366 static struct reg_entry
*
1367 arm_reg_parse_multi (char **ccp
)
1371 struct reg_entry
*reg
;
1373 skip_whitespace (start
);
1375 #ifdef REGISTER_PREFIX
1376 if (*start
!= REGISTER_PREFIX
)
1380 #ifdef OPTIONAL_REGISTER_PREFIX
1381 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1386 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1391 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1393 reg
= (struct reg_entry
*) str_hash_find_n (arm_reg_hsh
, start
, p
- start
);
1403 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1404 enum arm_reg_type type
)
1406 /* Alternative syntaxes are accepted for a few register classes. */
1413 /* Generic coprocessor register names are allowed for these. */
1414 if (reg
&& reg
->type
== REG_TYPE_CN
)
1419 /* For backward compatibility, a bare number is valid here. */
1421 unsigned long processor
= strtoul (start
, ccp
, 10);
1422 if (*ccp
!= start
&& processor
<= 15)
1427 case REG_TYPE_MMXWC
:
1428 /* WC includes WCG. ??? I'm not sure this is true for all
1429 instructions that take WC registers. */
1430 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1441 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1442 return value is the register number or FAIL. */
1445 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1448 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1451 /* Do not allow a scalar (reg+index) to parse as a register. */
1452 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1455 if (reg
&& reg
->type
== type
)
1458 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1465 /* Parse a Neon type specifier. *STR should point at the leading '.'
1466 character. Does no verification at this stage that the type fits the opcode
1473 Can all be legally parsed by this function.
1475 Fills in neon_type struct pointer with parsed information, and updates STR
1476 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1477 type, FAIL if not. */
1480 parse_neon_type (struct neon_type
*type
, char **str
)
1487 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1489 enum neon_el_type thistype
= NT_untyped
;
1490 unsigned thissize
= -1u;
1497 /* Just a size without an explicit type. */
1501 switch (TOLOWER (*ptr
))
1503 case 'i': thistype
= NT_integer
; break;
1504 case 'f': thistype
= NT_float
; break;
1505 case 'p': thistype
= NT_poly
; break;
1506 case 's': thistype
= NT_signed
; break;
1507 case 'u': thistype
= NT_unsigned
; break;
1509 thistype
= NT_float
;
1514 thistype
= NT_bfloat
;
1515 switch (TOLOWER (*(++ptr
)))
1519 thissize
= strtoul (ptr
, &ptr
, 10);
1522 as_bad (_("bad size %d in type specifier"), thissize
);
1526 case '0': case '1': case '2': case '3': case '4':
1527 case '5': case '6': case '7': case '8': case '9':
1529 as_bad (_("unexpected type character `b' -- did you mean `bf'?"));
1536 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1542 /* .f is an abbreviation for .f32. */
1543 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1548 thissize
= strtoul (ptr
, &ptr
, 10);
1550 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1553 as_bad (_("bad size %d in type specifier"), thissize
);
1561 type
->el
[type
->elems
].type
= thistype
;
1562 type
->el
[type
->elems
].size
= thissize
;
1567 /* Empty/missing type is not a successful parse. */
1568 if (type
->elems
== 0)
1576 /* Errors may be set multiple times during parsing or bit encoding
1577 (particularly in the Neon bits), but usually the earliest error which is set
1578 will be the most meaningful. Avoid overwriting it with later (cascading)
1579 errors by calling this function. */
1582 first_error (const char *err
)
1588 /* Parse a single type, e.g. ".s32", leading period included. */
1590 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1593 struct neon_type optype
;
1597 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1599 if (optype
.elems
== 1)
1600 *vectype
= optype
.el
[0];
1603 first_error (_("only one type should be specified for operand"));
1609 first_error (_("vector type expected"));
1621 /* Special meanings for indices (which have a range of 0-7), which will fit into
1624 #define NEON_ALL_LANES 15
1625 #define NEON_INTERLEAVE_LANES 14
1627 /* Record a use of the given feature. */
1629 record_feature_use (const arm_feature_set
*feature
)
1632 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
1634 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
1637 /* If the given feature available in the selected CPU, mark it as used.
1638 Returns TRUE iff feature is available. */
1640 mark_feature_used (const arm_feature_set
*feature
)
1643 /* Do not support the use of MVE only instructions when in auto-detection or
1645 if (((feature
== &mve_ext
) || (feature
== &mve_fp_ext
))
1646 && ARM_CPU_IS_ANY (cpu_variant
))
1648 first_error (BAD_MVE_AUTO
);
1651 /* Ensure the option is valid on the current architecture. */
1652 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
1655 /* Add the appropriate architecture feature for the barrier option used.
1657 record_feature_use (feature
);
1662 /* Parse either a register or a scalar, with an optional type. Return the
1663 register number, and optionally fill in the actual type of the register
1664 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1665 type/index information in *TYPEINFO. */
1668 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1669 enum arm_reg_type
*rtype
,
1670 struct neon_typed_alias
*typeinfo
)
1673 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1674 struct neon_typed_alias atype
;
1675 struct neon_type_el parsetype
;
1679 atype
.eltype
.type
= NT_invtype
;
1680 atype
.eltype
.size
= -1;
1682 /* Try alternate syntax for some types of register. Note these are mutually
1683 exclusive with the Neon syntax extensions. */
1686 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1694 /* Undo polymorphism when a set of register types may be accepted. */
1695 if ((type
== REG_TYPE_NDQ
1696 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1697 || (type
== REG_TYPE_VFSD
1698 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1699 || (type
== REG_TYPE_NSDQ
1700 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1701 || reg
->type
== REG_TYPE_NQ
))
1702 || (type
== REG_TYPE_NSD
1703 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1704 || (type
== REG_TYPE_MMXWC
1705 && (reg
->type
== REG_TYPE_MMXWCG
)))
1706 type
= (enum arm_reg_type
) reg
->type
;
1708 if (type
== REG_TYPE_MQ
)
1710 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
1713 if (!reg
|| reg
->type
!= REG_TYPE_NQ
)
1716 if (reg
->number
> 14 && !mark_feature_used (&fpu_vfp_ext_d32
))
1718 first_error (_("expected MVE register [q0..q7]"));
1723 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
1724 && (type
== REG_TYPE_NQ
))
1728 if (type
!= reg
->type
)
1734 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1736 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1738 first_error (_("can't redefine type for operand"));
1741 atype
.defined
|= NTA_HASTYPE
;
1742 atype
.eltype
= parsetype
;
1745 if (skip_past_char (&str
, '[') == SUCCESS
)
1747 if (type
!= REG_TYPE_VFD
1748 && !(type
== REG_TYPE_VFS
1749 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8_2
))
1750 && !(type
== REG_TYPE_NQ
1751 && ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)))
1753 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
1754 first_error (_("only D and Q registers may be indexed"));
1756 first_error (_("only D registers may be indexed"));
1760 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1762 first_error (_("can't change index for operand"));
1766 atype
.defined
|= NTA_HASINDEX
;
1768 if (skip_past_char (&str
, ']') == SUCCESS
)
1769 atype
.index
= NEON_ALL_LANES
;
1774 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1776 if (exp
.X_op
!= O_constant
)
1778 first_error (_("constant expression required"));
1782 if (skip_past_char (&str
, ']') == FAIL
)
1785 atype
.index
= exp
.X_add_number
;
1800 /* Like arm_reg_parse, but also allow the following extra features:
1801 - If RTYPE is non-zero, return the (possibly restricted) type of the
1802 register (e.g. Neon double or quad reg when either has been requested).
1803 - If this is a Neon vector type with additional type information, fill
1804 in the struct pointed to by VECTYPE (if non-NULL).
1805 This function will fault on encountering a scalar. */
1808 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1809 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1811 struct neon_typed_alias atype
;
1813 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1818 /* Do not allow regname(... to parse as a register. */
1822 /* Do not allow a scalar (reg+index) to parse as a register. */
1823 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1825 first_error (_("register operand expected, but got scalar"));
1830 *vectype
= atype
.eltype
;
1837 #define NEON_SCALAR_REG(X) ((X) >> 4)
1838 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1840 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1841 have enough information to be able to do a good job bounds-checking. So, we
1842 just do easy checks here, and do further checks later. */
1845 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
, enum
1846 arm_reg_type reg_type
)
1850 struct neon_typed_alias atype
;
1853 reg
= parse_typed_reg_or_scalar (&str
, reg_type
, NULL
, &atype
);
1871 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1874 if (reg_type
!= REG_TYPE_MQ
&& atype
.index
== NEON_ALL_LANES
)
1876 first_error (_("scalar must have an index"));
1879 else if (atype
.index
>= reg_size
/ elsize
)
1881 first_error (_("scalar index out of range"));
1886 *type
= atype
.eltype
;
1890 return reg
* 16 + atype
.index
;
1893 /* Types of registers in a list. */
1907 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1910 parse_reg_list (char ** strp
, enum reg_list_els etype
)
1916 gas_assert (etype
== REGLIST_RN
|| etype
== REGLIST_CLRM
1917 || etype
== REGLIST_PSEUDO
);
1919 /* We come back here if we get ranges concatenated by '+' or '|'. */
1922 skip_whitespace (str
);
1935 const char apsr_str
[] = "apsr";
1936 int apsr_str_len
= strlen (apsr_str
);
1937 enum arm_reg_type rt
;
1939 if (etype
== REGLIST_RN
|| etype
== REGLIST_CLRM
)
1942 rt
= REG_TYPE_PSEUDO
;
1944 reg
= arm_reg_parse (&str
, rt
);
1945 if (etype
== REGLIST_CLRM
)
1947 if (reg
== REG_SP
|| reg
== REG_PC
)
1949 else if (reg
== FAIL
1950 && !strncasecmp (str
, apsr_str
, apsr_str_len
)
1951 && !ISALPHA (*(str
+ apsr_str_len
)))
1954 str
+= apsr_str_len
;
1959 first_error (_("r0-r12, lr or APSR expected"));
1963 else if (etype
== REGLIST_PSEUDO
)
1967 first_error (_(reg_expected_msgs
[REG_TYPE_PSEUDO
]));
1971 else /* etype == REGLIST_RN. */
1975 first_error (_(reg_expected_msgs
[REGLIST_RN
]));
1986 first_error (_("bad range in register list"));
1990 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1992 if (range
& (1 << i
))
1994 (_("Warning: duplicated register (r%d) in register list"),
2002 if (range
& (1 << reg
))
2003 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
2005 else if (reg
<= cur_reg
)
2006 as_tsktsk (_("Warning: register range not in ascending order"));
2011 while (skip_past_comma (&str
) != FAIL
2012 || (in_range
= 1, *str
++ == '-'));
2015 if (skip_past_char (&str
, '}') == FAIL
)
2017 first_error (_("missing `}'"));
2021 else if (etype
== REGLIST_RN
)
2025 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
2028 if (exp
.X_op
== O_constant
)
2030 if (exp
.X_add_number
2031 != (exp
.X_add_number
& 0x0000ffff))
2033 inst
.error
= _("invalid register mask");
2037 if ((range
& exp
.X_add_number
) != 0)
2039 int regno
= range
& exp
.X_add_number
;
2042 regno
= (1 << regno
) - 1;
2044 (_("Warning: duplicated register (r%d) in register list"),
2048 range
|= exp
.X_add_number
;
2052 if (inst
.relocs
[0].type
!= 0)
2054 inst
.error
= _("expression too complex");
2058 memcpy (&inst
.relocs
[0].exp
, &exp
, sizeof (expressionS
));
2059 inst
.relocs
[0].type
= BFD_RELOC_ARM_MULTI
;
2060 inst
.relocs
[0].pc_rel
= 0;
2064 if (*str
== '|' || *str
== '+')
2070 while (another_range
);
2076 /* Parse a VFP register list. If the string is invalid return FAIL.
2077 Otherwise return the number of registers, and set PBASE to the first
2078 register. Parses registers of type ETYPE.
2079 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
2080 - Q registers can be used to specify pairs of D registers
2081 - { } can be omitted from around a singleton register list
2082 FIXME: This is not implemented, as it would require backtracking in
2085 This could be done (the meaning isn't really ambiguous), but doesn't
2086 fit in well with the current parsing framework.
2087 - 32 D registers may be used (also true for VFPv3).
2088 FIXME: Types are ignored in these register lists, which is probably a
2092 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
,
2093 bool *partial_match
)
2098 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
2102 unsigned long mask
= 0;
2104 bool vpr_seen
= false;
2106 (etype
== REGLIST_VFP_S_VPR
) || (etype
== REGLIST_VFP_D_VPR
);
2108 if (skip_past_char (&str
, '{') == FAIL
)
2110 inst
.error
= _("expecting {");
2117 case REGLIST_VFP_S_VPR
:
2118 regtype
= REG_TYPE_VFS
;
2123 case REGLIST_VFP_D_VPR
:
2124 regtype
= REG_TYPE_VFD
;
2127 case REGLIST_NEON_D
:
2128 regtype
= REG_TYPE_NDQ
;
2135 if (etype
!= REGLIST_VFP_S
&& etype
!= REGLIST_VFP_S_VPR
)
2137 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
2138 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
2142 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
2145 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
2152 base_reg
= max_regs
;
2153 *partial_match
= false;
2157 unsigned int setmask
= 1, addregs
= 1;
2158 const char vpr_str
[] = "vpr";
2159 size_t vpr_str_len
= strlen (vpr_str
);
2161 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
2165 if (new_base
== FAIL
2166 && !strncasecmp (str
, vpr_str
, vpr_str_len
)
2167 && !ISALPHA (*(str
+ vpr_str_len
))
2173 base_reg
= 0; /* Canonicalize VPR only on d0 with 0 regs. */
2177 first_error (_("VPR expected last"));
2180 else if (new_base
== FAIL
)
2182 if (regtype
== REG_TYPE_VFS
)
2183 first_error (_("VFP single precision register or VPR "
2185 else /* regtype == REG_TYPE_VFD. */
2186 first_error (_("VFP/Neon double precision register or VPR "
2191 else if (new_base
== FAIL
)
2193 first_error (_(reg_expected_msgs
[regtype
]));
2197 *partial_match
= true;
2201 if (new_base
>= max_regs
)
2203 first_error (_("register out of range in list"));
2207 /* Note: a value of 2 * n is returned for the register Q<n>. */
2208 if (regtype
== REG_TYPE_NQ
)
2214 if (new_base
< base_reg
)
2215 base_reg
= new_base
;
2217 if (mask
& (setmask
<< new_base
))
2219 first_error (_("invalid register list"));
2223 if ((mask
>> new_base
) != 0 && ! warned
&& !vpr_seen
)
2225 as_tsktsk (_("register list not in ascending order"));
2229 mask
|= setmask
<< new_base
;
2232 if (*str
== '-') /* We have the start of a range expression */
2238 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
2241 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
2245 if (high_range
>= max_regs
)
2247 first_error (_("register out of range in list"));
2251 if (regtype
== REG_TYPE_NQ
)
2252 high_range
= high_range
+ 1;
2254 if (high_range
<= new_base
)
2256 inst
.error
= _("register range not in ascending order");
2260 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
2262 if (mask
& (setmask
<< new_base
))
2264 inst
.error
= _("invalid register list");
2268 mask
|= setmask
<< new_base
;
2273 while (skip_past_comma (&str
) != FAIL
);
2277 /* Sanity check -- should have raised a parse error above. */
2278 if ((!vpr_seen
&& count
== 0) || count
> max_regs
)
2283 if (expect_vpr
&& !vpr_seen
)
2285 first_error (_("VPR expected last"));
2289 /* Final test -- the registers must be consecutive. */
2291 for (i
= 0; i
< count
; i
++)
2293 if ((mask
& (1u << i
)) == 0)
2295 inst
.error
= _("non-contiguous register range");
2305 /* True if two alias types are the same. */
2308 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
2316 if (a
->defined
!= b
->defined
)
2319 if ((a
->defined
& NTA_HASTYPE
) != 0
2320 && (a
->eltype
.type
!= b
->eltype
.type
2321 || a
->eltype
.size
!= b
->eltype
.size
))
2324 if ((a
->defined
& NTA_HASINDEX
) != 0
2325 && (a
->index
!= b
->index
))
2331 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2332 The base register is put in *PBASE.
2333 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2335 The register stride (minus one) is put in bit 4 of the return value.
2336 Bits [6:5] encode the list length (minus one).
2337 The type of the list elements is put in *ELTYPE, if non-NULL. */
2339 #define NEON_LANE(X) ((X) & 0xf)
2340 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2341 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2344 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
2346 struct neon_type_el
*eltype
)
2353 int leading_brace
= 0;
2354 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
2355 const char *const incr_error
= mve
? _("register stride must be 1") :
2356 _("register stride must be 1 or 2");
2357 const char *const type_error
= _("mismatched element/structure types in list");
2358 struct neon_typed_alias firsttype
;
2359 firsttype
.defined
= 0;
2360 firsttype
.eltype
.type
= NT_invtype
;
2361 firsttype
.eltype
.size
= -1;
2362 firsttype
.index
= -1;
2364 if (skip_past_char (&ptr
, '{') == SUCCESS
)
2369 struct neon_typed_alias atype
;
2371 rtype
= REG_TYPE_MQ
;
2372 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2376 first_error (_(reg_expected_msgs
[rtype
]));
2383 if (rtype
== REG_TYPE_NQ
)
2389 else if (reg_incr
== -1)
2391 reg_incr
= getreg
- base_reg
;
2392 if (reg_incr
< 1 || reg_incr
> 2)
2394 first_error (_(incr_error
));
2398 else if (getreg
!= base_reg
+ reg_incr
* count
)
2400 first_error (_(incr_error
));
2404 if (! neon_alias_types_same (&atype
, &firsttype
))
2406 first_error (_(type_error
));
2410 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2414 struct neon_typed_alias htype
;
2415 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2417 lane
= NEON_INTERLEAVE_LANES
;
2418 else if (lane
!= NEON_INTERLEAVE_LANES
)
2420 first_error (_(type_error
));
2425 else if (reg_incr
!= 1)
2427 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2431 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2434 first_error (_(reg_expected_msgs
[rtype
]));
2437 if (! neon_alias_types_same (&htype
, &firsttype
))
2439 first_error (_(type_error
));
2442 count
+= hireg
+ dregs
- getreg
;
2446 /* If we're using Q registers, we can't use [] or [n] syntax. */
2447 if (rtype
== REG_TYPE_NQ
)
2453 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2457 else if (lane
!= atype
.index
)
2459 first_error (_(type_error
));
2463 else if (lane
== -1)
2464 lane
= NEON_INTERLEAVE_LANES
;
2465 else if (lane
!= NEON_INTERLEAVE_LANES
)
2467 first_error (_(type_error
));
2472 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2474 /* No lane set by [x]. We must be interleaving structures. */
2476 lane
= NEON_INTERLEAVE_LANES
;
2479 if (lane
== -1 || base_reg
== -1 || count
< 1 || (!mve
&& count
> 4)
2480 || (count
> 1 && reg_incr
== -1))
2482 first_error (_("error parsing element/structure list"));
2486 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2488 first_error (_("expected }"));
2496 *eltype
= firsttype
.eltype
;
2501 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2504 /* Parse an explicit relocation suffix on an expression. This is
2505 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2506 arm_reloc_hsh contains no entries, so this function can only
2507 succeed if there is no () after the word. Returns -1 on error,
2508 BFD_RELOC_UNUSED if there wasn't any suffix. */
2511 parse_reloc (char **str
)
2513 struct reloc_entry
*r
;
2517 return BFD_RELOC_UNUSED
;
2522 while (*q
&& *q
!= ')' && *q
!= ',')
2527 if ((r
= (struct reloc_entry
*)
2528 str_hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2535 /* Directives: register aliases. */
2537 static struct reg_entry
*
2538 insert_reg_alias (char *str
, unsigned number
, int type
)
2540 struct reg_entry
*new_reg
;
2543 if ((new_reg
= (struct reg_entry
*) str_hash_find (arm_reg_hsh
, str
)) != 0)
2545 if (new_reg
->builtin
)
2546 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2548 /* Only warn about a redefinition if it's not defined as the
2550 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2551 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2556 name
= xstrdup (str
);
2557 new_reg
= XNEW (struct reg_entry
);
2559 new_reg
->name
= name
;
2560 new_reg
->number
= number
;
2561 new_reg
->type
= type
;
2562 new_reg
->builtin
= false;
2563 new_reg
->neon
= NULL
;
2565 str_hash_insert (arm_reg_hsh
, name
, new_reg
, 0);
2571 insert_neon_reg_alias (char *str
, int number
, int type
,
2572 struct neon_typed_alias
*atype
)
2574 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2578 first_error (_("attempt to redefine typed alias"));
2584 reg
->neon
= XNEW (struct neon_typed_alias
);
2585 *reg
->neon
= *atype
;
2589 /* Look for the .req directive. This is of the form:
2591 new_register_name .req existing_register_name
2593 If we find one, or if it looks sufficiently like one that we want to
2594 handle any error here, return TRUE. Otherwise return FALSE. */
2597 create_register_alias (char * newname
, char *p
)
2599 struct reg_entry
*old
;
2600 char *oldname
, *nbuf
;
2603 /* The input scrubber ensures that whitespace after the mnemonic is
2604 collapsed to single spaces. */
2606 if (!startswith (oldname
, " .req "))
2610 if (*oldname
== '\0')
2613 old
= (struct reg_entry
*) str_hash_find (arm_reg_hsh
, oldname
);
2616 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2620 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2621 the desired alias name, and p points to its end. If not, then
2622 the desired alias name is in the global original_case_string. */
2623 #ifdef TC_CASE_SENSITIVE
2626 newname
= original_case_string
;
2627 nlen
= strlen (newname
);
2630 nbuf
= xmemdup0 (newname
, nlen
);
2632 /* Create aliases under the new name as stated; an all-lowercase
2633 version of the new name; and an all-uppercase version of the new
2635 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2637 for (p
= nbuf
; *p
; p
++)
2640 if (strncmp (nbuf
, newname
, nlen
))
2642 /* If this attempt to create an additional alias fails, do not bother
2643 trying to create the all-lower case alias. We will fail and issue
2644 a second, duplicate error message. This situation arises when the
2645 programmer does something like:
2648 The second .req creates the "Foo" alias but then fails to create
2649 the artificial FOO alias because it has already been created by the
2651 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2658 for (p
= nbuf
; *p
; p
++)
2661 if (strncmp (nbuf
, newname
, nlen
))
2662 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2669 /* Create a Neon typed/indexed register alias using directives, e.g.:
2674 These typed registers can be used instead of the types specified after the
2675 Neon mnemonic, so long as all operands given have types. Types can also be
2676 specified directly, e.g.:
2677 vadd d0.s32, d1.s32, d2.s32 */
2680 create_neon_reg_alias (char *newname
, char *p
)
2682 enum arm_reg_type basetype
;
2683 struct reg_entry
*basereg
;
2684 struct reg_entry mybasereg
;
2685 struct neon_type ntype
;
2686 struct neon_typed_alias typeinfo
;
2687 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2690 typeinfo
.defined
= 0;
2691 typeinfo
.eltype
.type
= NT_invtype
;
2692 typeinfo
.eltype
.size
= -1;
2693 typeinfo
.index
= -1;
2697 if (startswith (p
, " .dn "))
2698 basetype
= REG_TYPE_VFD
;
2699 else if (startswith (p
, " .qn "))
2700 basetype
= REG_TYPE_NQ
;
2709 basereg
= arm_reg_parse_multi (&p
);
2711 if (basereg
&& basereg
->type
!= basetype
)
2713 as_bad (_("bad type for register"));
2717 if (basereg
== NULL
)
2720 /* Try parsing as an integer. */
2721 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2722 if (exp
.X_op
!= O_constant
)
2724 as_bad (_("expression must be constant"));
2727 basereg
= &mybasereg
;
2728 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2734 typeinfo
= *basereg
->neon
;
2736 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2738 /* We got a type. */
2739 if (typeinfo
.defined
& NTA_HASTYPE
)
2741 as_bad (_("can't redefine the type of a register alias"));
2745 typeinfo
.defined
|= NTA_HASTYPE
;
2746 if (ntype
.elems
!= 1)
2748 as_bad (_("you must specify a single type only"));
2751 typeinfo
.eltype
= ntype
.el
[0];
2754 if (skip_past_char (&p
, '[') == SUCCESS
)
2757 /* We got a scalar index. */
2759 if (typeinfo
.defined
& NTA_HASINDEX
)
2761 as_bad (_("can't redefine the index of a scalar alias"));
2765 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2767 if (exp
.X_op
!= O_constant
)
2769 as_bad (_("scalar index must be constant"));
2773 typeinfo
.defined
|= NTA_HASINDEX
;
2774 typeinfo
.index
= exp
.X_add_number
;
2776 if (skip_past_char (&p
, ']') == FAIL
)
2778 as_bad (_("expecting ]"));
2783 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2784 the desired alias name, and p points to its end. If not, then
2785 the desired alias name is in the global original_case_string. */
2786 #ifdef TC_CASE_SENSITIVE
2787 namelen
= nameend
- newname
;
2789 newname
= original_case_string
;
2790 namelen
= strlen (newname
);
2793 namebuf
= xmemdup0 (newname
, namelen
);
2795 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2796 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2798 /* Insert name in all uppercase. */
2799 for (p
= namebuf
; *p
; p
++)
2802 if (strncmp (namebuf
, newname
, namelen
))
2803 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2804 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2806 /* Insert name in all lowercase. */
2807 for (p
= namebuf
; *p
; p
++)
2810 if (strncmp (namebuf
, newname
, namelen
))
2811 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2812 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2818 /* Should never be called, as .req goes between the alias and the
2819 register name, not at the beginning of the line. */
2822 s_req (int a ATTRIBUTE_UNUSED
)
2824 as_bad (_("invalid syntax for .req directive"));
2828 s_dn (int a ATTRIBUTE_UNUSED
)
2830 as_bad (_("invalid syntax for .dn directive"));
2834 s_qn (int a ATTRIBUTE_UNUSED
)
2836 as_bad (_("invalid syntax for .qn directive"));
2839 /* The .unreq directive deletes an alias which was previously defined
2840 by .req. For example:
2846 s_unreq (int a ATTRIBUTE_UNUSED
)
2851 name
= input_line_pointer
;
2853 while (*input_line_pointer
!= 0
2854 && *input_line_pointer
!= ' '
2855 && *input_line_pointer
!= '\n')
2856 ++input_line_pointer
;
2858 saved_char
= *input_line_pointer
;
2859 *input_line_pointer
= 0;
2862 as_bad (_("invalid syntax for .unreq directive"));
2865 struct reg_entry
*reg
2866 = (struct reg_entry
*) str_hash_find (arm_reg_hsh
, name
);
2869 as_bad (_("unknown register alias '%s'"), name
);
2870 else if (reg
->builtin
)
2871 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2878 str_hash_delete (arm_reg_hsh
, name
);
2879 free ((char *) reg
->name
);
2883 /* Also locate the all upper case and all lower case versions.
2884 Do not complain if we cannot find one or the other as it
2885 was probably deleted above. */
2887 nbuf
= strdup (name
);
2888 for (p
= nbuf
; *p
; p
++)
2890 reg
= (struct reg_entry
*) str_hash_find (arm_reg_hsh
, nbuf
);
2893 str_hash_delete (arm_reg_hsh
, nbuf
);
2894 free ((char *) reg
->name
);
2899 for (p
= nbuf
; *p
; p
++)
2901 reg
= (struct reg_entry
*) str_hash_find (arm_reg_hsh
, nbuf
);
2904 str_hash_delete (arm_reg_hsh
, nbuf
);
2905 free ((char *) reg
->name
);
2914 *input_line_pointer
= saved_char
;
2915 demand_empty_rest_of_line ();
2918 /* Directives: Instruction set selection. */
2921 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2922 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2923 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2924 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2926 /* Create a new mapping symbol for the transition to STATE. */
2929 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2932 const char * symname
;
2939 type
= BSF_NO_FLAGS
;
2943 type
= BSF_NO_FLAGS
;
2947 type
= BSF_NO_FLAGS
;
2953 symbolP
= symbol_new (symname
, now_seg
, frag
, value
);
2954 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2959 THUMB_SET_FUNC (symbolP
, 0);
2960 ARM_SET_THUMB (symbolP
, 0);
2961 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2965 THUMB_SET_FUNC (symbolP
, 1);
2966 ARM_SET_THUMB (symbolP
, 1);
2967 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2975 /* Save the mapping symbols for future reference. Also check that
2976 we do not place two mapping symbols at the same offset within a
2977 frag. We'll handle overlap between frags in
2978 check_mapping_symbols.
2980 If .fill or other data filling directive generates zero sized data,
2981 the mapping symbol for the following code will have the same value
2982 as the one generated for the data filling directive. In this case,
2983 we replace the old symbol with the new one at the same address. */
2986 if (frag
->tc_frag_data
.first_map
!= NULL
)
2988 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2989 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2991 frag
->tc_frag_data
.first_map
= symbolP
;
2993 if (frag
->tc_frag_data
.last_map
!= NULL
)
2995 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2996 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2997 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2999 frag
->tc_frag_data
.last_map
= symbolP
;
3002 /* We must sometimes convert a region marked as code to data during
3003 code alignment, if an odd number of bytes have to be padded. The
3004 code mapping symbol is pushed to an aligned address. */
3007 insert_data_mapping_symbol (enum mstate state
,
3008 valueT value
, fragS
*frag
, offsetT bytes
)
3010 /* If there was already a mapping symbol, remove it. */
3011 if (frag
->tc_frag_data
.last_map
!= NULL
3012 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
3014 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
3018 know (frag
->tc_frag_data
.first_map
== symp
);
3019 frag
->tc_frag_data
.first_map
= NULL
;
3021 frag
->tc_frag_data
.last_map
= NULL
;
3022 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
3025 make_mapping_symbol (MAP_DATA
, value
, frag
);
3026 make_mapping_symbol (state
, value
+ bytes
, frag
);
3029 static void mapping_state_2 (enum mstate state
, int max_chars
);
3031 /* Set the mapping state to STATE. Only call this when about to
3032 emit some STATE bytes to the file. */
3034 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
3036 mapping_state (enum mstate state
)
3038 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
3040 if (mapstate
== state
)
3041 /* The mapping symbol has already been emitted.
3042 There is nothing else to do. */
3045 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
3047 All ARM instructions require 4-byte alignment.
3048 (Almost) all Thumb instructions require 2-byte alignment.
3050 When emitting instructions into any section, mark the section
3053 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
3054 but themselves require 2-byte alignment; this applies to some
3055 PC- relative forms. However, these cases will involve implicit
3056 literal pool generation or an explicit .align >=2, both of
3057 which will cause the section to me marked with sufficient
3058 alignment. Thus, we don't handle those cases here. */
3059 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
3061 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
3062 /* This case will be evaluated later. */
3065 mapping_state_2 (state
, 0);
3068 /* Same as mapping_state, but MAX_CHARS bytes have already been
3069 allocated. Put the mapping symbol that far back. */
3072 mapping_state_2 (enum mstate state
, int max_chars
)
3074 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
3076 if (!SEG_NORMAL (now_seg
))
3079 if (mapstate
== state
)
3080 /* The mapping symbol has already been emitted.
3081 There is nothing else to do. */
3084 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
3085 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
3087 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
3088 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
3091 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
3094 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
3095 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
3099 #define mapping_state(x) ((void)0)
3100 #define mapping_state_2(x, y) ((void)0)
3103 /* Find the real, Thumb encoded start of a Thumb function. */
3107 find_real_start (symbolS
* symbolP
)
3110 const char * name
= S_GET_NAME (symbolP
);
3111 symbolS
* new_target
;
3113 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
3114 #define STUB_NAME ".real_start_of"
3119 /* The compiler may generate BL instructions to local labels because
3120 it needs to perform a branch to a far away location. These labels
3121 do not have a corresponding ".real_start_of" label. We check
3122 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
3123 the ".real_start_of" convention for nonlocal branches. */
3124 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
3127 real_start
= concat (STUB_NAME
, name
, NULL
);
3128 new_target
= symbol_find (real_start
);
3131 if (new_target
== NULL
)
3133 as_warn (_("Failed to find real start of function: %s\n"), name
);
3134 new_target
= symbolP
;
3142 opcode_select (int width
)
3149 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
3150 as_bad (_("selected processor does not support THUMB opcodes"));
3153 /* No need to force the alignment, since we will have been
3154 coming from ARM mode, which is word-aligned. */
3155 record_alignment (now_seg
, 1);
3162 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
3163 as_bad (_("selected processor does not support ARM opcodes"));
3168 frag_align (2, 0, 0);
3170 record_alignment (now_seg
, 1);
3175 as_bad (_("invalid instruction size selected (%d)"), width
);
3180 s_arm (int ignore ATTRIBUTE_UNUSED
)
3183 demand_empty_rest_of_line ();
3187 s_thumb (int ignore ATTRIBUTE_UNUSED
)
3190 demand_empty_rest_of_line ();
3194 s_code (int unused ATTRIBUTE_UNUSED
)
3198 temp
= get_absolute_expression ();
3203 opcode_select (temp
);
3207 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
3212 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
3214 /* If we are not already in thumb mode go into it, EVEN if
3215 the target processor does not support thumb instructions.
3216 This is used by gcc/config/arm/lib1funcs.asm for example
3217 to compile interworking support functions even if the
3218 target processor should not support interworking. */
3222 record_alignment (now_seg
, 1);
3225 demand_empty_rest_of_line ();
3229 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
3233 /* The following label is the name/address of the start of a Thumb function.
3234 We need to know this for the interworking support. */
3235 label_is_thumb_function_name
= true;
3238 /* Perform a .set directive, but also mark the alias as
3239 being a thumb function. */
3242 s_thumb_set (int equiv
)
3244 /* XXX the following is a duplicate of the code for s_set() in read.c
3245 We cannot just call that code as we need to get at the symbol that
3252 /* Especial apologies for the random logic:
3253 This just grew, and could be parsed much more simply!
3255 delim
= get_symbol_name (& name
);
3256 end_name
= input_line_pointer
;
3257 (void) restore_line_pointer (delim
);
3259 if (*input_line_pointer
!= ',')
3262 as_bad (_("expected comma after name \"%s\""), name
);
3264 ignore_rest_of_line ();
3268 input_line_pointer
++;
3271 if (name
[0] == '.' && name
[1] == '\0')
3273 /* XXX - this should not happen to .thumb_set. */
3277 if ((symbolP
= symbol_find (name
)) == NULL
3278 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
3281 /* When doing symbol listings, play games with dummy fragments living
3282 outside the normal fragment chain to record the file and line info
3284 if (listing
& LISTING_SYMBOLS
)
3286 extern struct list_info_struct
* listing_tail
;
3287 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
3289 memset (dummy_frag
, 0, sizeof (fragS
));
3290 dummy_frag
->fr_type
= rs_fill
;
3291 dummy_frag
->line
= listing_tail
;
3292 symbolP
= symbol_new (name
, undefined_section
, dummy_frag
, 0);
3293 dummy_frag
->fr_symbol
= symbolP
;
3297 symbolP
= symbol_new (name
, undefined_section
, &zero_address_frag
, 0);
3300 /* "set" symbols are local unless otherwise specified. */
3301 SF_SET_LOCAL (symbolP
);
3302 #endif /* OBJ_COFF */
3303 } /* Make a new symbol. */
3305 symbol_table_insert (symbolP
);
3310 && S_IS_DEFINED (symbolP
)
3311 && S_GET_SEGMENT (symbolP
) != reg_section
)
3312 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
3314 pseudo_set (symbolP
);
3316 demand_empty_rest_of_line ();
3318 /* XXX Now we come to the Thumb specific bit of code. */
3320 THUMB_SET_FUNC (symbolP
, 1);
3321 ARM_SET_THUMB (symbolP
, 1);
3322 #if defined OBJ_ELF || defined OBJ_COFF
3323 ARM_SET_INTERWORK (symbolP
, support_interwork
);
3327 /* Directives: Mode selection. */
3329 /* .syntax [unified|divided] - choose the new unified syntax
3330 (same for Arm and Thumb encoding, modulo slight differences in what
3331 can be represented) or the old divergent syntax for each mode. */
3333 s_syntax (int unused ATTRIBUTE_UNUSED
)
3337 delim
= get_symbol_name (& name
);
3339 if (!strcasecmp (name
, "unified"))
3340 unified_syntax
= true;
3341 else if (!strcasecmp (name
, "divided"))
3342 unified_syntax
= false;
3345 as_bad (_("unrecognized syntax mode \"%s\""), name
);
3348 (void) restore_line_pointer (delim
);
3349 demand_empty_rest_of_line ();
3352 /* Directives: sectioning and alignment. */
3355 s_bss (int ignore ATTRIBUTE_UNUSED
)
3357 /* We don't support putting frags in the BSS segment, we fake it by
3358 marking in_bss, then looking at s_skip for clues. */
3359 subseg_set (bss_section
, 0);
3360 demand_empty_rest_of_line ();
3362 #ifdef md_elf_section_change_hook
3363 md_elf_section_change_hook ();
3368 s_even (int ignore ATTRIBUTE_UNUSED
)
3370 /* Never make frag if expect extra pass. */
3372 frag_align (1, 0, 0);
3374 record_alignment (now_seg
, 1);
3376 demand_empty_rest_of_line ();
3379 /* Directives: CodeComposer Studio. */
3381 /* .ref (for CodeComposer Studio syntax only). */
3383 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3385 if (codecomposer_syntax
)
3386 ignore_rest_of_line ();
3388 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3391 /* If name is not NULL, then it is used for marking the beginning of a
3392 function, whereas if it is NULL then it means the function end. */
3394 asmfunc_debug (const char * name
)
3396 static const char * last_name
= NULL
;
3400 gas_assert (last_name
== NULL
);
3403 if (debug_type
== DEBUG_STABS
)
3404 stabs_generate_asm_func (name
, name
);
3408 gas_assert (last_name
!= NULL
);
3410 if (debug_type
== DEBUG_STABS
)
3411 stabs_generate_asm_endfunc (last_name
, last_name
);
3418 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3420 if (codecomposer_syntax
)
3422 switch (asmfunc_state
)
3424 case OUTSIDE_ASMFUNC
:
3425 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3428 case WAITING_ASMFUNC_NAME
:
3429 as_bad (_(".asmfunc repeated."));
3432 case WAITING_ENDASMFUNC
:
3433 as_bad (_(".asmfunc without function."));
3436 demand_empty_rest_of_line ();
3439 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3443 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3445 if (codecomposer_syntax
)
3447 switch (asmfunc_state
)
3449 case OUTSIDE_ASMFUNC
:
3450 as_bad (_(".endasmfunc without a .asmfunc."));
3453 case WAITING_ASMFUNC_NAME
:
3454 as_bad (_(".endasmfunc without function."));
3457 case WAITING_ENDASMFUNC
:
3458 asmfunc_state
= OUTSIDE_ASMFUNC
;
3459 asmfunc_debug (NULL
);
3462 demand_empty_rest_of_line ();
3465 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3469 s_ccs_def (int name
)
3471 if (codecomposer_syntax
)
3474 as_bad (_(".def pseudo-op only available with -mccs flag."));
3477 /* Directives: Literal pools. */
3479 static literal_pool
*
3480 find_literal_pool (void)
3482 literal_pool
* pool
;
3484 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3486 if (pool
->section
== now_seg
3487 && pool
->sub_section
== now_subseg
)
3494 static literal_pool
*
3495 find_or_make_literal_pool (void)
3497 /* Next literal pool ID number. */
3498 static unsigned int latest_pool_num
= 1;
3499 literal_pool
* pool
;
3501 pool
= find_literal_pool ();
3505 /* Create a new pool. */
3506 pool
= XNEW (literal_pool
);
3510 pool
->next_free_entry
= 0;
3511 pool
->section
= now_seg
;
3512 pool
->sub_section
= now_subseg
;
3513 pool
->next
= list_of_pools
;
3514 pool
->symbol
= NULL
;
3515 pool
->alignment
= 2;
3517 /* Add it to the list. */
3518 list_of_pools
= pool
;
3521 /* New pools, and emptied pools, will have a NULL symbol. */
3522 if (pool
->symbol
== NULL
)
3524 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3525 &zero_address_frag
, 0);
3526 pool
->id
= latest_pool_num
++;
3533 /* Add the literal in the global 'inst'
3534 structure to the relevant literal pool. */
3537 add_to_lit_pool (unsigned int nbytes
)
3539 #define PADDING_SLOT 0x1
3540 #define LIT_ENTRY_SIZE_MASK 0xFF
3541 literal_pool
* pool
;
3542 unsigned int entry
, pool_size
= 0;
3543 bool padding_slot_p
= false;
3549 imm1
= inst
.operands
[1].imm
;
3550 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3551 : inst
.relocs
[0].exp
.X_unsigned
? 0
3552 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3553 if (target_big_endian
)
3556 imm2
= inst
.operands
[1].imm
;
3560 pool
= find_or_make_literal_pool ();
3562 /* Check if this literal value is already in the pool. */
3563 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3567 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3568 && (inst
.relocs
[0].exp
.X_op
== O_constant
)
3569 && (pool
->literals
[entry
].X_add_number
3570 == inst
.relocs
[0].exp
.X_add_number
)
3571 && (pool
->literals
[entry
].X_md
== nbytes
)
3572 && (pool
->literals
[entry
].X_unsigned
3573 == inst
.relocs
[0].exp
.X_unsigned
))
3576 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3577 && (inst
.relocs
[0].exp
.X_op
== O_symbol
)
3578 && (pool
->literals
[entry
].X_add_number
3579 == inst
.relocs
[0].exp
.X_add_number
)
3580 && (pool
->literals
[entry
].X_add_symbol
3581 == inst
.relocs
[0].exp
.X_add_symbol
)
3582 && (pool
->literals
[entry
].X_op_symbol
3583 == inst
.relocs
[0].exp
.X_op_symbol
)
3584 && (pool
->literals
[entry
].X_md
== nbytes
))
3587 else if ((nbytes
== 8)
3588 && !(pool_size
& 0x7)
3589 && ((entry
+ 1) != pool
->next_free_entry
)
3590 && (pool
->literals
[entry
].X_op
== O_constant
)
3591 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3592 && (pool
->literals
[entry
].X_unsigned
3593 == inst
.relocs
[0].exp
.X_unsigned
)
3594 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3595 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3596 && (pool
->literals
[entry
+ 1].X_unsigned
3597 == inst
.relocs
[0].exp
.X_unsigned
))
3600 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3601 if (padding_slot_p
&& (nbytes
== 4))
3607 /* Do we need to create a new entry? */
3608 if (entry
== pool
->next_free_entry
)
3610 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3612 inst
.error
= _("literal pool overflow");
3618 /* For 8-byte entries, we align to an 8-byte boundary,
3619 and split it into two 4-byte entries, because on 32-bit
3620 host, 8-byte constants are treated as big num, thus
3621 saved in "generic_bignum" which will be overwritten
3622 by later assignments.
3624 We also need to make sure there is enough space for
3627 We also check to make sure the literal operand is a
3629 if (!(inst
.relocs
[0].exp
.X_op
== O_constant
3630 || inst
.relocs
[0].exp
.X_op
== O_big
))
3632 inst
.error
= _("invalid type for literal pool");
3635 else if (pool_size
& 0x7)
3637 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3639 inst
.error
= _("literal pool overflow");
3643 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3644 pool
->literals
[entry
].X_op
= O_constant
;
3645 pool
->literals
[entry
].X_add_number
= 0;
3646 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3647 pool
->next_free_entry
+= 1;
3650 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3652 inst
.error
= _("literal pool overflow");
3656 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3657 pool
->literals
[entry
].X_op
= O_constant
;
3658 pool
->literals
[entry
].X_add_number
= imm1
;
3659 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3660 pool
->literals
[entry
++].X_md
= 4;
3661 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3662 pool
->literals
[entry
].X_op
= O_constant
;
3663 pool
->literals
[entry
].X_add_number
= imm2
;
3664 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3665 pool
->literals
[entry
].X_md
= 4;
3666 pool
->alignment
= 3;
3667 pool
->next_free_entry
+= 1;
3671 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3672 pool
->literals
[entry
].X_md
= 4;
3676 /* PR ld/12974: Record the location of the first source line to reference
3677 this entry in the literal pool. If it turns out during linking that the
3678 symbol does not exist we will be able to give an accurate line number for
3679 the (first use of the) missing reference. */
3680 if (debug_type
== DEBUG_DWARF2
)
3681 dwarf2_where (pool
->locs
+ entry
);
3683 pool
->next_free_entry
+= 1;
3685 else if (padding_slot_p
)
3687 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3688 pool
->literals
[entry
].X_md
= nbytes
;
3691 inst
.relocs
[0].exp
.X_op
= O_symbol
;
3692 inst
.relocs
[0].exp
.X_add_number
= pool_size
;
3693 inst
.relocs
[0].exp
.X_add_symbol
= pool
->symbol
;
3699 tc_start_label_without_colon (void)
3703 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3705 const char *label
= input_line_pointer
;
3707 while (!is_end_of_line
[(int) label
[-1]])
3712 as_bad (_("Invalid label '%s'"), label
);
3716 asmfunc_debug (label
);
3718 asmfunc_state
= WAITING_ENDASMFUNC
;
3724 /* Can't use symbol_new here, so have to create a symbol and then at
3725 a later date assign it a value. That's what these functions do. */
3728 symbol_locate (symbolS
* symbolP
,
3729 const char * name
, /* It is copied, the caller can modify. */
3730 segT segment
, /* Segment identifier (SEG_<something>). */
3731 valueT valu
, /* Symbol value. */
3732 fragS
* frag
) /* Associated fragment. */
3735 char * preserved_copy_of_name
;
3737 name_length
= strlen (name
) + 1; /* +1 for \0. */
3738 obstack_grow (¬es
, name
, name_length
);
3739 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3741 #ifdef tc_canonicalize_symbol_name
3742 preserved_copy_of_name
=
3743 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3746 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3748 S_SET_SEGMENT (symbolP
, segment
);
3749 S_SET_VALUE (symbolP
, valu
);
3750 symbol_clear_list_pointers (symbolP
);
3752 symbol_set_frag (symbolP
, frag
);
3754 /* Link to end of symbol chain. */
3756 extern int symbol_table_frozen
;
3758 if (symbol_table_frozen
)
3762 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3764 obj_symbol_new_hook (symbolP
);
3766 #ifdef tc_symbol_new_hook
3767 tc_symbol_new_hook (symbolP
);
3771 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3772 #endif /* DEBUG_SYMS */
3776 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3779 literal_pool
* pool
;
3782 pool
= find_literal_pool ();
3784 || pool
->symbol
== NULL
3785 || pool
->next_free_entry
== 0)
3788 /* Align pool as you have word accesses.
3789 Only make a frag if we have to. */
3791 frag_align (pool
->alignment
, 0, 0);
3793 record_alignment (now_seg
, 2);
3796 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3797 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3799 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3801 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3802 (valueT
) frag_now_fix (), frag_now
);
3803 symbol_table_insert (pool
->symbol
);
3805 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3807 #if defined OBJ_COFF || defined OBJ_ELF
3808 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3811 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3814 if (debug_type
== DEBUG_DWARF2
)
3815 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3817 /* First output the expression in the instruction to the pool. */
3818 emit_expr (&(pool
->literals
[entry
]),
3819 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3822 /* Mark the pool as empty. */
3823 pool
->next_free_entry
= 0;
3824 pool
->symbol
= NULL
;
3828 /* Forward declarations for functions below, in the MD interface
3830 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3831 static valueT
create_unwind_entry (int);
3832 static void start_unwind_section (const segT
, int);
3833 static void add_unwind_opcode (valueT
, int);
3834 static void flush_pending_unwind (void);
3836 /* Directives: Data. */
3839 s_arm_elf_cons (int nbytes
)
3843 #ifdef md_flush_pending_output
3844 md_flush_pending_output ();
3847 if (is_it_end_of_statement ())
3849 demand_empty_rest_of_line ();
3853 #ifdef md_cons_align
3854 md_cons_align (nbytes
);
3857 mapping_state (MAP_DATA
);
3861 char *base
= input_line_pointer
;
3865 if (exp
.X_op
!= O_symbol
)
3866 emit_expr (&exp
, (unsigned int) nbytes
);
3869 char *before_reloc
= input_line_pointer
;
3870 reloc
= parse_reloc (&input_line_pointer
);
3873 as_bad (_("unrecognized relocation suffix"));
3874 ignore_rest_of_line ();
3877 else if (reloc
== BFD_RELOC_UNUSED
)
3878 emit_expr (&exp
, (unsigned int) nbytes
);
3881 reloc_howto_type
*howto
= (reloc_howto_type
*)
3882 bfd_reloc_type_lookup (stdoutput
,
3883 (bfd_reloc_code_real_type
) reloc
);
3884 int size
= bfd_get_reloc_size (howto
);
3886 if (reloc
== BFD_RELOC_ARM_PLT32
)
3888 as_bad (_("(plt) is only valid on branch targets"));
3889 reloc
= BFD_RELOC_UNUSED
;
3894 as_bad (ngettext ("%s relocations do not fit in %d byte",
3895 "%s relocations do not fit in %d bytes",
3897 howto
->name
, nbytes
);
3900 /* We've parsed an expression stopping at O_symbol.
3901 But there may be more expression left now that we
3902 have parsed the relocation marker. Parse it again.
3903 XXX Surely there is a cleaner way to do this. */
3904 char *p
= input_line_pointer
;
3906 char *save_buf
= XNEWVEC (char, input_line_pointer
- base
);
3908 memcpy (save_buf
, base
, input_line_pointer
- base
);
3909 memmove (base
+ (input_line_pointer
- before_reloc
),
3910 base
, before_reloc
- base
);
3912 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3914 memcpy (base
, save_buf
, p
- base
);
3916 offset
= nbytes
- size
;
3917 p
= frag_more (nbytes
);
3918 memset (p
, 0, nbytes
);
3919 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3920 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3926 while (*input_line_pointer
++ == ',');
3928 /* Put terminator back into stream. */
3929 input_line_pointer
--;
3930 demand_empty_rest_of_line ();
3933 /* Emit an expression containing a 32-bit thumb instruction.
3934 Implementation based on put_thumb32_insn. */
3937 emit_thumb32_expr (expressionS
* exp
)
3939 expressionS exp_high
= *exp
;
3941 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3942 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3943 exp
->X_add_number
&= 0xffff;
3944 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
/* Guess the instruction size based on the opcode.  Returns 2 for a
   16-bit Thumb encoding, 4 for a 32-bit one, and 0 when the size
   cannot be determined from the opcode value.  */

static int
thumb_insn_size (int opcode)
{
  if ((unsigned int) opcode < 0xe800u)
    return 2;
  else if ((unsigned int) opcode >= 0xe8000000u)
    return 4;
  else
    return 0;
}
3961 emit_insn (expressionS
*exp
, int nbytes
)
3965 if (exp
->X_op
== O_constant
)
3970 size
= thumb_insn_size (exp
->X_add_number
);
3974 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3976 as_bad (_(".inst.n operand too big. "\
3977 "Use .inst.w instead"));
3982 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
)
3983 set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN
, 0);
3985 set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3987 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3988 emit_thumb32_expr (exp
);
3990 emit_expr (exp
, (unsigned int) size
);
3992 it_fsm_post_encode ();
3996 as_bad (_("cannot determine Thumb instruction size. " \
3997 "Use .inst.n/.inst.w instead"));
4000 as_bad (_("constant expression required"));
4005 /* Like s_arm_elf_cons but do not use md_cons_align and
4006 set the mapping state to MAP_ARM/MAP_THUMB. */
4009 s_arm_elf_inst (int nbytes
)
4011 if (is_it_end_of_statement ())
4013 demand_empty_rest_of_line ();
4017 /* Calling mapping_state () here will not change ARM/THUMB,
4018 but will ensure not to be in DATA state. */
4021 mapping_state (MAP_THUMB
);
4026 as_bad (_("width suffixes are invalid in ARM mode"));
4027 ignore_rest_of_line ();
4033 mapping_state (MAP_ARM
);
4042 if (! emit_insn (& exp
, nbytes
))
4044 ignore_rest_of_line ();
4048 while (*input_line_pointer
++ == ',');
4050 /* Put terminator back into stream. */
4051 input_line_pointer
--;
4052 demand_empty_rest_of_line ();
4055 /* Parse a .rel31 directive. */
4058 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
4065 if (*input_line_pointer
== '1')
4066 highbit
= 0x80000000;
4067 else if (*input_line_pointer
!= '0')
4068 as_bad (_("expected 0 or 1"));
4070 input_line_pointer
++;
4071 if (*input_line_pointer
!= ',')
4072 as_bad (_("missing comma"));
4073 input_line_pointer
++;
4075 #ifdef md_flush_pending_output
4076 md_flush_pending_output ();
4079 #ifdef md_cons_align
4083 mapping_state (MAP_DATA
);
4088 md_number_to_chars (p
, highbit
, 4);
4089 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
4090 BFD_RELOC_ARM_PREL31
);
4092 demand_empty_rest_of_line ();
4095 /* Directives: AEABI stack-unwind tables. */
4097 /* Parse an unwind_fnstart directive. Simply records the current location. */
4100 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
4102 demand_empty_rest_of_line ();
4103 if (unwind
.proc_start
)
4105 as_bad (_("duplicate .fnstart directive"));
4109 /* Mark the start of the function. */
4110 unwind
.proc_start
= expr_build_dot ();
4112 /* Reset the rest of the unwind info. */
4113 unwind
.opcode_count
= 0;
4114 unwind
.table_entry
= NULL
;
4115 unwind
.personality_routine
= NULL
;
4116 unwind
.personality_index
= -1;
4117 unwind
.frame_size
= 0;
4118 unwind
.fp_offset
= 0;
4119 unwind
.fp_reg
= REG_SP
;
4121 unwind
.sp_restored
= 0;
4125 /* Parse a handlerdata directive. Creates the exception handling table entry
4126 for the function. */
4129 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
4131 demand_empty_rest_of_line ();
4132 if (!unwind
.proc_start
)
4133 as_bad (MISSING_FNSTART
);
4135 if (unwind
.table_entry
)
4136 as_bad (_("duplicate .handlerdata directive"));
4138 create_unwind_entry (1);
4141 /* Parse an unwind_fnend directive. Generates the index table entry. */
4144 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
4149 unsigned int marked_pr_dependency
;
4151 demand_empty_rest_of_line ();
4153 if (!unwind
.proc_start
)
4155 as_bad (_(".fnend directive without .fnstart"));
4159 /* Add eh table entry. */
4160 if (unwind
.table_entry
== NULL
)
4161 val
= create_unwind_entry (0);
4165 /* Add index table entry. This is two words. */
4166 start_unwind_section (unwind
.saved_seg
, 1);
4167 frag_align (2, 0, 0);
4168 record_alignment (now_seg
, 2);
4170 ptr
= frag_more (8);
4172 where
= frag_now_fix () - 8;
4174 /* Self relative offset of the function start. */
4175 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
4176 BFD_RELOC_ARM_PREL31
);
4178 /* Indicate dependency on EHABI-defined personality routines to the
4179 linker, if it hasn't been done already. */
4180 marked_pr_dependency
4181 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
4182 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
4183 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
4185 static const char *const name
[] =
4187 "__aeabi_unwind_cpp_pr0",
4188 "__aeabi_unwind_cpp_pr1",
4189 "__aeabi_unwind_cpp_pr2"
4191 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
4192 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
4193 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
4194 |= 1 << unwind
.personality_index
;
4198 /* Inline exception table entry. */
4199 md_number_to_chars (ptr
+ 4, val
, 4);
4201 /* Self relative offset of the table entry. */
4202 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
4203 BFD_RELOC_ARM_PREL31
);
4205 /* Restore the original section. */
4206 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
4208 unwind
.proc_start
= NULL
;
4212 /* Parse an unwind_cantunwind directive. */
4215 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
4217 demand_empty_rest_of_line ();
4218 if (!unwind
.proc_start
)
4219 as_bad (MISSING_FNSTART
);
4221 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4222 as_bad (_("personality routine specified for cantunwind frame"));
4224 unwind
.personality_index
= -2;
4228 /* Parse a personalityindex directive. */
4231 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
4235 if (!unwind
.proc_start
)
4236 as_bad (MISSING_FNSTART
);
4238 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4239 as_bad (_("duplicate .personalityindex directive"));
4243 if (exp
.X_op
!= O_constant
4244 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
4246 as_bad (_("bad personality routine number"));
4247 ignore_rest_of_line ();
4251 unwind
.personality_index
= exp
.X_add_number
;
4253 demand_empty_rest_of_line ();
4257 /* Parse a personality directive. */
4260 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
4264 if (!unwind
.proc_start
)
4265 as_bad (MISSING_FNSTART
);
4267 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4268 as_bad (_("duplicate .personality directive"));
4270 c
= get_symbol_name (& name
);
4271 p
= input_line_pointer
;
4273 ++ input_line_pointer
;
4274 unwind
.personality_routine
= symbol_find_or_make (name
);
4276 demand_empty_rest_of_line ();
4279 /* Parse a directive saving pseudo registers. */
4282 s_arm_unwind_save_pseudo (void)
4287 range
= parse_reg_list (&input_line_pointer
, REGLIST_PSEUDO
);
4290 as_bad (_("expected pseudo register list"));
4291 ignore_rest_of_line ();
4295 demand_empty_rest_of_line ();
4297 if (range
& (1 << 9))
4299 /* Opcode for restoring RA_AUTH_CODE. */
4301 add_unwind_opcode (op
, 1);
4306 /* Parse a directive saving core registers. */
4309 s_arm_unwind_save_core (void)
4315 range
= parse_reg_list (&input_line_pointer
, REGLIST_RN
);
4318 as_bad (_("expected register list"));
4319 ignore_rest_of_line ();
4323 demand_empty_rest_of_line ();
4325 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
4326 into .unwind_save {..., sp...}. We aren't bothered about the value of
4327 ip because it is clobbered by calls. */
4328 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
4329 && (range
& 0x3000) == 0x1000)
4331 unwind
.opcode_count
--;
4332 unwind
.sp_restored
= 0;
4333 range
= (range
| 0x2000) & ~0x1000;
4334 unwind
.pending_offset
= 0;
4340 /* See if we can use the short opcodes. These pop a block of up to 8
4341 registers starting with r4, plus maybe r14. */
4342 for (n
= 0; n
< 8; n
++)
4344 /* Break at the first non-saved register. */
4345 if ((range
& (1 << (n
+ 4))) == 0)
4348 /* See if there are any other bits set. */
4349 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
4351 /* Use the long form. */
4352 op
= 0x8000 | ((range
>> 4) & 0xfff);
4353 add_unwind_opcode (op
, 2);
4357 /* Use the short form. */
4359 op
= 0xa8; /* Pop r14. */
4361 op
= 0xa0; /* Do not pop r14. */
4363 add_unwind_opcode (op
, 1);
4370 op
= 0xb100 | (range
& 0xf);
4371 add_unwind_opcode (op
, 2);
4374 /* Record the number of bytes pushed. */
4375 for (n
= 0; n
< 16; n
++)
4377 if (range
& (1 << n
))
4378 unwind
.frame_size
+= 4;
4383 /* Parse a directive saving FPA registers. */
4386 s_arm_unwind_save_fpa (int reg
)
4392 /* Get Number of registers to transfer. */
4393 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4396 exp
.X_op
= O_illegal
;
4398 if (exp
.X_op
!= O_constant
)
4400 as_bad (_("expected , <constant>"));
4401 ignore_rest_of_line ();
4405 num_regs
= exp
.X_add_number
;
4407 if (num_regs
< 1 || num_regs
> 4)
4409 as_bad (_("number of registers must be in the range [1:4]"));
4410 ignore_rest_of_line ();
4414 demand_empty_rest_of_line ();
4419 op
= 0xb4 | (num_regs
- 1);
4420 add_unwind_opcode (op
, 1);
4425 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4426 add_unwind_opcode (op
, 2);
4428 unwind
.frame_size
+= num_regs
* 12;
4432 /* Parse a directive saving VFP registers for ARMv6 and above. */
4435 s_arm_unwind_save_vfp_armv6 (void)
4440 int num_vfpv3_regs
= 0;
4441 int num_regs_below_16
;
4444 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
,
4448 as_bad (_("expected register list"));
4449 ignore_rest_of_line ();
4453 demand_empty_rest_of_line ();
4455 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4456 than FSTMX/FLDMX-style ones). */
4458 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4460 num_vfpv3_regs
= count
;
4461 else if (start
+ count
> 16)
4462 num_vfpv3_regs
= start
+ count
- 16;
4464 if (num_vfpv3_regs
> 0)
4466 int start_offset
= start
> 16 ? start
- 16 : 0;
4467 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4468 add_unwind_opcode (op
, 2);
4471 /* Generate opcode for registers numbered in the range 0 .. 15. */
4472 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4473 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4474 if (num_regs_below_16
> 0)
4476 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4477 add_unwind_opcode (op
, 2);
4480 unwind
.frame_size
+= count
* 8;
4484 /* Parse a directive saving VFP registers for pre-ARMv6. */
4487 s_arm_unwind_save_vfp (void)
4494 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
,
4498 as_bad (_("expected register list"));
4499 ignore_rest_of_line ();
4503 demand_empty_rest_of_line ();
4508 op
= 0xb8 | (count
- 1);
4509 add_unwind_opcode (op
, 1);
4514 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4515 add_unwind_opcode (op
, 2);
4517 unwind
.frame_size
+= count
* 8 + 4;
4521 /* Parse a directive saving iWMMXt data registers. */
4524 s_arm_unwind_save_mmxwr (void)
4532 if (*input_line_pointer
== '{')
4533 input_line_pointer
++;
4537 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4541 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4546 as_tsktsk (_("register list not in ascending order"));
4549 if (*input_line_pointer
== '-')
4551 input_line_pointer
++;
4552 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4555 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4558 else if (reg
>= hi_reg
)
4560 as_bad (_("bad register range"));
4563 for (; reg
< hi_reg
; reg
++)
4567 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4569 skip_past_char (&input_line_pointer
, '}');
4571 demand_empty_rest_of_line ();
4573 /* Generate any deferred opcodes because we're going to be looking at
4575 flush_pending_unwind ();
4577 for (i
= 0; i
< 16; i
++)
4579 if (mask
& (1 << i
))
4580 unwind
.frame_size
+= 8;
4583 /* Attempt to combine with a previous opcode. We do this because gcc
4584 likes to output separate unwind directives for a single block of
4586 if (unwind
.opcode_count
> 0)
4588 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4589 if ((i
& 0xf8) == 0xc0)
4592 /* Only merge if the blocks are contiguous. */
4595 if ((mask
& 0xfe00) == (1 << 9))
4597 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4598 unwind
.opcode_count
--;
4601 else if (i
== 6 && unwind
.opcode_count
>= 2)
4603 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4607 op
= 0xffff << (reg
- 1);
4609 && ((mask
& op
) == (1u << (reg
- 1))))
4611 op
= (1 << (reg
+ i
+ 1)) - 1;
4612 op
&= ~((1 << reg
) - 1);
4614 unwind
.opcode_count
-= 2;
4621 /* We want to generate opcodes in the order the registers have been
4622 saved, ie. descending order. */
4623 for (reg
= 15; reg
>= -1; reg
--)
4625 /* Save registers in blocks. */
4627 || !(mask
& (1 << reg
)))
4629 /* We found an unsaved reg. Generate opcodes to save the
4636 op
= 0xc0 | (hi_reg
- 10);
4637 add_unwind_opcode (op
, 1);
4642 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4643 add_unwind_opcode (op
, 2);
4652 ignore_rest_of_line ();
4656 s_arm_unwind_save_mmxwcg (void)
4663 if (*input_line_pointer
== '{')
4664 input_line_pointer
++;
4666 skip_whitespace (input_line_pointer
);
4670 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4674 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4680 as_tsktsk (_("register list not in ascending order"));
4683 if (*input_line_pointer
== '-')
4685 input_line_pointer
++;
4686 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4689 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4692 else if (reg
>= hi_reg
)
4694 as_bad (_("bad register range"));
4697 for (; reg
< hi_reg
; reg
++)
4701 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4703 skip_past_char (&input_line_pointer
, '}');
4705 demand_empty_rest_of_line ();
4707 /* Generate any deferred opcodes because we're going to be looking at
4709 flush_pending_unwind ();
4711 for (reg
= 0; reg
< 16; reg
++)
4713 if (mask
& (1 << reg
))
4714 unwind
.frame_size
+= 4;
4717 add_unwind_opcode (op
, 2);
4720 ignore_rest_of_line ();
4724 /* Parse an unwind_save directive.
4725 If the argument is non-zero, this is a .vsave directive. */
4728 s_arm_unwind_save (int arch_v6
)
4731 struct reg_entry
*reg
;
4732 bool had_brace
= false;
4734 if (!unwind
.proc_start
)
4735 as_bad (MISSING_FNSTART
);
4737 /* Figure out what sort of save we have. */
4738 peek
= input_line_pointer
;
4746 reg
= arm_reg_parse_multi (&peek
);
4750 as_bad (_("register expected"));
4751 ignore_rest_of_line ();
4760 as_bad (_("FPA .unwind_save does not take a register list"));
4761 ignore_rest_of_line ();
4764 input_line_pointer
= peek
;
4765 s_arm_unwind_save_fpa (reg
->number
);
4769 s_arm_unwind_save_core ();
4772 case REG_TYPE_PSEUDO
:
4773 s_arm_unwind_save_pseudo ();
4778 s_arm_unwind_save_vfp_armv6 ();
4780 s_arm_unwind_save_vfp ();
4783 case REG_TYPE_MMXWR
:
4784 s_arm_unwind_save_mmxwr ();
4787 case REG_TYPE_MMXWCG
:
4788 s_arm_unwind_save_mmxwcg ();
4792 as_bad (_(".unwind_save does not support this kind of register"));
4793 ignore_rest_of_line ();
4798 /* Parse an unwind_movsp directive. */
4801 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4807 if (!unwind
.proc_start
)
4808 as_bad (MISSING_FNSTART
);
4810 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4813 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4814 ignore_rest_of_line ();
4818 /* Optional constant. */
4819 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4821 if (immediate_for_directive (&offset
) == FAIL
)
4827 demand_empty_rest_of_line ();
4829 if (reg
== REG_SP
|| reg
== REG_PC
)
4831 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4835 if (unwind
.fp_reg
!= REG_SP
)
4836 as_bad (_("unexpected .unwind_movsp directive"));
4838 /* Generate opcode to restore the value. */
4840 add_unwind_opcode (op
, 1);
4842 /* Record the information for later. */
4843 unwind
.fp_reg
= reg
;
4844 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4845 unwind
.sp_restored
= 1;
4848 /* Parse an unwind_pad directive. */
4851 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4855 if (!unwind
.proc_start
)
4856 as_bad (MISSING_FNSTART
);
4858 if (immediate_for_directive (&offset
) == FAIL
)
4863 as_bad (_("stack increment must be multiple of 4"));
4864 ignore_rest_of_line ();
4868 /* Don't generate any opcodes, just record the details for later. */
4869 unwind
.frame_size
+= offset
;
4870 unwind
.pending_offset
+= offset
;
4872 demand_empty_rest_of_line ();
4875 /* Parse an unwind_setfp directive. */
4878 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4884 if (!unwind
.proc_start
)
4885 as_bad (MISSING_FNSTART
);
4887 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4888 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4891 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4893 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4895 as_bad (_("expected <reg>, <reg>"));
4896 ignore_rest_of_line ();
4900 /* Optional constant. */
4901 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4903 if (immediate_for_directive (&offset
) == FAIL
)
4909 demand_empty_rest_of_line ();
4911 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4913 as_bad (_("register must be either sp or set by a previous"
4914 "unwind_movsp directive"));
4918 /* Don't generate any opcodes, just record the information for later. */
4919 unwind
.fp_reg
= fp_reg
;
4921 if (sp_reg
== REG_SP
)
4922 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4924 unwind
.fp_offset
-= offset
;
4927 /* Parse an unwind_raw directive. */
4930 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4933 /* This is an arbitrary limit. */
4934 unsigned char op
[16];
4937 if (!unwind
.proc_start
)
4938 as_bad (MISSING_FNSTART
);
4941 if (exp
.X_op
== O_constant
4942 && skip_past_comma (&input_line_pointer
) != FAIL
)
4944 unwind
.frame_size
+= exp
.X_add_number
;
4948 exp
.X_op
= O_illegal
;
4950 if (exp
.X_op
!= O_constant
)
4952 as_bad (_("expected <offset>, <opcode>"));
4953 ignore_rest_of_line ();
4959 /* Parse the opcode. */
4964 as_bad (_("unwind opcode too long"));
4965 ignore_rest_of_line ();
4967 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4969 as_bad (_("invalid unwind opcode"));
4970 ignore_rest_of_line ();
4973 op
[count
++] = exp
.X_add_number
;
4975 /* Parse the next byte. */
4976 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4982 /* Add the opcode bytes in reverse order. */
4984 add_unwind_opcode (op
[count
], 1);
4986 demand_empty_rest_of_line ();
4990 /* Parse a .eabi_attribute directive. */
4993 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4995 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4997 if (tag
>= 0 && tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4998 attributes_set_explicitly
[tag
] = 1;
5001 /* Emit a tls fix for the symbol. */
5004 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
5008 #ifdef md_flush_pending_output
5009 md_flush_pending_output ();
5012 #ifdef md_cons_align
5016 /* Since we're just labelling the code, there's no need to define a
5019 p
= obstack_next_free (&frchain_now
->frch_obstack
);
5020 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
5021 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
5022 : BFD_RELOC_ARM_TLS_DESCSEQ
);
5024 #endif /* OBJ_ELF */
5026 static void s_arm_arch (int);
5027 static void s_arm_object_arch (int);
5028 static void s_arm_cpu (int);
5029 static void s_arm_fpu (int);
5030 static void s_arm_arch_extension (int);
5035 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
5042 if (exp
.X_op
== O_symbol
)
5043 exp
.X_op
= O_secrel
;
5045 emit_expr (&exp
, 4);
5047 while (*input_line_pointer
++ == ',');
5049 input_line_pointer
--;
5050 demand_empty_rest_of_line ();
5055 arm_is_largest_exponent_ok (int precision
)
5057 /* precision == 1 ensures that this will only return
5058 true for 16 bit floats. */
5059 return (precision
== 1) && (fp16_format
== ARM_FP16_FORMAT_ALTERNATIVE
);
5063 set_fp16_format (int dummy ATTRIBUTE_UNUSED
)
5067 enum fp_16bit_format new_format
;
5069 new_format
= ARM_FP16_FORMAT_DEFAULT
;
5071 name
= input_line_pointer
;
5072 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
5073 input_line_pointer
++;
5075 saved_char
= *input_line_pointer
;
5076 *input_line_pointer
= 0;
5078 if (strcasecmp (name
, "ieee") == 0)
5079 new_format
= ARM_FP16_FORMAT_IEEE
;
5080 else if (strcasecmp (name
, "alternative") == 0)
5081 new_format
= ARM_FP16_FORMAT_ALTERNATIVE
;
5084 as_bad (_("unrecognised float16 format \"%s\""), name
);
5088 /* Only set fp16_format if it is still the default (aka not already
5090 if (fp16_format
== ARM_FP16_FORMAT_DEFAULT
)
5091 fp16_format
= new_format
;
5094 if (new_format
!= fp16_format
)
5095 as_warn (_("float16 format cannot be set more than once, ignoring."));
5099 *input_line_pointer
= saved_char
;
5100 ignore_rest_of_line ();
5103 /* This table describes all the machine specific pseudo-ops the assembler
5104 has to support. The fields are:
5105 pseudo-op name without dot
5106 function to call to execute this pseudo-op
5107 Integer arg to pass to the function. */
5109 const pseudo_typeS md_pseudo_table
[] =
5111 /* Never called because '.req' does not start a line. */
5112 { "req", s_req
, 0 },
5113 /* Following two are likewise never called. */
5116 { "unreq", s_unreq
, 0 },
5117 { "bss", s_bss
, 0 },
5118 { "align", s_align_ptwo
, 2 },
5119 { "arm", s_arm
, 0 },
5120 { "thumb", s_thumb
, 0 },
5121 { "code", s_code
, 0 },
5122 { "force_thumb", s_force_thumb
, 0 },
5123 { "thumb_func", s_thumb_func
, 0 },
5124 { "thumb_set", s_thumb_set
, 0 },
5125 { "even", s_even
, 0 },
5126 { "ltorg", s_ltorg
, 0 },
5127 { "pool", s_ltorg
, 0 },
5128 { "syntax", s_syntax
, 0 },
5129 { "cpu", s_arm_cpu
, 0 },
5130 { "arch", s_arm_arch
, 0 },
5131 { "object_arch", s_arm_object_arch
, 0 },
5132 { "fpu", s_arm_fpu
, 0 },
5133 { "arch_extension", s_arm_arch_extension
, 0 },
5135 { "word", s_arm_elf_cons
, 4 },
5136 { "long", s_arm_elf_cons
, 4 },
5137 { "inst.n", s_arm_elf_inst
, 2 },
5138 { "inst.w", s_arm_elf_inst
, 4 },
5139 { "inst", s_arm_elf_inst
, 0 },
5140 { "rel31", s_arm_rel31
, 0 },
5141 { "fnstart", s_arm_unwind_fnstart
, 0 },
5142 { "fnend", s_arm_unwind_fnend
, 0 },
5143 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
5144 { "personality", s_arm_unwind_personality
, 0 },
5145 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
5146 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
5147 { "save", s_arm_unwind_save
, 0 },
5148 { "vsave", s_arm_unwind_save
, 1 },
5149 { "movsp", s_arm_unwind_movsp
, 0 },
5150 { "pad", s_arm_unwind_pad
, 0 },
5151 { "setfp", s_arm_unwind_setfp
, 0 },
5152 { "unwind_raw", s_arm_unwind_raw
, 0 },
5153 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
5154 { "tlsdescseq", s_arm_tls_descseq
, 0 },
5158 /* These are used for dwarf. */
5162 /* These are used for dwarf2. */
5163 { "file", dwarf2_directive_file
, 0 },
5164 { "loc", dwarf2_directive_loc
, 0 },
5165 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
5167 { "extend", float_cons
, 'x' },
5168 { "ldouble", float_cons
, 'x' },
5169 { "packed", float_cons
, 'p' },
5170 { "bfloat16", float_cons
, 'b' },
5172 {"secrel32", pe_directive_secrel
, 0},
5175 /* These are for compatibility with CodeComposer Studio. */
5176 {"ref", s_ccs_ref
, 0},
5177 {"def", s_ccs_def
, 0},
5178 {"asmfunc", s_ccs_asmfunc
, 0},
5179 {"endasmfunc", s_ccs_endasmfunc
, 0},
5181 {"float16", float_cons
, 'h' },
5182 {"float16_format", set_fp16_format
, 0 },
5187 /* Parser functions used exclusively in instruction operands. */
5189 /* Generic immediate-value read function for use in insn parsing.
5190 STR points to the beginning of the immediate (the leading #);
5191 VAL receives the value; if the value is outside [MIN, MAX]
5192 issue an error. PREFIX_OPT is true if the immediate prefix is
5196 parse_immediate (char **str
, int *val
, int min
, int max
,
5201 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
5202 if (exp
.X_op
!= O_constant
)
5204 inst
.error
= _("constant expression required");
5208 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
5210 inst
.error
= _("immediate value out of range");
5214 *val
= exp
.X_add_number
;
5218 /* Less-generic immediate-value read function with the possibility of loading a
5219 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
5220 instructions. Puts the result directly in inst.operands[i]. */
5223 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
5224 bool allow_symbol_p
)
5227 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
5230 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
5232 if (exp_p
->X_op
== O_constant
)
5234 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
5235 /* If we're on a 64-bit host, then a 64-bit number can be returned using
5236 O_constant. We have to be careful not to break compilation for
5237 32-bit X_add_number, though. */
5238 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
5240 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
5241 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
5243 inst
.operands
[i
].regisimm
= 1;
5246 else if (exp_p
->X_op
== O_big
5247 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
5249 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
5251 /* Bignums have their least significant bits in
5252 generic_bignum[0]. Make sure we put 32 bits in imm and
5253 32 bits in reg, in a (hopefully) portable way. */
5254 gas_assert (parts
!= 0);
5256 /* Make sure that the number is not too big.
5257 PR 11972: Bignums can now be sign-extended to the
5258 size of a .octa so check that the out of range bits
5259 are all zero or all one. */
5260 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
5262 LITTLENUM_TYPE m
= -1;
5264 if (generic_bignum
[parts
* 2] != 0
5265 && generic_bignum
[parts
* 2] != m
)
5268 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
5269 if (generic_bignum
[j
] != generic_bignum
[j
-1])
5273 inst
.operands
[i
].imm
= 0;
5274 for (j
= 0; j
< parts
; j
++, idx
++)
5275 inst
.operands
[i
].imm
|= ((unsigned) generic_bignum
[idx
]
5276 << (LITTLENUM_NUMBER_OF_BITS
* j
));
5277 inst
.operands
[i
].reg
= 0;
5278 for (j
= 0; j
< parts
; j
++, idx
++)
5279 inst
.operands
[i
].reg
|= ((unsigned) generic_bignum
[idx
]
5280 << (LITTLENUM_NUMBER_OF_BITS
* j
));
5281 inst
.operands
[i
].regisimm
= 1;
5283 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
5291 /* Returns the pseudo-register number of an FPA immediate constant,
5292 or FAIL if there isn't a valid constant here. */
5295 parse_fpa_immediate (char ** str
)
5297 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5303 /* First try and match exact strings, this is to guarantee
5304 that some formats will work even for cross assembly. */
5306 for (i
= 0; fp_const
[i
]; i
++)
5308 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
5312 *str
+= strlen (fp_const
[i
]);
5313 if (is_end_of_line
[(unsigned char) **str
])
5319 /* Just because we didn't get a match doesn't mean that the constant
5320 isn't valid, just that it is in a format that we don't
5321 automatically recognize. Try parsing it with the standard
5322 expression routines. */
5324 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
5326 /* Look for a raw floating point number. */
5327 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
5328 && is_end_of_line
[(unsigned char) *save_in
])
5330 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5332 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5334 if (words
[j
] != fp_values
[i
][j
])
5338 if (j
== MAX_LITTLENUMS
)
5346 /* Try and parse a more complex expression, this will probably fail
5347 unless the code uses a floating point prefix (eg "0f"). */
5348 save_in
= input_line_pointer
;
5349 input_line_pointer
= *str
;
5350 if (expression (&exp
) == absolute_section
5351 && exp
.X_op
== O_big
5352 && exp
.X_add_number
< 0)
5354 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
5356 #define X_PRECISION 5
5357 #define E_PRECISION 15L
5358 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
5360 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5362 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5364 if (words
[j
] != fp_values
[i
][j
])
5368 if (j
== MAX_LITTLENUMS
)
5370 *str
= input_line_pointer
;
5371 input_line_pointer
= save_in
;
5378 *str
= input_line_pointer
;
5379 input_line_pointer
= save_in
;
5380 inst
.error
= _("invalid FPA immediate expression");
5384 /* Returns 1 if a number has "quarter-precision" float format
5385 0baBbbbbbc defgh000 00000000 00000000. */
5388 is_quarter_float (unsigned imm
)
5390 int bs
= (imm
& 0x20000000) ? 0x3e000000 : 0x40000000;
5391 return (imm
& 0x7ffff) == 0 && ((imm
& 0x7e000000) ^ bs
) == 0;
5395 /* Detect the presence of a floating point or integer zero constant,
5399 parse_ifimm_zero (char **in
)
5403 if (!is_immediate_prefix (**in
))
5405 /* In unified syntax, all prefixes are optional. */
5406 if (!unified_syntax
)
5412 /* Accept #0x0 as a synonym for #0. */
5413 if (startswith (*in
, "0x"))
5416 if (parse_immediate (in
, &val
, 0, 0, true) == FAIL
)
5421 error_code
= atof_generic (in
, ".", EXP_CHARS
,
5422 &generic_floating_point_number
);
5425 && generic_floating_point_number
.sign
== '+'
5426 && (generic_floating_point_number
.low
5427 > generic_floating_point_number
.leader
))
5433 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5434 0baBbbbbbc defgh000 00000000 00000000.
5435 The zero and minus-zero cases need special handling, since they can't be
5436 encoded in the "quarter-precision" float format, but can nonetheless be
5437 loaded as integer constants. */
5440 parse_qfloat_immediate (char **ccp
, int *immed
)
5444 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5445 int found_fpchar
= 0;
5447 skip_past_char (&str
, '#');
5449 /* We must not accidentally parse an integer as a floating-point number. Make
5450 sure that the value we parse is not an integer by checking for special
5451 characters '.' or 'e'.
5452 FIXME: This is a horrible hack, but doing better is tricky because type
5453 information isn't in a very usable state at parse time. */
5455 skip_whitespace (fpnum
);
5457 if (startswith (fpnum
, "0x"))
5461 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
5462 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5472 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5474 unsigned fpword
= 0;
5477 /* Our FP word must be 32 bits (single-precision FP). */
5478 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5480 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5484 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5497 /* Shift operands. */
5500 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
, SHIFT_UXTW
5503 struct asm_shift_name
5506 enum shift_kind kind
;
5509 /* Third argument to parse_shift. */
5510 enum parse_shift_mode
5512 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5513 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5514 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5515 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5516 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5517 SHIFT_UXTW_IMMEDIATE
/* Shift must be UXTW immediate. */
5520 /* Parse a <shift> specifier on an ARM data processing instruction.
5521 This has three forms:
5523 (LSL|LSR|ASL|ASR|ROR) Rs
5524 (LSL|LSR|ASL|ASR|ROR) #imm
5527 Note that ASL is assimilated to LSL in the instruction encoding, and
5528 RRX to ROR #0 (which cannot be written as such). */
5531 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5533 const struct asm_shift_name
*shift_name
;
5534 enum shift_kind shift
;
5539 for (p
= *str
; ISALPHA (*p
); p
++)
5544 inst
.error
= _("shift expression expected");
5549 = (const struct asm_shift_name
*) str_hash_find_n (arm_shift_hsh
, *str
,
5552 if (shift_name
== NULL
)
5554 inst
.error
= _("shift expression expected");
5558 shift
= shift_name
->kind
;
5562 case NO_SHIFT_RESTRICT
:
5563 case SHIFT_IMMEDIATE
:
5564 if (shift
== SHIFT_UXTW
)
5566 inst
.error
= _("'UXTW' not allowed here");
5571 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5572 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5574 inst
.error
= _("'LSL' or 'ASR' required");
5579 case SHIFT_LSL_IMMEDIATE
:
5580 if (shift
!= SHIFT_LSL
)
5582 inst
.error
= _("'LSL' required");
5587 case SHIFT_ASR_IMMEDIATE
:
5588 if (shift
!= SHIFT_ASR
)
5590 inst
.error
= _("'ASR' required");
5594 case SHIFT_UXTW_IMMEDIATE
:
5595 if (shift
!= SHIFT_UXTW
)
5597 inst
.error
= _("'UXTW' required");
5605 if (shift
!= SHIFT_RRX
)
5607 /* Whitespace can appear here if the next thing is a bare digit. */
5608 skip_whitespace (p
);
5610 if (mode
== NO_SHIFT_RESTRICT
5611 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5613 inst
.operands
[i
].imm
= reg
;
5614 inst
.operands
[i
].immisreg
= 1;
5616 else if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5619 inst
.operands
[i
].shift_kind
= shift
;
5620 inst
.operands
[i
].shifted
= 1;
5625 /* Parse a <shifter_operand> for an ARM data processing instruction:
5628 #<immediate>, <rotate>
5632 where <shift> is defined by parse_shift above, and <rotate> is a
5633 multiple of 2 between 0 and 30. Validation of immediate operands
5634 is deferred to md_apply_fix. */
5637 parse_shifter_operand (char **str
, int i
)
5642 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5644 inst
.operands
[i
].reg
= value
;
5645 inst
.operands
[i
].isreg
= 1;
5647 /* parse_shift will override this if appropriate */
5648 inst
.relocs
[0].exp
.X_op
= O_constant
;
5649 inst
.relocs
[0].exp
.X_add_number
= 0;
5651 if (skip_past_comma (str
) == FAIL
)
5654 /* Shift operation on register. */
5655 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5658 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_IMM_PREFIX
))
5661 if (skip_past_comma (str
) == SUCCESS
)
5663 /* #x, y -- ie explicit rotation by Y. */
5664 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5667 if (exp
.X_op
!= O_constant
|| inst
.relocs
[0].exp
.X_op
!= O_constant
)
5669 inst
.error
= _("constant expression expected");
5673 value
= exp
.X_add_number
;
5674 if (value
< 0 || value
> 30 || value
% 2 != 0)
5676 inst
.error
= _("invalid rotation");
5679 if (inst
.relocs
[0].exp
.X_add_number
< 0
5680 || inst
.relocs
[0].exp
.X_add_number
> 255)
5682 inst
.error
= _("invalid constant");
5686 /* Encode as specified. */
5687 inst
.operands
[i
].imm
= inst
.relocs
[0].exp
.X_add_number
| value
<< 7;
5691 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
5692 inst
.relocs
[0].pc_rel
= 0;
5696 /* Group relocation information. Each entry in the table contains the
5697 textual name of the relocation as may appear in assembler source
5698 and must end with a colon.
5699 Along with this textual name are the relocation codes to be used if
5700 the corresponding instruction is an ALU instruction (ADD or SUB only),
5701 an LDR, an LDRS, or an LDC. */
5703 struct group_reloc_table_entry
5714 /* Varieties of non-ALU group relocation. */
5722 static struct group_reloc_table_entry group_reloc_table
[] =
5723 { /* Program counter relative: */
5725 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5730 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5731 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5732 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5733 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5735 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5740 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5741 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5742 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5743 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5745 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5746 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5747 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5748 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5749 /* Section base relative */
5751 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5756 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5757 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5758 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5759 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5761 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5766 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5767 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5768 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5769 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5771 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5772 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5773 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5774 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5775 /* Absolute thumb alu relocations. */
5777 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5782 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5787 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5792 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5797 /* Given the address of a pointer pointing to the textual name of a group
5798 relocation as may appear in assembler source, attempt to find its details
5799 in group_reloc_table. The pointer will be updated to the character after
5800 the trailing colon. On failure, FAIL will be returned; SUCCESS
5801 otherwise. On success, *entry will be updated to point at the relevant
5802 group_reloc_table entry. */
5805 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5808 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5810 int length
= strlen (group_reloc_table
[i
].name
);
5812 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5813 && (*str
)[length
] == ':')
5815 *out
= &group_reloc_table
[i
];
5816 *str
+= (length
+ 1);
5824 /* Parse a <shifter_operand> for an ARM data processing instruction
5825 (as for parse_shifter_operand) where group relocations are allowed:
5828 #<immediate>, <rotate>
5829 #:<group_reloc>:<expression>
5833 where <group_reloc> is one of the strings defined in group_reloc_table.
5834 The hashes are optional.
5836 Everything else is as for parse_shifter_operand. */
5838 static parse_operand_result
5839 parse_shifter_operand_group_reloc (char **str
, int i
)
5841 /* Determine if we have the sequence of characters #: or just :
5842 coming next. If we do, then we check for a group relocation.
5843 If we don't, punt the whole lot to parse_shifter_operand. */
5845 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5846 || (*str
)[0] == ':')
5848 struct group_reloc_table_entry
*entry
;
5850 if ((*str
)[0] == '#')
5855 /* Try to parse a group relocation. Anything else is an error. */
5856 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5858 inst
.error
= _("unknown group relocation");
5859 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5862 /* We now have the group relocation table entry corresponding to
5863 the name in the assembler source. Next, we parse the expression. */
5864 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_NO_PREFIX
))
5865 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5867 /* Record the relocation type (always the ALU variant here). */
5868 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5869 gas_assert (inst
.relocs
[0].type
!= 0);
5871 return PARSE_OPERAND_SUCCESS
;
5874 return parse_shifter_operand (str
, i
) == SUCCESS
5875 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5877 /* Never reached. */
5880 /* Parse a Neon alignment expression. Information is written to
5881 inst.operands[i]. We assume the initial ':' has been skipped.
5883 align .imm = align << 8, .immisalign=1, .preind=0 */
5884 static parse_operand_result
5885 parse_neon_alignment (char **str
, int i
)
5890 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5892 if (exp
.X_op
!= O_constant
)
5894 inst
.error
= _("alignment must be constant");
5895 return PARSE_OPERAND_FAIL
;
5898 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5899 inst
.operands
[i
].immisalign
= 1;
5900 /* Alignments are not pre-indexes. */
5901 inst
.operands
[i
].preind
= 0;
5904 return PARSE_OPERAND_SUCCESS
;
5907 /* Parse all forms of an ARM address expression. Information is written
5908 to inst.operands[i] and/or inst.relocs[0].
5910 Preindexed addressing (.preind=1):
5912 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5913 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5914 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5915 .shift_kind=shift .relocs[0].exp=shift_imm
5917 These three may have a trailing ! which causes .writeback to be set also.
5919 Postindexed addressing (.postind=1, .writeback=1):
5921 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5922 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5923 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5924 .shift_kind=shift .relocs[0].exp=shift_imm
5926 Unindexed addressing (.preind=0, .postind=0):
5928 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5932 [Rn]{!} shorthand for [Rn,#0]{!}
5933 =immediate .isreg=0 .relocs[0].exp=immediate
5934 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5936 It is the caller's responsibility to check for addressing modes not
5937 supported by the instruction, and to set inst.relocs[0].type. */
5939 static parse_operand_result
5940 parse_address_main (char **str
, int i
, int group_relocations
,
5941 group_reloc_type group_type
)
5946 if (skip_past_char (&p
, '[') == FAIL
)
5948 if (group_type
== GROUP_MVE
5949 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5951 /* [r0-r15] expected as argument but receiving r0-r15 without
5953 inst
.error
= BAD_SYNTAX
;
5954 return PARSE_OPERAND_FAIL
;
5956 else if (skip_past_char (&p
, '=') == FAIL
)
5958 /* Bare address - translate to PC-relative offset. */
5959 inst
.relocs
[0].pc_rel
= 1;
5960 inst
.operands
[i
].reg
= REG_PC
;
5961 inst
.operands
[i
].isreg
= 1;
5962 inst
.operands
[i
].preind
= 1;
5964 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_OPT_PREFIX_BIG
))
5965 return PARSE_OPERAND_FAIL
;
5967 else if (parse_big_immediate (&p
, i
, &inst
.relocs
[0].exp
,
5968 /*allow_symbol_p=*/true))
5969 return PARSE_OPERAND_FAIL
;
5972 return PARSE_OPERAND_SUCCESS
;
5975 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5976 skip_whitespace (p
);
5978 if (group_type
== GROUP_MVE
)
5980 enum arm_reg_type rtype
= REG_TYPE_MQ
;
5981 struct neon_type_el et
;
5982 if ((reg
= arm_typed_reg_parse (&p
, rtype
, &rtype
, &et
)) != FAIL
)
5984 inst
.operands
[i
].isquad
= 1;
5986 else if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5988 inst
.error
= BAD_ADDR_MODE
;
5989 return PARSE_OPERAND_FAIL
;
5992 else if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5994 if (group_type
== GROUP_MVE
)
5995 inst
.error
= BAD_ADDR_MODE
;
5997 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5998 return PARSE_OPERAND_FAIL
;
6000 inst
.operands
[i
].reg
= reg
;
6001 inst
.operands
[i
].isreg
= 1;
6003 if (skip_past_comma (&p
) == SUCCESS
)
6005 inst
.operands
[i
].preind
= 1;
6008 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
6010 enum arm_reg_type rtype
= REG_TYPE_MQ
;
6011 struct neon_type_el et
;
6012 if (group_type
== GROUP_MVE
6013 && (reg
= arm_typed_reg_parse (&p
, rtype
, &rtype
, &et
)) != FAIL
)
6015 inst
.operands
[i
].immisreg
= 2;
6016 inst
.operands
[i
].imm
= reg
;
6018 if (skip_past_comma (&p
) == SUCCESS
)
6020 if (parse_shift (&p
, i
, SHIFT_UXTW_IMMEDIATE
) == SUCCESS
)
6022 inst
.operands
[i
].imm
|= inst
.relocs
[0].exp
.X_add_number
<< 5;
6023 inst
.relocs
[0].exp
.X_add_number
= 0;
6026 return PARSE_OPERAND_FAIL
;
6029 else if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
6031 inst
.operands
[i
].imm
= reg
;
6032 inst
.operands
[i
].immisreg
= 1;
6034 if (skip_past_comma (&p
) == SUCCESS
)
6035 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
6036 return PARSE_OPERAND_FAIL
;
6038 else if (skip_past_char (&p
, ':') == SUCCESS
)
6040 /* FIXME: '@' should be used here, but it's filtered out by generic
6041 code before we get to see it here. This may be subject to
6043 parse_operand_result result
= parse_neon_alignment (&p
, i
);
6045 if (result
!= PARSE_OPERAND_SUCCESS
)
6050 if (inst
.operands
[i
].negative
)
6052 inst
.operands
[i
].negative
= 0;
6056 if (group_relocations
6057 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
6059 struct group_reloc_table_entry
*entry
;
6061 /* Skip over the #: or : sequence. */
6067 /* Try to parse a group relocation. Anything else is an
6069 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
6071 inst
.error
= _("unknown group relocation");
6072 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
6075 /* We now have the group relocation table entry corresponding to
6076 the name in the assembler source. Next, we parse the
6078 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
6079 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
6081 /* Record the relocation type. */
6086 = (bfd_reloc_code_real_type
) entry
->ldr_code
;
6091 = (bfd_reloc_code_real_type
) entry
->ldrs_code
;
6096 = (bfd_reloc_code_real_type
) entry
->ldc_code
;
6103 if (inst
.relocs
[0].type
== 0)
6105 inst
.error
= _("this group relocation is not allowed on this instruction");
6106 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
6113 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
6114 return PARSE_OPERAND_FAIL
;
6115 /* If the offset is 0, find out if it's a +0 or -0. */
6116 if (inst
.relocs
[0].exp
.X_op
== O_constant
6117 && inst
.relocs
[0].exp
.X_add_number
== 0)
6119 skip_whitespace (q
);
6123 skip_whitespace (q
);
6126 inst
.operands
[i
].negative
= 1;
6131 else if (skip_past_char (&p
, ':') == SUCCESS
)
6133 /* FIXME: '@' should be used here, but it's filtered out by generic code
6134 before we get to see it here. This may be subject to change. */
6135 parse_operand_result result
= parse_neon_alignment (&p
, i
);
6137 if (result
!= PARSE_OPERAND_SUCCESS
)
6141 if (skip_past_char (&p
, ']') == FAIL
)
6143 inst
.error
= _("']' expected");
6144 return PARSE_OPERAND_FAIL
;
6147 if (skip_past_char (&p
, '!') == SUCCESS
)
6148 inst
.operands
[i
].writeback
= 1;
6150 else if (skip_past_comma (&p
) == SUCCESS
)
6152 if (skip_past_char (&p
, '{') == SUCCESS
)
6154 /* [Rn], {expr} - unindexed, with option */
6155 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
6156 0, 255, true) == FAIL
)
6157 return PARSE_OPERAND_FAIL
;
6159 if (skip_past_char (&p
, '}') == FAIL
)
6161 inst
.error
= _("'}' expected at end of 'option' field");
6162 return PARSE_OPERAND_FAIL
;
6164 if (inst
.operands
[i
].preind
)
6166 inst
.error
= _("cannot combine index with option");
6167 return PARSE_OPERAND_FAIL
;
6170 return PARSE_OPERAND_SUCCESS
;
6174 inst
.operands
[i
].postind
= 1;
6175 inst
.operands
[i
].writeback
= 1;
6177 if (inst
.operands
[i
].preind
)
6179 inst
.error
= _("cannot combine pre- and post-indexing");
6180 return PARSE_OPERAND_FAIL
;
6184 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
6186 enum arm_reg_type rtype
= REG_TYPE_MQ
;
6187 struct neon_type_el et
;
6188 if (group_type
== GROUP_MVE
6189 && (reg
= arm_typed_reg_parse (&p
, rtype
, &rtype
, &et
)) != FAIL
)
6191 inst
.operands
[i
].immisreg
= 2;
6192 inst
.operands
[i
].imm
= reg
;
6194 else if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
6196 /* We might be using the immediate for alignment already. If we
6197 are, OR the register number into the low-order bits. */
6198 if (inst
.operands
[i
].immisalign
)
6199 inst
.operands
[i
].imm
|= reg
;
6201 inst
.operands
[i
].imm
= reg
;
6202 inst
.operands
[i
].immisreg
= 1;
6204 if (skip_past_comma (&p
) == SUCCESS
)
6205 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
6206 return PARSE_OPERAND_FAIL
;
6212 if (inst
.operands
[i
].negative
)
6214 inst
.operands
[i
].negative
= 0;
6217 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
6218 return PARSE_OPERAND_FAIL
;
6219 /* If the offset is 0, find out if it's a +0 or -0. */
6220 if (inst
.relocs
[0].exp
.X_op
== O_constant
6221 && inst
.relocs
[0].exp
.X_add_number
== 0)
6223 skip_whitespace (q
);
6227 skip_whitespace (q
);
6230 inst
.operands
[i
].negative
= 1;
6236 /* If at this point neither .preind nor .postind is set, we have a
6237 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
6238 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
6240 inst
.operands
[i
].preind
= 1;
6241 inst
.relocs
[0].exp
.X_op
= O_constant
;
6242 inst
.relocs
[0].exp
.X_add_number
= 0;
6245 return PARSE_OPERAND_SUCCESS
;
6249 parse_address (char **str
, int i
)
6251 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
6255 static parse_operand_result
6256 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
6258 return parse_address_main (str
, i
, 1, type
);
6261 /* Parse an operand for a MOVW or MOVT instruction. */
6263 parse_half (char **str
)
6268 skip_past_char (&p
, '#');
6269 if (strncasecmp (p
, ":lower16:", 9) == 0)
6270 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVW
;
6271 else if (strncasecmp (p
, ":upper16:", 9) == 0)
6272 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVT
;
6274 if (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
)
6277 skip_whitespace (p
);
6280 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
6283 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
6285 if (inst
.relocs
[0].exp
.X_op
!= O_constant
)
6287 inst
.error
= _("constant expression expected");
6290 if (inst
.relocs
[0].exp
.X_add_number
< 0
6291 || inst
.relocs
[0].exp
.X_add_number
> 0xffff)
6293 inst
.error
= _("immediate value out of range");
6301 /* Miscellaneous. */
6303 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
6304 or a bitmask suitable to be or-ed into the ARM msr instruction. */
6306 parse_psr (char **str
, bool lhs
)
6309 unsigned long psr_field
;
6310 const struct asm_psr
*psr
;
6312 bool is_apsr
= false;
6313 bool m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
6315 /* PR gas/12698: If the user has specified -march=all then m_profile will
6316 be TRUE, but we want to ignore it in this case as we are building for any
6317 CPU type, including non-m variants. */
6318 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
6321 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
6322 feature for ease of use and backwards compatibility. */
6324 if (strncasecmp (p
, "SPSR", 4) == 0)
6327 goto unsupported_psr
;
6329 psr_field
= SPSR_BIT
;
6331 else if (strncasecmp (p
, "CPSR", 4) == 0)
6334 goto unsupported_psr
;
6338 else if (strncasecmp (p
, "APSR", 4) == 0)
6340 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
6341 and ARMv7-R architecture CPUs. */
6350 while (ISALNUM (*p
) || *p
== '_');
6352 if (strncasecmp (start
, "iapsr", 5) == 0
6353 || strncasecmp (start
, "eapsr", 5) == 0
6354 || strncasecmp (start
, "xpsr", 4) == 0
6355 || strncasecmp (start
, "psr", 3) == 0)
6356 p
= start
+ strcspn (start
, "rR") + 1;
6358 psr
= (const struct asm_psr
*) str_hash_find_n (arm_v7m_psr_hsh
, start
,
6364 /* If APSR is being written, a bitfield may be specified. Note that
6365 APSR itself is handled above. */
6366 if (psr
->field
<= 3)
6368 psr_field
= psr
->field
;
6374 /* M-profile MSR instructions have the mask field set to "10", except
6375 *PSR variants which modify APSR, which may use a different mask (and
6376 have been handled already). Do that by setting the PSR_f field
6378 return psr
->field
| (lhs
? PSR_f
: 0);
6381 goto unsupported_psr
;
6387 /* A suffix follows. */
6393 while (ISALNUM (*p
) || *p
== '_');
6397 /* APSR uses a notation for bits, rather than fields. */
6398 unsigned int nzcvq_bits
= 0;
6399 unsigned int g_bit
= 0;
6402 for (bit
= start
; bit
!= p
; bit
++)
6404 switch (TOLOWER (*bit
))
6407 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
6411 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
6415 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
6419 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
6423 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
6427 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
6431 inst
.error
= _("unexpected bit specified after APSR");
6436 if (nzcvq_bits
== 0x1f)
6441 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
6443 inst
.error
= _("selected processor does not "
6444 "support DSP extension");
6451 if ((nzcvq_bits
& 0x20) != 0
6452 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
6453 || (g_bit
& 0x2) != 0)
6455 inst
.error
= _("bad bitmask specified after APSR");
6461 psr
= (const struct asm_psr
*) str_hash_find_n (arm_psr_hsh
, start
,
6466 psr_field
|= psr
->field
;
6472 goto error
; /* Garbage after "[CS]PSR". */
6474 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
6475 is deprecated, but allow it anyway. */
6479 as_tsktsk (_("writing to APSR without specifying a bitmask is "
6482 else if (!m_profile
)
6483 /* These bits are never right for M-profile devices: don't set them
6484 (only code paths which read/write APSR reach here). */
6485 psr_field
|= (PSR_c
| PSR_f
);
6491 inst
.error
= _("selected processor does not support requested special "
6492 "purpose register");
6496 inst
.error
= _("flag for {c}psr instruction expected");
6501 parse_sys_vldr_vstr (char **str
)
6510 {"FPSCR", 0x1, 0x0},
6511 {"FPSCR_nzcvqc", 0x2, 0x0},
6514 {"FPCXTNS", 0x6, 0x1},
6515 {"FPCXTS", 0x7, 0x1}
6517 char *op_end
= strchr (*str
, ',');
6518 size_t op_strlen
= op_end
- *str
;
6520 for (i
= 0; i
< sizeof (sysregs
) / sizeof (sysregs
[0]); i
++)
6522 if (!strncmp (*str
, sysregs
[i
].name
, op_strlen
))
6524 val
= sysregs
[i
].regl
| (sysregs
[i
].regh
<< 3);
6533 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6534 value suitable for splatting into the AIF field of the instruction. */
6537 parse_cps_flags (char **str
)
6546 case '\0': case ',':
6549 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
6550 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
6551 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
6554 inst
.error
= _("unrecognized CPS flag");
6559 if (saw_a_flag
== 0)
6561 inst
.error
= _("missing CPS flags");
6569 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6570 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6573 parse_endian_specifier (char **str
)
6578 if (strncasecmp (s
, "BE", 2))
6580 else if (strncasecmp (s
, "LE", 2))
6584 inst
.error
= _("valid endian specifiers are be or le");
6588 if (ISALNUM (s
[2]) || s
[2] == '_')
6590 inst
.error
= _("valid endian specifiers are be or le");
6595 return little_endian
;
6598 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6599 value suitable for poking into the rotate field of an sxt or sxta
6600 instruction, or FAIL on error. */
6603 parse_ror (char **str
)
6608 if (strncasecmp (s
, "ROR", 3) == 0)
6612 inst
.error
= _("missing rotation field after comma");
6616 if (parse_immediate (&s
, &rot
, 0, 24, false) == FAIL
)
6621 case 0: *str
= s
; return 0x0;
6622 case 8: *str
= s
; return 0x1;
6623 case 16: *str
= s
; return 0x2;
6624 case 24: *str
= s
; return 0x3;
6627 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6632 /* Parse a conditional code (from conds[] below). The value returned is in the
6633 range 0 .. 14, or FAIL. */
6635 parse_cond (char **str
)
6638 const struct asm_cond
*c
;
6640 /* Condition codes are always 2 characters, so matching up to
6641 3 characters is sufficient. */
6646 while (ISALPHA (*q
) && n
< 3)
6648 cond
[n
] = TOLOWER (*q
);
6653 c
= (const struct asm_cond
*) str_hash_find_n (arm_cond_hsh
, cond
, n
);
6656 inst
.error
= _("condition required");
6664 /* Parse an option for a barrier instruction. Returns the encoding for the
6667 parse_barrier (char **str
)
6670 const struct asm_barrier_opt
*o
;
6673 while (ISALPHA (*q
))
6676 o
= (const struct asm_barrier_opt
*) str_hash_find_n (arm_barrier_opt_hsh
, p
,
6681 if (!mark_feature_used (&o
->arch
))
6688 /* Parse the operands of a table branch instruction. Similar to a memory
6691 parse_tb (char **str
)
6696 if (skip_past_char (&p
, '[') == FAIL
)
6698 inst
.error
= _("'[' expected");
6702 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6704 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6707 inst
.operands
[0].reg
= reg
;
6709 if (skip_past_comma (&p
) == FAIL
)
6711 inst
.error
= _("',' expected");
6715 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6717 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6720 inst
.operands
[0].imm
= reg
;
6722 if (skip_past_comma (&p
) == SUCCESS
)
6724 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6726 if (inst
.relocs
[0].exp
.X_add_number
!= 1)
6728 inst
.error
= _("invalid shift");
6731 inst
.operands
[0].shifted
= 1;
6734 if (skip_past_char (&p
, ']') == FAIL
)
6736 inst
.error
= _("']' expected");
6743 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6744 information on the types the operands can take and how they are encoded.
6745 Up to four operands may be read; this function handles setting the
6746 ".present" field for each read operand itself.
6747 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6748 else returns FAIL. */
6751 parse_neon_mov (char **str
, int *which_operand
)
6753 int i
= *which_operand
, val
;
6754 enum arm_reg_type rtype
;
6756 struct neon_type_el optype
;
6758 if ((val
= parse_scalar (&ptr
, 8, &optype
, REG_TYPE_MQ
)) != FAIL
)
6760 /* Cases 17 or 19. */
6761 inst
.operands
[i
].reg
= val
;
6762 inst
.operands
[i
].isvec
= 1;
6763 inst
.operands
[i
].isscalar
= 2;
6764 inst
.operands
[i
].vectype
= optype
;
6765 inst
.operands
[i
++].present
= 1;
6767 if (skip_past_comma (&ptr
) == FAIL
)
6770 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6772 /* Case 17: VMOV<c>.<dt> <Qd[idx]>, <Rt> */
6773 inst
.operands
[i
].reg
= val
;
6774 inst
.operands
[i
].isreg
= 1;
6775 inst
.operands
[i
].present
= 1;
6777 else if ((val
= parse_scalar (&ptr
, 8, &optype
, REG_TYPE_MQ
)) != FAIL
)
6779 /* Case 19: VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2> */
6780 inst
.operands
[i
].reg
= val
;
6781 inst
.operands
[i
].isvec
= 1;
6782 inst
.operands
[i
].isscalar
= 2;
6783 inst
.operands
[i
].vectype
= optype
;
6784 inst
.operands
[i
++].present
= 1;
6786 if (skip_past_comma (&ptr
) == FAIL
)
6789 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6792 inst
.operands
[i
].reg
= val
;
6793 inst
.operands
[i
].isreg
= 1;
6794 inst
.operands
[i
++].present
= 1;
6796 if (skip_past_comma (&ptr
) == FAIL
)
6799 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6802 inst
.operands
[i
].reg
= val
;
6803 inst
.operands
[i
].isreg
= 1;
6804 inst
.operands
[i
].present
= 1;
6808 first_error (_("expected ARM or MVE vector register"));
6812 else if ((val
= parse_scalar (&ptr
, 8, &optype
, REG_TYPE_VFD
)) != FAIL
)
6814 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6815 inst
.operands
[i
].reg
= val
;
6816 inst
.operands
[i
].isscalar
= 1;
6817 inst
.operands
[i
].vectype
= optype
;
6818 inst
.operands
[i
++].present
= 1;
6820 if (skip_past_comma (&ptr
) == FAIL
)
6823 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6826 inst
.operands
[i
].reg
= val
;
6827 inst
.operands
[i
].isreg
= 1;
6828 inst
.operands
[i
].present
= 1;
6830 else if (((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6832 || ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_MQ
, &rtype
, &optype
))
6835 /* Cases 0, 1, 2, 3, 5 (D only). */
6836 if (skip_past_comma (&ptr
) == FAIL
)
6839 inst
.operands
[i
].reg
= val
;
6840 inst
.operands
[i
].isreg
= 1;
6841 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6842 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6843 inst
.operands
[i
].isvec
= 1;
6844 inst
.operands
[i
].vectype
= optype
;
6845 inst
.operands
[i
++].present
= 1;
6847 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6849 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6850 Case 13: VMOV <Sd>, <Rm> */
6851 inst
.operands
[i
].reg
= val
;
6852 inst
.operands
[i
].isreg
= 1;
6853 inst
.operands
[i
].present
= 1;
6855 if (rtype
== REG_TYPE_NQ
)
6857 first_error (_("can't use Neon quad register here"));
6860 else if (rtype
!= REG_TYPE_VFS
)
6863 if (skip_past_comma (&ptr
) == FAIL
)
6865 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6867 inst
.operands
[i
].reg
= val
;
6868 inst
.operands
[i
].isreg
= 1;
6869 inst
.operands
[i
].present
= 1;
6872 else if (((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6874 || ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_MQ
, &rtype
,
6877 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6878 Case 1: VMOV<c><q> <Dd>, <Dm>
6879 Case 8: VMOV.F32 <Sd>, <Sm>
6880 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6882 inst
.operands
[i
].reg
= val
;
6883 inst
.operands
[i
].isreg
= 1;
6884 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6885 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6886 inst
.operands
[i
].isvec
= 1;
6887 inst
.operands
[i
].vectype
= optype
;
6888 inst
.operands
[i
].present
= 1;
6890 if (skip_past_comma (&ptr
) == SUCCESS
)
6895 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6898 inst
.operands
[i
].reg
= val
;
6899 inst
.operands
[i
].isreg
= 1;
6900 inst
.operands
[i
++].present
= 1;
6902 if (skip_past_comma (&ptr
) == FAIL
)
6905 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6908 inst
.operands
[i
].reg
= val
;
6909 inst
.operands
[i
].isreg
= 1;
6910 inst
.operands
[i
].present
= 1;
6913 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6914 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6915 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6916 Case 10: VMOV.F32 <Sd>, #<imm>
6917 Case 11: VMOV.F64 <Dd>, #<imm> */
6918 inst
.operands
[i
].immisfloat
= 1;
6919 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/false)
6921 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6922 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6926 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6930 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6932 /* Cases 6, 7, 16, 18. */
6933 inst
.operands
[i
].reg
= val
;
6934 inst
.operands
[i
].isreg
= 1;
6935 inst
.operands
[i
++].present
= 1;
6937 if (skip_past_comma (&ptr
) == FAIL
)
6940 if ((val
= parse_scalar (&ptr
, 8, &optype
, REG_TYPE_MQ
)) != FAIL
)
6942 /* Case 18: VMOV<c>.<dt> <Rt>, <Qn[idx]> */
6943 inst
.operands
[i
].reg
= val
;
6944 inst
.operands
[i
].isscalar
= 2;
6945 inst
.operands
[i
].present
= 1;
6946 inst
.operands
[i
].vectype
= optype
;
6948 else if ((val
= parse_scalar (&ptr
, 8, &optype
, REG_TYPE_VFD
)) != FAIL
)
6950 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6951 inst
.operands
[i
].reg
= val
;
6952 inst
.operands
[i
].isscalar
= 1;
6953 inst
.operands
[i
].present
= 1;
6954 inst
.operands
[i
].vectype
= optype
;
6956 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6958 inst
.operands
[i
].reg
= val
;
6959 inst
.operands
[i
].isreg
= 1;
6960 inst
.operands
[i
++].present
= 1;
6962 if (skip_past_comma (&ptr
) == FAIL
)
6965 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6968 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6970 inst
.operands
[i
].reg
= val
;
6971 inst
.operands
[i
].isreg
= 1;
6972 inst
.operands
[i
].isvec
= 1;
6973 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6974 inst
.operands
[i
].vectype
= optype
;
6975 inst
.operands
[i
].present
= 1;
6977 if (rtype
== REG_TYPE_VFS
)
6981 if (skip_past_comma (&ptr
) == FAIL
)
6983 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6986 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6989 inst
.operands
[i
].reg
= val
;
6990 inst
.operands
[i
].isreg
= 1;
6991 inst
.operands
[i
].isvec
= 1;
6992 inst
.operands
[i
].issingle
= 1;
6993 inst
.operands
[i
].vectype
= optype
;
6994 inst
.operands
[i
].present
= 1;
6999 if ((val
= parse_scalar (&ptr
, 8, &optype
, REG_TYPE_MQ
))
7002 /* Case 16: VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]> */
7003 inst
.operands
[i
].reg
= val
;
7004 inst
.operands
[i
].isvec
= 1;
7005 inst
.operands
[i
].isscalar
= 2;
7006 inst
.operands
[i
].vectype
= optype
;
7007 inst
.operands
[i
++].present
= 1;
7009 if (skip_past_comma (&ptr
) == FAIL
)
7012 if ((val
= parse_scalar (&ptr
, 8, &optype
, REG_TYPE_MQ
))
7015 first_error (_(reg_expected_msgs
[REG_TYPE_MQ
]));
7018 inst
.operands
[i
].reg
= val
;
7019 inst
.operands
[i
].isvec
= 1;
7020 inst
.operands
[i
].isscalar
= 2;
7021 inst
.operands
[i
].vectype
= optype
;
7022 inst
.operands
[i
].present
= 1;
7026 first_error (_("VFP single, double or MVE vector register"
7032 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
7036 inst
.operands
[i
].reg
= val
;
7037 inst
.operands
[i
].isreg
= 1;
7038 inst
.operands
[i
].isvec
= 1;
7039 inst
.operands
[i
].issingle
= 1;
7040 inst
.operands
[i
].vectype
= optype
;
7041 inst
.operands
[i
].present
= 1;
7046 first_error (_("parse error"));
7050 /* Successfully parsed the operands. Update args. */
7056 first_error (_("expected comma"));
7060 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
7064 /* Use this macro when the operand constraints are different
7065 for ARM and THUMB (e.g. ldrd). */
7066 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
7067 ((arm_operand) | ((thumb_operand) << 16))
7069 /* Matcher codes for parse_operands. */
7070 enum operand_parse_code
7072 OP_stop
, /* end of line */
7074 OP_RR
, /* ARM register */
7075 OP_RRnpc
, /* ARM register, not r15 */
7076 OP_RRnpcsp
, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
7077 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
7078 OP_RRnpctw
, /* ARM register, not r15 in Thumb-state or with writeback,
7079 optional trailing ! */
7080 OP_RRw
, /* ARM register, not r15, optional trailing ! */
7081 OP_RCP
, /* Coprocessor number */
7082 OP_RCN
, /* Coprocessor register */
7083 OP_RF
, /* FPA register */
7084 OP_RVS
, /* VFP single precision register */
7085 OP_RVD
, /* VFP double precision register (0..15) */
7086 OP_RND
, /* Neon double precision register (0..31) */
7087 OP_RNDMQ
, /* Neon double precision (0..31) or MVE vector register. */
7088 OP_RNDMQR
, /* Neon double precision (0..31), MVE vector or ARM register.
7090 OP_RNSDMQR
, /* Neon single or double precision, MVE vector or ARM register.
7092 OP_RNQ
, /* Neon quad precision register */
7093 OP_RNQMQ
, /* Neon quad or MVE vector register. */
7094 OP_RVSD
, /* VFP single or double precision register */
7095 OP_RVSD_COND
, /* VFP single, double precision register or condition code. */
7096 OP_RVSDMQ
, /* VFP single, double precision or MVE vector register. */
7097 OP_RNSD
, /* Neon single or double precision register */
7098 OP_RNDQ
, /* Neon double or quad precision register */
7099 OP_RNDQMQ
, /* Neon double, quad or MVE vector register. */
7100 OP_RNDQMQR
, /* Neon double, quad, MVE vector or ARM register. */
7101 OP_RNSDQ
, /* Neon single, double or quad precision register */
7102 OP_RNSC
, /* Neon scalar D[X] */
7103 OP_RVC
, /* VFP control register */
7104 OP_RMF
, /* Maverick F register */
7105 OP_RMD
, /* Maverick D register */
7106 OP_RMFX
, /* Maverick FX register */
7107 OP_RMDX
, /* Maverick DX register */
7108 OP_RMAX
, /* Maverick AX register */
7109 OP_RMDS
, /* Maverick DSPSC register */
7110 OP_RIWR
, /* iWMMXt wR register */
7111 OP_RIWC
, /* iWMMXt wC register */
7112 OP_RIWG
, /* iWMMXt wCG register */
7113 OP_RXA
, /* XScale accumulator register */
7115 OP_RNSDMQ
, /* Neon single, double or MVE vector register */
7116 OP_RNSDQMQ
, /* Neon single, double or quad register or MVE vector register
7118 OP_RNSDQMQR
, /* Neon single, double or quad register, MVE vector register or
7120 OP_RMQ
, /* MVE vector register. */
7121 OP_RMQRZ
, /* MVE vector or ARM register including ZR. */
7122 OP_RMQRR
, /* MVE vector or ARM register. */
7124 /* New operands for Armv8.1-M Mainline. */
7125 OP_LR
, /* ARM LR register */
7126 OP_SP
, /* ARM SP register */
7128 OP_RRe
, /* ARM register, only even numbered. */
7129 OP_RRo
, /* ARM register, only odd numbered, not r13 or r15. */
7130 OP_RRnpcsp_I32
, /* ARM register (no BadReg) or literal 1 .. 32 */
7131 OP_RR_ZR
, /* ARM register or ZR but no PC */
7133 OP_REGLST
, /* ARM register list */
7134 OP_CLRMLST
, /* CLRM register list */
7135 OP_VRSLST
, /* VFP single-precision register list */
7136 OP_VRDLST
, /* VFP double-precision register list */
7137 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
7138 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
7139 OP_NSTRLST
, /* Neon element/structure list */
7140 OP_VRSDVLST
, /* VFP single or double-precision register list and VPR */
7141 OP_MSTRLST2
, /* MVE vector list with two elements. */
7142 OP_MSTRLST4
, /* MVE vector list with four elements. */
7144 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
7145 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
7146 OP_RSVD_FI0
, /* VFP S or D reg, or floating point immediate zero. */
7147 OP_RSVDMQ_FI0
, /* VFP S, D, MVE vector register or floating point immediate
7149 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
7150 OP_RNSD_RNSC
, /* Neon S or D reg, or Neon scalar. */
7151 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
7152 OP_RNSDQ_RNSC_MQ
, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
7154 OP_RNSDQ_RNSC_MQ_RR
, /* Vector S, D or Q reg, or MVE vector reg , or Neon
7155 scalar, or ARM register. */
7156 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
7157 OP_RNDQ_RNSC_RR
, /* Neon D or Q reg, Neon scalar, or ARM register. */
7158 OP_RNDQMQ_RNSC_RR
, /* Neon D or Q reg, Neon scalar, MVE vector or ARM
7160 OP_RNDQMQ_RNSC
, /* Neon D, Q or MVE vector reg, or Neon scalar. */
7161 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
7162 OP_VMOV
, /* Neon VMOV operands. */
7163 OP_RNDQ_Ibig
, /* Neon D or Q reg, or big immediate for logic and VMVN. */
7164 /* Neon D, Q or MVE vector register, or big immediate for logic and VMVN. */
7166 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
7167 OP_RNDQMQ_I63b_RR
, /* Neon D or Q reg, immediate for shift, MVE vector or
7169 OP_RIWR_I32z
, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
7170 OP_VLDR
, /* VLDR operand. */
7172 OP_I0
, /* immediate zero */
7173 OP_I7
, /* immediate value 0 .. 7 */
7174 OP_I15
, /* 0 .. 15 */
7175 OP_I16
, /* 1 .. 16 */
7176 OP_I16z
, /* 0 .. 16 */
7177 OP_I31
, /* 0 .. 31 */
7178 OP_I31w
, /* 0 .. 31, optional trailing ! */
7179 OP_I32
, /* 1 .. 32 */
7180 OP_I32z
, /* 0 .. 32 */
7181 OP_I48_I64
, /* 48 or 64 */
7182 OP_I63
, /* 0 .. 63 */
7183 OP_I63s
, /* -64 .. 63 */
7184 OP_I64
, /* 1 .. 64 */
7185 OP_I64z
, /* 0 .. 64 */
7186 OP_I127
, /* 0 .. 127 */
7187 OP_I255
, /* 0 .. 255 */
7188 OP_I511
, /* 0 .. 511 */
7189 OP_I4095
, /* 0 .. 4095 */
7190 OP_I8191
, /* 0 .. 8191 */
7191 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
7192 OP_I7b
, /* 0 .. 7 */
7193 OP_I15b
, /* 0 .. 15 */
7194 OP_I31b
, /* 0 .. 31 */
7196 OP_SH
, /* shifter operand */
7197 OP_SHG
, /* shifter operand with possible group relocation */
7198 OP_ADDR
, /* Memory address expression (any mode) */
7199 OP_ADDRMVE
, /* Memory address expression for MVE's VSTR/VLDR. */
7200 OP_ADDRGLDR
, /* Mem addr expr (any mode) with possible LDR group reloc */
7201 OP_ADDRGLDRS
, /* Mem addr expr (any mode) with possible LDRS group reloc */
7202 OP_ADDRGLDC
, /* Mem addr expr (any mode) with possible LDC group reloc */
7203 OP_EXP
, /* arbitrary expression */
7204 OP_EXPi
, /* same, with optional immediate prefix */
7205 OP_EXPr
, /* same, with optional relocation suffix */
7206 OP_EXPs
, /* same, with optional non-first operand relocation suffix */
7207 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
7208 OP_IROT1
, /* VCADD rotate immediate: 90, 270. */
7209 OP_IROT2
, /* VCMLA rotate immediate: 0, 90, 180, 270. */
7211 OP_CPSF
, /* CPS flags */
7212 OP_ENDI
, /* Endianness specifier */
7213 OP_wPSR
, /* CPSR/SPSR/APSR mask for msr (writing). */
7214 OP_rPSR
, /* CPSR/SPSR/APSR mask for msr (reading). */
7215 OP_COND
, /* conditional code */
7216 OP_TB
, /* Table branch. */
7218 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
7220 OP_RRnpc_I0
, /* ARM register or literal 0 */
7221 OP_RR_EXr
, /* ARM register or expression with opt. reloc stuff. */
7222 OP_RR_EXi
, /* ARM register or expression with imm prefix */
7223 OP_RF_IF
, /* FPA register or immediate */
7224 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
7225 OP_RIWC_RIWG
, /* iWMMXt wC or wCG reg */
7227 /* Optional operands. */
7228 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
7229 OP_oI31b
, /* 0 .. 31 */
7230 OP_oI32b
, /* 1 .. 32 */
7231 OP_oI32z
, /* 0 .. 32 */
7232 OP_oIffffb
, /* 0 .. 65535 */
7233 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
7235 OP_oRR
, /* ARM register */
7236 OP_oLR
, /* ARM LR register */
7237 OP_oRRnpc
, /* ARM register, not the PC */
7238 OP_oRRnpcsp
, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
7239 OP_oRRw
, /* ARM register, not r15, optional trailing ! */
7240 OP_oRND
, /* Optional Neon double precision register */
7241 OP_oRNQ
, /* Optional Neon quad precision register */
7242 OP_oRNDQMQ
, /* Optional Neon double, quad or MVE vector register. */
7243 OP_oRNDQ
, /* Optional Neon double or quad precision register */
7244 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
7245 OP_oRNSDQMQ
, /* Optional single, double or quad register or MVE vector
7247 OP_oRNSDMQ
, /* Optional single, double register or MVE vector
7249 OP_oSHll
, /* LSL immediate */
7250 OP_oSHar
, /* ASR immediate */
7251 OP_oSHllar
, /* LSL or ASR immediate */
7252 OP_oROR
, /* ROR 0/8/16/24 */
7253 OP_oBARRIER_I15
, /* Option argument for a barrier instruction. */
7255 OP_oRMQRZ
, /* optional MVE vector or ARM register including ZR. */
7257 /* Some pre-defined mixed (ARM/THUMB) operands. */
7258 OP_RR_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RR
, OP_RRnpcsp
),
7259 OP_RRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RRnpc
, OP_RRnpcsp
),
7260 OP_oRRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc
, OP_oRRnpcsp
),
7262 OP_FIRST_OPTIONAL
= OP_oI7b
7265 /* Generic instruction operand parser. This does no encoding and no
7266 semantic validation; it merely squirrels values away in the inst
7267 structure. Returns SUCCESS or FAIL depending on whether the
7268 specified grammar matched. */
7270 parse_operands (char *str
, const unsigned int *pattern
, bool thumb
)
7272 unsigned const int *upat
= pattern
;
7273 char *backtrack_pos
= 0;
7274 const char *backtrack_error
= 0;
7275 int i
, val
= 0, backtrack_index
= 0;
7276 enum arm_reg_type rtype
;
7277 parse_operand_result result
;
7278 unsigned int op_parse_code
;
7281 #define po_char_or_fail(chr) \
7284 if (skip_past_char (&str, chr) == FAIL) \
7289 #define po_reg_or_fail(regtype) \
7292 val = arm_typed_reg_parse (& str, regtype, & rtype, \
7293 & inst.operands[i].vectype); \
7296 first_error (_(reg_expected_msgs[regtype])); \
7299 inst.operands[i].reg = val; \
7300 inst.operands[i].isreg = 1; \
7301 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
7302 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
7303 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
7304 || rtype == REG_TYPE_VFD \
7305 || rtype == REG_TYPE_NQ); \
7306 inst.operands[i].iszr = (rtype == REG_TYPE_ZR); \
7310 #define po_reg_or_goto(regtype, label) \
7313 val = arm_typed_reg_parse (& str, regtype, & rtype, \
7314 & inst.operands[i].vectype); \
7318 inst.operands[i].reg = val; \
7319 inst.operands[i].isreg = 1; \
7320 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
7321 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
7322 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
7323 || rtype == REG_TYPE_VFD \
7324 || rtype == REG_TYPE_NQ); \
7325 inst.operands[i].iszr = (rtype == REG_TYPE_ZR); \
7329 #define po_imm_or_fail(min, max, popt) \
7332 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
7334 inst.operands[i].imm = val; \
7338 #define po_imm1_or_imm2_or_fail(imm1, imm2, popt) \
7342 my_get_expression (&exp, &str, popt); \
7343 if (exp.X_op != O_constant) \
7345 inst.error = _("constant expression required"); \
7348 if (exp.X_add_number != imm1 && exp.X_add_number != imm2) \
7350 inst.error = _("immediate value 48 or 64 expected"); \
7353 inst.operands[i].imm = exp.X_add_number; \
7357 #define po_scalar_or_goto(elsz, label, reg_type) \
7360 val = parse_scalar (& str, elsz, & inst.operands[i].vectype, \
7364 inst.operands[i].reg = val; \
7365 inst.operands[i].isscalar = 1; \
7369 #define po_misc_or_fail(expr) \
7377 #define po_misc_or_fail_no_backtrack(expr) \
7381 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
7382 backtrack_pos = 0; \
7383 if (result != PARSE_OPERAND_SUCCESS) \
7388 #define po_barrier_or_imm(str) \
7391 val = parse_barrier (&str); \
7392 if (val == FAIL && ! ISALPHA (*str)) \
7395 /* ISB can only take SY as an option. */ \
7396 || ((inst.instruction & 0xf0) == 0x60 \
7399 inst.error = _("invalid barrier type"); \
7400 backtrack_pos = 0; \
7406 skip_whitespace (str
);
7408 for (i
= 0; upat
[i
] != OP_stop
; i
++)
7410 op_parse_code
= upat
[i
];
7411 if (op_parse_code
>= 1<<16)
7412 op_parse_code
= thumb
? (op_parse_code
>> 16)
7413 : (op_parse_code
& ((1<<16)-1));
7415 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
7417 /* Remember where we are in case we need to backtrack. */
7418 backtrack_pos
= str
;
7419 backtrack_error
= inst
.error
;
7420 backtrack_index
= i
;
7423 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
7424 po_char_or_fail (',');
7426 switch (op_parse_code
)
7440 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
7441 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
7442 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
7443 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
7444 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
7445 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
7448 po_reg_or_goto (REG_TYPE_VFS
, try_rndmqr
);
7452 po_reg_or_goto (REG_TYPE_RN
, try_rndmq
);
7456 po_reg_or_goto (REG_TYPE_MQ
, try_rnd
);
7459 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
7461 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
7463 /* Also accept generic coprocessor regs for unknown registers. */
7465 po_reg_or_goto (REG_TYPE_CN
, vpr_po
);
7467 /* Also accept P0 or p0 for VPR.P0. Since P0 is already an
7468 existing register with a value of 0, this seems like the
7469 best way to parse P0. */
7471 if (strncasecmp (str
, "P0", 2) == 0)
7474 inst
.operands
[i
].isreg
= 1;
7475 inst
.operands
[i
].reg
= 13;
7480 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
7481 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
7482 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
7483 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
7484 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
7485 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
7486 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
7487 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
7488 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
7489 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
7492 po_reg_or_goto (REG_TYPE_MQ
, try_nq
);
7495 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
7496 case OP_RNSD
: po_reg_or_fail (REG_TYPE_NSD
); break;
7498 po_reg_or_goto (REG_TYPE_RN
, try_rndqmq
);
7503 po_reg_or_goto (REG_TYPE_MQ
, try_rndq
);
7507 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
7509 po_reg_or_goto (REG_TYPE_MQ
, try_rvsd
);
7512 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
7514 po_reg_or_goto (REG_TYPE_VFSD
, try_cond
);
7518 po_reg_or_goto (REG_TYPE_NSD
, try_mq2
);
7521 po_reg_or_fail (REG_TYPE_MQ
);
7524 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
7526 po_reg_or_goto (REG_TYPE_RN
, try_mq
);
7531 po_reg_or_goto (REG_TYPE_MQ
, try_nsdq2
);
7534 po_reg_or_fail (REG_TYPE_NSDQ
);
7538 po_reg_or_goto (REG_TYPE_RN
, try_rmq
);
7542 po_reg_or_fail (REG_TYPE_MQ
);
7544 /* Neon scalar. Using an element size of 8 means that some invalid
7545 scalars are accepted here, so deal with those in later code. */
7546 case OP_RNSC
: po_scalar_or_goto (8, failure
, REG_TYPE_VFD
); break;
7550 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
7553 po_imm_or_fail (0, 0, true);
7558 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
7562 po_reg_or_goto (REG_TYPE_MQ
, try_rsvd_fi0
);
7567 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
7570 if (parse_ifimm_zero (&str
))
7571 inst
.operands
[i
].imm
= 0;
7575 = _("only floating point zero is allowed as immediate value");
7583 po_scalar_or_goto (8, try_rr
, REG_TYPE_VFD
);
7586 po_reg_or_fail (REG_TYPE_RN
);
7590 case OP_RNSDQ_RNSC_MQ_RR
:
7591 po_reg_or_goto (REG_TYPE_RN
, try_rnsdq_rnsc_mq
);
7594 case OP_RNSDQ_RNSC_MQ
:
7595 po_reg_or_goto (REG_TYPE_MQ
, try_rnsdq_rnsc
);
7600 po_scalar_or_goto (8, try_nsdq
, REG_TYPE_VFD
);
7604 po_reg_or_fail (REG_TYPE_NSDQ
);
7611 po_scalar_or_goto (8, try_s_scalar
, REG_TYPE_VFD
);
7614 po_scalar_or_goto (4, try_nsd
, REG_TYPE_VFS
);
7617 po_reg_or_fail (REG_TYPE_NSD
);
7621 case OP_RNDQMQ_RNSC_RR
:
7622 po_reg_or_goto (REG_TYPE_MQ
, try_rndq_rnsc_rr
);
7625 case OP_RNDQ_RNSC_RR
:
7626 po_reg_or_goto (REG_TYPE_RN
, try_rndq_rnsc
);
7628 case OP_RNDQMQ_RNSC
:
7629 po_reg_or_goto (REG_TYPE_MQ
, try_rndq_rnsc
);
7634 po_scalar_or_goto (8, try_ndq
, REG_TYPE_VFD
);
7637 po_reg_or_fail (REG_TYPE_NDQ
);
7643 po_scalar_or_goto (8, try_vfd
, REG_TYPE_VFD
);
7646 po_reg_or_fail (REG_TYPE_VFD
);
7651 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
7652 not careful then bad things might happen. */
7653 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
7656 case OP_RNDQMQ_Ibig
:
7657 po_reg_or_goto (REG_TYPE_MQ
, try_rndq_ibig
);
7662 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
7665 /* There's a possibility of getting a 64-bit immediate here, so
7666 we need special handling. */
7667 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/false)
7670 inst
.error
= _("immediate value is out of range");
7676 case OP_RNDQMQ_I63b_RR
:
7677 po_reg_or_goto (REG_TYPE_MQ
, try_rndq_i63b_rr
);
7680 po_reg_or_goto (REG_TYPE_RN
, try_rndq_i63b
);
7685 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
7688 po_imm_or_fail (0, 63, true);
7693 po_char_or_fail ('[');
7694 po_reg_or_fail (REG_TYPE_RN
);
7695 po_char_or_fail (']');
7701 po_reg_or_fail (REG_TYPE_RN
);
7702 if (skip_past_char (&str
, '!') == SUCCESS
)
7703 inst
.operands
[i
].writeback
= 1;
7707 case OP_I7
: po_imm_or_fail ( 0, 7, false); break;
7708 case OP_I15
: po_imm_or_fail ( 0, 15, false); break;
7709 case OP_I16
: po_imm_or_fail ( 1, 16, false); break;
7710 case OP_I16z
: po_imm_or_fail ( 0, 16, false); break;
7711 case OP_I31
: po_imm_or_fail ( 0, 31, false); break;
7712 case OP_I32
: po_imm_or_fail ( 1, 32, false); break;
7713 case OP_I32z
: po_imm_or_fail ( 0, 32, false); break;
7714 case OP_I48_I64
: po_imm1_or_imm2_or_fail (48, 64, false); break;
7715 case OP_I63s
: po_imm_or_fail (-64, 63, false); break;
7716 case OP_I63
: po_imm_or_fail ( 0, 63, false); break;
7717 case OP_I64
: po_imm_or_fail ( 1, 64, false); break;
7718 case OP_I64z
: po_imm_or_fail ( 0, 64, false); break;
7719 case OP_I127
: po_imm_or_fail ( 0, 127, false); break;
7720 case OP_I255
: po_imm_or_fail ( 0, 255, false); break;
7721 case OP_I511
: po_imm_or_fail ( 0, 511, false); break;
7722 case OP_I4095
: po_imm_or_fail ( 0, 4095, false); break;
7723 case OP_I8191
: po_imm_or_fail ( 0, 8191, false); break;
7724 case OP_I4b
: po_imm_or_fail ( 1, 4, true); break;
7726 case OP_I7b
: po_imm_or_fail ( 0, 7, true); break;
7727 case OP_I15b
: po_imm_or_fail ( 0, 15, true); break;
7729 case OP_I31b
: po_imm_or_fail ( 0, 31, true); break;
7730 case OP_oI32b
: po_imm_or_fail ( 1, 32, true); break;
7731 case OP_oI32z
: po_imm_or_fail ( 0, 32, true); break;
7732 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, true); break;
7734 /* Immediate variants */
7736 po_char_or_fail ('{');
7737 po_imm_or_fail (0, 255, true);
7738 po_char_or_fail ('}');
7742 /* The expression parser chokes on a trailing !, so we have
7743 to find it first and zap it. */
7746 while (*s
&& *s
!= ',')
7751 inst
.operands
[i
].writeback
= 1;
7753 po_imm_or_fail (0, 31, true);
7761 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7766 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7771 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7773 if (inst
.relocs
[0].exp
.X_op
== O_symbol
)
7775 val
= parse_reloc (&str
);
7778 inst
.error
= _("unrecognized relocation suffix");
7781 else if (val
!= BFD_RELOC_UNUSED
)
7783 inst
.operands
[i
].imm
= val
;
7784 inst
.operands
[i
].hasreloc
= 1;
7790 po_misc_or_fail (my_get_expression (&inst
.relocs
[i
].exp
, &str
,
7792 if (inst
.relocs
[i
].exp
.X_op
== O_symbol
)
7794 inst
.operands
[i
].hasreloc
= 1;
7796 else if (inst
.relocs
[i
].exp
.X_op
== O_constant
)
7798 inst
.operands
[i
].imm
= inst
.relocs
[i
].exp
.X_add_number
;
7799 inst
.operands
[i
].hasreloc
= 0;
7803 /* Operand for MOVW or MOVT. */
7805 po_misc_or_fail (parse_half (&str
));
7808 /* Register or expression. */
7809 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
7810 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
7812 /* Register or immediate. */
7813 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
7814 I0
: po_imm_or_fail (0, 0, false); break;
7816 case OP_RRnpcsp_I32
: po_reg_or_goto (REG_TYPE_RN
, I32
); break;
7817 I32
: po_imm_or_fail (1, 32, false); break;
7819 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
7821 if (!is_immediate_prefix (*str
))
7824 val
= parse_fpa_immediate (&str
);
7827 /* FPA immediates are encoded as registers 8-15.
7828 parse_fpa_immediate has already applied the offset. */
7829 inst
.operands
[i
].reg
= val
;
7830 inst
.operands
[i
].isreg
= 1;
7833 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
7834 I32z
: po_imm_or_fail (0, 32, false); break;
7836 /* Two kinds of register. */
7839 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7841 || (rege
->type
!= REG_TYPE_MMXWR
7842 && rege
->type
!= REG_TYPE_MMXWC
7843 && rege
->type
!= REG_TYPE_MMXWCG
))
7845 inst
.error
= _("iWMMXt data or control register expected");
7848 inst
.operands
[i
].reg
= rege
->number
;
7849 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
7855 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7857 || (rege
->type
!= REG_TYPE_MMXWC
7858 && rege
->type
!= REG_TYPE_MMXWCG
))
7860 inst
.error
= _("iWMMXt control register expected");
7863 inst
.operands
[i
].reg
= rege
->number
;
7864 inst
.operands
[i
].isreg
= 1;
7869 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
7870 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
7871 case OP_oROR
: val
= parse_ror (&str
); break;
7873 case OP_COND
: val
= parse_cond (&str
); break;
7874 case OP_oBARRIER_I15
:
7875 po_barrier_or_imm (str
); break;
7877 if (parse_immediate (&str
, &val
, 0, 15, true) == FAIL
)
7883 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
7884 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
7886 inst
.error
= _("Banked registers are not available with this "
7892 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7896 po_reg_or_goto (REG_TYPE_VFSD
, try_sysreg
);
7899 val
= parse_sys_vldr_vstr (&str
);
7903 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7906 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7908 if (strncasecmp (str
, "APSR_", 5) == 0)
7915 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7916 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7917 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7918 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7919 default: found
= 16;
7923 inst
.operands
[i
].isvec
= 1;
7924 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7925 inst
.operands
[i
].reg
= REG_PC
;
7932 po_misc_or_fail (parse_tb (&str
));
7935 /* Register lists. */
7937 val
= parse_reg_list (&str
, REGLIST_RN
);
7940 inst
.operands
[i
].writeback
= 1;
7946 val
= parse_reg_list (&str
, REGLIST_CLRM
);
7950 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
,
7955 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
,
7960 /* Allow Q registers too. */
7961 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7962 REGLIST_NEON_D
, &partial_match
);
7966 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7967 REGLIST_VFP_S
, &partial_match
);
7968 inst
.operands
[i
].issingle
= 1;
7973 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7974 REGLIST_VFP_D_VPR
, &partial_match
);
7975 if (val
== FAIL
&& !partial_match
)
7978 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7979 REGLIST_VFP_S_VPR
, &partial_match
);
7980 inst
.operands
[i
].issingle
= 1;
7985 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7986 REGLIST_NEON_D
, &partial_match
);
7991 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7992 1, &inst
.operands
[i
].vectype
);
7993 if (val
!= (((op_parse_code
== OP_MSTRLST2
) ? 3 : 7) << 5 | 0xe))
7997 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7998 0, &inst
.operands
[i
].vectype
);
8001 /* Addressing modes */
8003 po_misc_or_fail (parse_address_group_reloc (&str
, i
, GROUP_MVE
));
8007 po_misc_or_fail (parse_address (&str
, i
));
8011 po_misc_or_fail_no_backtrack (
8012 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
8016 po_misc_or_fail_no_backtrack (
8017 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
8021 po_misc_or_fail_no_backtrack (
8022 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
8026 po_misc_or_fail (parse_shifter_operand (&str
, i
));
8030 po_misc_or_fail_no_backtrack (
8031 parse_shifter_operand_group_reloc (&str
, i
));
8035 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
8039 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
8043 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
8048 po_reg_or_goto (REG_TYPE_MQ
, try_rr_zr
);
8053 po_reg_or_goto (REG_TYPE_RN
, ZR
);
8056 po_reg_or_fail (REG_TYPE_ZR
);
8060 as_fatal (_("unhandled operand code %d"), op_parse_code
);
8063 /* Various value-based sanity checks and shared operations. We
8064 do not signal immediate failures for the register constraints;
8065 this allows a syntax error to take precedence. */
8066 switch (op_parse_code
)
8074 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
8075 inst
.error
= BAD_PC
;
8080 case OP_RRnpcsp_I32
:
8081 if (inst
.operands
[i
].isreg
)
8083 if (inst
.operands
[i
].reg
== REG_PC
)
8084 inst
.error
= BAD_PC
;
8085 else if (inst
.operands
[i
].reg
== REG_SP
8086 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
8087 relaxed since ARMv8-A. */
8088 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
8091 inst
.error
= BAD_SP
;
8097 if (inst
.operands
[i
].isreg
8098 && inst
.operands
[i
].reg
== REG_PC
8099 && (inst
.operands
[i
].writeback
|| thumb
))
8100 inst
.error
= BAD_PC
;
8105 if (inst
.operands
[i
].isreg
)
8115 case OP_oBARRIER_I15
:
8128 inst
.operands
[i
].imm
= val
;
8133 if (inst
.operands
[i
].reg
!= REG_LR
)
8134 inst
.error
= _("operand must be LR register");
8138 if (inst
.operands
[i
].reg
!= REG_SP
)
8139 inst
.error
= _("operand must be SP register");
8143 if (inst
.operands
[i
].reg
!= REG_R12
)
8144 inst
.error
= _("operand must be r12");
8150 if (!inst
.operands
[i
].iszr
&& inst
.operands
[i
].reg
== REG_PC
)
8151 inst
.error
= BAD_PC
;
8155 if (inst
.operands
[i
].isreg
8156 && (inst
.operands
[i
].reg
& 0x00000001) != 0)
8157 inst
.error
= BAD_ODD
;
8161 if (inst
.operands
[i
].isreg
)
8163 if ((inst
.operands
[i
].reg
& 0x00000001) != 1)
8164 inst
.error
= BAD_EVEN
;
8165 else if (inst
.operands
[i
].reg
== REG_SP
)
8166 as_tsktsk (MVE_BAD_SP
);
8167 else if (inst
.operands
[i
].reg
== REG_PC
)
8168 inst
.error
= BAD_PC
;
8176 /* If we get here, this operand was successfully parsed. */
8177 inst
.operands
[i
].present
= 1;
8181 inst
.error
= BAD_ARGS
;
8186 /* The parse routine should already have set inst.error, but set a
8187 default here just in case. */
8189 inst
.error
= BAD_SYNTAX
;
8193 /* Do not backtrack over a trailing optional argument that
8194 absorbed some text. We will only fail again, with the
8195 'garbage following instruction' error message, which is
8196 probably less helpful than the current one. */
8197 if (backtrack_index
== i
&& backtrack_pos
!= str
8198 && upat
[i
+1] == OP_stop
)
8201 inst
.error
= BAD_SYNTAX
;
8205 /* Try again, skipping the optional argument at backtrack_pos. */
8206 str
= backtrack_pos
;
8207 inst
.error
= backtrack_error
;
8208 inst
.operands
[backtrack_index
].present
= 0;
8209 i
= backtrack_index
;
8213 /* Check that we have parsed all the arguments. */
8214 if (*str
!= '\0' && !inst
.error
)
8215 inst
.error
= _("garbage following instruction");
8217 return inst
.error
? FAIL
: SUCCESS
;
8220 #undef po_char_or_fail
8221 #undef po_reg_or_fail
8222 #undef po_reg_or_goto
8223 #undef po_imm_or_fail
8224 #undef po_scalar_or_fail
8225 #undef po_barrier_or_imm
8227 /* Shorthand macro for instruction encoding functions issuing errors. */
8228 #define constraint(expr, err) \
8239 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
8240 instructions are unpredictable if these registers are used. This
8241 is the BadReg predicate in ARM's Thumb-2 documentation.
8243 Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
8244 places, while the restriction on REG_SP was relaxed since ARMv8-A. */
8245 #define reject_bad_reg(reg) \
8247 if (reg == REG_PC) \
8249 inst.error = BAD_PC; \
8252 else if (reg == REG_SP \
8253 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
8255 inst.error = BAD_SP; \
8260 /* If REG is R13 (the stack pointer), warn that its use is
8262 #define warn_deprecated_sp(reg) \
8264 if (warn_on_deprecated && reg == REG_SP) \
8265 as_tsktsk (_("use of r13 is deprecated")); \
8268 /* Functions for operand encoding. ARM, then Thumb. */
/* Rotate the 32-bit value V left by N bits (0 <= N <= 31).  Arguments are
   fully parenthesised so that expression arguments expand safely; note that
   V and N are still evaluated more than once, so they must not have side
   effects.  */
#define rotate_left(v, n) ((v) << ((n) & 31) | (v) >> ((32 - (n)) & 31))
8272 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
8274 The only binary encoding difference is the Coprocessor number. Coprocessor
8275 9 is used for half-precision calculations or conversions. The format of the
8276 instruction is the same as the equivalent Coprocessor 10 instruction that
8277 exists for Single-Precision operation. */
8280 do_scalar_fp16_v82_encode (void)
8282 if (inst
.cond
< COND_ALWAYS
)
8283 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
8284 " the behaviour is UNPREDICTABLE"));
8285 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
8288 inst
.instruction
= (inst
.instruction
& 0xfffff0ff) | 0x900;
8289 mark_feature_used (&arm_ext_fp16
);
8292 /* If VAL can be encoded in the immediate field of an ARM instruction,
8293 return the encoded form. Otherwise, return FAIL. */
8296 encode_arm_immediate (unsigned int val
)
8303 for (i
= 2; i
< 32; i
+= 2)
8304 if ((a
= rotate_left (val
, i
)) <= 0xff)
8305 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
8310 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
8311 return the encoded form. Otherwise, return FAIL. */
8313 encode_thumb32_immediate (unsigned int val
)
8320 for (i
= 1; i
<= 24; i
++)
8323 if ((val
& ~(0xffU
<< i
)) == 0)
8324 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
8328 if (val
== ((a
<< 16) | a
))
8330 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
8334 if (val
== ((a
<< 16) | a
))
8335 return 0x200 | (a
>> 8);
8339 /* Encode a VFP SP or DP register number into inst.instruction. */
8342 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
8344 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
8347 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
8350 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
8353 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
8358 first_error (_("D register out of range for selected VFP version"));
8366 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
8370 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
8374 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
8378 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
8382 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
8386 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
8394 /* Encode a <shift> in an ARM-format instruction. The immediate,
8395 if any, is handled by md_apply_fix. */
8397 encode_arm_shift (int i
)
8399 /* register-shifted register. */
8400 if (inst
.operands
[i
].immisreg
)
8403 for (op_index
= 0; op_index
<= i
; ++op_index
)
8405 /* Check the operand only when it's presented. In pre-UAL syntax,
8406 if the destination register is the same as the first operand, two
8407 register form of the instruction can be used. */
8408 if (inst
.operands
[op_index
].present
&& inst
.operands
[op_index
].isreg
8409 && inst
.operands
[op_index
].reg
== REG_PC
)
8410 as_warn (UNPRED_REG ("r15"));
8413 if (inst
.operands
[i
].imm
== REG_PC
)
8414 as_warn (UNPRED_REG ("r15"));
8417 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
8418 inst
.instruction
|= SHIFT_ROR
<< 5;
8421 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
8422 if (inst
.operands
[i
].immisreg
)
8424 inst
.instruction
|= SHIFT_BY_REG
;
8425 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
8428 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
8433 encode_arm_shifter_operand (int i
)
8435 if (inst
.operands
[i
].isreg
)
8437 inst
.instruction
|= inst
.operands
[i
].reg
;
8438 encode_arm_shift (i
);
8442 inst
.instruction
|= INST_IMMEDIATE
;
8443 if (inst
.relocs
[0].type
!= BFD_RELOC_ARM_IMMEDIATE
)
8444 inst
.instruction
|= inst
.operands
[i
].imm
;
8448 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
8450 encode_arm_addr_mode_common (int i
, bool is_t
)
8453 Generate an error if the operand is not a register. */
8454 constraint (!inst
.operands
[i
].isreg
,
8455 _("Instruction does not support =N addresses"));
8457 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8459 if (inst
.operands
[i
].preind
)
8463 inst
.error
= _("instruction does not accept preindexed addressing");
8466 inst
.instruction
|= PRE_INDEX
;
8467 if (inst
.operands
[i
].writeback
)
8468 inst
.instruction
|= WRITE_BACK
;
8471 else if (inst
.operands
[i
].postind
)
8473 gas_assert (inst
.operands
[i
].writeback
);
8475 inst
.instruction
|= WRITE_BACK
;
8477 else /* unindexed - only for coprocessor */
8479 inst
.error
= _("instruction does not accept unindexed addressing");
8483 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
8484 && (((inst
.instruction
& 0x000f0000) >> 16)
8485 == ((inst
.instruction
& 0x0000f000) >> 12)))
8486 as_warn ((inst
.instruction
& LOAD_BIT
)
8487 ? _("destination register same as write-back base")
8488 : _("source register same as write-back base"));
8491 /* inst.operands[i] was set up by parse_address. Encode it into an
8492 ARM-format mode 2 load or store instruction. If is_t is true,
8493 reject forms that cannot be used with a T instruction (i.e. not
8496 encode_arm_addr_mode_2 (int i
, bool is_t
)
8498 const bool is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
8500 encode_arm_addr_mode_common (i
, is_t
);
8502 if (inst
.operands
[i
].immisreg
)
8504 constraint ((inst
.operands
[i
].imm
== REG_PC
8505 || (is_pc
&& inst
.operands
[i
].writeback
)),
8507 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
8508 inst
.instruction
|= inst
.operands
[i
].imm
;
8509 if (!inst
.operands
[i
].negative
)
8510 inst
.instruction
|= INDEX_UP
;
8511 if (inst
.operands
[i
].shifted
)
8513 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
8514 inst
.instruction
|= SHIFT_ROR
<< 5;
8517 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
8518 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
8522 else /* immediate offset in inst.relocs[0] */
8524 if (is_pc
&& !inst
.relocs
[0].pc_rel
)
8526 const bool is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
8528 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
8529 cannot use PC in addressing.
8530 PC cannot be used in writeback addressing, either. */
8531 constraint ((is_t
|| inst
.operands
[i
].writeback
),
8534 /* Use of PC in str is deprecated for ARMv7. */
8535 if (warn_on_deprecated
8537 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
8538 as_tsktsk (_("use of PC in this instruction is deprecated"));
8541 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
8543 /* Prefer + for zero encoded value. */
8544 if (!inst
.operands
[i
].negative
)
8545 inst
.instruction
|= INDEX_UP
;
8546 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM
;
8551 /* inst.operands[i] was set up by parse_address. Encode it into an
8552 ARM-format mode 3 load or store instruction. Reject forms that
8553 cannot be used with such instructions. If is_t is true, reject
8554 forms that cannot be used with a T instruction (i.e. not
8557 encode_arm_addr_mode_3 (int i
, bool is_t
)
8559 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
8561 inst
.error
= _("instruction does not accept scaled register index");
8565 encode_arm_addr_mode_common (i
, is_t
);
8567 if (inst
.operands
[i
].immisreg
)
8569 constraint ((inst
.operands
[i
].imm
== REG_PC
8570 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
8572 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
8574 inst
.instruction
|= inst
.operands
[i
].imm
;
8575 if (!inst
.operands
[i
].negative
)
8576 inst
.instruction
|= INDEX_UP
;
8578 else /* immediate offset in inst.relocs[0] */
8580 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.relocs
[0].pc_rel
8581 && inst
.operands
[i
].writeback
),
8583 inst
.instruction
|= HWOFFSET_IMM
;
8584 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
8586 /* Prefer + for zero encoded value. */
8587 if (!inst
.operands
[i
].negative
)
8588 inst
.instruction
|= INDEX_UP
;
8590 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM8
;
8595 /* Write immediate bits [7:0] to the following locations:
8597 |28/24|23 19|18 16|15 4|3 0|
8598 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
8600 This function is used by VMOV/VMVN/VORR/VBIC. */
8603 neon_write_immbits (unsigned immbits
)
8605 inst
.instruction
|= immbits
& 0xf;
8606 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
8607 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D: i.e. each byte of IMM is either all-zeros or all-ones.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}
/* For immediate of above form, return 0bABCD: one bit per byte, taken
   from the low bit of each byte of IMM.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  return (imm & 0x01)
	 | ((imm & 0x0100) >> 7)
	 | ((imm & 0x010000) >> 14)
	 | ((imm & 0x01000000) >> 21);
}
/* Compress quarter-float representation to 0b...000 abcdefgh:
   sign bit to bit 7, the interesting exponent/mantissa bits
   (IEEE single bits 25:19) to bits 6:0.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return low | sign;
}
8676 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
8677 the instruction. *OP is passed as the initial value of the op field, and
8678 may be set to a different value depending on the constant (i.e.
8679 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
8680 MVN). If the immediate looks like a repeated pattern then also
8681 try smaller element sizes. */
8684 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
8685 unsigned *immbits
, int *op
, int size
,
8686 enum neon_el_type type
)
8688 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
8690 if (type
== NT_float
&& !float_p
)
8693 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
8695 if (size
!= 32 || *op
== 1)
8697 *immbits
= neon_qfloat_bits (immlo
);
8703 if (neon_bits_same_in_bytes (immhi
)
8704 && neon_bits_same_in_bytes (immlo
))
8708 *immbits
= (neon_squash_bits (immhi
) << 4)
8709 | neon_squash_bits (immlo
);
8720 if (immlo
== (immlo
& 0x000000ff))
8725 else if (immlo
== (immlo
& 0x0000ff00))
8727 *immbits
= immlo
>> 8;
8730 else if (immlo
== (immlo
& 0x00ff0000))
8732 *immbits
= immlo
>> 16;
8735 else if (immlo
== (immlo
& 0xff000000))
8737 *immbits
= immlo
>> 24;
8740 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
8742 *immbits
= (immlo
>> 8) & 0xff;
8745 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
8747 *immbits
= (immlo
>> 16) & 0xff;
8751 if ((immlo
& 0xffff) != (immlo
>> 16))
8758 if (immlo
== (immlo
& 0x000000ff))
8763 else if (immlo
== (immlo
& 0x0000ff00))
8765 *immbits
= immlo
>> 8;
8769 if ((immlo
& 0xff) != (immlo
>> 8))
8774 if (immlo
== (immlo
& 0x000000ff))
8776 /* Don't allow MVN with 8-bit immediate. */
8786 #if defined BFD_HOST_64_BIT
8787 /* Returns TRUE if double precision value V may be cast
8788 to single precision without loss of accuracy. */
8791 is_double_a_single (bfd_uint64_t v
)
8793 int exp
= (v
>> 52) & 0x7FF;
8794 bfd_uint64_t mantissa
= v
& 0xFFFFFFFFFFFFFULL
;
8796 return ((exp
== 0 || exp
== 0x7FF
8797 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
8798 && (mantissa
& 0x1FFFFFFFL
) == 0);
8801 /* Returns a double precision value casted to single precision
8802 (ignoring the least significant bits in exponent and mantissa). */
8805 double_to_single (bfd_uint64_t v
)
8807 unsigned int sign
= (v
>> 63) & 1;
8808 int exp
= (v
>> 52) & 0x7FF;
8809 bfd_uint64_t mantissa
= v
& 0xFFFFFFFFFFFFFULL
;
8815 exp
= exp
- 1023 + 127;
8824 /* No denormalized numbers. */
8830 return (sign
<< 31) | (exp
<< 23) | mantissa
;
8832 #endif /* BFD_HOST_64_BIT */
8841 static void do_vfp_nsyn_opcode (const char *);
8843 /* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8844 Determine whether it can be performed with a move instruction; if
8845 it can, convert inst.instruction to that move instruction and
8846 return true; if it can't, convert inst.instruction to a literal-pool
8847 load and return FALSE. If this is not a valid thing to do in the
8848 current context, set inst.error and return TRUE.
8850 inst.operands[i] describes the destination register. */
8853 move_or_literal_pool (int i
, enum lit_type t
, bool mode_3
)
8856 bool thumb_p
= (t
== CONST_THUMB
);
8857 bool arm_p
= (t
== CONST_ARM
);
8860 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
8864 if ((inst
.instruction
& tbit
) == 0)
8866 inst
.error
= _("invalid pseudo operation");
8870 if (inst
.relocs
[0].exp
.X_op
!= O_constant
8871 && inst
.relocs
[0].exp
.X_op
!= O_symbol
8872 && inst
.relocs
[0].exp
.X_op
!= O_big
)
8874 inst
.error
= _("constant expression expected");
8878 if (inst
.relocs
[0].exp
.X_op
== O_constant
8879 || inst
.relocs
[0].exp
.X_op
== O_big
)
8881 #if defined BFD_HOST_64_BIT
8886 if (inst
.relocs
[0].exp
.X_op
== O_big
)
8888 LITTLENUM_TYPE w
[X_PRECISION
];
8891 if (inst
.relocs
[0].exp
.X_add_number
== -1)
8893 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
8895 /* FIXME: Should we check words w[2..5] ? */
8900 #if defined BFD_HOST_64_BIT
8901 v
= l
[3] & LITTLENUM_MASK
;
8902 v
<<= LITTLENUM_NUMBER_OF_BITS
;
8903 v
|= l
[2] & LITTLENUM_MASK
;
8904 v
<<= LITTLENUM_NUMBER_OF_BITS
;
8905 v
|= l
[1] & LITTLENUM_MASK
;
8906 v
<<= LITTLENUM_NUMBER_OF_BITS
;
8907 v
|= l
[0] & LITTLENUM_MASK
;
8909 v
= l
[1] & LITTLENUM_MASK
;
8910 v
<<= LITTLENUM_NUMBER_OF_BITS
;
8911 v
|= l
[0] & LITTLENUM_MASK
;
8915 v
= inst
.relocs
[0].exp
.X_add_number
;
8917 if (!inst
.operands
[i
].issingle
)
8921 /* LDR should not use lead in a flag-setting instruction being
8922 chosen so we do not check whether movs can be used. */
8924 if ((ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
8925 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8926 && inst
.operands
[i
].reg
!= 13
8927 && inst
.operands
[i
].reg
!= 15)
8929 /* Check if on thumb2 it can be done with a mov.w, mvn or
8930 movw instruction. */
8931 unsigned int newimm
;
8932 bool isNegated
= false;
8934 newimm
= encode_thumb32_immediate (v
);
8935 if (newimm
== (unsigned int) FAIL
)
8937 newimm
= encode_thumb32_immediate (~v
);
8941 /* The number can be loaded with a mov.w or mvn
8943 if (newimm
!= (unsigned int) FAIL
8944 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
8946 inst
.instruction
= (0xf04f0000 /* MOV.W. */
8947 | (inst
.operands
[i
].reg
<< 8));
8948 /* Change to MOVN. */
8949 inst
.instruction
|= (isNegated
? 0x200000 : 0);
8950 inst
.instruction
|= (newimm
& 0x800) << 15;
8951 inst
.instruction
|= (newimm
& 0x700) << 4;
8952 inst
.instruction
|= (newimm
& 0x0ff);
8955 /* The number can be loaded with a movw instruction. */
8956 else if ((v
& ~0xFFFF) == 0
8957 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8959 int imm
= v
& 0xFFFF;
8961 inst
.instruction
= 0xf2400000; /* MOVW. */
8962 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
8963 inst
.instruction
|= (imm
& 0xf000) << 4;
8964 inst
.instruction
|= (imm
& 0x0800) << 15;
8965 inst
.instruction
|= (imm
& 0x0700) << 4;
8966 inst
.instruction
|= (imm
& 0x00ff);
8967 /* In case this replacement is being done on Armv8-M
8968 Baseline we need to make sure to disable the
8969 instruction size check, as otherwise GAS will reject
8970 the use of this T32 instruction. */
8978 int value
= encode_arm_immediate (v
);
8982 /* This can be done with a mov instruction. */
8983 inst
.instruction
&= LITERAL_MASK
;
8984 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
8985 inst
.instruction
|= value
& 0xfff;
8989 value
= encode_arm_immediate (~ v
);
8992 /* This can be done with a mvn instruction. */
8993 inst
.instruction
&= LITERAL_MASK
;
8994 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
8995 inst
.instruction
|= value
& 0xfff;
8999 else if (t
== CONST_VEC
&& ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
9002 unsigned immbits
= 0;
9003 unsigned immlo
= inst
.operands
[1].imm
;
9004 unsigned immhi
= inst
.operands
[1].regisimm
9005 ? inst
.operands
[1].reg
9006 : inst
.relocs
[0].exp
.X_unsigned
9008 : ((bfd_int64_t
)((int) immlo
)) >> 32;
9009 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, false, &immbits
,
9010 &op
, 64, NT_invtype
);
9014 neon_invert_size (&immlo
, &immhi
, 64);
9016 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, false, &immbits
,
9017 &op
, 64, NT_invtype
);
9022 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
9028 /* Fill other bits in vmov encoding for both thumb and arm. */
9030 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
9032 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
9033 neon_write_immbits (immbits
);
9041 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
9042 if (inst
.operands
[i
].issingle
9043 && is_quarter_float (inst
.operands
[1].imm
)
9044 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
9046 inst
.operands
[1].imm
=
9047 neon_qfloat_bits (v
);
9048 do_vfp_nsyn_opcode ("fconsts");
9052 /* If our host does not support a 64-bit type then we cannot perform
9053 the following optimization. This mean that there will be a
9054 discrepancy between the output produced by an assembler built for
9055 a 32-bit-only host and the output produced from a 64-bit host, but
9056 this cannot be helped. */
9057 #if defined BFD_HOST_64_BIT
9058 else if (!inst
.operands
[1].issingle
9059 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
9061 if (is_double_a_single (v
)
9062 && is_quarter_float (double_to_single (v
)))
9064 inst
.operands
[1].imm
=
9065 neon_qfloat_bits (double_to_single (v
));
9066 do_vfp_nsyn_opcode ("fconstd");
9074 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
9075 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
9078 inst
.operands
[1].reg
= REG_PC
;
9079 inst
.operands
[1].isreg
= 1;
9080 inst
.operands
[1].preind
= 1;
9081 inst
.relocs
[0].pc_rel
= 1;
9082 inst
.relocs
[0].type
= (thumb_p
9083 ? BFD_RELOC_ARM_THUMB_OFFSET
9085 ? BFD_RELOC_ARM_HWLITERAL
9086 : BFD_RELOC_ARM_LITERAL
));
9090 /* inst.operands[i] was set up by parse_address. Encode it into an
9091 ARM-format instruction. Reject all forms which cannot be encoded
9092 into a coprocessor load/store instruction. If wb_ok is false,
9093 reject use of writeback; if unind_ok is false, reject use of
9094 unindexed addressing. If reloc_override is not 0, use it instead
9095 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
9096 (in which case it is preserved). */
9099 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
9101 if (!inst
.operands
[i
].isreg
)
9104 if (! inst
.operands
[0].isvec
)
9106 inst
.error
= _("invalid co-processor operand");
9109 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/false))
9113 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
9115 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
9117 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
9119 gas_assert (!inst
.operands
[i
].writeback
);
9122 inst
.error
= _("instruction does not support unindexed addressing");
9125 inst
.instruction
|= inst
.operands
[i
].imm
;
9126 inst
.instruction
|= INDEX_UP
;
9130 if (inst
.operands
[i
].preind
)
9131 inst
.instruction
|= PRE_INDEX
;
9133 if (inst
.operands
[i
].writeback
)
9135 if (inst
.operands
[i
].reg
== REG_PC
)
9137 inst
.error
= _("pc may not be used with write-back");
9142 inst
.error
= _("instruction does not support writeback");
9145 inst
.instruction
|= WRITE_BACK
;
9149 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) reloc_override
;
9150 else if ((inst
.relocs
[0].type
< BFD_RELOC_ARM_ALU_PC_G0_NC
9151 || inst
.relocs
[0].type
> BFD_RELOC_ARM_LDC_SB_G2
)
9152 && inst
.relocs
[0].type
!= BFD_RELOC_ARM_LDR_PC_G0
)
9155 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
9157 inst
.relocs
[0].type
= BFD_RELOC_ARM_CP_OFF_IMM
;
9160 /* Prefer + for zero encoded value. */
9161 if (!inst
.operands
[i
].negative
)
9162 inst
.instruction
|= INDEX_UP
;
9167 /* Functions for instruction encoding, sorted by sub-architecture.
9168 First some generics; their names are taken from the conventional
9169 bit positions for register arguments in ARM format instructions. */
9179 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9185 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9191 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9192 inst
.instruction
|= inst
.operands
[1].reg
;
9198 inst
.instruction
|= inst
.operands
[0].reg
;
9199 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9205 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9206 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9212 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9213 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9219 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9220 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9224 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
9226 if (ARM_CPU_IS_ANY (cpu_variant
))
9228 as_tsktsk ("%s", msg
);
9231 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
9243 unsigned Rn
= inst
.operands
[2].reg
;
9244 /* Enforce restrictions on SWP instruction. */
9245 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
9247 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
9248 _("Rn must not overlap other operands"));
9250 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
9252 if (!check_obsolete (&arm_ext_v8
,
9253 _("swp{b} use is obsoleted for ARMv8 and later"))
9254 && warn_on_deprecated
9255 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
9256 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
9259 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9260 inst
.instruction
|= inst
.operands
[1].reg
;
9261 inst
.instruction
|= Rn
<< 16;
9267 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9268 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9269 inst
.instruction
|= inst
.operands
[2].reg
;
9275 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
9276 constraint (((inst
.relocs
[0].exp
.X_op
!= O_constant
9277 && inst
.relocs
[0].exp
.X_op
!= O_illegal
)
9278 || inst
.relocs
[0].exp
.X_add_number
!= 0),
9280 inst
.instruction
|= inst
.operands
[0].reg
;
9281 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9282 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9288 inst
.instruction
|= inst
.operands
[0].imm
;
9294 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9295 encode_arm_cp_address (1, true, true, 0);
9298 /* ARM instructions, in alphabetical order by function name (except
9299 that wrapper functions appear immediately after the function they
9302 /* This is a pseudo-op of the form "adr rd, label" to be converted
9303 into a relative address of the form "add rd, pc, #label-.-8". */
9308 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
9310 /* Frag hacking will turn this into a sub instruction if the offset turns
9311 out to be negative. */
9312 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
9313 inst
.relocs
[0].pc_rel
= 1;
9314 inst
.relocs
[0].exp
.X_add_number
-= 8;
9316 if (support_interwork
9317 && inst
.relocs
[0].exp
.X_op
== O_symbol
9318 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
9319 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
9320 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
9321 inst
.relocs
[0].exp
.X_add_number
|= 1;
9324 /* This is a pseudo-op of the form "adrl rd, label" to be converted
9325 into a relative address of the form:
9326 add rd, pc, #low(label-.-8)"
9327 add rd, rd, #high(label-.-8)" */
9332 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
9334 /* Frag hacking will turn this into a sub instruction if the offset turns
9335 out to be negative. */
9336 inst
.relocs
[0].type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
9337 inst
.relocs
[0].pc_rel
= 1;
9338 inst
.size
= INSN_SIZE
* 2;
9339 inst
.relocs
[0].exp
.X_add_number
-= 8;
9341 if (support_interwork
9342 && inst
.relocs
[0].exp
.X_op
== O_symbol
9343 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
9344 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
9345 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
9346 inst
.relocs
[0].exp
.X_add_number
|= 1;
9352 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9353 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
9355 if (!inst
.operands
[1].present
)
9356 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
9357 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9358 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9359 encode_arm_shifter_operand (2);
9365 if (inst
.operands
[0].present
)
9366 inst
.instruction
|= inst
.operands
[0].imm
;
9368 inst
.instruction
|= 0xf;
9374 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
9375 constraint (msb
> 32, _("bit-field extends past end of register"));
9376 /* The instruction encoding stores the LSB and MSB,
9377 not the LSB and width. */
9378 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9379 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
9380 inst
.instruction
|= (msb
- 1) << 16;
9388 /* #0 in second position is alternative syntax for bfc, which is
9389 the same instruction but with REG_PC in the Rm field. */
9390 if (!inst
.operands
[1].isreg
)
9391 inst
.operands
[1].reg
= REG_PC
;
9393 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
9394 constraint (msb
> 32, _("bit-field extends past end of register"));
9395 /* The instruction encoding stores the LSB and MSB,
9396 not the LSB and width. */
9397 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9398 inst
.instruction
|= inst
.operands
[1].reg
;
9399 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
9400 inst
.instruction
|= (msb
- 1) << 16;
9406 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
9407 _("bit-field extends past end of register"));
9408 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9409 inst
.instruction
|= inst
.operands
[1].reg
;
9410 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
9411 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
9414 /* ARM V5 breakpoint instruction (argument parse)
9415 BKPT <16 bit unsigned immediate>
9416 Instruction is not conditional.
9417 The bit pattern given in insns[] has the COND_ALWAYS condition,
9418 and it is an error if the caller tried to override that. */
9423 /* Top 12 of 16 bits to bits 19:8. */
9424 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
9426 /* Bottom 4 of 16 bits to bits 3:0. */
9427 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
9431 encode_branch (int default_reloc
)
9433 if (inst
.operands
[0].hasreloc
)
9435 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
9436 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
9437 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
9438 inst
.relocs
[0].type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
9439 ? BFD_RELOC_ARM_PLT32
9440 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
9443 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) default_reloc
;
9444 inst
.relocs
[0].pc_rel
= 1;
9451 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
9452 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
9455 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
9462 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
9464 if (inst
.cond
== COND_ALWAYS
)
9465 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
9467 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
9471 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
9474 /* ARM V5 branch-link-exchange instruction (argument parse)
9475 BLX <target_addr> ie BLX(1)
9476 BLX{<condition>} <Rm> ie BLX(2)
9477 Unfortunately, there are two different opcodes for this mnemonic.
9478 So, the insns[].value is not used, and the code here zaps values
9479 into inst.instruction.
9480 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
9485 if (inst
.operands
[0].isreg
)
9487 /* Arg is a register; the opcode provided by insns[] is correct.
9488 It is not illegal to do "blx pc", just useless. */
9489 if (inst
.operands
[0].reg
== REG_PC
)
9490 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
9492 inst
.instruction
|= inst
.operands
[0].reg
;
9496 /* Arg is an address; this instruction cannot be executed
9497 conditionally, and the opcode must be adjusted.
9498 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
9499 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
9500 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
9501 inst
.instruction
= 0xfa000000;
9502 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
9511 if (inst
.operands
[0].reg
== REG_PC
)
9512 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
9514 inst
.instruction
|= inst
.operands
[0].reg
;
9515 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
9516 it is for ARMv4t or earlier. */
9517 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
9518 if (!ARM_FEATURE_ZERO (selected_object_arch
)
9519 && !ARM_CPU_HAS_FEATURE (selected_object_arch
, arm_ext_v5
))
9523 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
9528 inst
.relocs
[0].type
= BFD_RELOC_ARM_V4BX
;
9532 /* ARM v5TEJ. Jump to Jazelle code. */
9537 if (inst
.operands
[0].reg
== REG_PC
)
9538 as_tsktsk (_("use of r15 in bxj is not really useful"));
9540 inst
.instruction
|= inst
.operands
[0].reg
;
9543 /* Co-processor data operation:
9544 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
9545 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
9549 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9550 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
9551 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
9552 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9553 inst
.instruction
|= inst
.operands
[4].reg
;
9554 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
9560 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9561 encode_arm_shifter_operand (1);
9564 /* Transfer between coprocessor and ARM registers.
9565 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
9570 No special properties. */
9572 struct deprecated_coproc_regs_s
9579 arm_feature_set deprecated
;
9580 arm_feature_set obsoleted
;
9581 const char *dep_msg
;
9582 const char *obs_msg
;
9585 #define DEPR_ACCESS_V8 \
9586 N_("This coprocessor register access is deprecated in ARMv8")
9588 /* Table of all deprecated coprocessor registers. */
9589 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
9591 {15, 0, 7, 10, 5, /* CP15DMB. */
9592 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9593 DEPR_ACCESS_V8
, NULL
},
9594 {15, 0, 7, 10, 4, /* CP15DSB. */
9595 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9596 DEPR_ACCESS_V8
, NULL
},
9597 {15, 0, 7, 5, 4, /* CP15ISB. */
9598 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9599 DEPR_ACCESS_V8
, NULL
},
9600 {14, 6, 1, 0, 0, /* TEEHBR. */
9601 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9602 DEPR_ACCESS_V8
, NULL
},
9603 {14, 6, 0, 0, 0, /* TEECR. */
9604 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9605 DEPR_ACCESS_V8
, NULL
},
9608 #undef DEPR_ACCESS_V8
9610 static const size_t deprecated_coproc_reg_count
=
9611 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
9619 Rd
= inst
.operands
[2].reg
;
9622 if (inst
.instruction
== 0xee000010
9623 || inst
.instruction
== 0xfe000010)
9625 reject_bad_reg (Rd
);
9626 else if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9628 constraint (Rd
== REG_SP
, BAD_SP
);
9633 if (inst
.instruction
== 0xe000010)
9634 constraint (Rd
== REG_PC
, BAD_PC
);
9637 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
9639 const struct deprecated_coproc_regs_s
*r
=
9640 deprecated_coproc_regs
+ i
;
9642 if (inst
.operands
[0].reg
== r
->cp
9643 && inst
.operands
[1].imm
== r
->opc1
9644 && inst
.operands
[3].reg
== r
->crn
9645 && inst
.operands
[4].reg
== r
->crm
9646 && inst
.operands
[5].imm
== r
->opc2
)
9648 if (! ARM_CPU_IS_ANY (cpu_variant
)
9649 && warn_on_deprecated
9650 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
9651 as_tsktsk ("%s", r
->dep_msg
);
9655 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9656 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
9657 inst
.instruction
|= Rd
<< 12;
9658 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9659 inst
.instruction
|= inst
.operands
[4].reg
;
9660 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
9663 /* Transfer between coprocessor register and pair of ARM registers.
9664 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
9669 Two XScale instructions are special cases of these:
9671 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
9672 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
9674 Result unpredictable if Rd or Rn is R15. */
9681 Rd
= inst
.operands
[2].reg
;
9682 Rn
= inst
.operands
[3].reg
;
9686 reject_bad_reg (Rd
);
9687 reject_bad_reg (Rn
);
9691 constraint (Rd
== REG_PC
, BAD_PC
);
9692 constraint (Rn
== REG_PC
, BAD_PC
);
9695 /* Only check the MRRC{2} variants. */
9696 if ((inst
.instruction
& 0x0FF00000) == 0x0C500000)
9698 /* If Rd == Rn, error that the operation is
9699 unpredictable (example MRRC p3,#1,r1,r1,c4). */
9700 constraint (Rd
== Rn
, BAD_OVERLAP
);
9703 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9704 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
9705 inst
.instruction
|= Rd
<< 12;
9706 inst
.instruction
|= Rn
<< 16;
9707 inst
.instruction
|= inst
.operands
[4].reg
;
9713 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
9714 if (inst
.operands
[1].present
)
9716 inst
.instruction
|= CPSI_MMOD
;
9717 inst
.instruction
|= inst
.operands
[1].imm
;
9724 inst
.instruction
|= inst
.operands
[0].imm
;
9730 unsigned Rd
, Rn
, Rm
;
9732 Rd
= inst
.operands
[0].reg
;
9733 Rn
= (inst
.operands
[1].present
9734 ? inst
.operands
[1].reg
: Rd
);
9735 Rm
= inst
.operands
[2].reg
;
9737 constraint ((Rd
== REG_PC
), BAD_PC
);
9738 constraint ((Rn
== REG_PC
), BAD_PC
);
9739 constraint ((Rm
== REG_PC
), BAD_PC
);
9741 inst
.instruction
|= Rd
<< 16;
9742 inst
.instruction
|= Rn
<< 0;
9743 inst
.instruction
|= Rm
<< 8;
9749 /* There is no IT instruction in ARM mode. We
9750 process it to do the validation as if in
9751 thumb mode, just in case the code gets
9752 assembled for thumb using the unified syntax. */
9757 set_pred_insn_type (IT_INSN
);
9758 now_pred
.mask
= (inst
.instruction
& 0xf) | 0x10;
9759 now_pred
.cc
= inst
.operands
[0].imm
;
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */

static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* Reject empty lists, registers above r15, and lists with more than
     one bit set.  */
  return (i > 15 || range != (1 << i)) ? -1 : i;
}
9773 encode_ldmstm(int from_push_pop_mnem
)
9775 int base_reg
= inst
.operands
[0].reg
;
9776 int range
= inst
.operands
[1].imm
;
9779 inst
.instruction
|= base_reg
<< 16;
9780 inst
.instruction
|= range
;
9782 if (inst
.operands
[1].writeback
)
9783 inst
.instruction
|= LDM_TYPE_2_OR_3
;
9785 if (inst
.operands
[0].writeback
)
9787 inst
.instruction
|= WRITE_BACK
;
9788 /* Check for unpredictable uses of writeback. */
9789 if (inst
.instruction
& LOAD_BIT
)
9791 /* Not allowed in LDM type 2. */
9792 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
9793 && ((range
& (1 << REG_PC
)) == 0))
9794 as_warn (_("writeback of base register is UNPREDICTABLE"));
9795 /* Only allowed if base reg not in list for other types. */
9796 else if (range
& (1 << base_reg
))
9797 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
9801 /* Not allowed for type 2. */
9802 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
9803 as_warn (_("writeback of base register is UNPREDICTABLE"));
9804 /* Only allowed if base reg not in list, or first in list. */
9805 else if ((range
& (1 << base_reg
))
9806 && (range
& ((1 << base_reg
) - 1)))
9807 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
9811 /* If PUSH/POP has only one register, then use the A2 encoding. */
9812 one_reg
= only_one_reg_in_list (range
);
9813 if (from_push_pop_mnem
&& one_reg
>= 0)
9815 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
9817 if (is_push
&& one_reg
== 13 /* SP */)
9818 /* PR 22483: The A2 encoding cannot be used when
9819 pushing the stack pointer as this is UNPREDICTABLE. */
9822 inst
.instruction
&= A_COND_MASK
;
9823 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
9824 inst
.instruction
|= one_reg
<< 12;
9831 encode_ldmstm (/*from_push_pop_mnem=*/false);
9834 /* ARMv5TE load-consecutive (argument parse)
9843 constraint (inst
.operands
[0].reg
% 2 != 0,
9844 _("first transfer register must be even"));
9845 constraint (inst
.operands
[1].present
9846 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9847 _("can only transfer two consecutive registers"));
9848 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9849 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
9851 if (!inst
.operands
[1].present
)
9852 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9854 /* encode_arm_addr_mode_3 will diagnose overlap between the base
9855 register and the first register written; we have to diagnose
9856 overlap between the base and the second register written here. */
9858 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
9859 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
9860 as_warn (_("base register written back, and overlaps "
9861 "second transfer register"));
9863 if (!(inst
.instruction
& V4_STR_BIT
))
9865 /* For an index-register load, the index register must not overlap the
9866 destination (even if not write-back). */
9867 if (inst
.operands
[2].immisreg
9868 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
9869 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
9870 as_warn (_("index register overlaps transfer register"));
9872 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9873 encode_arm_addr_mode_3 (2, /*is_t=*/false);
9879 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
9880 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
9881 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
9882 || inst
.operands
[1].negative
9883 /* This can arise if the programmer has written
9885 or if they have mistakenly used a register name as the last
9888 It is very difficult to distinguish between these two cases
9889 because "rX" might actually be a label. ie the register
9890 name has been occluded by a symbol of the same name. So we
9891 just generate a general 'bad addressing mode' type error
9892 message and leave it up to the programmer to discover the
9893 true cause and fix their mistake. */
9894 || (inst
.operands
[1].reg
== REG_PC
),
9897 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9898 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9899 _("offset must be zero in ARM encoding"));
9901 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
9903 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9904 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9905 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
9911 constraint (inst
.operands
[0].reg
% 2 != 0,
9912 _("even register required"));
9913 constraint (inst
.operands
[1].present
9914 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9915 _("can only load two consecutive registers"));
9916 /* If op 1 were present and equal to PC, this function wouldn't
9917 have been called in the first place. */
9918 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9920 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9921 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9924 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9925 which is not a multiple of four is UNPREDICTABLE. */
9927 check_ldr_r15_aligned (void)
9929 constraint (!(inst
.operands
[1].immisreg
)
9930 && (inst
.operands
[0].reg
== REG_PC
9931 && inst
.operands
[1].reg
== REG_PC
9932 && (inst
.relocs
[0].exp
.X_add_number
& 0x3)),
9933 _("ldr to register 15 must be 4-byte aligned"));
9939 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9940 if (!inst
.operands
[1].isreg
)
9941 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/false))
9943 encode_arm_addr_mode_2 (1, /*is_t=*/false);
9944 check_ldr_r15_aligned ();
9950 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9952 if (inst
.operands
[1].preind
)
9954 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9955 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9956 _("this instruction requires a post-indexed address"));
9958 inst
.operands
[1].preind
= 0;
9959 inst
.operands
[1].postind
= 1;
9960 inst
.operands
[1].writeback
= 1;
9962 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9963 encode_arm_addr_mode_2 (1, /*is_t=*/true);
9966 /* Halfword and signed-byte load/store operations. */
9971 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9972 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9973 if (!inst
.operands
[1].isreg
)
9974 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/true))
9976 encode_arm_addr_mode_3 (1, /*is_t=*/false);
9982 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9984 if (inst
.operands
[1].preind
)
9986 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9987 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9988 _("this instruction requires a post-indexed address"));
9990 inst
.operands
[1].preind
= 0;
9991 inst
.operands
[1].postind
= 1;
9992 inst
.operands
[1].writeback
= 1;
9994 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9995 encode_arm_addr_mode_3 (1, /*is_t=*/true);
9998 /* Co-processor register load/store.
9999 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
10003 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10004 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10005 encode_arm_cp_address (2, true, true, 0);
10011 /* This restriction does not apply to mls (nor to mla in v6 or later). */
10012 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
10013 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
10014 && !(inst
.instruction
& 0x00400000))
10015 as_tsktsk (_("Rd and Rm should be different in mla"));
10017 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10018 inst
.instruction
|= inst
.operands
[1].reg
;
10019 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
10020 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
10026 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10027 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
10028 THUMB1_RELOC_ONLY
);
10029 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10030 encode_arm_shifter_operand (1);
10033 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
10040 top
= (inst
.instruction
& 0x00400000) != 0;
10041 constraint (top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
,
10042 _(":lower16: not allowed in this instruction"));
10043 constraint (!top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
,
10044 _(":upper16: not allowed in this instruction"));
10045 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10046 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
10048 imm
= inst
.relocs
[0].exp
.X_add_number
;
10049 /* The value is in two pieces: 0:11, 16:19. */
10050 inst
.instruction
|= (imm
& 0x00000fff);
10051 inst
.instruction
|= (imm
& 0x0000f000) << 4;
10056 do_vfp_nsyn_mrs (void)
10058 if (inst
.operands
[0].isvec
)
10060 if (inst
.operands
[1].reg
!= 1)
10061 first_error (_("operand 1 must be FPSCR"));
10062 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
10063 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
10064 do_vfp_nsyn_opcode ("fmstat");
10066 else if (inst
.operands
[1].isvec
)
10067 do_vfp_nsyn_opcode ("fmrx");
10075 do_vfp_nsyn_msr (void)
10077 if (inst
.operands
[0].isvec
)
10078 do_vfp_nsyn_opcode ("fmxr");
/* VMRS: move from a VFP/MVE special register (inst.operands[1].reg) to
   core register Rt (inst.operands[0].reg), with per-architecture checks.
   NOTE(review): this span is a garbled extraction with elided lines
   (case labels, break statements and some messages are missing); the
   code text below is preserved byte-for-byte.  */
10088 unsigned Rt
= inst
.operands
[0].reg
;
/* SP is not a valid destination in Thumb state.  */
10090 if (thumb_mode
&& Rt
== REG_SP
)
10092 inst
.error
= BAD_SP
;
/* Validate the requested special register against the selected CPU.  */
10096 switch (inst
.operands
[1].reg
)
10098 /* MVFR2 is only valid for Armv8-A. */
10100 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
10104 /* Check for new Armv8.1-M Mainline changes to <spec_reg>. */
10105 case 1: /* fpscr. */
10106 constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
10107 || ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1xd
)),
10111 case 14: /* fpcxt_ns. */
10112 case 15: /* fpcxt_s. */
10113 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8_1m_main
),
10114 _("selected processor does not support instruction"));
10117 case 2: /* fpscr_nzcvqc. */
10118 case 12: /* vpr. */
10120 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8_1m_main
)
10121 || (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
10122 && !ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1xd
)),
10123 _("selected processor does not support instruction"));
/* NOTE(review): for VMRS the special register is operands[1]; this test
   reads operands[0].reg -- confirm against upstream tc-arm.c.  */
10124 if (inst
.operands
[0].reg
!= 2
10125 && !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
10126 as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
10133 /* APSR_ sets isvec. All other refs to PC are illegal. */
10134 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
10136 inst
.error
= BAD_PC
;
10140 /* If we get through parsing the register name, we just insert the number
10141 generated into the instruction without further validation. */
10142 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
10143 inst
.instruction
|= (Rt
<< 12);
/* VMSR: move core register Rt (inst.operands[1].reg) into a VFP/MVE
   special register (inst.operands[0].reg), with per-architecture checks.
   NOTE(review): garbled extraction with elided lines (case labels and
   breaks missing); code text preserved byte-for-byte.  */
10149 unsigned Rt
= inst
.operands
[1].reg
;
10152 reject_bad_reg (Rt
);
10153 else if (Rt
== REG_PC
)
10155 inst
.error
= BAD_PC
;
/* Validate the destination special register against the selected CPU.  */
10159 switch (inst
.operands
[0].reg
)
10161 /* MVFR2 is only valid for Armv8-A. */
10163 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
10167 /* Check for new Armv8.1-M Mainline changes to <spec_reg>. */
10168 case 1: /* fpcr. */
10169 constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
10170 || ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1xd
)),
10174 case 14: /* fpcxt_ns. */
10175 case 15: /* fpcxt_s. */
10176 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8_1m_main
),
10177 _("selected processor does not support instruction"));
10180 case 2: /* fpscr_nzcvqc. */
10181 case 12: /* vpr. */
10183 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8_1m_main
)
10184 || (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
10185 && !ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1xd
)),
10186 _("selected processor does not support instruction"));
/* Warn when writing an MVE-only register (vpr/p0) without MVE.  */
10187 if (inst
.operands
[0].reg
!= 2
10188 && !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
10189 as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
10196 /* If we get through parsing the register name, we just insert the number
10197 generated into the instruction without further validation. */
10198 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
10199 inst
.instruction
|= (Rt
<< 12);
10207 if (do_vfp_nsyn_mrs () == SUCCESS
)
10210 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
10211 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10213 if (inst
.operands
[1].isreg
)
10215 br
= inst
.operands
[1].reg
;
10216 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf0000))
10217 as_bad (_("bad register for mrs"));
10221 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
10222 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
10224 _("'APSR', 'CPSR' or 'SPSR' expected"));
10225 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
10228 inst
.instruction
|= br
;
10231 /* Two possible forms:
10232 "{C|S}PSR_<field>, Rm",
10233 "{C|S}PSR_f, #expression". */
10238 if (do_vfp_nsyn_msr () == SUCCESS
)
10241 inst
.instruction
|= inst
.operands
[0].imm
;
10242 if (inst
.operands
[1].isreg
)
10243 inst
.instruction
|= inst
.operands
[1].reg
;
10246 inst
.instruction
|= INST_IMMEDIATE
;
10247 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
10248 inst
.relocs
[0].pc_rel
= 0;
10255 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
10257 if (!inst
.operands
[2].present
)
10258 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
10259 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10260 inst
.instruction
|= inst
.operands
[1].reg
;
10261 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
10263 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
10264 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
10265 as_tsktsk (_("Rd and Rm should be different in mul"));
10268 /* Long Multiply Parser
10269 UMULL RdLo, RdHi, Rm, Rs
10270 SMULL RdLo, RdHi, Rm, Rs
10271 UMLAL RdLo, RdHi, Rm, Rs
10272 SMLAL RdLo, RdHi, Rm, Rs. */
10277 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10278 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10279 inst
.instruction
|= inst
.operands
[2].reg
;
10280 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
10282 /* rdhi and rdlo must be different. */
10283 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
10284 as_tsktsk (_("rdhi and rdlo must be different"));
10286 /* rdhi, rdlo and rm must all be different before armv6. */
10287 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
10288 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
10289 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
10290 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
10296 if (inst
.operands
[0].present
10297 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
10299 /* Architectural NOP hints are CPSR sets with no bits selected. */
10300 inst
.instruction
&= 0xf0000000;
10301 inst
.instruction
|= 0x0320f000;
10302 if (inst
.operands
[0].present
)
10303 inst
.instruction
|= inst
.operands
[0].imm
;
10307 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
10308 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
10309 Condition defaults to COND_ALWAYS.
10310 Error if Rd, Rn or Rm are R15. */
10315 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10316 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10317 inst
.instruction
|= inst
.operands
[2].reg
;
10318 if (inst
.operands
[3].present
)
10319 encode_arm_shift (3);
10322 /* ARM V6 PKHTB (Argument Parse). */
10327 if (!inst
.operands
[3].present
)
10329 /* If the shift specifier is omitted, turn the instruction
10330 into pkhbt rd, rm, rn. */
10331 inst
.instruction
&= 0xfff00010;
10332 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10333 inst
.instruction
|= inst
.operands
[1].reg
;
10334 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10338 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10339 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10340 inst
.instruction
|= inst
.operands
[2].reg
;
10341 encode_arm_shift (3);
10345 /* ARMv5TE: Preload-Cache
10346 MP Extensions: Preload for write
10350 Syntactically, like LDR with B=1, W=0, L=1. */
10355 constraint (!inst
.operands
[0].isreg
,
10356 _("'[' expected after PLD mnemonic"));
10357 constraint (inst
.operands
[0].postind
,
10358 _("post-indexed expression used in preload instruction"));
10359 constraint (inst
.operands
[0].writeback
,
10360 _("writeback used in preload instruction"));
10361 constraint (!inst
.operands
[0].preind
,
10362 _("unindexed addressing used in preload instruction"));
10363 encode_arm_addr_mode_2 (0, /*is_t=*/false);
10366 /* ARMv7: PLI <addr_mode> */
10370 constraint (!inst
.operands
[0].isreg
,
10371 _("'[' expected after PLI mnemonic"));
10372 constraint (inst
.operands
[0].postind
,
10373 _("post-indexed expression used in preload instruction"));
10374 constraint (inst
.operands
[0].writeback
,
10375 _("writeback used in preload instruction"));
10376 constraint (!inst
.operands
[0].preind
,
10377 _("unindexed addressing used in preload instruction"));
10378 encode_arm_addr_mode_2 (0, /*is_t=*/false);
10379 inst
.instruction
&= ~PRE_INDEX
;
10385 constraint (inst
.operands
[0].writeback
,
10386 _("push/pop do not support {reglist}^"));
10387 inst
.operands
[1] = inst
.operands
[0];
10388 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
10389 inst
.operands
[0].isreg
= 1;
10390 inst
.operands
[0].writeback
= 1;
10391 inst
.operands
[0].reg
= REG_SP
;
10392 encode_ldmstm (/*from_push_pop_mnem=*/true);
10395 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
10396 word at the specified address and the following word
10398 Unconditionally executed.
10399 Error if Rn is R15. */
10404 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10405 if (inst
.operands
[0].writeback
)
10406 inst
.instruction
|= WRITE_BACK
;
10409 /* ARM V6 ssat (argument parse). */
10414 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10415 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
10416 inst
.instruction
|= inst
.operands
[2].reg
;
10418 if (inst
.operands
[3].present
)
10419 encode_arm_shift (3);
10422 /* ARM V6 usat (argument parse). */
10427 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10428 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
10429 inst
.instruction
|= inst
.operands
[2].reg
;
10431 if (inst
.operands
[3].present
)
10432 encode_arm_shift (3);
10435 /* ARM V6 ssat16 (argument parse). */
10440 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10441 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
10442 inst
.instruction
|= inst
.operands
[2].reg
;
10448 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10449 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
10450 inst
.instruction
|= inst
.operands
[2].reg
;
10453 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
10454 preserving the other bits.
10456 setend <endian_specifier>, where <endian_specifier> is either
10462 if (warn_on_deprecated
10463 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
10464 as_tsktsk (_("setend use is deprecated for ARMv8"));
10466 if (inst
.operands
[0].imm
)
10467 inst
.instruction
|= 0x200;
10473 unsigned int Rm
= (inst
.operands
[1].present
10474 ? inst
.operands
[1].reg
10475 : inst
.operands
[0].reg
);
10477 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10478 inst
.instruction
|= Rm
;
10479 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
10481 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
10482 inst
.instruction
|= SHIFT_BY_REG
;
10483 /* PR 12854: Error on extraneous shifts. */
10484 constraint (inst
.operands
[2].shifted
,
10485 _("extraneous shift as part of operand to shift insn"));
10488 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
10494 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
10495 constraint (value
> 0xf, _("immediate too large (bigger than 0xF)"));
10497 inst
.relocs
[0].type
= BFD_RELOC_ARM_SMC
;
10498 inst
.relocs
[0].pc_rel
= 0;
10504 inst
.relocs
[0].type
= BFD_RELOC_ARM_HVC
;
10505 inst
.relocs
[0].pc_rel
= 0;
10511 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
10512 inst
.relocs
[0].pc_rel
= 0;
10518 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
10519 _("selected processor does not support SETPAN instruction"));
10521 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
10527 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
10528 _("selected processor does not support SETPAN instruction"));
10530 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
10533 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
10534 SMLAxy{cond} Rd,Rm,Rs,Rn
10535 SMLAWy{cond} Rd,Rm,Rs,Rn
10536 Error if any register is R15. */
10541 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10542 inst
.instruction
|= inst
.operands
[1].reg
;
10543 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
10544 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
10547 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
10548 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
10549 Error if any register is R15.
10550 Warning if Rdlo == Rdhi. */
10555 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10556 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10557 inst
.instruction
|= inst
.operands
[2].reg
;
10558 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
10560 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
10561 as_tsktsk (_("rdhi and rdlo must be different"));
10564 /* ARM V5E (El Segundo) signed-multiply (argument parse)
10565 SMULxy{cond} Rd,Rm,Rs
10566 Error if any register is R15. */
10571 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10572 inst
.instruction
|= inst
.operands
[1].reg
;
10573 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
10576 /* ARM V6 srs (argument parse). The variable fields in the encoding are
10577 the same for both ARM and Thumb-2. */
10584 if (inst
.operands
[0].present
)
10586 reg
= inst
.operands
[0].reg
;
10587 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
10592 inst
.instruction
|= reg
<< 16;
10593 inst
.instruction
|= inst
.operands
[1].imm
;
10594 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
10595 inst
.instruction
|= WRITE_BACK
;
10598 /* ARM V6 strex (argument parse). */
10603 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
10604 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
10605 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
10606 || inst
.operands
[2].negative
10607 /* See comment in do_ldrex(). */
10608 || (inst
.operands
[2].reg
== REG_PC
),
10611 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10612 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10614 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
10615 || inst
.relocs
[0].exp
.X_add_number
!= 0,
10616 _("offset must be zero in ARM encoding"));
10618 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10619 inst
.instruction
|= inst
.operands
[1].reg
;
10620 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10621 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
10625 do_t_strexbh (void)
10627 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
10628 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
10629 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
10630 || inst
.operands
[2].negative
,
10633 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10634 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10642 constraint (inst
.operands
[1].reg
% 2 != 0,
10643 _("even register required"));
10644 constraint (inst
.operands
[2].present
10645 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
10646 _("can only store two consecutive registers"));
10647 /* If op 2 were present and equal to PC, this function wouldn't
10648 have been called in the first place. */
10649 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
10651 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10652 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
10653 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
10656 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10657 inst
.instruction
|= inst
.operands
[1].reg
;
10658 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
10665 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10666 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10674 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10675 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10680 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
10681 extends it to 32-bits, and adds the result to a value in another
10682 register. You can specify a rotation by 0, 8, 16, or 24 bits
10683 before extracting the 16-bit value.
10684 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
10685 Condition defaults to COND_ALWAYS.
10686 Error if any register uses R15. */
10691 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10692 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10693 inst
.instruction
|= inst
.operands
[2].reg
;
10694 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
10699 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
10700 Condition defaults to COND_ALWAYS.
10701 Error if any register uses R15. */
10706 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10707 inst
.instruction
|= inst
.operands
[1].reg
;
10708 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
10711 /* VFP instructions. In a logical order: SP variant first, monad
10712 before dyad, arithmetic then move then load/store. */
10715 do_vfp_sp_monadic (void)
10717 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1xd
)
10718 && !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
),
10721 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10722 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
10726 do_vfp_sp_dyadic (void)
10728 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10729 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
10730 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
10734 do_vfp_sp_compare_z (void)
10736 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10740 do_vfp_dp_sp_cvt (void)
10742 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10743 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
10747 do_vfp_sp_dp_cvt (void)
10749 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10750 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
10754 do_vfp_reg_from_sp (void)
10756 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1xd
)
10757 && !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
),
10760 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10761 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
10765 do_vfp_reg2_from_sp2 (void)
10767 constraint (inst
.operands
[2].imm
!= 2,
10768 _("only two consecutive VFP SP registers allowed here"));
10769 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10770 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10771 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
10775 do_vfp_sp_from_reg (void)
10777 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1xd
)
10778 && !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
),
10781 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
10782 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10786 do_vfp_sp2_from_reg2 (void)
10788 constraint (inst
.operands
[0].imm
!= 2,
10789 _("only two consecutive VFP SP registers allowed here"));
10790 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
10791 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10792 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10796 do_vfp_sp_ldst (void)
10798 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10799 encode_arm_cp_address (1, false, true, 0);
10803 do_vfp_dp_ldst (void)
10805 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10806 encode_arm_cp_address (1, false, true, 0);
10811 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
10813 if (inst
.operands
[0].writeback
)
10814 inst
.instruction
|= WRITE_BACK
;
10816 constraint (ldstm_type
!= VFP_LDSTMIA
,
10817 _("this addressing mode requires base-register writeback"));
10818 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10819 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
10820 inst
.instruction
|= inst
.operands
[1].imm
;
10824 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
10828 if (inst
.operands
[0].writeback
)
10829 inst
.instruction
|= WRITE_BACK
;
10831 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
10832 _("this addressing mode requires base-register writeback"));
10834 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10835 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10837 count
= inst
.operands
[1].imm
<< 1;
10838 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
10841 inst
.instruction
|= count
;
10845 do_vfp_sp_ldstmia (void)
10847 vfp_sp_ldstm (VFP_LDSTMIA
);
10851 do_vfp_sp_ldstmdb (void)
10853 vfp_sp_ldstm (VFP_LDSTMDB
);
10857 do_vfp_dp_ldstmia (void)
10859 vfp_dp_ldstm (VFP_LDSTMIA
);
10863 do_vfp_dp_ldstmdb (void)
10865 vfp_dp_ldstm (VFP_LDSTMDB
);
10869 do_vfp_xp_ldstmia (void)
10871 vfp_dp_ldstm (VFP_LDSTMIAX
);
10875 do_vfp_xp_ldstmdb (void)
10877 vfp_dp_ldstm (VFP_LDSTMDBX
);
10881 do_vfp_dp_rd_rm (void)
10883 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
)
10884 && !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
),
10887 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10888 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
10892 do_vfp_dp_rn_rd (void)
10894 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
10895 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10899 do_vfp_dp_rd_rn (void)
10901 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10902 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
10906 do_vfp_dp_rd_rn_rm (void)
10908 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
)
10909 && !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
),
10912 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10913 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
10914 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
10918 do_vfp_dp_rd (void)
10920 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10924 do_vfp_dp_rm_rd_rn (void)
10926 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
)
10927 && !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
),
10930 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
10931 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10932 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
10935 /* VFPv3 instructions. */
10937 do_vfp_sp_const (void)
10939 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10940 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
10941 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
10945 do_vfp_dp_const (void)
10947 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10948 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
10949 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
10953 vfp_conv (int srcsize
)
10955 int immbits
= srcsize
- inst
.operands
[1].imm
;
10957 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
10959 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
10960 i.e. immbits must be in range 0 - 16. */
10961 inst
.error
= _("immediate value out of range, expected range [0, 16]");
10964 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
10966 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
10967 i.e. immbits must be in range 0 - 31. */
10968 inst
.error
= _("immediate value out of range, expected range [1, 32]");
10972 inst
.instruction
|= (immbits
& 1) << 5;
10973 inst
.instruction
|= (immbits
>> 1);
10977 do_vfp_sp_conv_16 (void)
10979 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10984 do_vfp_dp_conv_16 (void)
10986 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10991 do_vfp_sp_conv_32 (void)
10993 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10998 do_vfp_dp_conv_32 (void)
11000 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
11004 /* FPA instructions. Also in a logical order. */
11009 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
11010 inst
.instruction
|= inst
.operands
[1].reg
;
11014 do_fpa_ldmstm (void)
11016 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11017 switch (inst
.operands
[1].imm
)
11019 case 1: inst
.instruction
|= CP_T_X
; break;
11020 case 2: inst
.instruction
|= CP_T_Y
; break;
11021 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
11026 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
11028 /* The instruction specified "ea" or "fd", so we can only accept
11029 [Rn]{!}. The instruction does not really support stacking or
11030 unstacking, so we have to emulate these by setting appropriate
11031 bits and offsets. */
11032 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
11033 || inst
.relocs
[0].exp
.X_add_number
!= 0,
11034 _("this instruction does not support indexing"));
11036 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
11037 inst
.relocs
[0].exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
11039 if (!(inst
.instruction
& INDEX_UP
))
11040 inst
.relocs
[0].exp
.X_add_number
= -inst
.relocs
[0].exp
.X_add_number
;
11042 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
11044 inst
.operands
[2].preind
= 0;
11045 inst
.operands
[2].postind
= 1;
11049 encode_arm_cp_address (2, true, true, 0);
11052 /* iWMMXt instructions: strictly in alphabetical order. */
11055 do_iwmmxt_tandorc (void)
11057 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
11061 do_iwmmxt_textrc (void)
11063 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11064 inst
.instruction
|= inst
.operands
[1].imm
;
11068 do_iwmmxt_textrm (void)
11070 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11071 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11072 inst
.instruction
|= inst
.operands
[2].imm
;
11076 do_iwmmxt_tinsr (void)
11078 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
11079 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
11080 inst
.instruction
|= inst
.operands
[2].imm
;
11084 do_iwmmxt_tmia (void)
11086 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
11087 inst
.instruction
|= inst
.operands
[1].reg
;
11088 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
11092 do_iwmmxt_waligni (void)
11094 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11095 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11096 inst
.instruction
|= inst
.operands
[2].reg
;
11097 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
11101 do_iwmmxt_wmerge (void)
11103 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11104 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11105 inst
.instruction
|= inst
.operands
[2].reg
;
11106 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
11110 do_iwmmxt_wmov (void)
11112 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
11113 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11114 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11115 inst
.instruction
|= inst
.operands
[1].reg
;
11119 do_iwmmxt_wldstbh (void)
11122 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11124 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
11126 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
11127 encode_arm_cp_address (1, true, false, reloc
);
11131 do_iwmmxt_wldstw (void)
11133 /* RIWR_RIWC clears .isreg for a control register. */
11134 if (!inst
.operands
[0].isreg
)
11136 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
11137 inst
.instruction
|= 0xf0000000;
11140 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11141 encode_arm_cp_address (1, true, true, 0);
11145 do_iwmmxt_wldstd (void)
11147 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11148 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
11149 && inst
.operands
[1].immisreg
)
11151 inst
.instruction
&= ~0x1a000ff;
11152 inst
.instruction
|= (0xfU
<< 28);
11153 if (inst
.operands
[1].preind
)
11154 inst
.instruction
|= PRE_INDEX
;
11155 if (!inst
.operands
[1].negative
)
11156 inst
.instruction
|= INDEX_UP
;
11157 if (inst
.operands
[1].writeback
)
11158 inst
.instruction
|= WRITE_BACK
;
11159 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11160 inst
.instruction
|= inst
.relocs
[0].exp
.X_add_number
<< 4;
11161 inst
.instruction
|= inst
.operands
[1].imm
;
11164 encode_arm_cp_address (1, true, false, 0);
11168 do_iwmmxt_wshufh (void)
11170 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11171 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11172 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
11173 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
11177 do_iwmmxt_wzero (void)
11179 /* WZERO reg is an alias for WANDN reg, reg, reg. */
11180 inst
.instruction
|= inst
.operands
[0].reg
;
11181 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11182 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
11186 do_iwmmxt_wrwrwr_or_imm5 (void)
11188 if (inst
.operands
[2].isreg
)
11191 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
11192 _("immediate operand requires iWMMXt2"));
11194 if (inst
.operands
[2].imm
== 0)
11196 switch ((inst
.instruction
>> 20) & 0xf)
11202 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
11203 inst
.operands
[2].imm
= 16;
11204 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
11210 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
11211 inst
.operands
[2].imm
= 32;
11212 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
11219 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
11221 wrn
= (inst
.instruction
>> 16) & 0xf;
11222 inst
.instruction
&= 0xff0fff0f;
11223 inst
.instruction
|= wrn
;
11224 /* Bail out here; the instruction is now assembled. */
11229 /* Map 32 -> 0, etc. */
11230 inst
.operands
[2].imm
&= 0x1f;
11231 inst
.instruction
|= (0xfU
<< 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
11235 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
11236 operations first, then control, shift, and load/store. */
11238 /* Insns like "foo X,Y,Z". */
11241 do_mav_triple (void)
11243 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
11244 inst
.instruction
|= inst
.operands
[1].reg
;
11245 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
11248 /* Insns like "foo W,X,Y,Z".
11249 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
11254 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
11255 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
11256 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11257 inst
.instruction
|= inst
.operands
[3].reg
;
11260 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
11262 do_mav_dspsc (void)
11264 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
11267 /* Maverick shift immediate instructions.
11268 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
11269 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
11272 do_mav_shift (void)
11274 int imm
= inst
.operands
[2].imm
;
11276 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11277 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11279 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
11280 Bits 5-7 of the insn should have bits 4-6 of the immediate.
11281 Bit 4 should be 0. */
11282 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
11284 inst
.instruction
|= imm
;
11287 /* XScale instructions. Also sorted arithmetic before move. */
11289 /* Xscale multiply-accumulate (argument parse)
11292 MIAxycc acc0,Rm,Rs. */
11297 inst
.instruction
|= inst
.operands
[1].reg
;
11298 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
11301 /* Xscale move-accumulator-register (argument parse)
11303 MARcc acc0,RdLo,RdHi. */
11308 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
11309 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11312 /* Xscale move-register-accumulator (argument parse)
11314 MRAcc RdLo,RdHi,acc0. */
11319 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
11320 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11321 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11324 /* Encoding functions relevant only to Thumb. */
11326 /* inst.operands[i] is a shifted-register operand; encode
11327 it into inst.instruction in the format used by Thumb32. */
11330 encode_thumb32_shifted_operand (int i
)
11332 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
11333 unsigned int shift
= inst
.operands
[i
].shift_kind
;
11335 constraint (inst
.operands
[i
].immisreg
,
11336 _("shift by register not allowed in thumb mode"));
11337 inst
.instruction
|= inst
.operands
[i
].reg
;
11338 if (shift
== SHIFT_RRX
)
11339 inst
.instruction
|= SHIFT_ROR
<< 4;
11342 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
11343 _("expression too complex"));
11345 constraint (value
> 32
11346 || (value
== 32 && (shift
== SHIFT_LSL
11347 || shift
== SHIFT_ROR
)),
11348 _("shift expression is too large"));
11352 else if (value
== 32)
11355 inst
.instruction
|= shift
<< 4;
11356 inst
.instruction
|= (value
& 0x1c) << 10;
11357 inst
.instruction
|= (value
& 0x03) << 6;
11362 /* inst.operands[i] was set up by parse_address. Encode it into a
11363 Thumb32 format load or store instruction. Reject forms that cannot
11364 be used with such instructions. If is_t is true, reject forms that
11365 cannot be used with a T instruction; if is_d is true, reject forms
11366 that cannot be used with a D instruction. If it is a store insn,
11367 reject PC in Rn. */
11370 encode_thumb32_addr_mode (int i
, bool is_t
, bool is_d
)
11372 const bool is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
11374 constraint (!inst
.operands
[i
].isreg
,
11375 _("Instruction does not support =N addresses"));
11377 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
11378 if (inst
.operands
[i
].immisreg
)
11380 constraint (is_pc
, BAD_PC_ADDRESSING
);
11381 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
11382 constraint (inst
.operands
[i
].negative
,
11383 _("Thumb does not support negative register indexing"));
11384 constraint (inst
.operands
[i
].postind
,
11385 _("Thumb does not support register post-indexing"));
11386 constraint (inst
.operands
[i
].writeback
,
11387 _("Thumb does not support register indexing with writeback"));
11388 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
11389 _("Thumb supports only LSL in shifted register indexing"));
11391 inst
.instruction
|= inst
.operands
[i
].imm
;
11392 if (inst
.operands
[i
].shifted
)
11394 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
11395 _("expression too complex"));
11396 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
11397 || inst
.relocs
[0].exp
.X_add_number
> 3,
11398 _("shift out of range"));
11399 inst
.instruction
|= inst
.relocs
[0].exp
.X_add_number
<< 4;
11401 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
11403 else if (inst
.operands
[i
].preind
)
11405 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
11406 constraint (is_t
&& inst
.operands
[i
].writeback
,
11407 _("cannot use writeback with this instruction"));
11408 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
11409 BAD_PC_ADDRESSING
);
11413 inst
.instruction
|= 0x01000000;
11414 if (inst
.operands
[i
].writeback
)
11415 inst
.instruction
|= 0x00200000;
11419 inst
.instruction
|= 0x00000c00;
11420 if (inst
.operands
[i
].writeback
)
11421 inst
.instruction
|= 0x00000100;
11423 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
11425 else if (inst
.operands
[i
].postind
)
11427 gas_assert (inst
.operands
[i
].writeback
);
11428 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
11429 constraint (is_t
, _("cannot use post-indexing with this instruction"));
11432 inst
.instruction
|= 0x00200000;
11434 inst
.instruction
|= 0x00000900;
11435 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
11437 else /* unindexed - only for coprocessor */
11438 inst
.error
= _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in 16- and/or 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_aut,   0000, f3af802d),			\
  X(_autg,  0000, fb500f00),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_bxaut, 0000, fb500f10),			\
  X(_cinc,  0000, ea509000),			\
  X(_cinv,  0000, ea50a000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cneg,  0000, ea50b000),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_csel,  0000, ea508000),			\
  X(_cset,  0000, ea5f900f),			\
  X(_csetm, 0000, ea5fa00f),			\
  X(_csinc, 0000, ea509000),			\
  X(_csinv, 0000, ea50a000),			\
  X(_csneg, 0000, ea50b000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_dlstp, 0000, f000e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_lctp,  0000, f00fe001),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_letp,  0000, f01fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pac,   0000, f3af801d),			\
  X(_pacbti,0000, f3af800d),			\
  X(_pacg,  0000, fb60f000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_wlstp, 0000, f000c001),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)
/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* 16-bit opcodes, indexed by T_MNEM value minus (T16_32_OFFSET + 1).  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* 32-bit opcodes, same indexing.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
11574 /* Thumb instruction encoders, in alphabetical order. */
11576 /* ADDW or SUBW. */
11579 do_t_add_sub_w (void)
11583 Rd
= inst
.operands
[0].reg
;
11584 Rn
= inst
.operands
[1].reg
;
11586 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
11587 is the SP-{plus,minus}-immediate form of the instruction. */
11589 constraint (Rd
== REG_PC
, BAD_PC
);
11591 reject_bad_reg (Rd
);
11593 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
11594 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
11597 /* Parse an add or subtract instruction. We get here with inst.instruction
11598 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
11601 do_t_add_sub (void)
11605 Rd
= inst
.operands
[0].reg
;
11606 Rs
= (inst
.operands
[1].present
11607 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11608 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11611 set_pred_insn_type_last ();
11613 if (unified_syntax
)
11619 flags
= (inst
.instruction
== T_MNEM_adds
11620 || inst
.instruction
== T_MNEM_subs
);
11622 narrow
= !in_pred_block ();
11624 narrow
= in_pred_block ();
11625 if (!inst
.operands
[2].isreg
)
11629 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
11630 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
11632 add
= (inst
.instruction
== T_MNEM_add
11633 || inst
.instruction
== T_MNEM_adds
);
11635 if (inst
.size_req
!= 4)
11637 /* Attempt to use a narrow opcode, with relaxation if
11639 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
11640 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
11641 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
11642 opcode
= T_MNEM_add_sp
;
11643 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
11644 opcode
= T_MNEM_add_pc
;
11645 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
11648 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
11650 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
11654 inst
.instruction
= THUMB_OP16(opcode
);
11655 inst
.instruction
|= (Rd
<< 4) | Rs
;
11656 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11657 || (inst
.relocs
[0].type
11658 > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
))
11660 if (inst
.size_req
== 2)
11661 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11663 inst
.relax
= opcode
;
11667 constraint (inst
.size_req
== 2, _("cannot honor width suffix"));
11669 if (inst
.size_req
== 4
11670 || (inst
.size_req
!= 2 && !opcode
))
11672 constraint ((inst
.relocs
[0].type
11673 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
11674 && (inst
.relocs
[0].type
11675 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
11676 THUMB1_RELOC_ONLY
);
11679 constraint (add
, BAD_PC
);
11680 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
11681 _("only SUBS PC, LR, #const allowed"));
11682 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
11683 _("expression too complex"));
11684 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
11685 || inst
.relocs
[0].exp
.X_add_number
> 0xff,
11686 _("immediate value out of range"));
11687 inst
.instruction
= T2_SUBS_PC_LR
11688 | inst
.relocs
[0].exp
.X_add_number
;
11689 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
11692 else if (Rs
== REG_PC
)
11694 /* Always use addw/subw. */
11695 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
11696 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
11700 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11701 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
11704 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11706 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_IMM
;
11708 inst
.instruction
|= Rd
<< 8;
11709 inst
.instruction
|= Rs
<< 16;
11714 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
11715 unsigned int shift
= inst
.operands
[2].shift_kind
;
11717 Rn
= inst
.operands
[2].reg
;
11718 /* See if we can do this with a 16-bit instruction. */
11719 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
11721 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
11726 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
11727 || inst
.instruction
== T_MNEM_add
)
11729 : T_OPCODE_SUB_R3
);
11730 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
11734 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
11736 /* Thumb-1 cores (except v6-M) require at least one high
11737 register in a narrow non flag setting add. */
11738 if (Rd
> 7 || Rn
> 7
11739 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
11740 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
11747 inst
.instruction
= T_OPCODE_ADD_HI
;
11748 inst
.instruction
|= (Rd
& 8) << 4;
11749 inst
.instruction
|= (Rd
& 7);
11750 inst
.instruction
|= Rn
<< 3;
11756 constraint (Rd
== REG_PC
, BAD_PC
);
11757 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
11758 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
11759 constraint (Rs
== REG_PC
, BAD_PC
);
11760 reject_bad_reg (Rn
);
11762 /* If we get here, it can't be done in 16 bits. */
11763 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
11764 _("shift must be constant"));
11765 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11766 inst
.instruction
|= Rd
<< 8;
11767 inst
.instruction
|= Rs
<< 16;
11768 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
11769 _("shift value over 3 not allowed in thumb mode"));
11770 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
11771 _("only LSL shift allowed in thumb mode"));
11772 encode_thumb32_shifted_operand (2);
11777 constraint (inst
.instruction
== T_MNEM_adds
11778 || inst
.instruction
== T_MNEM_subs
,
11781 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
11783 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
11784 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
11787 inst
.instruction
= (inst
.instruction
== T_MNEM_add
11788 ? 0x0000 : 0x8000);
11789 inst
.instruction
|= (Rd
<< 4) | Rs
;
11790 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11794 Rn
= inst
.operands
[2].reg
;
11795 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
11797 /* We now have Rd, Rs, and Rn set to registers. */
11798 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
11800 /* Can't do this for SUB. */
11801 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
11802 inst
.instruction
= T_OPCODE_ADD_HI
;
11803 inst
.instruction
|= (Rd
& 8) << 4;
11804 inst
.instruction
|= (Rd
& 7);
11806 inst
.instruction
|= Rn
<< 3;
11808 inst
.instruction
|= Rs
<< 3;
11810 constraint (1, _("dest must overlap one source register"));
11814 inst
.instruction
= (inst
.instruction
== T_MNEM_add
11815 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
11816 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
11826 Rd
= inst
.operands
[0].reg
;
11827 reject_bad_reg (Rd
);
11829 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
11831 /* Defer to section relaxation. */
11832 inst
.relax
= inst
.instruction
;
11833 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11834 inst
.instruction
|= Rd
<< 4;
11836 else if (unified_syntax
&& inst
.size_req
!= 2)
11838 /* Generate a 32-bit opcode. */
11839 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11840 inst
.instruction
|= Rd
<< 8;
11841 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_PC12
;
11842 inst
.relocs
[0].pc_rel
= 1;
11846 /* Generate a 16-bit opcode. */
11847 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11848 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11849 inst
.relocs
[0].exp
.X_add_number
-= 4; /* PC relative adjust. */
11850 inst
.relocs
[0].pc_rel
= 1;
11851 inst
.instruction
|= Rd
<< 4;
11854 if (inst
.relocs
[0].exp
.X_op
== O_symbol
11855 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11856 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11857 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11858 inst
.relocs
[0].exp
.X_add_number
+= 1;
11861 /* Arithmetic instructions for which there is just one 16-bit
11862 instruction encoding, and it allows only two low registers.
11863 For maximal compatibility with ARM syntax, we allow three register
11864 operands even when Thumb-32 instructions are not available, as long
11865 as the first two are identical. For instance, both "sbc r0,r1" and
11866 "sbc r0,r0,r1" are allowed. */
11872 Rd
= inst
.operands
[0].reg
;
11873 Rs
= (inst
.operands
[1].present
11874 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11875 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11876 Rn
= inst
.operands
[2].reg
;
11878 reject_bad_reg (Rd
);
11879 reject_bad_reg (Rs
);
11880 if (inst
.operands
[2].isreg
)
11881 reject_bad_reg (Rn
);
11883 if (unified_syntax
)
11885 if (!inst
.operands
[2].isreg
)
11887 /* For an immediate, we always generate a 32-bit opcode;
11888 section relaxation will shrink it later if possible. */
11889 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11890 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11891 inst
.instruction
|= Rd
<< 8;
11892 inst
.instruction
|= Rs
<< 16;
11893 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11899 /* See if we can do this with a 16-bit instruction. */
11900 if (THUMB_SETS_FLAGS (inst
.instruction
))
11901 narrow
= !in_pred_block ();
11903 narrow
= in_pred_block ();
11905 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11907 if (inst
.operands
[2].shifted
)
11909 if (inst
.size_req
== 4)
11915 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11916 inst
.instruction
|= Rd
;
11917 inst
.instruction
|= Rn
<< 3;
11921 /* If we get here, it can't be done in 16 bits. */
11922 constraint (inst
.operands
[2].shifted
11923 && inst
.operands
[2].immisreg
,
11924 _("shift must be constant"));
11925 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11926 inst
.instruction
|= Rd
<< 8;
11927 inst
.instruction
|= Rs
<< 16;
11928 encode_thumb32_shifted_operand (2);
11933 /* On its face this is a lie - the instruction does set the
11934 flags. However, the only supported mnemonic in this mode
11935 says it doesn't. */
11936 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11938 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11939 _("unshifted register required"));
11940 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11941 constraint (Rd
!= Rs
,
11942 _("dest and source1 must be the same register"));
11944 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11945 inst
.instruction
|= Rd
;
11946 inst
.instruction
|= Rn
<< 3;
11950 /* Similarly, but for instructions where the arithmetic operation is
11951 commutative, so we can allow either of them to be different from
11952 the destination operand in a 16-bit instruction. For instance, all
11953 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11960 Rd
= inst
.operands
[0].reg
;
11961 Rs
= (inst
.operands
[1].present
11962 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11963 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11964 Rn
= inst
.operands
[2].reg
;
11966 reject_bad_reg (Rd
);
11967 reject_bad_reg (Rs
);
11968 if (inst
.operands
[2].isreg
)
11969 reject_bad_reg (Rn
);
11971 if (unified_syntax
)
11973 if (!inst
.operands
[2].isreg
)
11975 /* For an immediate, we always generate a 32-bit opcode;
11976 section relaxation will shrink it later if possible. */
11977 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11978 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11979 inst
.instruction
|= Rd
<< 8;
11980 inst
.instruction
|= Rs
<< 16;
11981 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11987 /* See if we can do this with a 16-bit instruction. */
11988 if (THUMB_SETS_FLAGS (inst
.instruction
))
11989 narrow
= !in_pred_block ();
11991 narrow
= in_pred_block ();
11993 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11995 if (inst
.operands
[2].shifted
)
11997 if (inst
.size_req
== 4)
12004 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12005 inst
.instruction
|= Rd
;
12006 inst
.instruction
|= Rn
<< 3;
12011 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12012 inst
.instruction
|= Rd
;
12013 inst
.instruction
|= Rs
<< 3;
12018 /* If we get here, it can't be done in 16 bits. */
12019 constraint (inst
.operands
[2].shifted
12020 && inst
.operands
[2].immisreg
,
12021 _("shift must be constant"));
12022 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12023 inst
.instruction
|= Rd
<< 8;
12024 inst
.instruction
|= Rs
<< 16;
12025 encode_thumb32_shifted_operand (2);
12030 /* On its face this is a lie - the instruction does set the
12031 flags. However, the only supported mnemonic in this mode
12032 says it doesn't. */
12033 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12035 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
12036 _("unshifted register required"));
12037 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
12039 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12040 inst
.instruction
|= Rd
;
12043 inst
.instruction
|= Rn
<< 3;
12045 inst
.instruction
|= Rs
<< 3;
12047 constraint (1, _("dest must overlap one source register"));
12055 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
12056 constraint (msb
> 32, _("bit-field extends past end of register"));
12057 /* The instruction encoding stores the LSB and MSB,
12058 not the LSB and width. */
12059 Rd
= inst
.operands
[0].reg
;
12060 reject_bad_reg (Rd
);
12061 inst
.instruction
|= Rd
<< 8;
12062 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
12063 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
12064 inst
.instruction
|= msb
- 1;
12073 Rd
= inst
.operands
[0].reg
;
12074 reject_bad_reg (Rd
);
12076 /* #0 in second position is alternative syntax for bfc, which is
12077 the same instruction but with REG_PC in the Rm field. */
12078 if (!inst
.operands
[1].isreg
)
12082 Rn
= inst
.operands
[1].reg
;
12083 reject_bad_reg (Rn
);
12086 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
12087 constraint (msb
> 32, _("bit-field extends past end of register"));
12088 /* The instruction encoding stores the LSB and MSB,
12089 not the LSB and width. */
12090 inst
.instruction
|= Rd
<< 8;
12091 inst
.instruction
|= Rn
<< 16;
12092 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
12093 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
12094 inst
.instruction
|= msb
- 1;
12102 Rd
= inst
.operands
[0].reg
;
12103 Rn
= inst
.operands
[1].reg
;
12105 reject_bad_reg (Rd
);
12106 reject_bad_reg (Rn
);
12108 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
12109 _("bit-field extends past end of register"));
12110 inst
.instruction
|= Rd
<< 8;
12111 inst
.instruction
|= Rn
<< 16;
12112 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
12113 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
12114 inst
.instruction
|= inst
.operands
[3].imm
- 1;
12117 /* ARM V5 Thumb BLX (argument parse)
12118 BLX <target_addr> which is BLX(1)
12119 BLX <Rm> which is BLX(2)
12120 Unfortunately, there are two different opcodes for this mnemonic.
12121 So, the insns[].value is not used, and the code here zaps values
12122 into inst.instruction.
12124 ??? How to take advantage of the additional two bits of displacement
12125 available in Thumb32 mode? Need new relocation? */
12130 set_pred_insn_type_last ();
12132 if (inst
.operands
[0].isreg
)
12134 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
12135 /* We have a register, so this is BLX(2). */
12136 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
12140 /* No register. This must be BLX(1). */
12141 inst
.instruction
= 0xf000e800;
12142 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
12151 bfd_reloc_code_real_type reloc
;
12154 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN
);
12156 if (in_pred_block ())
12158 /* Conditional branches inside IT blocks are encoded as unconditional
12160 cond
= COND_ALWAYS
;
12165 if (cond
!= COND_ALWAYS
)
12166 opcode
= T_MNEM_bcond
;
12168 opcode
= inst
.instruction
;
12171 && (inst
.size_req
== 4
12172 || (inst
.size_req
!= 2
12173 && (inst
.operands
[0].hasreloc
12174 || inst
.relocs
[0].exp
.X_op
== O_constant
))))
12176 inst
.instruction
= THUMB_OP32(opcode
);
12177 if (cond
== COND_ALWAYS
)
12178 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
12181 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
12182 _("selected architecture does not support "
12183 "wide conditional branch instruction"));
12185 gas_assert (cond
!= 0xF);
12186 inst
.instruction
|= cond
<< 22;
12187 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
12192 inst
.instruction
= THUMB_OP16(opcode
);
12193 if (cond
== COND_ALWAYS
)
12194 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
12197 inst
.instruction
|= cond
<< 8;
12198 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
12200 /* Allow section relaxation. */
12201 if (unified_syntax
&& inst
.size_req
!= 2)
12202 inst
.relax
= opcode
;
12204 inst
.relocs
[0].type
= reloc
;
12205 inst
.relocs
[0].pc_rel
= 1;
12208 /* Actually do the work for Thumb state bkpt and hlt. The only difference
12209 between the two is the maximum immediate allowed - which is passed in
12212 do_t_bkpt_hlt1 (int range
)
12214 constraint (inst
.cond
!= COND_ALWAYS
,
12215 _("instruction is always unconditional"));
12216 if (inst
.operands
[0].present
)
12218 constraint (inst
.operands
[0].imm
> range
,
12219 _("immediate value out of range"));
12220 inst
.instruction
|= inst
.operands
[0].imm
;
12223 set_pred_insn_type (NEUTRAL_IT_INSN
);
12229 do_t_bkpt_hlt1 (63);
12235 do_t_bkpt_hlt1 (255);
12239 do_t_branch23 (void)
12241 set_pred_insn_type_last ();
12242 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
12244 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
12245 this file. We used to simply ignore the PLT reloc type here --
12246 the branch encoding is now needed to deal with TLSCALL relocs.
12247 So if we see a PLT reloc now, put it back to how it used to be to
12248 keep the preexisting behaviour. */
12249 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_PLT32
)
12250 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
12252 #if defined(OBJ_COFF)
12253 /* If the destination of the branch is a defined symbol which does not have
12254 the THUMB_FUNC attribute, then we must be calling a function which has
12255 the (interfacearm) attribute. We look for the Thumb entry point to that
12256 function and change the branch to refer to that function instead. */
12257 if ( inst
.relocs
[0].exp
.X_op
== O_symbol
12258 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
12259 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
12260 && ! THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
12261 inst
.relocs
[0].exp
.X_add_symbol
12262 = find_real_start (inst
.relocs
[0].exp
.X_add_symbol
);
12269 set_pred_insn_type_last ();
12270 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
12271 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
12272 should cause the alignment to be checked once it is known. This is
12273 because BX PC only works if the instruction is word aligned. */
12281 set_pred_insn_type_last ();
12282 Rm
= inst
.operands
[0].reg
;
12283 reject_bad_reg (Rm
);
12284 inst
.instruction
|= Rm
<< 16;
12293 Rd
= inst
.operands
[0].reg
;
12294 Rm
= inst
.operands
[1].reg
;
12296 reject_bad_reg (Rd
);
12297 reject_bad_reg (Rm
);
12299 inst
.instruction
|= Rd
<< 8;
12300 inst
.instruction
|= Rm
<< 16;
12301 inst
.instruction
|= Rm
;
12304 /* For the Armv8.1-M conditional instructions. */
12308 unsigned Rd
, Rn
, Rm
;
12311 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
12313 Rd
= inst
.operands
[0].reg
;
12314 switch (inst
.instruction
)
12320 Rn
= inst
.operands
[1].reg
;
12321 Rm
= inst
.operands
[2].reg
;
12322 cond
= inst
.operands
[3].imm
;
12323 constraint (Rn
== REG_SP
, BAD_SP
);
12324 constraint (Rm
== REG_SP
, BAD_SP
);
12330 Rn
= inst
.operands
[1].reg
;
12331 cond
= inst
.operands
[2].imm
;
12332 /* Invert the last bit to invert the cond. */
12333 cond
= TOGGLE_BIT (cond
, 0);
12334 constraint (Rn
== REG_SP
, BAD_SP
);
12340 cond
= inst
.operands
[1].imm
;
12341 /* Invert the last bit to invert the cond. */
12342 cond
= TOGGLE_BIT (cond
, 0);
12350 set_pred_insn_type (OUTSIDE_PRED_INSN
);
12351 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12352 inst
.instruction
|= Rd
<< 8;
12353 inst
.instruction
|= Rn
<< 16;
12354 inst
.instruction
|= Rm
;
12355 inst
.instruction
|= cond
<< 4;
12361 set_pred_insn_type (OUTSIDE_PRED_INSN
);
12367 set_pred_insn_type (OUTSIDE_PRED_INSN
);
12368 inst
.instruction
|= inst
.operands
[0].imm
;
12374 set_pred_insn_type (OUTSIDE_PRED_INSN
);
12376 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
12377 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
12379 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
12380 inst
.instruction
= 0xf3af8000;
12381 inst
.instruction
|= imod
<< 9;
12382 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
12383 if (inst
.operands
[1].present
)
12384 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
12388 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
12389 && (inst
.operands
[0].imm
& 4),
12390 _("selected processor does not support 'A' form "
12391 "of this instruction"));
12392 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
12393 _("Thumb does not support the 2-argument "
12394 "form of this instruction"));
12395 inst
.instruction
|= inst
.operands
[0].imm
;
12399 /* THUMB CPY instruction (argument parse). */
12404 if (inst
.size_req
== 4)
12406 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
12407 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12408 inst
.instruction
|= inst
.operands
[1].reg
;
12412 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
12413 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
12414 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12421 set_pred_insn_type (OUTSIDE_PRED_INSN
);
12422 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
12423 inst
.instruction
|= inst
.operands
[0].reg
;
12424 inst
.relocs
[0].pc_rel
= 1;
12425 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
12431 inst
.instruction
|= inst
.operands
[0].imm
;
12437 unsigned Rd
, Rn
, Rm
;
12439 Rd
= inst
.operands
[0].reg
;
12440 Rn
= (inst
.operands
[1].present
12441 ? inst
.operands
[1].reg
: Rd
);
12442 Rm
= inst
.operands
[2].reg
;
12444 reject_bad_reg (Rd
);
12445 reject_bad_reg (Rn
);
12446 reject_bad_reg (Rm
);
12448 inst
.instruction
|= Rd
<< 8;
12449 inst
.instruction
|= Rn
<< 16;
12450 inst
.instruction
|= Rm
;
12456 if (unified_syntax
&& inst
.size_req
== 4)
12457 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12459 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12465 unsigned int cond
= inst
.operands
[0].imm
;
12467 set_pred_insn_type (IT_INSN
);
12468 now_pred
.mask
= (inst
.instruction
& 0xf) | 0x10;
12469 now_pred
.cc
= cond
;
12470 now_pred
.warn_deprecated
= false;
12471 now_pred
.type
= SCALAR_PRED
;
12473 /* If the condition is a negative condition, invert the mask. */
12474 if ((cond
& 0x1) == 0x0)
12476 unsigned int mask
= inst
.instruction
& 0x000f;
12478 if ((mask
& 0x7) == 0)
12480 /* No conversion needed. */
12481 now_pred
.block_length
= 1;
12483 else if ((mask
& 0x3) == 0)
12486 now_pred
.block_length
= 2;
12488 else if ((mask
& 0x1) == 0)
12491 now_pred
.block_length
= 3;
12496 now_pred
.block_length
= 4;
12499 inst
.instruction
&= 0xfff0;
12500 inst
.instruction
|= mask
;
12503 inst
.instruction
|= cond
<< 4;
12506 /* Helper function used for both push/pop and ldm/stm. */
12508 encode_thumb2_multi (bool do_io
, int base
, unsigned mask
,
12513 gas_assert (base
!= -1 || !do_io
);
12514 load
= do_io
&& ((inst
.instruction
& (1 << 20)) != 0);
12515 store
= do_io
&& !load
;
12517 if (mask
& (1 << 13))
12518 inst
.error
= _("SP not allowed in register list");
12520 if (do_io
&& (mask
& (1 << base
)) != 0
12522 inst
.error
= _("having the base register in the register list when "
12523 "using write back is UNPREDICTABLE");
12527 if (mask
& (1 << 15))
12529 if (mask
& (1 << 14))
12530 inst
.error
= _("LR and PC should not both be in register list");
12532 set_pred_insn_type_last ();
12537 if (mask
& (1 << 15))
12538 inst
.error
= _("PC not allowed in register list");
12541 if (do_io
&& ((mask
& (mask
- 1)) == 0))
12543 /* Single register transfers implemented as str/ldr. */
12546 if (inst
.instruction
& (1 << 23))
12547 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
12549 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
12553 if (inst
.instruction
& (1 << 23))
12554 inst
.instruction
= 0x00800000; /* ia -> [base] */
12556 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
12559 inst
.instruction
|= 0xf8400000;
12561 inst
.instruction
|= 0x00100000;
12563 mask
= ffs (mask
) - 1;
12566 else if (writeback
)
12567 inst
.instruction
|= WRITE_BACK
;
12569 inst
.instruction
|= mask
;
12571 inst
.instruction
|= base
<< 16;
12577 /* This really doesn't seem worth it. */
12578 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
12579 _("expression too complex"));
12580 constraint (inst
.operands
[1].writeback
,
12581 _("Thumb load/store multiple does not support {reglist}^"));
12583 if (unified_syntax
)
12589 /* See if we can use a 16-bit instruction. */
12590 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
12591 && inst
.size_req
!= 4
12592 && !(inst
.operands
[1].imm
& ~0xff))
12594 mask
= 1 << inst
.operands
[0].reg
;
12596 if (inst
.operands
[0].reg
<= 7)
12598 if (inst
.instruction
== T_MNEM_stmia
12599 ? inst
.operands
[0].writeback
12600 : (inst
.operands
[0].writeback
12601 == !(inst
.operands
[1].imm
& mask
)))
12603 if (inst
.instruction
== T_MNEM_stmia
12604 && (inst
.operands
[1].imm
& mask
)
12605 && (inst
.operands
[1].imm
& (mask
- 1)))
12606 as_warn (_("value stored for r%d is UNKNOWN"),
12607 inst
.operands
[0].reg
);
12609 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12610 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12611 inst
.instruction
|= inst
.operands
[1].imm
;
12614 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
12616 /* This means 1 register in reg list one of 3 situations:
12617 1. Instruction is stmia, but without writeback.
12618 2. lmdia without writeback, but with Rn not in
12620 3. ldmia with writeback, but with Rn in reglist.
12621 Case 3 is UNPREDICTABLE behaviour, so we handle
12622 case 1 and 2 which can be converted into a 16-bit
12623 str or ldr. The SP cases are handled below. */
12624 unsigned long opcode
;
12625 /* First, record an error for Case 3. */
12626 if (inst
.operands
[1].imm
& mask
12627 && inst
.operands
[0].writeback
)
12629 _("having the base register in the register list when "
12630 "using write back is UNPREDICTABLE");
12632 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
12634 inst
.instruction
= THUMB_OP16 (opcode
);
12635 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
12636 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
12640 else if (inst
.operands
[0] .reg
== REG_SP
)
12642 if (inst
.operands
[0].writeback
)
12645 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
12646 ? T_MNEM_push
: T_MNEM_pop
);
12647 inst
.instruction
|= inst
.operands
[1].imm
;
12650 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
12653 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
12654 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
12655 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
12663 if (inst
.instruction
< 0xffff)
12664 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12666 encode_thumb2_multi (true /* do_io */, inst
.operands
[0].reg
,
12667 inst
.operands
[1].imm
,
12668 inst
.operands
[0].writeback
);
12673 constraint (inst
.operands
[0].reg
> 7
12674 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
12675 constraint (inst
.instruction
!= T_MNEM_ldmia
12676 && inst
.instruction
!= T_MNEM_stmia
,
12677 _("Thumb-2 instruction only valid in unified syntax"));
12678 if (inst
.instruction
== T_MNEM_stmia
)
12680 if (!inst
.operands
[0].writeback
)
12681 as_warn (_("this instruction will write back the base register"));
12682 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
12683 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
12684 as_warn (_("value stored for r%d is UNKNOWN"),
12685 inst
.operands
[0].reg
);
12689 if (!inst
.operands
[0].writeback
12690 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
12691 as_warn (_("this instruction will write back the base register"));
12692 else if (inst
.operands
[0].writeback
12693 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
12694 as_warn (_("this instruction will not write back the base register"));
12697 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12698 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12699 inst
.instruction
|= inst
.operands
[1].imm
;
12706 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
12707 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
12708 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
12709 || inst
.operands
[1].negative
,
12712 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
12714 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12715 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12716 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
12722 if (!inst
.operands
[1].present
)
12724 constraint (inst
.operands
[0].reg
== REG_LR
,
12725 _("r14 not allowed as first register "
12726 "when second register is omitted"));
12727 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
12729 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
12732 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12733 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
12734 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12740 unsigned long opcode
;
12743 if (inst
.operands
[0].isreg
12744 && !inst
.operands
[0].preind
12745 && inst
.operands
[0].reg
== REG_PC
)
12746 set_pred_insn_type_last ();
12748 opcode
= inst
.instruction
;
12749 if (unified_syntax
)
12751 if (!inst
.operands
[1].isreg
)
12753 if (opcode
<= 0xffff)
12754 inst
.instruction
= THUMB_OP32 (opcode
);
12755 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/false))
12758 if (inst
.operands
[1].isreg
12759 && !inst
.operands
[1].writeback
12760 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
12761 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
12762 && opcode
<= 0xffff
12763 && inst
.size_req
!= 4)
12765 /* Insn may have a 16-bit form. */
12766 Rn
= inst
.operands
[1].reg
;
12767 if (inst
.operands
[1].immisreg
)
12769 inst
.instruction
= THUMB_OP16 (opcode
);
12771 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
12773 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
12774 reject_bad_reg (inst
.operands
[1].imm
);
12776 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
12777 && opcode
!= T_MNEM_ldrsb
)
12778 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
12779 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
12786 if (inst
.relocs
[0].pc_rel
)
12787 opcode
= T_MNEM_ldr_pc2
;
12789 opcode
= T_MNEM_ldr_pc
;
12793 if (opcode
== T_MNEM_ldr
)
12794 opcode
= T_MNEM_ldr_sp
;
12796 opcode
= T_MNEM_str_sp
;
12798 inst
.instruction
= inst
.operands
[0].reg
<< 8;
12802 inst
.instruction
= inst
.operands
[0].reg
;
12803 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12805 inst
.instruction
|= THUMB_OP16 (opcode
);
12806 if (inst
.size_req
== 2)
12807 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12809 inst
.relax
= opcode
;
12813 /* Definitely a 32-bit variant. */
12815 /* Warning for Erratum 752419. */
12816 if (opcode
== T_MNEM_ldr
12817 && inst
.operands
[0].reg
== REG_SP
12818 && inst
.operands
[1].writeback
== 1
12819 && !inst
.operands
[1].immisreg
)
12821 if (no_cpu_selected ()
12822 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
12823 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
12824 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
12825 as_warn (_("This instruction may be unpredictable "
12826 "if executed on M-profile cores "
12827 "with interrupts enabled."));
12830 /* Do some validations regarding addressing modes. */
12831 if (inst
.operands
[1].immisreg
)
12832 reject_bad_reg (inst
.operands
[1].imm
);
12834 constraint (inst
.operands
[1].writeback
== 1
12835 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
12838 inst
.instruction
= THUMB_OP32 (opcode
);
12839 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12840 encode_thumb32_addr_mode (1, /*is_t=*/false, /*is_d=*/false);
12841 check_ldr_r15_aligned ();
12845 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
12847 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
12849 /* Only [Rn,Rm] is acceptable. */
12850 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
12851 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
12852 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
12853 || inst
.operands
[1].negative
,
12854 _("Thumb does not support this addressing mode"));
12855 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12859 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12860 if (!inst
.operands
[1].isreg
)
12861 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/false))
12864 constraint (!inst
.operands
[1].preind
12865 || inst
.operands
[1].shifted
12866 || inst
.operands
[1].writeback
,
12867 _("Thumb does not support this addressing mode"));
12868 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
12870 constraint (inst
.instruction
& 0x0600,
12871 _("byte or halfword not valid for base register"));
12872 constraint (inst
.operands
[1].reg
== REG_PC
12873 && !(inst
.instruction
& THUMB_LOAD_BIT
),
12874 _("r15 based store not allowed"));
12875 constraint (inst
.operands
[1].immisreg
,
12876 _("invalid base register for register offset"));
12878 if (inst
.operands
[1].reg
== REG_PC
)
12879 inst
.instruction
= T_OPCODE_LDR_PC
;
12880 else if (inst
.instruction
& THUMB_LOAD_BIT
)
12881 inst
.instruction
= T_OPCODE_LDR_SP
;
12883 inst
.instruction
= T_OPCODE_STR_SP
;
12885 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12886 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12890 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
12891 if (!inst
.operands
[1].immisreg
)
12893 /* Immediate offset. */
12894 inst
.instruction
|= inst
.operands
[0].reg
;
12895 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12896 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12900 /* Register offset. */
12901 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
12902 constraint (inst
.operands
[1].negative
,
12903 _("Thumb does not support this addressing mode"));
12906 switch (inst
.instruction
)
12908 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
12909 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
12910 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
12911 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
12912 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
12913 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
12914 case 0x5600 /* ldrsb */:
12915 case 0x5e00 /* ldrsh */: break;
12919 inst
.instruction
|= inst
.operands
[0].reg
;
12920 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12921 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
12927 if (!inst
.operands
[1].present
)
12929 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
12930 constraint (inst
.operands
[0].reg
== REG_LR
,
12931 _("r14 not allowed here"));
12932 constraint (inst
.operands
[0].reg
== REG_R12
,
12933 _("r12 not allowed here"));
12936 if (inst
.operands
[2].writeback
12937 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
12938 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
12939 as_warn (_("base register written back, and overlaps "
12940 "one of transfer registers"));
12942 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12943 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
12944 encode_thumb32_addr_mode (2, /*is_t=*/false, /*is_d=*/true);
12950 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12951 encode_thumb32_addr_mode (1, /*is_t=*/true, /*is_d=*/false);
12957 unsigned Rd
, Rn
, Rm
, Ra
;
12959 Rd
= inst
.operands
[0].reg
;
12960 Rn
= inst
.operands
[1].reg
;
12961 Rm
= inst
.operands
[2].reg
;
12962 Ra
= inst
.operands
[3].reg
;
12964 reject_bad_reg (Rd
);
12965 reject_bad_reg (Rn
);
12966 reject_bad_reg (Rm
);
12967 reject_bad_reg (Ra
);
12969 inst
.instruction
|= Rd
<< 8;
12970 inst
.instruction
|= Rn
<< 16;
12971 inst
.instruction
|= Rm
;
12972 inst
.instruction
|= Ra
<< 12;
12978 unsigned RdLo
, RdHi
, Rn
, Rm
;
12980 RdLo
= inst
.operands
[0].reg
;
12981 RdHi
= inst
.operands
[1].reg
;
12982 Rn
= inst
.operands
[2].reg
;
12983 Rm
= inst
.operands
[3].reg
;
12985 reject_bad_reg (RdLo
);
12986 reject_bad_reg (RdHi
);
12987 reject_bad_reg (Rn
);
12988 reject_bad_reg (Rm
);
12990 inst
.instruction
|= RdLo
<< 12;
12991 inst
.instruction
|= RdHi
<< 8;
12992 inst
.instruction
|= Rn
<< 16;
12993 inst
.instruction
|= Rm
;
12997 do_t_mov_cmp (void)
13001 Rn
= inst
.operands
[0].reg
;
13002 Rm
= inst
.operands
[1].reg
;
13005 set_pred_insn_type_last ();
13007 if (unified_syntax
)
13009 int r0off
= (inst
.instruction
== T_MNEM_mov
13010 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
13011 unsigned long opcode
;
13015 low_regs
= (Rn
<= 7 && Rm
<= 7);
13016 opcode
= inst
.instruction
;
13017 if (in_pred_block ())
13018 narrow
= opcode
!= T_MNEM_movs
;
13020 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
13021 if (inst
.size_req
== 4
13022 || inst
.operands
[1].shifted
)
13025 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
13026 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
13027 && !inst
.operands
[1].shifted
13031 inst
.instruction
= T2_SUBS_PC_LR
;
13035 if (opcode
== T_MNEM_cmp
)
13037 constraint (Rn
== REG_PC
, BAD_PC
);
13040 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
13042 warn_deprecated_sp (Rm
);
13043 /* R15 was documented as a valid choice for Rm in ARMv6,
13044 but as UNPREDICTABLE in ARMv7. ARM's proprietary
13045 tools reject R15, so we do too. */
13046 constraint (Rm
== REG_PC
, BAD_PC
);
13049 reject_bad_reg (Rm
);
13051 else if (opcode
== T_MNEM_mov
13052 || opcode
== T_MNEM_movs
)
13054 if (inst
.operands
[1].isreg
)
13056 if (opcode
== T_MNEM_movs
)
13058 reject_bad_reg (Rn
);
13059 reject_bad_reg (Rm
);
13063 /* This is mov.n. */
13064 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
13065 && (Rm
== REG_SP
|| Rm
== REG_PC
))
13067 as_tsktsk (_("Use of r%u as a source register is "
13068 "deprecated when r%u is the destination "
13069 "register."), Rm
, Rn
);
13074 /* This is mov.w. */
13075 constraint (Rn
== REG_PC
, BAD_PC
);
13076 constraint (Rm
== REG_PC
, BAD_PC
);
13077 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13078 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
13082 reject_bad_reg (Rn
);
13085 if (!inst
.operands
[1].isreg
)
13087 /* Immediate operand. */
13088 if (!in_pred_block () && opcode
== T_MNEM_mov
)
13090 if (low_regs
&& narrow
)
13092 inst
.instruction
= THUMB_OP16 (opcode
);
13093 inst
.instruction
|= Rn
<< 8;
13094 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
13095 || inst
.relocs
[0].type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
13097 if (inst
.size_req
== 2)
13098 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
13100 inst
.relax
= opcode
;
13105 constraint ((inst
.relocs
[0].type
13106 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
13107 && (inst
.relocs
[0].type
13108 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
13109 THUMB1_RELOC_ONLY
);
13111 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13112 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
13113 inst
.instruction
|= Rn
<< r0off
;
13114 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
13117 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
13118 && (inst
.instruction
== T_MNEM_mov
13119 || inst
.instruction
== T_MNEM_movs
))
13121 /* Register shifts are encoded as separate shift instructions. */
13122 bool flags
= (inst
.instruction
== T_MNEM_movs
);
13124 if (in_pred_block ())
13129 if (inst
.size_req
== 4)
13132 if (!low_regs
|| inst
.operands
[1].imm
> 7)
13138 switch (inst
.operands
[1].shift_kind
)
13141 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
13144 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
13147 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
13150 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
13156 inst
.instruction
= opcode
;
13159 inst
.instruction
|= Rn
;
13160 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
13165 inst
.instruction
|= CONDS_BIT
;
13167 inst
.instruction
|= Rn
<< 8;
13168 inst
.instruction
|= Rm
<< 16;
13169 inst
.instruction
|= inst
.operands
[1].imm
;
13174 /* Some mov with immediate shift have narrow variants.
13175 Register shifts are handled above. */
13176 if (low_regs
&& inst
.operands
[1].shifted
13177 && (inst
.instruction
== T_MNEM_mov
13178 || inst
.instruction
== T_MNEM_movs
))
13180 if (in_pred_block ())
13181 narrow
= (inst
.instruction
== T_MNEM_mov
);
13183 narrow
= (inst
.instruction
== T_MNEM_movs
);
13188 switch (inst
.operands
[1].shift_kind
)
13190 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13191 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13192 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13193 default: narrow
= false; break;
13199 inst
.instruction
|= Rn
;
13200 inst
.instruction
|= Rm
<< 3;
13201 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13205 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13206 inst
.instruction
|= Rn
<< r0off
;
13207 encode_thumb32_shifted_operand (1);
13211 switch (inst
.instruction
)
13214 /* In v4t or v5t a move of two lowregs produces unpredictable
13215 results. Don't allow this. */
13218 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
13219 "MOV Rd, Rs with two low registers is not "
13220 "permitted on this architecture");
13221 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
13225 inst
.instruction
= T_OPCODE_MOV_HR
;
13226 inst
.instruction
|= (Rn
& 0x8) << 4;
13227 inst
.instruction
|= (Rn
& 0x7);
13228 inst
.instruction
|= Rm
<< 3;
13232 /* We know we have low registers at this point.
13233 Generate LSLS Rd, Rs, #0. */
13234 inst
.instruction
= T_OPCODE_LSL_I
;
13235 inst
.instruction
|= Rn
;
13236 inst
.instruction
|= Rm
<< 3;
13242 inst
.instruction
= T_OPCODE_CMP_LR
;
13243 inst
.instruction
|= Rn
;
13244 inst
.instruction
|= Rm
<< 3;
13248 inst
.instruction
= T_OPCODE_CMP_HR
;
13249 inst
.instruction
|= (Rn
& 0x8) << 4;
13250 inst
.instruction
|= (Rn
& 0x7);
13251 inst
.instruction
|= Rm
<< 3;
13258 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13260 /* PR 10443: Do not silently ignore shifted operands. */
13261 constraint (inst
.operands
[1].shifted
,
13262 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
13264 if (inst
.operands
[1].isreg
)
13266 if (Rn
< 8 && Rm
< 8)
13268 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
13269 since a MOV instruction produces unpredictable results. */
13270 if (inst
.instruction
== T_OPCODE_MOV_I8
)
13271 inst
.instruction
= T_OPCODE_ADD_I3
;
13273 inst
.instruction
= T_OPCODE_CMP_LR
;
13275 inst
.instruction
|= Rn
;
13276 inst
.instruction
|= Rm
<< 3;
13280 if (inst
.instruction
== T_OPCODE_MOV_I8
)
13281 inst
.instruction
= T_OPCODE_MOV_HR
;
13283 inst
.instruction
= T_OPCODE_CMP_HR
;
13289 constraint (Rn
> 7,
13290 _("only lo regs allowed with immediate"));
13291 inst
.instruction
|= Rn
<< 8;
13292 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
13303 top
= (inst
.instruction
& 0x00800000) != 0;
13304 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
)
13306 constraint (top
, _(":lower16: not allowed in this instruction"));
13307 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVW
;
13309 else if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
)
13311 constraint (!top
, _(":upper16: not allowed in this instruction"));
13312 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVT
;
13315 Rd
= inst
.operands
[0].reg
;
13316 reject_bad_reg (Rd
);
13318 inst
.instruction
|= Rd
<< 8;
13319 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
13321 imm
= inst
.relocs
[0].exp
.X_add_number
;
13322 inst
.instruction
|= (imm
& 0xf000) << 4;
13323 inst
.instruction
|= (imm
& 0x0800) << 15;
13324 inst
.instruction
|= (imm
& 0x0700) << 4;
13325 inst
.instruction
|= (imm
& 0x00ff);
13330 do_t_mvn_tst (void)
13334 Rn
= inst
.operands
[0].reg
;
13335 Rm
= inst
.operands
[1].reg
;
13337 if (inst
.instruction
== T_MNEM_cmp
13338 || inst
.instruction
== T_MNEM_cmn
)
13339 constraint (Rn
== REG_PC
, BAD_PC
);
13341 reject_bad_reg (Rn
);
13342 reject_bad_reg (Rm
);
13344 if (unified_syntax
)
13346 int r0off
= (inst
.instruction
== T_MNEM_mvn
13347 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
13350 if (inst
.size_req
== 4
13351 || inst
.instruction
> 0xffff
13352 || inst
.operands
[1].shifted
13353 || Rn
> 7 || Rm
> 7)
13355 else if (inst
.instruction
== T_MNEM_cmn
13356 || inst
.instruction
== T_MNEM_tst
)
13358 else if (THUMB_SETS_FLAGS (inst
.instruction
))
13359 narrow
= !in_pred_block ();
13361 narrow
= in_pred_block ();
13363 if (!inst
.operands
[1].isreg
)
13365 /* For an immediate, we always generate a 32-bit opcode;
13366 section relaxation will shrink it later if possible. */
13367 if (inst
.instruction
< 0xffff)
13368 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13369 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
13370 inst
.instruction
|= Rn
<< r0off
;
13371 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
13375 /* See if we can do this with a 16-bit instruction. */
13378 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13379 inst
.instruction
|= Rn
;
13380 inst
.instruction
|= Rm
<< 3;
13384 constraint (inst
.operands
[1].shifted
13385 && inst
.operands
[1].immisreg
,
13386 _("shift must be constant"));
13387 if (inst
.instruction
< 0xffff)
13388 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13389 inst
.instruction
|= Rn
<< r0off
;
13390 encode_thumb32_shifted_operand (1);
13396 constraint (inst
.instruction
> 0xffff
13397 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
13398 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
13399 _("unshifted register required"));
13400 constraint (Rn
> 7 || Rm
> 7,
13403 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13404 inst
.instruction
|= Rn
;
13405 inst
.instruction
|= Rm
<< 3;
13414 if (do_vfp_nsyn_mrs () == SUCCESS
)
13417 Rd
= inst
.operands
[0].reg
;
13418 reject_bad_reg (Rd
);
13419 inst
.instruction
|= Rd
<< 8;
13421 if (inst
.operands
[1].isreg
)
13423 unsigned br
= inst
.operands
[1].reg
;
13424 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
13425 as_bad (_("bad register for mrs"));
13427 inst
.instruction
|= br
& (0xf << 16);
13428 inst
.instruction
|= (br
& 0x300) >> 4;
13429 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
13433 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
13435 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
13437 /* PR gas/12698: The constraint is only applied for m_profile.
13438 If the user has specified -march=all, we want to ignore it as
13439 we are building for any CPU type, including non-m variants. */
13441 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
13442 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
13443 "not support requested special purpose register"));
13446 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
13448 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
13449 _("'APSR', 'CPSR' or 'SPSR' expected"));
13451 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
13452 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
13453 inst
.instruction
|= 0xf0000;
13463 if (do_vfp_nsyn_msr () == SUCCESS
)
13466 constraint (!inst
.operands
[1].isreg
,
13467 _("Thumb encoding does not support an immediate here"));
13469 if (inst
.operands
[0].isreg
)
13470 flags
= (int)(inst
.operands
[0].reg
);
13472 flags
= inst
.operands
[0].imm
;
13474 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
13476 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
13478 /* PR gas/12698: The constraint is only applied for m_profile.
13479 If the user has specified -march=all, we want to ignore it as
13480 we are building for any CPU type, including non-m variants. */
13482 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
13483 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
13484 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
13485 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
13486 && bits
!= PSR_f
)) && m_profile
,
13487 _("selected processor does not support requested special "
13488 "purpose register"));
13491 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
13492 "requested special purpose register"));
13494 Rn
= inst
.operands
[1].reg
;
13495 reject_bad_reg (Rn
);
13497 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
13498 inst
.instruction
|= (flags
& 0xf0000) >> 8;
13499 inst
.instruction
|= (flags
& 0x300) >> 4;
13500 inst
.instruction
|= (flags
& 0xff);
13501 inst
.instruction
|= Rn
<< 16;
13508 unsigned Rd
, Rn
, Rm
;
13510 if (!inst
.operands
[2].present
)
13511 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
13513 Rd
= inst
.operands
[0].reg
;
13514 Rn
= inst
.operands
[1].reg
;
13515 Rm
= inst
.operands
[2].reg
;
13517 if (unified_syntax
)
13519 if (inst
.size_req
== 4
13525 else if (inst
.instruction
== T_MNEM_muls
)
13526 narrow
= !in_pred_block ();
13528 narrow
= in_pred_block ();
13532 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
13533 constraint (Rn
> 7 || Rm
> 7,
13540 /* 16-bit MULS/Conditional MUL. */
13541 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13542 inst
.instruction
|= Rd
;
13545 inst
.instruction
|= Rm
<< 3;
13547 inst
.instruction
|= Rn
<< 3;
13549 constraint (1, _("dest must overlap one source register"));
13553 constraint (inst
.instruction
!= T_MNEM_mul
,
13554 _("Thumb-2 MUL must not set flags"));
13556 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13557 inst
.instruction
|= Rd
<< 8;
13558 inst
.instruction
|= Rn
<< 16;
13559 inst
.instruction
|= Rm
<< 0;
13561 reject_bad_reg (Rd
);
13562 reject_bad_reg (Rn
);
13563 reject_bad_reg (Rm
);
13570 unsigned RdLo
, RdHi
, Rn
, Rm
;
13572 RdLo
= inst
.operands
[0].reg
;
13573 RdHi
= inst
.operands
[1].reg
;
13574 Rn
= inst
.operands
[2].reg
;
13575 Rm
= inst
.operands
[3].reg
;
13577 reject_bad_reg (RdLo
);
13578 reject_bad_reg (RdHi
);
13579 reject_bad_reg (Rn
);
13580 reject_bad_reg (Rm
);
13582 inst
.instruction
|= RdLo
<< 12;
13583 inst
.instruction
|= RdHi
<< 8;
13584 inst
.instruction
|= Rn
<< 16;
13585 inst
.instruction
|= Rm
;
13588 as_tsktsk (_("rdhi and rdlo must be different"));
13594 set_pred_insn_type (NEUTRAL_IT_INSN
);
13596 if (unified_syntax
)
13598 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
13600 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13601 inst
.instruction
|= inst
.operands
[0].imm
;
13605 /* PR9722: Check for Thumb2 availability before
13606 generating a thumb2 nop instruction. */
13607 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
13609 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13610 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
13613 inst
.instruction
= 0x46c0;
13618 constraint (inst
.operands
[0].present
,
13619 _("Thumb does not support NOP with hints"));
13620 inst
.instruction
= 0x46c0;
13627 if (unified_syntax
)
13631 if (THUMB_SETS_FLAGS (inst
.instruction
))
13632 narrow
= !in_pred_block ();
13634 narrow
= in_pred_block ();
13635 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
13637 if (inst
.size_req
== 4)
13642 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13643 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13644 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13648 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13649 inst
.instruction
|= inst
.operands
[0].reg
;
13650 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13655 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
13657 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
13659 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13660 inst
.instruction
|= inst
.operands
[0].reg
;
13661 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13670 Rd
= inst
.operands
[0].reg
;
13671 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
13673 reject_bad_reg (Rd
);
13674 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
13675 reject_bad_reg (Rn
);
13677 inst
.instruction
|= Rd
<< 8;
13678 inst
.instruction
|= Rn
<< 16;
13680 if (!inst
.operands
[2].isreg
)
13682 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
13683 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
13689 Rm
= inst
.operands
[2].reg
;
13690 reject_bad_reg (Rm
);
13692 constraint (inst
.operands
[2].shifted
13693 && inst
.operands
[2].immisreg
,
13694 _("shift must be constant"));
13695 encode_thumb32_shifted_operand (2);
13702 unsigned Rd
, Rn
, Rm
;
13704 Rd
= inst
.operands
[0].reg
;
13705 Rn
= inst
.operands
[1].reg
;
13706 Rm
= inst
.operands
[2].reg
;
13708 reject_bad_reg (Rd
);
13709 reject_bad_reg (Rn
);
13710 reject_bad_reg (Rm
);
13712 inst
.instruction
|= Rd
<< 8;
13713 inst
.instruction
|= Rn
<< 16;
13714 inst
.instruction
|= Rm
;
13715 if (inst
.operands
[3].present
)
13717 unsigned int val
= inst
.relocs
[0].exp
.X_add_number
;
13718 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13719 _("expression too complex"));
13720 inst
.instruction
|= (val
& 0x1c) << 10;
13721 inst
.instruction
|= (val
& 0x03) << 6;
13728 if (!inst
.operands
[3].present
)
13732 inst
.instruction
&= ~0x00000020;
13734 /* PR 10168. Swap the Rm and Rn registers. */
13735 Rtmp
= inst
.operands
[1].reg
;
13736 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
13737 inst
.operands
[2].reg
= Rtmp
;
13745 if (inst
.operands
[0].immisreg
)
13746 reject_bad_reg (inst
.operands
[0].imm
);
13748 encode_thumb32_addr_mode (0, /*is_t=*/false, /*is_d=*/false);
13752 do_t_push_pop (void)
13756 constraint (inst
.operands
[0].writeback
,
13757 _("push/pop do not support {reglist}^"));
13758 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
13759 _("expression too complex"));
13761 mask
= inst
.operands
[0].imm
;
13762 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
13763 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
13764 else if (inst
.size_req
!= 4
13765 && (mask
& ~0xff) == (1U << (inst
.instruction
== T_MNEM_push
13766 ? REG_LR
: REG_PC
)))
13768 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13769 inst
.instruction
|= THUMB_PP_PC_LR
;
13770 inst
.instruction
|= mask
& 0xff;
13772 else if (unified_syntax
)
13774 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13775 encode_thumb2_multi (true /* do_io */, 13, mask
, true);
13779 inst
.error
= _("invalid register list to push/pop instruction");
13787 if (unified_syntax
)
13788 encode_thumb2_multi (false /* do_io */, -1, inst
.operands
[0].imm
, false);
13791 inst
.error
= _("invalid register list to push/pop instruction");
13797 do_t_vscclrm (void)
13799 if (inst
.operands
[0].issingle
)
13801 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1) << 22;
13802 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1e) << 11;
13803 inst
.instruction
|= inst
.operands
[0].imm
;
13807 inst
.instruction
|= (inst
.operands
[0].reg
& 0x10) << 18;
13808 inst
.instruction
|= (inst
.operands
[0].reg
& 0xf) << 12;
13809 inst
.instruction
|= 1 << 8;
13810 inst
.instruction
|= inst
.operands
[0].imm
<< 1;
13819 Rd
= inst
.operands
[0].reg
;
13820 Rm
= inst
.operands
[1].reg
;
13822 reject_bad_reg (Rd
);
13823 reject_bad_reg (Rm
);
13825 inst
.instruction
|= Rd
<< 8;
13826 inst
.instruction
|= Rm
<< 16;
13827 inst
.instruction
|= Rm
;
13835 Rd
= inst
.operands
[0].reg
;
13836 Rm
= inst
.operands
[1].reg
;
13838 reject_bad_reg (Rd
);
13839 reject_bad_reg (Rm
);
13841 if (Rd
<= 7 && Rm
<= 7
13842 && inst
.size_req
!= 4)
13844 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13845 inst
.instruction
|= Rd
;
13846 inst
.instruction
|= Rm
<< 3;
13848 else if (unified_syntax
)
13850 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13851 inst
.instruction
|= Rd
<< 8;
13852 inst
.instruction
|= Rm
<< 16;
13853 inst
.instruction
|= Rm
;
13856 inst
.error
= BAD_HIREG
;
13864 Rd
= inst
.operands
[0].reg
;
13865 Rm
= inst
.operands
[1].reg
;
13867 reject_bad_reg (Rd
);
13868 reject_bad_reg (Rm
);
13870 inst
.instruction
|= Rd
<< 8;
13871 inst
.instruction
|= Rm
;
13879 Rd
= inst
.operands
[0].reg
;
13880 Rs
= (inst
.operands
[1].present
13881 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
13882 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
13884 reject_bad_reg (Rd
);
13885 reject_bad_reg (Rs
);
13886 if (inst
.operands
[2].isreg
)
13887 reject_bad_reg (inst
.operands
[2].reg
);
13889 inst
.instruction
|= Rd
<< 8;
13890 inst
.instruction
|= Rs
<< 16;
13891 if (!inst
.operands
[2].isreg
)
13895 if ((inst
.instruction
& 0x00100000) != 0)
13896 narrow
= !in_pred_block ();
13898 narrow
= in_pred_block ();
13900 if (Rd
> 7 || Rs
> 7)
13903 if (inst
.size_req
== 4 || !unified_syntax
)
13906 if (inst
.relocs
[0].exp
.X_op
!= O_constant
13907 || inst
.relocs
[0].exp
.X_add_number
!= 0)
13910 /* Turn rsb #0 into 16-bit neg. We should probably do this via
13911 relaxation, but it doesn't seem worth the hassle. */
13914 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13915 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
13916 inst
.instruction
|= Rs
<< 3;
13917 inst
.instruction
|= Rd
;
13921 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
13922 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
13926 encode_thumb32_shifted_operand (2);
13932 if (warn_on_deprecated
13933 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13934 as_tsktsk (_("setend use is deprecated for ARMv8"));
13936 set_pred_insn_type (OUTSIDE_PRED_INSN
);
13937 if (inst
.operands
[0].imm
)
13938 inst
.instruction
|= 0x8;
13944 if (!inst
.operands
[1].present
)
13945 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
13947 if (unified_syntax
)
13952 switch (inst
.instruction
)
13955 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
13957 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
13959 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
13961 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
13965 if (THUMB_SETS_FLAGS (inst
.instruction
))
13966 narrow
= !in_pred_block ();
13968 narrow
= in_pred_block ();
13969 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
13971 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
13973 if (inst
.operands
[2].isreg
13974 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
13975 || inst
.operands
[2].reg
> 7))
13977 if (inst
.size_req
== 4)
13980 reject_bad_reg (inst
.operands
[0].reg
);
13981 reject_bad_reg (inst
.operands
[1].reg
);
13985 if (inst
.operands
[2].isreg
)
13987 reject_bad_reg (inst
.operands
[2].reg
);
13988 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13989 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13990 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13991 inst
.instruction
|= inst
.operands
[2].reg
;
13993 /* PR 12854: Error on extraneous shifts. */
13994 constraint (inst
.operands
[2].shifted
,
13995 _("extraneous shift as part of operand to shift insn"));
13999 inst
.operands
[1].shifted
= 1;
14000 inst
.operands
[1].shift_kind
= shift_kind
;
14001 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
14002 ? T_MNEM_movs
: T_MNEM_mov
);
14003 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
14004 encode_thumb32_shifted_operand (1);
14005 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
14006 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
14011 if (inst
.operands
[2].isreg
)
14013 switch (shift_kind
)
14015 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
14016 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
14017 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
14018 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
14022 inst
.instruction
|= inst
.operands
[0].reg
;
14023 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
14025 /* PR 12854: Error on extraneous shifts. */
14026 constraint (inst
.operands
[2].shifted
,
14027 _("extraneous shift as part of operand to shift insn"));
14031 switch (shift_kind
)
14033 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
14034 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
14035 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
14038 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
14039 inst
.instruction
|= inst
.operands
[0].reg
;
14040 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
14046 constraint (inst
.operands
[0].reg
> 7
14047 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
14048 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
14050 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
14052 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
14053 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
14054 _("source1 and dest must be same register"));
14056 switch (inst
.instruction
)
14058 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
14059 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
14060 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
14061 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
14065 inst
.instruction
|= inst
.operands
[0].reg
;
14066 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
14068 /* PR 12854: Error on extraneous shifts. */
14069 constraint (inst
.operands
[2].shifted
,
14070 _("extraneous shift as part of operand to shift insn"));
14074 switch (inst
.instruction
)
14076 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
14077 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
14078 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
14079 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
14082 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
14083 inst
.instruction
|= inst
.operands
[0].reg
;
14084 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
14092 unsigned Rd
, Rn
, Rm
;
14094 Rd
= inst
.operands
[0].reg
;
14095 Rn
= inst
.operands
[1].reg
;
14096 Rm
= inst
.operands
[2].reg
;
14098 reject_bad_reg (Rd
);
14099 reject_bad_reg (Rn
);
14100 reject_bad_reg (Rm
);
14102 inst
.instruction
|= Rd
<< 8;
14103 inst
.instruction
|= Rn
<< 16;
14104 inst
.instruction
|= Rm
;
14110 unsigned Rd
, Rn
, Rm
;
14112 Rd
= inst
.operands
[0].reg
;
14113 Rm
= inst
.operands
[1].reg
;
14114 Rn
= inst
.operands
[2].reg
;
14116 reject_bad_reg (Rd
);
14117 reject_bad_reg (Rn
);
14118 reject_bad_reg (Rm
);
14120 inst
.instruction
|= Rd
<< 8;
14121 inst
.instruction
|= Rn
<< 16;
14122 inst
.instruction
|= Rm
;
14128 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
14129 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
14130 _("SMC is not permitted on this architecture"));
14131 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
14132 _("expression too complex"));
14133 constraint (value
> 0xf, _("immediate too large (bigger than 0xF)"));
14135 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
14136 inst
.instruction
|= (value
& 0x000f) << 16;
14138 /* PR gas/15623: SMC instructions must be last in an IT block. */
14139 set_pred_insn_type_last ();
14145 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
14147 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
14148 inst
.instruction
|= (value
& 0x0fff);
14149 inst
.instruction
|= (value
& 0xf000) << 4;
14153 do_t_ssat_usat (int bias
)
14157 Rd
= inst
.operands
[0].reg
;
14158 Rn
= inst
.operands
[2].reg
;
14160 reject_bad_reg (Rd
);
14161 reject_bad_reg (Rn
);
14163 inst
.instruction
|= Rd
<< 8;
14164 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
14165 inst
.instruction
|= Rn
<< 16;
14167 if (inst
.operands
[3].present
)
14169 offsetT shift_amount
= inst
.relocs
[0].exp
.X_add_number
;
14171 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
14173 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
14174 _("expression too complex"));
14176 if (shift_amount
!= 0)
14178 constraint (shift_amount
> 31,
14179 _("shift expression is too large"));
14181 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
14182 inst
.instruction
|= 0x00200000; /* sh bit. */
14184 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
14185 inst
.instruction
|= (shift_amount
& 0x03) << 6;
14193 do_t_ssat_usat (1);
14201 Rd
= inst
.operands
[0].reg
;
14202 Rn
= inst
.operands
[2].reg
;
14204 reject_bad_reg (Rd
);
14205 reject_bad_reg (Rn
);
14207 inst
.instruction
|= Rd
<< 8;
14208 inst
.instruction
|= inst
.operands
[1].imm
- 1;
14209 inst
.instruction
|= Rn
<< 16;
14215 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
14216 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
14217 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
14218 || inst
.operands
[2].negative
,
14221 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
14223 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
14224 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
14225 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
14226 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
14232 if (!inst
.operands
[2].present
)
14233 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
14235 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
14236 || inst
.operands
[0].reg
== inst
.operands
[2].reg
14237 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
14240 inst
.instruction
|= inst
.operands
[0].reg
;
14241 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
14242 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
14243 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
14249 unsigned Rd
, Rn
, Rm
;
14251 Rd
= inst
.operands
[0].reg
;
14252 Rn
= inst
.operands
[1].reg
;
14253 Rm
= inst
.operands
[2].reg
;
14255 reject_bad_reg (Rd
);
14256 reject_bad_reg (Rn
);
14257 reject_bad_reg (Rm
);
14259 inst
.instruction
|= Rd
<< 8;
14260 inst
.instruction
|= Rn
<< 16;
14261 inst
.instruction
|= Rm
;
14262 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
14270 Rd
= inst
.operands
[0].reg
;
14271 Rm
= inst
.operands
[1].reg
;
14273 reject_bad_reg (Rd
);
14274 reject_bad_reg (Rm
);
14276 if (inst
.instruction
<= 0xffff
14277 && inst
.size_req
!= 4
14278 && Rd
<= 7 && Rm
<= 7
14279 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
14281 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
14282 inst
.instruction
|= Rd
;
14283 inst
.instruction
|= Rm
<< 3;
14285 else if (unified_syntax
)
14287 if (inst
.instruction
<= 0xffff)
14288 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
14289 inst
.instruction
|= Rd
<< 8;
14290 inst
.instruction
|= Rm
;
14291 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
14295 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
14296 _("Thumb encoding does not support rotation"));
14297 constraint (1, BAD_HIREG
);
14304 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
14313 half
= (inst
.instruction
& 0x10) != 0;
14314 set_pred_insn_type_last ();
14315 constraint (inst
.operands
[0].immisreg
,
14316 _("instruction requires register index"));
14318 Rn
= inst
.operands
[0].reg
;
14319 Rm
= inst
.operands
[0].imm
;
14321 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
14322 constraint (Rn
== REG_SP
, BAD_SP
);
14323 reject_bad_reg (Rm
);
14325 constraint (!half
&& inst
.operands
[0].shifted
,
14326 _("instruction does not allow shifted index"));
14327 inst
.instruction
|= (Rn
<< 16) | Rm
;
14333 if (!inst
.operands
[0].present
)
14334 inst
.operands
[0].imm
= 0;
14336 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
14338 constraint (inst
.size_req
== 2,
14339 _("immediate value out of range"));
14340 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
14341 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
14342 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
14346 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
14347 inst
.instruction
|= inst
.operands
[0].imm
;
14350 set_pred_insn_type (NEUTRAL_IT_INSN
);
14357 do_t_ssat_usat (0);
14365 Rd
= inst
.operands
[0].reg
;
14366 Rn
= inst
.operands
[2].reg
;
14368 reject_bad_reg (Rd
);
14369 reject_bad_reg (Rn
);
14371 inst
.instruction
|= Rd
<< 8;
14372 inst
.instruction
|= inst
.operands
[1].imm
;
14373 inst
.instruction
|= Rn
<< 16;
14376 /* Checking the range of the branch offset (VAL) with NBITS bits
14377 and IS_SIGNED signedness. Also checks the LSB to be 0. */
14379 v8_1_branch_value_check (int val
, int nbits
, int is_signed
)
14381 gas_assert (nbits
> 0 && nbits
<= 32);
14384 int cmp
= (1 << (nbits
- 1));
14385 if ((val
< -cmp
) || (val
>= cmp
) || (val
& 0x01))
14390 if ((val
<= 0) || (val
>= (1 << nbits
)) || (val
& 0x1))
14396 /* For branches in Armv8.1-M Mainline. */
14398 do_t_branch_future (void)
14400 unsigned long insn
= inst
.instruction
;
14402 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
14403 if (inst
.operands
[0].hasreloc
== 0)
14405 if (v8_1_branch_value_check (inst
.operands
[0].imm
, 5, false) == FAIL
)
14406 as_bad (BAD_BRANCH_OFF
);
14408 inst
.instruction
|= ((inst
.operands
[0].imm
& 0x1f) >> 1) << 23;
14412 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH5
;
14413 inst
.relocs
[0].pc_rel
= 1;
14419 if (inst
.operands
[1].hasreloc
== 0)
14421 int val
= inst
.operands
[1].imm
;
14422 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 17, true) == FAIL
)
14423 as_bad (BAD_BRANCH_OFF
);
14425 int immA
= (val
& 0x0001f000) >> 12;
14426 int immB
= (val
& 0x00000ffc) >> 2;
14427 int immC
= (val
& 0x00000002) >> 1;
14428 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
14432 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF17
;
14433 inst
.relocs
[1].pc_rel
= 1;
14438 if (inst
.operands
[1].hasreloc
== 0)
14440 int val
= inst
.operands
[1].imm
;
14441 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 19, true) == FAIL
)
14442 as_bad (BAD_BRANCH_OFF
);
14444 int immA
= (val
& 0x0007f000) >> 12;
14445 int immB
= (val
& 0x00000ffc) >> 2;
14446 int immC
= (val
& 0x00000002) >> 1;
14447 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
14451 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF19
;
14452 inst
.relocs
[1].pc_rel
= 1;
14456 case T_MNEM_bfcsel
:
14458 if (inst
.operands
[1].hasreloc
== 0)
14460 int val
= inst
.operands
[1].imm
;
14461 int immA
= (val
& 0x00001000) >> 12;
14462 int immB
= (val
& 0x00000ffc) >> 2;
14463 int immC
= (val
& 0x00000002) >> 1;
14464 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
14468 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF13
;
14469 inst
.relocs
[1].pc_rel
= 1;
14473 if (inst
.operands
[2].hasreloc
== 0)
14475 constraint ((inst
.operands
[0].hasreloc
!= 0), BAD_ARGS
);
14476 int val2
= inst
.operands
[2].imm
;
14477 int val0
= inst
.operands
[0].imm
& 0x1f;
14478 int diff
= val2
- val0
;
14480 inst
.instruction
|= 1 << 17; /* T bit. */
14481 else if (diff
!= 2)
14482 as_bad (_("out of range label-relative fixup value"));
14486 constraint ((inst
.operands
[0].hasreloc
== 0), BAD_ARGS
);
14487 inst
.relocs
[2].type
= BFD_RELOC_THUMB_PCREL_BFCSEL
;
14488 inst
.relocs
[2].pc_rel
= 1;
14492 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
14493 inst
.instruction
|= (inst
.operands
[3].imm
& 0xf) << 18;
14498 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
14505 /* Helper function for do_t_loloop to handle relocations. */
14507 v8_1_loop_reloc (int is_le
)
14509 if (inst
.relocs
[0].exp
.X_op
== O_constant
)
14511 int value
= inst
.relocs
[0].exp
.X_add_number
;
14512 value
= (is_le
) ? -value
: value
;
14514 if (v8_1_branch_value_check (value
, 12, false) == FAIL
)
14515 as_bad (BAD_BRANCH_OFF
);
14519 immh
= (value
& 0x00000ffc) >> 2;
14520 imml
= (value
& 0x00000002) >> 1;
14522 inst
.instruction
|= (imml
<< 11) | (immh
<< 1);
14526 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_LOOP12
;
14527 inst
.relocs
[0].pc_rel
= 1;
14531 /* For shifts with four operands in MVE. */
14533 do_mve_scalar_shift1 (void)
14535 unsigned int value
= inst
.operands
[2].imm
;
14537 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
14538 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
14540 /* Setting the bit for saturation. */
14541 inst
.instruction
|= ((value
== 64) ? 0: 1) << 7;
14543 /* Assuming Rm is already checked not to be 11x1. */
14544 constraint (inst
.operands
[3].reg
== inst
.operands
[0].reg
, BAD_OVERLAP
);
14545 constraint (inst
.operands
[3].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
14546 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
14549 /* For shifts in MVE. */
14551 do_mve_scalar_shift (void)
14553 if (!inst
.operands
[2].present
)
14555 inst
.operands
[2] = inst
.operands
[1];
14556 inst
.operands
[1].reg
= 0xf;
14559 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
14560 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
14562 if (inst
.operands
[2].isreg
)
14564 /* Assuming Rm is already checked not to be 11x1. */
14565 constraint (inst
.operands
[2].reg
== inst
.operands
[0].reg
, BAD_OVERLAP
);
14566 constraint (inst
.operands
[2].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
14567 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
14571 /* Assuming imm is already checked as [1,32]. */
14572 unsigned int value
= inst
.operands
[2].imm
;
14573 inst
.instruction
|= (value
& 0x1c) << 10;
14574 inst
.instruction
|= (value
& 0x03) << 6;
14575 /* Change last 4 bits from 0xd to 0xf. */
14576 inst
.instruction
|= 0x2;
14580 /* MVE instruction encoder helpers. */
14581 #define M_MNEM_vabav 0xee800f01
14582 #define M_MNEM_vmladav 0xeef00e00
14583 #define M_MNEM_vmladava 0xeef00e20
14584 #define M_MNEM_vmladavx 0xeef01e00
14585 #define M_MNEM_vmladavax 0xeef01e20
14586 #define M_MNEM_vmlsdav 0xeef00e01
14587 #define M_MNEM_vmlsdava 0xeef00e21
14588 #define M_MNEM_vmlsdavx 0xeef01e01
14589 #define M_MNEM_vmlsdavax 0xeef01e21
14590 #define M_MNEM_vmullt 0xee011e00
14591 #define M_MNEM_vmullb 0xee010e00
14592 #define M_MNEM_vctp 0xf000e801
14593 #define M_MNEM_vst20 0xfc801e00
14594 #define M_MNEM_vst21 0xfc801e20
14595 #define M_MNEM_vst40 0xfc801e01
14596 #define M_MNEM_vst41 0xfc801e21
14597 #define M_MNEM_vst42 0xfc801e41
14598 #define M_MNEM_vst43 0xfc801e61
14599 #define M_MNEM_vld20 0xfc901e00
14600 #define M_MNEM_vld21 0xfc901e20
14601 #define M_MNEM_vld40 0xfc901e01
14602 #define M_MNEM_vld41 0xfc901e21
14603 #define M_MNEM_vld42 0xfc901e41
14604 #define M_MNEM_vld43 0xfc901e61
14605 #define M_MNEM_vstrb 0xec000e00
14606 #define M_MNEM_vstrh 0xec000e10
14607 #define M_MNEM_vstrw 0xec000e40
14608 #define M_MNEM_vstrd 0xec000e50
14609 #define M_MNEM_vldrb 0xec100e00
14610 #define M_MNEM_vldrh 0xec100e10
14611 #define M_MNEM_vldrw 0xec100e40
14612 #define M_MNEM_vldrd 0xec100e50
14613 #define M_MNEM_vmovlt 0xeea01f40
14614 #define M_MNEM_vmovlb 0xeea00f40
14615 #define M_MNEM_vmovnt 0xfe311e81
14616 #define M_MNEM_vmovnb 0xfe310e81
14617 #define M_MNEM_vadc 0xee300f00
14618 #define M_MNEM_vadci 0xee301f00
14619 #define M_MNEM_vbrsr 0xfe011e60
14620 #define M_MNEM_vaddlv 0xee890f00
14621 #define M_MNEM_vaddlva 0xee890f20
14622 #define M_MNEM_vaddv 0xeef10f00
14623 #define M_MNEM_vaddva 0xeef10f20
14624 #define M_MNEM_vddup 0xee011f6e
14625 #define M_MNEM_vdwdup 0xee011f60
14626 #define M_MNEM_vidup 0xee010f6e
14627 #define M_MNEM_viwdup 0xee010f60
14628 #define M_MNEM_vmaxv 0xeee20f00
14629 #define M_MNEM_vmaxav 0xeee00f00
14630 #define M_MNEM_vminv 0xeee20f80
14631 #define M_MNEM_vminav 0xeee00f80
14632 #define M_MNEM_vmlaldav 0xee800e00
14633 #define M_MNEM_vmlaldava 0xee800e20
14634 #define M_MNEM_vmlaldavx 0xee801e00
14635 #define M_MNEM_vmlaldavax 0xee801e20
14636 #define M_MNEM_vmlsldav 0xee800e01
14637 #define M_MNEM_vmlsldava 0xee800e21
14638 #define M_MNEM_vmlsldavx 0xee801e01
14639 #define M_MNEM_vmlsldavax 0xee801e21
14640 #define M_MNEM_vrmlaldavhx 0xee801f00
14641 #define M_MNEM_vrmlaldavhax 0xee801f20
14642 #define M_MNEM_vrmlsldavh 0xfe800e01
14643 #define M_MNEM_vrmlsldavha 0xfe800e21
14644 #define M_MNEM_vrmlsldavhx 0xfe801e01
14645 #define M_MNEM_vrmlsldavhax 0xfe801e21
14646 #define M_MNEM_vqmovnt 0xee331e01
14647 #define M_MNEM_vqmovnb 0xee330e01
14648 #define M_MNEM_vqmovunt 0xee311e81
14649 #define M_MNEM_vqmovunb 0xee310e81
14650 #define M_MNEM_vshrnt 0xee801fc1
14651 #define M_MNEM_vshrnb 0xee800fc1
14652 #define M_MNEM_vrshrnt 0xfe801fc1
14653 #define M_MNEM_vqshrnt 0xee801f40
14654 #define M_MNEM_vqshrnb 0xee800f40
14655 #define M_MNEM_vqshrunt 0xee801fc0
14656 #define M_MNEM_vqshrunb 0xee800fc0
14657 #define M_MNEM_vrshrnb 0xfe800fc1
14658 #define M_MNEM_vqrshrnt 0xee801f41
14659 #define M_MNEM_vqrshrnb 0xee800f41
14660 #define M_MNEM_vqrshrunt 0xfe801fc0
14661 #define M_MNEM_vqrshrunb 0xfe800fc0
14663 /* Bfloat16 instruction encoder helpers. */
14664 #define B_MNEM_vfmat 0xfc300850
14665 #define B_MNEM_vfmab 0xfc300810
14667 /* Neon instruction encoder helpers. */
14669 /* Encodings for the different types for various Neon opcodes. */
14671 /* An "invalid" code for the following tables. */
14674 struct neon_tab_entry
14677 unsigned float_or_poly
;
14678 unsigned scalar_or_imm
;
/* Map overloaded Neon opcodes to their respective encodings.  Each X entry
   gives (mnemonic, integer encoding, float/poly encoding, scalar/immediate
   encoding); N_INV marks a variant that does not exist.  */
#define NEON_ENC_TAB							\
  X(vabd,	0x0000700, 0x1200d00, N_INV),				\
  X(vabdl,	0x0800700, N_INV,     N_INV),				\
  X(vmax,	0x0000600, 0x0000f00, N_INV),				\
  X(vmin,	0x0000610, 0x0200f00, N_INV),				\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),				\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),				\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),				\
  X(vadd,	0x0000800, 0x0000d00, N_INV),				\
  X(vaddl,	0x0800000, N_INV,     N_INV),				\
  X(vsub,	0x1000800, 0x0200d00, N_INV),				\
  X(vsubl,	0x0800200, N_INV,     N_INV),				\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),			\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),			\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),			\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  			\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),			\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),			\
  X(vfma,	N_INV, 0x0000c10, N_INV),				\
  X(vfms,	N_INV, 0x0200c10, N_INV),				\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),			\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),			\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),			\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),			\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),			\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),			\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),			\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),			\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),			\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),			\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),			\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),			\
  X(vshl,	0x0000400, N_INV,     0x0800510),			\
  X(vqshl,	0x0000410, N_INV,     0x0800710),			\
  X(vand,	0x0000110, N_INV,     0x0800030),			\
  X(vbic,	0x0100110, N_INV,     0x0800030),			\
  X(veor,	0x1000110, N_INV,     N_INV),				\
  X(vorn,	0x0300110, N_INV,     0x0800010),			\
  X(vorr,	0x0200110, N_INV,     0x0800010),			\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),			\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */	\
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),				\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),			\
  X(vst2,	0x0000100, 0x0800100, N_INV),				\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),			\
  X(vst3,	0x0000200, 0x0800200, N_INV),				\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),			\
  X(vst4,	0x0000300, 0x0800300, N_INV),				\
  X(vmovn,	0x1b20200, N_INV,     N_INV),				\
  X(vtrn,	0x1b20080, N_INV,     N_INV),				\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),				\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),				\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),				\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),				\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),				\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),				\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),				\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),				\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),				\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),				\
  X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV),				\
  X(vseleq,	0xe000a00, N_INV,     N_INV),				\
  X(vselvs,	0xe100a00, N_INV,     N_INV),				\
  X(vselge,	0xe200a00, N_INV,     N_INV),				\
  X(vselgt,	0xe300a00, N_INV,     N_INV),				\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),				\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),				\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),				\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),				\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),				\
  X(aes,	0x3b00300, N_INV,     N_INV),				\
  X(sha3op,	0x2000c00, N_INV,     N_INV),				\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),				\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
14764 #define X(OPC,I,F,S) N_MNEM_##OPC
14769 static const struct neon_tab_entry neon_enc_tab
[] =
14771 #define X(OPC,I,F,S) { (I), (F), (S) }
/* Do not use these macros; instead, use NEON_ENCODE defined below.  Each one
   selects a column of neon_enc_tab for the mnemonic held in the low 28 bits
   of the pseudo-instruction word.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
/* Replace the pseudo-opcode in inst.instruction with the real encoding for
   the requested variant (INTEGER, FLOAT, SCALAR, ...) and record that the
   mnemonic accepts Neon-style suffixes.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)
/* Diagnose a Neon type suffix given on a mnemonic that never accepted one
   (is_neon is only set via NEON_ENCODE).  Expands inside an encode function,
   hence the bare `return'.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     H - VFP S<n> register (half precision)
     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     P - predicate operand (NOTE(review): reconstructed from NEON_SHAPE_DEF
	 usage -- confirm against upstream tc-arm.c)
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */
/* Each X entry gives (operand count, operand-kind tuple, width class).  */
#define NEON_SHAPE_DEF			\
  X(4, (R, R, Q, Q), QUAD),		\
  X(4, (Q, R, R, I), QUAD),		\
  X(4, (R, R, S, S), QUAD),		\
  X(4, (S, S, R, R), QUAD),		\
  X(3, (Q, R, I), QUAD),		\
  X(3, (I, Q, Q), QUAD),		\
  X(3, (I, Q, R), QUAD),		\
  X(3, (R, Q, Q), QUAD),		\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(3, (Q, Q, R), QUAD),		\
  X(3, (R, R, Q), QUAD),		\
  X(2, (R, Q), QUAD),			\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (P, F, I), SINGLE),		\
  X(3, (P, D, I), DOUBLE),		\
  X(3, (P, Q, I), QUAD),		\
  X(4, (P, F, F, I), SINGLE),		\
  X(4, (P, D, D, I), DOUBLE),		\
  X(4, (P, Q, Q, I), QUAD),		\
  X(5, (P, F, F, F, I), SINGLE),	\
  X(5, (P, D, D, D, I), DOUBLE),	\
  X(5, (P, Q, Q, Q, I), QUAD),		\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(4, (D, D, S, I), DOUBLE),		\
  X(4, (Q, Q, S, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
/* Used for MVE tail predicated loop instructions.  */\
  X(2, (R, R), QUAD),			\
/* Half float shape supported so far.  */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED),		\
  X (3, (D, H, H), MIXED),		\
  X (3, (D, H, S), MIXED)
/* Create the combinations of operand kinds as NS_xxx enumerators, one per
   NEON_SHAPE_DEF entry, plus NS_NULL as the end-of-list sentinel.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D
#define S5(A,B,C,D,E)	NS_##A##B##C##D##E

#define X(N, L, C) S##N L

enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
#undef S5
14928 enum neon_shape_class
14937 #define X(N, L, C) SC_##C
14939 static enum neon_shape_class neon_shape_class
[] =
/* Register widths of above, in bits, indexed by enum neon_shape_el.
   NOTE(review): values reconstructed (H=16, F=32, D=64, Q=128, I/S/R/L as
   upstream) -- confirm against upstream tc-arm.c.  */
static unsigned neon_shape_el_size[] =
{
  16,
  32,
  64,
  128,
  0,
  32,
  32,
  0
};
14973 struct neon_shape_info
14976 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
14979 #define S2(A,B) { SE_##A, SE_##B }
14980 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
14981 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
14982 #define S5(A,B,C,D,E) { SE_##A, SE_##B, SE_##C, SE_##D, SE_##E }
14984 #define X(N, L, C) { N, S##N L }
14986 static struct neon_shape_info neon_shape_tab
[] =
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.
   NOTE(review): the N_S8..N_P64 enumerator values were dropped by the
   extraction and are reconstructed here (one bit per type/size, ascending);
   confirm against upstream tc-arm.c.  The low modifier bits (N_DBL etc.)
   deliberately overlap them -- they are only meaningful alongside N_EQK.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64  = 0x0200000,
  N_BF16 = 0x0400000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};
/* All modifier bits valid alongside N_EQK.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Convenience combinations of the type-mask bits.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)
#define N_I_MVE	   (N_I8 | N_I16 | N_I32)
#define N_F_MVE    (N_F16 | N_F32)
#define N_SU_MVE   (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
15062 /* Select a "shape" for the current instruction (describing register types or
15063 sizes) from a list of alternatives. Return NS_NULL if the current instruction
15064 doesn't fit. For non-polymorphic shapes, checking is usually done as a
15065 function of operand parsing, so this function doesn't need to be called.
15066 Shapes should be listed in order of decreasing length. */
15068 static enum neon_shape
15069 neon_select_shape (enum neon_shape shape
, ...)
15072 enum neon_shape first_shape
= shape
;
15074 /* Fix missing optional operands. FIXME: we don't know at this point how
15075 many arguments we should have, so this makes the assumption that we have
15076 > 1. This is true of all current Neon opcodes, I think, but may not be
15077 true in the future. */
15078 if (!inst
.operands
[1].present
)
15079 inst
.operands
[1] = inst
.operands
[0];
15081 va_start (ap
, shape
);
15083 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
15088 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
15090 if (!inst
.operands
[j
].present
)
15096 switch (neon_shape_tab
[shape
].el
[j
])
15098 /* If a .f16, .16, .u16, .s16 type specifier is given over
15099 a VFP single precision register operand, it's essentially
15100 means only half of the register is used.
15102 If the type specifier is given after the mnemonics, the
15103 information is stored in inst.vectype. If the type specifier
15104 is given after register operand, the information is stored
15105 in inst.operands[].vectype.
15107 When there is only one type specifier, and all the register
15108 operands are the same type of hardware register, the type
15109 specifier applies to all register operands.
15111 If no type specifier is given, the shape is inferred from
15112 operand information.
15115 vadd.f16 s0, s1, s2: NS_HHH
15116 vabs.f16 s0, s1: NS_HH
15117 vmov.f16 s0, r1: NS_HR
15118 vmov.f16 r0, s1: NS_RH
15119 vcvt.f16 r0, s1: NS_RH
15120 vcvt.f16.s32 s2, s2, #29: NS_HFI
15121 vcvt.f16.s32 s2, s2: NS_HF
15124 if (!(inst
.operands
[j
].isreg
15125 && inst
.operands
[j
].isvec
15126 && inst
.operands
[j
].issingle
15127 && !inst
.operands
[j
].isquad
15128 && ((inst
.vectype
.elems
== 1
15129 && inst
.vectype
.el
[0].size
== 16)
15130 || (inst
.vectype
.elems
> 1
15131 && inst
.vectype
.el
[j
].size
== 16)
15132 || (inst
.vectype
.elems
== 0
15133 && inst
.operands
[j
].vectype
.type
!= NT_invtype
15134 && inst
.operands
[j
].vectype
.size
== 16))))
15139 if (!(inst
.operands
[j
].isreg
15140 && inst
.operands
[j
].isvec
15141 && inst
.operands
[j
].issingle
15142 && !inst
.operands
[j
].isquad
15143 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
15144 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
15145 || (inst
.vectype
.elems
== 0
15146 && (inst
.operands
[j
].vectype
.size
== 32
15147 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
15152 if (!(inst
.operands
[j
].isreg
15153 && inst
.operands
[j
].isvec
15154 && !inst
.operands
[j
].isquad
15155 && !inst
.operands
[j
].issingle
))
15160 if (!(inst
.operands
[j
].isreg
15161 && !inst
.operands
[j
].isvec
))
15166 if (!(inst
.operands
[j
].isreg
15167 && inst
.operands
[j
].isvec
15168 && inst
.operands
[j
].isquad
15169 && !inst
.operands
[j
].issingle
))
15174 if (!(!inst
.operands
[j
].isreg
15175 && !inst
.operands
[j
].isscalar
))
15180 if (!(!inst
.operands
[j
].isreg
15181 && inst
.operands
[j
].isscalar
))
15192 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
15193 /* We've matched all the entries in the shape table, and we don't
15194 have any left over operands which have not been matched. */
15200 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
15201 first_error (_("invalid instruction shape"));
15206 /* True if SHAPE is predominantly a quadword operation (most of the time, this
15207 means the Q bit should be set). */
15210 neon_quad (enum neon_shape shape
)
15212 return neon_shape_class
[shape
] == SC_QUAD
;
15216 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
15219 /* Allow modification to be made to types which are constrained to be
15220 based on the key element, based on bits set alongside N_EQK. */
15221 if ((typebits
& N_EQK
) != 0)
15223 if ((typebits
& N_HLF
) != 0)
15225 else if ((typebits
& N_DBL
) != 0)
15227 if ((typebits
& N_SGN
) != 0)
15228 *g_type
= NT_signed
;
15229 else if ((typebits
& N_UNS
) != 0)
15230 *g_type
= NT_unsigned
;
15231 else if ((typebits
& N_INT
) != 0)
15232 *g_type
= NT_integer
;
15233 else if ((typebits
& N_FLT
) != 0)
15234 *g_type
= NT_float
;
15235 else if ((typebits
& N_SIZ
) != 0)
15236 *g_type
= NT_untyped
;
15240 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
15241 operand type, i.e. the single type specified in a Neon instruction when it
15242 is the only one given. */
15244 static struct neon_type_el
15245 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
15247 struct neon_type_el dest
= *key
;
15249 gas_assert ((thisarg
& N_EQK
) != 0);
15251 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
15256 /* Convert Neon type and size into compact bitmask representation. */
15258 static enum neon_type_mask
15259 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
15266 case 8: return N_8
;
15267 case 16: return N_16
;
15268 case 32: return N_32
;
15269 case 64: return N_64
;
15277 case 8: return N_I8
;
15278 case 16: return N_I16
;
15279 case 32: return N_I32
;
15280 case 64: return N_I64
;
15288 case 16: return N_F16
;
15289 case 32: return N_F32
;
15290 case 64: return N_F64
;
15298 case 8: return N_P8
;
15299 case 16: return N_P16
;
15300 case 64: return N_P64
;
15308 case 8: return N_S8
;
15309 case 16: return N_S16
;
15310 case 32: return N_S32
;
15311 case 64: return N_S64
;
15319 case 8: return N_U8
;
15320 case 16: return N_U16
;
15321 case 32: return N_U32
;
15322 case 64: return N_U64
;
15328 if (size
== 16) return N_BF16
;
15337 /* Convert compact Neon bitmask type representation to a type and size. Only
15338 handles the case where a single bit is set in the mask. */
15341 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
15342 enum neon_type_mask mask
)
15344 if ((mask
& N_EQK
) != 0)
15347 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
15349 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
| N_BF16
))
15352 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
15354 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
15359 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
15361 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
15362 *type
= NT_unsigned
;
15363 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
15364 *type
= NT_integer
;
15365 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
15366 *type
= NT_untyped
;
15367 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
15369 else if ((mask
& (N_F_ALL
)) != 0)
15371 else if ((mask
& (N_BF16
)) != 0)
15379 /* Modify a bitmask of allowed types. This is only needed for type
15383 modify_types_allowed (unsigned allowed
, unsigned mods
)
15386 enum neon_el_type type
;
15392 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
15394 if (el_type_of_type_chk (&type
, &size
,
15395 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
15397 neon_modify_type_size (mods
, &type
, &size
);
15398 destmask
|= type_chk_of_el_type (type
, size
);
15405 /* Check type and return type classification.
15406 The manual states (paraphrase): If one datatype is given, it indicates the
15408 - the second operand, if there is one
15409 - the operand, if there is no second operand
15410 - the result, if there are no operands.
15411 This isn't quite good enough though, so we use a concept of a "key" datatype
15412 which is set on a per-instruction basis, which is the one which matters when
15413 only one data type is written.
15414 Note: this function has side-effects (e.g. filling in missing operands). All
15415 Neon instructions should call it before performing bit encoding. */
15417 static struct neon_type_el
15418 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
15421 unsigned i
, pass
, key_el
= 0;
15422 unsigned types
[NEON_MAX_TYPE_ELS
];
15423 enum neon_el_type k_type
= NT_invtype
;
15424 unsigned k_size
= -1u;
15425 struct neon_type_el badtype
= {NT_invtype
, -1};
15426 unsigned key_allowed
= 0;
15428 /* Optional registers in Neon instructions are always (not) in operand 1.
15429 Fill in the missing operand here, if it was omitted. */
15430 if (els
> 1 && !inst
.operands
[1].present
)
15431 inst
.operands
[1] = inst
.operands
[0];
15433 /* Suck up all the varargs. */
15435 for (i
= 0; i
< els
; i
++)
15437 unsigned thisarg
= va_arg (ap
, unsigned);
15438 if (thisarg
== N_IGNORE_TYPE
)
15443 types
[i
] = thisarg
;
15444 if ((thisarg
& N_KEY
) != 0)
15449 if (inst
.vectype
.elems
> 0)
15450 for (i
= 0; i
< els
; i
++)
15451 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
15453 first_error (_("types specified in both the mnemonic and operands"));
15457 /* Duplicate inst.vectype elements here as necessary.
15458 FIXME: No idea if this is exactly the same as the ARM assembler,
15459 particularly when an insn takes one register and one non-register
15461 if (inst
.vectype
.elems
== 1 && els
> 1)
15464 inst
.vectype
.elems
= els
;
15465 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
15466 for (j
= 0; j
< els
; j
++)
15468 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
15471 else if (inst
.vectype
.elems
== 0 && els
> 0)
15474 /* No types were given after the mnemonic, so look for types specified
15475 after each operand. We allow some flexibility here; as long as the
15476 "key" operand has a type, we can infer the others. */
15477 for (j
= 0; j
< els
; j
++)
15478 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
15479 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
15481 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
15483 for (j
= 0; j
< els
; j
++)
15484 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
15485 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
15490 first_error (_("operand types can't be inferred"));
15494 else if (inst
.vectype
.elems
!= els
)
15496 first_error (_("type specifier has the wrong number of parts"));
15500 for (pass
= 0; pass
< 2; pass
++)
15502 for (i
= 0; i
< els
; i
++)
15504 unsigned thisarg
= types
[i
];
15505 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
15506 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
15507 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
15508 unsigned g_size
= inst
.vectype
.el
[i
].size
;
15510 /* Decay more-specific signed & unsigned types to sign-insensitive
15511 integer types if sign-specific variants are unavailable. */
15512 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
15513 && (types_allowed
& N_SU_ALL
) == 0)
15514 g_type
= NT_integer
;
15516 /* If only untyped args are allowed, decay any more specific types to
15517 them. Some instructions only care about signs for some element
15518 sizes, so handle that properly. */
15519 if (((types_allowed
& N_UNT
) == 0)
15520 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
15521 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
15522 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
15523 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
15524 g_type
= NT_untyped
;
15528 if ((thisarg
& N_KEY
) != 0)
15532 key_allowed
= thisarg
& ~N_KEY
;
15534 /* Check architecture constraint on FP16 extension. */
15536 && k_type
== NT_float
15537 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
15539 inst
.error
= _(BAD_FP16
);
15546 if ((thisarg
& N_VFP
) != 0)
15548 enum neon_shape_el regshape
;
15549 unsigned regwidth
, match
;
15551 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
15554 first_error (_("invalid instruction shape"));
15557 regshape
= neon_shape_tab
[ns
].el
[i
];
15558 regwidth
= neon_shape_el_size
[regshape
];
15560 /* In VFP mode, operands must match register widths. If we
15561 have a key operand, use its width, else use the width of
15562 the current operand. */
15568 /* FP16 will use a single precision register. */
15569 if (regwidth
== 32 && match
== 16)
15571 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
15575 inst
.error
= _(BAD_FP16
);
15580 if (regwidth
!= match
)
15582 first_error (_("operand size must match register width"));
15587 if ((thisarg
& N_EQK
) == 0)
15589 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
15591 if ((given_type
& types_allowed
) == 0)
15593 first_error (BAD_SIMD_TYPE
);
15599 enum neon_el_type mod_k_type
= k_type
;
15600 unsigned mod_k_size
= k_size
;
15601 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
15602 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
15604 first_error (_("inconsistent types in Neon instruction"));
15612 return inst
.vectype
.el
[key_el
];
15615 /* Neon-style VFP instruction forwarding. */
15617 /* Thumb VFP instructions have 0xE in the condition field. */
15620 do_vfp_cond_or_thumb (void)
15625 inst
.instruction
|= 0xe0000000;
15627 inst
.instruction
|= inst
.cond
<< 28;
15630 /* Look up and encode a simple mnemonic, for use as a helper function for the
15631 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
15632 etc. It is assumed that operand parsing has already been done, and that the
15633 operands are in the form expected by the given opcode (this isn't necessarily
15634 the same as the form in which they were parsed, hence some massaging must
15635 take place before this function is called).
15636 Checks current arch version against that in the looked-up opcode. */
15639 do_vfp_nsyn_opcode (const char *opname
)
15641 const struct asm_opcode
*opcode
;
15643 opcode
= (const struct asm_opcode
*) str_hash_find (arm_ops_hsh
, opname
);
15648 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
15649 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
15656 inst
.instruction
= opcode
->tvalue
;
15657 opcode
->tencode ();
15661 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
15662 opcode
->aencode ();
15667 do_vfp_nsyn_add_sub (enum neon_shape rs
)
15669 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
15671 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15674 do_vfp_nsyn_opcode ("fadds");
15676 do_vfp_nsyn_opcode ("fsubs");
15678 /* ARMv8.2 fp16 instruction. */
15680 do_scalar_fp16_v82_encode ();
15685 do_vfp_nsyn_opcode ("faddd");
15687 do_vfp_nsyn_opcode ("fsubd");
15691 /* Check operand types to see if this is a VFP instruction, and if so call
15695 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
15697 enum neon_shape rs
;
15698 struct neon_type_el et
;
15703 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
15704 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
15708 rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
15709 et
= neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
15710 N_F_ALL
| N_KEY
| N_VFP
);
15717 if (et
.type
!= NT_invtype
)
15728 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
15730 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
15732 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15735 do_vfp_nsyn_opcode ("fmacs");
15737 do_vfp_nsyn_opcode ("fnmacs");
15739 /* ARMv8.2 fp16 instruction. */
15741 do_scalar_fp16_v82_encode ();
15746 do_vfp_nsyn_opcode ("fmacd");
15748 do_vfp_nsyn_opcode ("fnmacd");
15753 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
15755 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
15757 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15760 do_vfp_nsyn_opcode ("ffmas");
15762 do_vfp_nsyn_opcode ("ffnmas");
15764 /* ARMv8.2 fp16 instruction. */
15766 do_scalar_fp16_v82_encode ();
15771 do_vfp_nsyn_opcode ("ffmad");
15773 do_vfp_nsyn_opcode ("ffnmad");
15778 do_vfp_nsyn_mul (enum neon_shape rs
)
15780 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15782 do_vfp_nsyn_opcode ("fmuls");
15784 /* ARMv8.2 fp16 instruction. */
15786 do_scalar_fp16_v82_encode ();
15789 do_vfp_nsyn_opcode ("fmuld");
15793 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
15795 int is_neg
= (inst
.instruction
& 0x80) != 0;
15796 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_VFP
| N_KEY
);
15798 if (rs
== NS_FF
|| rs
== NS_HH
)
15801 do_vfp_nsyn_opcode ("fnegs");
15803 do_vfp_nsyn_opcode ("fabss");
15805 /* ARMv8.2 fp16 instruction. */
15807 do_scalar_fp16_v82_encode ();
15812 do_vfp_nsyn_opcode ("fnegd");
15814 do_vfp_nsyn_opcode ("fabsd");
15818 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
15819 insns belong to Neon, and are handled elsewhere. */
15822 do_vfp_nsyn_ldm_stm (int is_dbmode
)
15824 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
15828 do_vfp_nsyn_opcode ("fldmdbs");
15830 do_vfp_nsyn_opcode ("fldmias");
15835 do_vfp_nsyn_opcode ("fstmdbs");
15837 do_vfp_nsyn_opcode ("fstmias");
15842 do_vfp_nsyn_sqrt (void)
15844 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
15845 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
15847 if (rs
== NS_FF
|| rs
== NS_HH
)
15849 do_vfp_nsyn_opcode ("fsqrts");
15851 /* ARMv8.2 fp16 instruction. */
15853 do_scalar_fp16_v82_encode ();
15856 do_vfp_nsyn_opcode ("fsqrtd");
15860 do_vfp_nsyn_div (void)
15862 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
15863 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
15864 N_F_ALL
| N_KEY
| N_VFP
);
15866 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15868 do_vfp_nsyn_opcode ("fdivs");
15870 /* ARMv8.2 fp16 instruction. */
15872 do_scalar_fp16_v82_encode ();
15875 do_vfp_nsyn_opcode ("fdivd");
15879 do_vfp_nsyn_nmul (void)
15881 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
15882 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
15883 N_F_ALL
| N_KEY
| N_VFP
);
15885 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15887 NEON_ENCODE (SINGLE
, inst
);
15888 do_vfp_sp_dyadic ();
15890 /* ARMv8.2 fp16 instruction. */
15892 do_scalar_fp16_v82_encode ();
15896 NEON_ENCODE (DOUBLE
, inst
);
15897 do_vfp_dp_rd_rn_rm ();
15899 do_vfp_cond_or_thumb ();
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3), via the position of the set bit.  */

static int
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}
/* Field-extraction helpers for register numbers: low/high parts used by the
   split register fields in Neon/MVE encodings.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
#define LOW1(R) ((R) & 0x1)
#define HI4(R) (((R) >> 1) & 0xf)
15918 mve_get_vcmp_vpt_cond (struct neon_type_el et
)
15923 first_error (BAD_EL_TYPE
);
15926 switch (inst
.operands
[0].imm
)
15929 first_error (_("invalid condition"));
15951 /* only accept eq and ne. */
15952 if (inst
.operands
[0].imm
> 1)
15954 first_error (_("invalid condition"));
15957 return inst
.operands
[0].imm
;
15959 if (inst
.operands
[0].imm
== 0x2)
15961 else if (inst
.operands
[0].imm
== 0x8)
15965 first_error (_("invalid condition"));
15969 switch (inst
.operands
[0].imm
)
15972 first_error (_("invalid condition"));
15988 /* Should be unreachable. */
15992 /* For VCTP (create vector tail predicate) in MVE. */
15997 unsigned size
= 0x0;
15999 if (inst
.cond
> COND_ALWAYS
)
16000 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16002 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16004 /* This is a typical MVE instruction which has no type but have size 8, 16,
16005 32 and 64. For instructions with no type, inst.vectype.el[j].type is set
16006 to NT_untyped and size is updated in inst.vectype.el[j].size. */
16007 if ((inst
.operands
[0].present
) && (inst
.vectype
.el
[0].type
== NT_untyped
))
16008 dt
= inst
.vectype
.el
[0].size
;
16010 /* Setting this does not indicate an actual NEON instruction, but only
16011 indicates that the mnemonic accepts neon-style type suffixes. */
16025 first_error (_("Type is not allowed for this instruction"));
16027 inst
.instruction
|= size
<< 20;
16028 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
16034 /* We are dealing with a vector predicated block. */
16035 if (inst
.operands
[0].present
)
16037 enum neon_shape rs
= neon_select_shape (NS_IQQ
, NS_IQR
, NS_NULL
);
16038 struct neon_type_el et
16039 = neon_check_type (3, rs
, N_EQK
, N_KEY
| N_F_MVE
| N_I_MVE
| N_SU_32
,
16042 unsigned fcond
= mve_get_vcmp_vpt_cond (et
);
16044 constraint (inst
.operands
[1].reg
> 14, MVE_BAD_QREG
);
16046 if (et
.type
== NT_invtype
)
16049 if (et
.type
== NT_float
)
16051 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_fp_ext
),
16053 constraint (et
.size
!= 16 && et
.size
!= 32, BAD_EL_TYPE
);
16054 inst
.instruction
|= (et
.size
== 16) << 28;
16055 inst
.instruction
|= 0x3 << 20;
16059 constraint (et
.size
!= 8 && et
.size
!= 16 && et
.size
!= 32,
16061 inst
.instruction
|= 1 << 28;
16062 inst
.instruction
|= neon_logbits (et
.size
) << 20;
16065 if (inst
.operands
[2].isquad
)
16067 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16068 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16069 inst
.instruction
|= (fcond
& 0x2) >> 1;
16073 if (inst
.operands
[2].reg
== REG_SP
)
16074 as_tsktsk (MVE_BAD_SP
);
16075 inst
.instruction
|= 1 << 6;
16076 inst
.instruction
|= (fcond
& 0x2) << 4;
16077 inst
.instruction
|= inst
.operands
[2].reg
;
16079 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16080 inst
.instruction
|= (fcond
& 0x4) << 10;
16081 inst
.instruction
|= (fcond
& 0x1) << 7;
16084 set_pred_insn_type (VPT_INSN
);
16086 now_pred
.mask
= ((inst
.instruction
& 0x00400000) >> 19)
16087 | ((inst
.instruction
& 0xe000) >> 13);
16088 now_pred
.warn_deprecated
= false;
16089 now_pred
.type
= VECTOR_PRED
;
16096 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
), BAD_FPU
);
16097 if (!inst
.operands
[1].isreg
|| !inst
.operands
[1].isquad
)
16098 first_error (_(reg_expected_msgs
[REG_TYPE_MQ
]));
16099 if (!inst
.operands
[2].present
)
16100 first_error (_("MVE vector or ARM register expected"));
16101 constraint (inst
.operands
[1].reg
> 14, MVE_BAD_QREG
);
16103 /* Deal with 'else' conditional MVE's vcmp, it will be parsed as vcmpe. */
16104 if ((inst
.instruction
& 0xffffffff) == N_MNEM_vcmpe
16105 && inst
.operands
[1].isquad
)
16107 inst
.instruction
= N_MNEM_vcmp
;
16111 if (inst
.cond
> COND_ALWAYS
)
16112 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16114 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16116 enum neon_shape rs
= neon_select_shape (NS_IQQ
, NS_IQR
, NS_NULL
);
16117 struct neon_type_el et
16118 = neon_check_type (3, rs
, N_EQK
, N_KEY
| N_F_MVE
| N_I_MVE
| N_SU_32
,
16121 constraint (rs
== NS_IQR
&& inst
.operands
[2].reg
== REG_PC
16122 && !inst
.operands
[2].iszr
, BAD_PC
);
16124 unsigned fcond
= mve_get_vcmp_vpt_cond (et
);
16126 inst
.instruction
= 0xee010f00;
16127 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16128 inst
.instruction
|= (fcond
& 0x4) << 10;
16129 inst
.instruction
|= (fcond
& 0x1) << 7;
16130 if (et
.type
== NT_float
)
16132 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_fp_ext
),
16134 inst
.instruction
|= (et
.size
== 16) << 28;
16135 inst
.instruction
|= 0x3 << 20;
16139 inst
.instruction
|= 1 << 28;
16140 inst
.instruction
|= neon_logbits (et
.size
) << 20;
16142 if (inst
.operands
[2].isquad
)
16144 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16145 inst
.instruction
|= (fcond
& 0x2) >> 1;
16146 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16150 if (inst
.operands
[2].reg
== REG_SP
)
16151 as_tsktsk (MVE_BAD_SP
);
16152 inst
.instruction
|= 1 << 6;
16153 inst
.instruction
|= (fcond
& 0x2) << 4;
16154 inst
.instruction
|= inst
.operands
[2].reg
;
16162 do_mve_vmaxa_vmina (void)
16164 if (inst
.cond
> COND_ALWAYS
)
16165 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16167 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16169 enum neon_shape rs
= neon_select_shape (NS_QQ
, NS_NULL
);
16170 struct neon_type_el et
16171 = neon_check_type (2, rs
, N_EQK
, N_KEY
| N_S8
| N_S16
| N_S32
);
16173 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16174 inst
.instruction
|= neon_logbits (et
.size
) << 18;
16175 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16176 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16177 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16182 do_mve_vfmas (void)
16184 enum neon_shape rs
= neon_select_shape (NS_QQR
, NS_NULL
);
16185 struct neon_type_el et
16186 = neon_check_type (3, rs
, N_F_MVE
| N_KEY
, N_EQK
, N_EQK
);
16188 if (inst
.cond
> COND_ALWAYS
)
16189 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16191 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16193 if (inst
.operands
[2].reg
== REG_SP
)
16194 as_tsktsk (MVE_BAD_SP
);
16195 else if (inst
.operands
[2].reg
== REG_PC
)
16196 as_tsktsk (MVE_BAD_PC
);
16198 inst
.instruction
|= (et
.size
== 16) << 28;
16199 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16200 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16201 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16202 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16203 inst
.instruction
|= inst
.operands
[2].reg
;
16208 do_mve_viddup (void)
16210 if (inst
.cond
> COND_ALWAYS
)
16211 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16213 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16215 unsigned imm
= inst
.relocs
[0].exp
.X_add_number
;
16216 constraint (imm
!= 1 && imm
!= 2 && imm
!= 4 && imm
!= 8,
16217 _("immediate must be either 1, 2, 4 or 8"));
16219 enum neon_shape rs
;
16220 struct neon_type_el et
;
16222 if (inst
.instruction
== M_MNEM_vddup
|| inst
.instruction
== M_MNEM_vidup
)
16224 rs
= neon_select_shape (NS_QRI
, NS_NULL
);
16225 et
= neon_check_type (2, rs
, N_KEY
| N_U8
| N_U16
| N_U32
, N_EQK
);
16230 constraint ((inst
.operands
[2].reg
% 2) != 1, BAD_EVEN
);
16231 if (inst
.operands
[2].reg
== REG_SP
)
16232 as_tsktsk (MVE_BAD_SP
);
16233 else if (inst
.operands
[2].reg
== REG_PC
)
16234 first_error (BAD_PC
);
16236 rs
= neon_select_shape (NS_QRRI
, NS_NULL
);
16237 et
= neon_check_type (3, rs
, N_KEY
| N_U8
| N_U16
| N_U32
, N_EQK
, N_EQK
);
16238 Rm
= inst
.operands
[2].reg
>> 1;
16240 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16241 inst
.instruction
|= neon_logbits (et
.size
) << 20;
16242 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16243 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16244 inst
.instruction
|= (imm
> 2) << 7;
16245 inst
.instruction
|= Rm
<< 1;
16246 inst
.instruction
|= (imm
== 2 || imm
== 8);
16251 do_mve_vmlas (void)
16253 enum neon_shape rs
= neon_select_shape (NS_QQR
, NS_NULL
);
16254 struct neon_type_el et
16255 = neon_check_type (3, rs
, N_EQK
, N_EQK
, N_SU_MVE
| N_KEY
);
16257 if (inst
.operands
[2].reg
== REG_PC
)
16258 as_tsktsk (MVE_BAD_PC
);
16259 else if (inst
.operands
[2].reg
== REG_SP
)
16260 as_tsktsk (MVE_BAD_SP
);
16262 if (inst
.cond
> COND_ALWAYS
)
16263 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16265 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16267 inst
.instruction
|= (et
.type
== NT_unsigned
) << 28;
16268 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16269 inst
.instruction
|= neon_logbits (et
.size
) << 20;
16270 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16271 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16272 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16273 inst
.instruction
|= inst
.operands
[2].reg
;
16278 do_mve_vshll (void)
16280 struct neon_type_el et
16281 = neon_check_type (2, NS_QQI
, N_EQK
, N_S8
| N_U8
| N_S16
| N_U16
| N_KEY
);
16283 if (inst
.cond
> COND_ALWAYS
)
16284 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16286 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16288 int imm
= inst
.operands
[2].imm
;
16289 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16290 _("immediate value out of range"));
16292 if ((unsigned)imm
== et
.size
)
16294 inst
.instruction
|= neon_logbits (et
.size
) << 18;
16295 inst
.instruction
|= 0x110001;
16299 inst
.instruction
|= (et
.size
+ imm
) << 16;
16300 inst
.instruction
|= 0x800140;
16303 inst
.instruction
|= (et
.type
== NT_unsigned
) << 28;
16304 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16305 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16306 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16307 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16312 do_mve_vshlc (void)
16314 if (inst
.cond
> COND_ALWAYS
)
16315 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16317 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16319 if (inst
.operands
[1].reg
== REG_PC
)
16320 as_tsktsk (MVE_BAD_PC
);
16321 else if (inst
.operands
[1].reg
== REG_SP
)
16322 as_tsktsk (MVE_BAD_SP
);
16324 int imm
= inst
.operands
[2].imm
;
16325 constraint (imm
< 1 || imm
> 32, _("immediate value out of range"));
16327 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16328 inst
.instruction
|= (imm
& 0x1f) << 16;
16329 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16330 inst
.instruction
|= inst
.operands
[1].reg
;
16335 do_mve_vshrn (void)
16338 switch (inst
.instruction
)
16340 case M_MNEM_vshrnt
:
16341 case M_MNEM_vshrnb
:
16342 case M_MNEM_vrshrnt
:
16343 case M_MNEM_vrshrnb
:
16344 types
= N_I16
| N_I32
;
16346 case M_MNEM_vqshrnt
:
16347 case M_MNEM_vqshrnb
:
16348 case M_MNEM_vqrshrnt
:
16349 case M_MNEM_vqrshrnb
:
16350 types
= N_U16
| N_U32
| N_S16
| N_S32
;
16352 case M_MNEM_vqshrunt
:
16353 case M_MNEM_vqshrunb
:
16354 case M_MNEM_vqrshrunt
:
16355 case M_MNEM_vqrshrunb
:
16356 types
= N_S16
| N_S32
;
16362 struct neon_type_el et
= neon_check_type (2, NS_QQI
, N_EQK
, types
| N_KEY
);
16364 if (inst
.cond
> COND_ALWAYS
)
16365 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16367 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16369 unsigned Qd
= inst
.operands
[0].reg
;
16370 unsigned Qm
= inst
.operands
[1].reg
;
16371 unsigned imm
= inst
.operands
[2].imm
;
16372 constraint (imm
< 1 || ((unsigned) imm
) > (et
.size
/ 2),
16374 ? _("immediate operand expected in the range [1,8]")
16375 : _("immediate operand expected in the range [1,16]"));
16377 inst
.instruction
|= (et
.type
== NT_unsigned
) << 28;
16378 inst
.instruction
|= HI1 (Qd
) << 22;
16379 inst
.instruction
|= (et
.size
- imm
) << 16;
16380 inst
.instruction
|= LOW4 (Qd
) << 12;
16381 inst
.instruction
|= HI1 (Qm
) << 5;
16382 inst
.instruction
|= LOW4 (Qm
);
16387 do_mve_vqmovn (void)
16389 struct neon_type_el et
;
16390 if (inst
.instruction
== M_MNEM_vqmovnt
16391 || inst
.instruction
== M_MNEM_vqmovnb
)
16392 et
= neon_check_type (2, NS_QQ
, N_EQK
,
16393 N_U16
| N_U32
| N_S16
| N_S32
| N_KEY
);
16395 et
= neon_check_type (2, NS_QQ
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16397 if (inst
.cond
> COND_ALWAYS
)
16398 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16400 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16402 inst
.instruction
|= (et
.type
== NT_unsigned
) << 28;
16403 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16404 inst
.instruction
|= (et
.size
== 32) << 18;
16405 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16406 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16407 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16412 do_mve_vpsel (void)
16414 neon_select_shape (NS_QQQ
, NS_NULL
);
16416 if (inst
.cond
> COND_ALWAYS
)
16417 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16419 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16421 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16422 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16423 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16424 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16425 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16426 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16431 do_mve_vpnot (void)
16433 if (inst
.cond
> COND_ALWAYS
)
16434 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16436 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16440 do_mve_vmaxnma_vminnma (void)
16442 enum neon_shape rs
= neon_select_shape (NS_QQ
, NS_NULL
);
16443 struct neon_type_el et
16444 = neon_check_type (2, rs
, N_EQK
, N_F_MVE
| N_KEY
);
16446 if (inst
.cond
> COND_ALWAYS
)
16447 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16449 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16451 inst
.instruction
|= (et
.size
== 16) << 28;
16452 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16453 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16454 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16455 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16460 do_mve_vcmul (void)
16462 enum neon_shape rs
= neon_select_shape (NS_QQQI
, NS_NULL
);
16463 struct neon_type_el et
16464 = neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F_MVE
| N_KEY
);
16466 if (inst
.cond
> COND_ALWAYS
)
16467 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16469 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16471 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
16472 constraint (rot
!= 0 && rot
!= 90 && rot
!= 180 && rot
!= 270,
16473 _("immediate out of range"));
16475 if (et
.size
== 32 && (inst
.operands
[0].reg
== inst
.operands
[1].reg
16476 || inst
.operands
[0].reg
== inst
.operands
[2].reg
))
16477 as_tsktsk (BAD_MVE_SRCDEST
);
16479 inst
.instruction
|= (et
.size
== 32) << 28;
16480 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16481 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16482 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16483 inst
.instruction
|= (rot
> 90) << 12;
16484 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16485 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16486 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16487 inst
.instruction
|= (rot
== 90 || rot
== 270);
16491 /* To handle the Low Overhead Loop instructions
16492 in Armv8.1-M Mainline and MVE. */
16496 unsigned long insn
= inst
.instruction
;
16498 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
16500 if (insn
== T_MNEM_lctp
)
16503 set_pred_insn_type (MVE_OUTSIDE_PRED_INSN
);
16505 if (insn
== T_MNEM_wlstp
|| insn
== T_MNEM_dlstp
)
16507 struct neon_type_el et
16508 = neon_check_type (2, NS_RR
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16509 inst
.instruction
|= neon_logbits (et
.size
) << 20;
16516 constraint (!inst
.operands
[0].present
,
16518 /* fall through. */
16521 if (!inst
.operands
[0].present
)
16522 inst
.instruction
|= 1 << 21;
16524 v8_1_loop_reloc (true);
16529 v8_1_loop_reloc (false);
16530 /* fall through. */
16533 constraint (inst
.operands
[1].isreg
!= 1, BAD_ARGS
);
16535 if (insn
== T_MNEM_wlstp
|| insn
== T_MNEM_dlstp
)
16536 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
16537 else if (inst
.operands
[1].reg
== REG_PC
)
16538 as_tsktsk (MVE_BAD_PC
);
16539 if (inst
.operands
[1].reg
== REG_SP
)
16540 as_tsktsk (MVE_BAD_SP
);
16542 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
16552 do_vfp_nsyn_cmp (void)
16554 enum neon_shape rs
;
16555 if (!inst
.operands
[0].isreg
)
16562 constraint (inst
.operands
[2].present
, BAD_SYNTAX
);
16563 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1xd
),
16567 if (inst
.operands
[1].isreg
)
16569 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
16570 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
16572 if (rs
== NS_FF
|| rs
== NS_HH
)
16574 NEON_ENCODE (SINGLE
, inst
);
16575 do_vfp_sp_monadic ();
16579 NEON_ENCODE (DOUBLE
, inst
);
16580 do_vfp_dp_rd_rm ();
16585 rs
= neon_select_shape (NS_HI
, NS_FI
, NS_DI
, NS_NULL
);
16586 neon_check_type (2, rs
, N_F_ALL
| N_KEY
| N_VFP
, N_EQK
);
16588 switch (inst
.instruction
& 0x0fffffff)
16591 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
16594 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
16600 if (rs
== NS_FI
|| rs
== NS_HI
)
16602 NEON_ENCODE (SINGLE
, inst
);
16603 do_vfp_sp_compare_z ();
16607 NEON_ENCODE (DOUBLE
, inst
);
16611 do_vfp_cond_or_thumb ();
16613 /* ARMv8.2 fp16 instruction. */
16614 if (rs
== NS_HI
|| rs
== NS_HH
)
16615 do_scalar_fp16_v82_encode ();
16619 nsyn_insert_sp (void)
16621 inst
.operands
[1] = inst
.operands
[0];
16622 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
16623 inst
.operands
[0].reg
= REG_SP
;
16624 inst
.operands
[0].isreg
= 1;
16625 inst
.operands
[0].writeback
= 1;
16626 inst
.operands
[0].present
= 1;
16629 /* Fix up Neon data-processing instructions, ORing in the correct bits for
16630 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
16633 neon_dp_fixup (struct arm_it
* insn
)
16635 unsigned int i
= insn
->instruction
;
16640 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
16651 insn
->instruction
= i
;
16655 mve_encode_qqr (int size
, int U
, int fp
)
16657 if (inst
.operands
[2].reg
== REG_SP
)
16658 as_tsktsk (MVE_BAD_SP
);
16659 else if (inst
.operands
[2].reg
== REG_PC
)
16660 as_tsktsk (MVE_BAD_PC
);
16665 if (((unsigned)inst
.instruction
) == 0xd00)
16666 inst
.instruction
= 0xee300f40;
16668 else if (((unsigned)inst
.instruction
) == 0x200d00)
16669 inst
.instruction
= 0xee301f40;
16671 else if (((unsigned)inst
.instruction
) == 0x1000d10)
16672 inst
.instruction
= 0xee310e60;
16674 /* Setting size which is 1 for F16 and 0 for F32. */
16675 inst
.instruction
|= (size
== 16) << 28;
16680 if (((unsigned)inst
.instruction
) == 0x800)
16681 inst
.instruction
= 0xee010f40;
16683 else if (((unsigned)inst
.instruction
) == 0x1000800)
16684 inst
.instruction
= 0xee011f40;
16686 else if (((unsigned)inst
.instruction
) == 0)
16687 inst
.instruction
= 0xee000f40;
16689 else if (((unsigned)inst
.instruction
) == 0x200)
16690 inst
.instruction
= 0xee001f40;
16692 else if (((unsigned)inst
.instruction
) == 0x900)
16693 inst
.instruction
= 0xee010e40;
16695 else if (((unsigned)inst
.instruction
) == 0x910)
16696 inst
.instruction
= 0xee011e60;
16698 else if (((unsigned)inst
.instruction
) == 0x10)
16699 inst
.instruction
= 0xee000f60;
16701 else if (((unsigned)inst
.instruction
) == 0x210)
16702 inst
.instruction
= 0xee001f60;
16704 else if (((unsigned)inst
.instruction
) == 0x3000b10)
16705 inst
.instruction
= 0xee000e40;
16707 else if (((unsigned)inst
.instruction
) == 0x0000b00)
16708 inst
.instruction
= 0xee010e60;
16710 else if (((unsigned)inst
.instruction
) == 0x1000b00)
16711 inst
.instruction
= 0xfe010e60;
16714 inst
.instruction
|= U
<< 28;
16716 /* Setting bits for size. */
16717 inst
.instruction
|= neon_logbits (size
) << 20;
16719 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16720 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16721 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16722 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16723 inst
.instruction
|= inst
.operands
[2].reg
;
16728 mve_encode_rqq (unsigned bit28
, unsigned size
)
16730 inst
.instruction
|= bit28
<< 28;
16731 inst
.instruction
|= neon_logbits (size
) << 20;
16732 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16733 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16734 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16735 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16736 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16741 mve_encode_qqq (int ubit
, int size
)
16744 inst
.instruction
|= (ubit
!= 0) << 28;
16745 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16746 inst
.instruction
|= neon_logbits (size
) << 20;
16747 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16748 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16749 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16750 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16751 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16757 mve_encode_rq (unsigned bit28
, unsigned size
)
16759 inst
.instruction
|= bit28
<< 28;
16760 inst
.instruction
|= neon_logbits (size
) << 18;
16761 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16762 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16767 mve_encode_rrqq (unsigned U
, unsigned size
)
16769 constraint (inst
.operands
[3].reg
> 14, MVE_BAD_QREG
);
16771 inst
.instruction
|= U
<< 28;
16772 inst
.instruction
|= (inst
.operands
[1].reg
>> 1) << 20;
16773 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
) << 16;
16774 inst
.instruction
|= (size
== 32) << 16;
16775 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
16776 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 7;
16777 inst
.instruction
|= inst
.operands
[3].reg
;
16781 /* Helper function for neon_three_same handling the operands. */
16783 neon_three_args (int isquad
)
16785 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16786 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16787 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16788 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16789 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16790 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16791 inst
.instruction
|= (isquad
!= 0) << 6;
16795 /* Encode insns with bit pattern:
16797 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16798 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
16800 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
16801 different meaning for some instruction. */
16804 neon_three_same (int isquad
, int ubit
, int size
)
16806 neon_three_args (isquad
);
16807 inst
.instruction
|= (ubit
!= 0) << 24;
16809 inst
.instruction
|= neon_logbits (size
) << 20;
16811 neon_dp_fixup (&inst
);
16814 /* Encode instructions of the form:
16816 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
16817 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
16819 Don't write size if SIZE == -1. */
16822 neon_two_same (int qbit
, int ubit
, int size
)
16824 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16825 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16826 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16827 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16828 inst
.instruction
|= (qbit
!= 0) << 6;
16829 inst
.instruction
|= (ubit
!= 0) << 24;
16832 inst
.instruction
|= neon_logbits (size
) << 18;
16834 neon_dp_fixup (&inst
);
/* Check flags accepted by vfp_or_neon_is_neon / check_simd_pred_availability.
   NOTE(review): NEON_CHECK_CC = 1 was dropped by the extraction; restored
   per upstream — verify.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2,
  NEON_CHECK_ARCH8 = 4
};
16844 /* Call this function if an instruction which may have belonged to the VFP or
16845 Neon instruction sets, but turned out to be a Neon instruction (due to the
16846 operand types involved, etc.). We have to check and/or fix-up a couple of
16849 - Make sure the user hasn't attempted to make a Neon instruction
16851 - Alter the value in the condition code field if necessary.
16852 - Make sure that the arch supports Neon instructions.
16854 Which of these operations take place depends on bits from enum
16855 vfp_or_neon_is_neon_bits.
16857 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
16858 current instruction's condition is COND_ALWAYS, the condition field is
16859 changed to inst.uncond_value. This is necessary because instructions shared
16860 between VFP and Neon may be conditional for the VFP variants only, and the
16861 unconditional Neon version must have, e.g., 0xF in the condition field. */
16864 vfp_or_neon_is_neon (unsigned check
)
16866 /* Conditions are always legal in Thumb mode (IT blocks). */
16867 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
16869 if (inst
.cond
!= COND_ALWAYS
)
16871 first_error (_(BAD_COND
));
16874 if (inst
.uncond_value
!= -1u)
16875 inst
.instruction
|= inst
.uncond_value
<< 28;
16879 if (((check
& NEON_CHECK_ARCH
) && !mark_feature_used (&fpu_neon_ext_v1
))
16880 || ((check
& NEON_CHECK_ARCH8
)
16881 && !mark_feature_used (&fpu_neon_ext_armv8
)))
16883 first_error (_(BAD_FPU
));
16891 /* Return TRUE if the SIMD instruction is available for the current
16892 cpu_variant. FP is set to TRUE if this is a SIMD floating-point
16893 instruction. CHECK contains th. CHECK contains the set of bits to pass to
16894 vfp_or_neon_is_neon for the NEON specific checks. */
16897 check_simd_pred_availability (int fp
, unsigned check
)
16899 if (inst
.cond
> COND_ALWAYS
)
16901 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
16903 inst
.error
= BAD_FPU
;
16906 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16908 else if (inst
.cond
< COND_ALWAYS
)
16910 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
16911 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16912 else if (vfp_or_neon_is_neon (check
) == FAIL
)
16917 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fp
? mve_fp_ext
: mve_ext
)
16918 && vfp_or_neon_is_neon (check
) == FAIL
)
16921 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
16922 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16927 /* Neon instruction encoders, in approximate order of appearance. */
16930 do_neon_dyadic_i_su (void)
16932 if (!check_simd_pred_availability (false, NEON_CHECK_ARCH
| NEON_CHECK_CC
))
16935 enum neon_shape rs
;
16936 struct neon_type_el et
;
16937 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
16938 rs
= neon_select_shape (NS_QQQ
, NS_QQR
, NS_NULL
);
16940 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16942 et
= neon_check_type (3, rs
, N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
16946 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
16948 mve_encode_qqr (et
.size
, et
.type
== NT_unsigned
, 0);
16952 do_neon_dyadic_i64_su (void)
16954 if (!check_simd_pred_availability (false, NEON_CHECK_CC
| NEON_CHECK_ARCH
))
16956 enum neon_shape rs
;
16957 struct neon_type_el et
;
16958 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
16960 rs
= neon_select_shape (NS_QQR
, NS_QQQ
, NS_NULL
);
16961 et
= neon_check_type (3, rs
, N_EQK
, N_EQK
, N_SU_MVE
| N_KEY
);
16965 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16966 et
= neon_check_type (3, rs
, N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
16969 mve_encode_qqr (et
.size
, et
.type
== NT_unsigned
, 0);
16971 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
16975 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
16978 unsigned size
= et
.size
>> 3;
16979 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16980 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16981 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16982 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16983 inst
.instruction
|= (isquad
!= 0) << 6;
16984 inst
.instruction
|= immbits
<< 16;
16985 inst
.instruction
|= (size
>> 3) << 7;
16986 inst
.instruction
|= (size
& 0x7) << 19;
16988 inst
.instruction
|= (uval
!= 0) << 24;
16990 neon_dp_fixup (&inst
);
16996 if (!check_simd_pred_availability (false, NEON_CHECK_ARCH
| NEON_CHECK_CC
))
16999 if (!inst
.operands
[2].isreg
)
17001 enum neon_shape rs
;
17002 struct neon_type_el et
;
17003 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
17005 rs
= neon_select_shape (NS_QQI
, NS_NULL
);
17006 et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_MVE
);
17010 rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
17011 et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
17013 int imm
= inst
.operands
[2].imm
;
17015 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
17016 _("immediate out of range for shift"));
17017 NEON_ENCODE (IMMED
, inst
);
17018 neon_imm_shift (false, 0, neon_quad (rs
), et
, imm
);
17022 enum neon_shape rs
;
17023 struct neon_type_el et
;
17024 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
17026 rs
= neon_select_shape (NS_QQQ
, NS_QQR
, NS_NULL
);
17027 et
= neon_check_type (3, rs
, N_EQK
, N_SU_MVE
| N_KEY
, N_EQK
| N_EQK
);
17031 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
17032 et
= neon_check_type (3, rs
, N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
17038 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
17039 _("invalid instruction shape"));
17040 if (inst
.operands
[2].reg
== REG_SP
)
17041 as_tsktsk (MVE_BAD_SP
);
17042 else if (inst
.operands
[2].reg
== REG_PC
)
17043 as_tsktsk (MVE_BAD_PC
);
17045 inst
.instruction
= 0xee311e60;
17046 inst
.instruction
|= (et
.type
== NT_unsigned
) << 28;
17047 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17048 inst
.instruction
|= neon_logbits (et
.size
) << 18;
17049 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17050 inst
.instruction
|= inst
.operands
[2].reg
;
17057 /* VSHL/VQSHL 3-register variants have syntax such as:
17059 whereas other 3-register operations encoded by neon_three_same have
17062 (i.e. with Dn & Dm reversed). Swap operands[1].reg and
17063 operands[2].reg here. */
17064 tmp
= inst
.operands
[2].reg
;
17065 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
17066 inst
.operands
[1].reg
= tmp
;
17067 NEON_ENCODE (INTEGER
, inst
);
17068 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
17074 do_neon_qshl (void)
17076 if (!check_simd_pred_availability (false, NEON_CHECK_ARCH
| NEON_CHECK_CC
))
17079 if (!inst
.operands
[2].isreg
)
17081 enum neon_shape rs
;
17082 struct neon_type_el et
;
17083 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
17085 rs
= neon_select_shape (NS_QQI
, NS_NULL
);
17086 et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_SU_MVE
);
17090 rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
17091 et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
17093 int imm
= inst
.operands
[2].imm
;
17095 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
17096 _("immediate out of range for shift"));
17097 NEON_ENCODE (IMMED
, inst
);
17098 neon_imm_shift (true, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
17102 enum neon_shape rs
;
17103 struct neon_type_el et
;
17105 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
17107 rs
= neon_select_shape (NS_QQQ
, NS_QQR
, NS_NULL
);
17108 et
= neon_check_type (3, rs
, N_EQK
, N_SU_MVE
| N_KEY
, N_EQK
| N_EQK
);
17112 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
17113 et
= neon_check_type (3, rs
, N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
17118 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
17119 _("invalid instruction shape"));
17120 if (inst
.operands
[2].reg
== REG_SP
)
17121 as_tsktsk (MVE_BAD_SP
);
17122 else if (inst
.operands
[2].reg
== REG_PC
)
17123 as_tsktsk (MVE_BAD_PC
);
17125 inst
.instruction
= 0xee311ee0;
17126 inst
.instruction
|= (et
.type
== NT_unsigned
) << 28;
17127 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17128 inst
.instruction
|= neon_logbits (et
.size
) << 18;
17129 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17130 inst
.instruction
|= inst
.operands
[2].reg
;
17137 /* See note in do_neon_shl. */
17138 tmp
= inst
.operands
[2].reg
;
17139 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
17140 inst
.operands
[1].reg
= tmp
;
17141 NEON_ENCODE (INTEGER
, inst
);
17142 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
17148 do_neon_rshl (void)
17150 if (!check_simd_pred_availability (false, NEON_CHECK_ARCH
| NEON_CHECK_CC
))
17153 enum neon_shape rs
;
17154 struct neon_type_el et
;
17155 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
17157 rs
= neon_select_shape (NS_QQR
, NS_QQQ
, NS_NULL
);
17158 et
= neon_check_type (3, rs
, N_EQK
, N_EQK
, N_SU_MVE
| N_KEY
);
17162 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
17163 et
= neon_check_type (3, rs
, N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
17170 if (inst
.operands
[2].reg
== REG_PC
)
17171 as_tsktsk (MVE_BAD_PC
);
17172 else if (inst
.operands
[2].reg
== REG_SP
)
17173 as_tsktsk (MVE_BAD_SP
);
17175 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
17176 _("invalid instruction shape"));
17178 if (inst
.instruction
== 0x0000510)
17179 /* We are dealing with vqrshl. */
17180 inst
.instruction
= 0xee331ee0;
17182 /* We are dealing with vrshl. */
17183 inst
.instruction
= 0xee331e60;
17185 inst
.instruction
|= (et
.type
== NT_unsigned
) << 28;
17186 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17187 inst
.instruction
|= neon_logbits (et
.size
) << 18;
17188 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17189 inst
.instruction
|= inst
.operands
[2].reg
;
17194 tmp
= inst
.operands
[2].reg
;
17195 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
17196 inst
.operands
[1].reg
= tmp
;
17197 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
17202 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
17204 /* Handle .I8 pseudo-instructions. */
17207 /* Unfortunately, this will make everything apart from zero out-of-range.
17208 FIXME is this the intended semantics? There doesn't seem much point in
17209 accepting .I8 if so. */
17210 immediate
|= immediate
<< 8;
17216 if (immediate
== (immediate
& 0x000000ff))
17218 *immbits
= immediate
;
17221 else if (immediate
== (immediate
& 0x0000ff00))
17223 *immbits
= immediate
>> 8;
17226 else if (immediate
== (immediate
& 0x00ff0000))
17228 *immbits
= immediate
>> 16;
17231 else if (immediate
== (immediate
& 0xff000000))
17233 *immbits
= immediate
>> 24;
17236 if ((immediate
& 0xffff) != (immediate
>> 16))
17237 goto bad_immediate
;
17238 immediate
&= 0xffff;
17241 if (immediate
== (immediate
& 0x000000ff))
17243 *immbits
= immediate
;
17246 else if (immediate
== (immediate
& 0x0000ff00))
17248 *immbits
= immediate
>> 8;
17253 first_error (_("immediate value out of range"));
17258 do_neon_logic (void)
17260 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
17262 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
17264 && !check_simd_pred_availability (false,
17265 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
17267 else if (rs
!= NS_QQQ
17268 && !ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
17269 first_error (BAD_FPU
);
17271 neon_check_type (3, rs
, N_IGNORE_TYPE
);
17272 /* U bit and size field were set as part of the bitmask. */
17273 NEON_ENCODE (INTEGER
, inst
);
17274 neon_three_same (neon_quad (rs
), 0, -1);
17278 const int three_ops_form
= (inst
.operands
[2].present
17279 && !inst
.operands
[2].isreg
);
17280 const int immoperand
= (three_ops_form
? 2 : 1);
17281 enum neon_shape rs
= (three_ops_form
17282 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
17283 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
17284 /* Because neon_select_shape makes the second operand a copy of the first
17285 if the second operand is not present. */
17287 && !check_simd_pred_availability (false,
17288 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
17290 else if (rs
!= NS_QQI
17291 && !ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
17292 first_error (BAD_FPU
);
17294 struct neon_type_el et
;
17295 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
17296 et
= neon_check_type (2, rs
, N_I32
| N_I16
| N_KEY
, N_EQK
);
17298 et
= neon_check_type (2, rs
, N_I8
| N_I16
| N_I32
| N_I64
| N_F32
17301 if (et
.type
== NT_invtype
)
17303 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
17308 if (three_ops_form
)
17309 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
17310 _("first and second operands shall be the same register"));
17312 NEON_ENCODE (IMMED
, inst
);
17314 immbits
= inst
.operands
[immoperand
].imm
;
17317 /* .i64 is a pseudo-op, so the immediate must be a repeating
17319 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
17320 inst
.operands
[immoperand
].reg
: 0))
17322 /* Set immbits to an invalid constant. */
17323 immbits
= 0xdeadbeef;
17330 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
17334 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
17338 /* Pseudo-instruction for VBIC. */
17339 neon_invert_size (&immbits
, 0, et
.size
);
17340 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
17344 /* Pseudo-instruction for VORR. */
17345 neon_invert_size (&immbits
, 0, et
.size
);
17346 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
17356 inst
.instruction
|= neon_quad (rs
) << 6;
17357 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17358 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17359 inst
.instruction
|= cmode
<< 8;
17360 neon_write_immbits (immbits
);
17362 neon_dp_fixup (&inst
);
17367 do_neon_bitfield (void)
17369 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
17370 neon_check_type (3, rs
, N_IGNORE_TYPE
);
17371 neon_three_same (neon_quad (rs
), 0, -1);
17375 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
17378 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
17379 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
17381 if (et
.type
== NT_float
)
17383 NEON_ENCODE (FLOAT
, inst
);
17385 mve_encode_qqr (et
.size
, 0, 1);
17387 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
17391 NEON_ENCODE (INTEGER
, inst
);
17393 mve_encode_qqr (et
.size
, et
.type
== ubit_meaning
, 0);
17395 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
17401 do_neon_dyadic_if_su_d (void)
17403 /* This version only allow D registers, but that constraint is enforced during
17404 operand parsing so we don't need to do anything extra here. */
17405 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
17409 do_neon_dyadic_if_i_d (void)
17411 /* The "untyped" case can't happen. Do this to stop the "U" bit being
17412 affected if we specify unsigned args. */
17413 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
17417 do_mve_vstr_vldr_QI (int size
, int elsize
, int load
)
17419 constraint (size
< 32, BAD_ADDR_MODE
);
17420 constraint (size
!= elsize
, BAD_EL_TYPE
);
17421 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
17422 constraint (!inst
.operands
[1].preind
, BAD_ADDR_MODE
);
17423 constraint (load
&& inst
.operands
[0].reg
== inst
.operands
[1].reg
,
17424 _("destination register and offset register may not be the"
17427 int imm
= inst
.relocs
[0].exp
.X_add_number
;
17434 constraint ((imm
% (size
/ 8) != 0)
17435 || imm
> (0x7f << neon_logbits (size
)),
17436 (size
== 32) ? _("immediate must be a multiple of 4 in the"
17437 " range of +/-[0,508]")
17438 : _("immediate must be a multiple of 8 in the"
17439 " range of +/-[0,1016]"));
17440 inst
.instruction
|= 0x11 << 24;
17441 inst
.instruction
|= add
<< 23;
17442 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17443 inst
.instruction
|= inst
.operands
[1].writeback
<< 21;
17444 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17445 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17446 inst
.instruction
|= 1 << 12;
17447 inst
.instruction
|= (size
== 64) << 8;
17448 inst
.instruction
&= 0xffffff00;
17449 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17450 inst
.instruction
|= imm
>> neon_logbits (size
);
17454 do_mve_vstr_vldr_RQ (int size
, int elsize
, int load
)
17456 unsigned os
= inst
.operands
[1].imm
>> 5;
17457 unsigned type
= inst
.vectype
.el
[0].type
;
17458 constraint (os
!= 0 && size
== 8,
17459 _("can not shift offsets when accessing less than half-word"));
17460 constraint (os
&& os
!= neon_logbits (size
),
17461 _("shift immediate must be 1, 2 or 3 for half-word, word"
17462 " or double-word accesses respectively"));
17463 if (inst
.operands
[1].reg
== REG_PC
)
17464 as_tsktsk (MVE_BAD_PC
);
17469 constraint (elsize
>= 64, BAD_EL_TYPE
);
17472 constraint (elsize
< 16 || elsize
>= 64, BAD_EL_TYPE
);
17476 constraint (elsize
!= size
, BAD_EL_TYPE
);
17481 constraint (inst
.operands
[1].writeback
|| !inst
.operands
[1].preind
,
17485 constraint (inst
.operands
[0].reg
== (inst
.operands
[1].imm
& 0x1f),
17486 _("destination register and offset register may not be"
17488 constraint (size
== elsize
&& type
== NT_signed
, BAD_EL_TYPE
);
17489 constraint (size
!= elsize
&& type
!= NT_unsigned
&& type
!= NT_signed
,
17491 inst
.instruction
|= ((size
== elsize
) || (type
== NT_unsigned
)) << 28;
17495 constraint (type
!= NT_untyped
, BAD_EL_TYPE
);
17498 inst
.instruction
|= 1 << 23;
17499 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17500 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17501 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17502 inst
.instruction
|= neon_logbits (elsize
) << 7;
17503 inst
.instruction
|= HI1 (inst
.operands
[1].imm
) << 5;
17504 inst
.instruction
|= LOW4 (inst
.operands
[1].imm
);
17505 inst
.instruction
|= !!os
;
17509 do_mve_vstr_vldr_RI (int size
, int elsize
, int load
)
17511 enum neon_el_type type
= inst
.vectype
.el
[0].type
;
17513 constraint (size
>= 64, BAD_ADDR_MODE
);
17517 constraint (elsize
< 16 || elsize
>= 64, BAD_EL_TYPE
);
17520 constraint (elsize
!= size
, BAD_EL_TYPE
);
17527 constraint (elsize
!= size
&& type
!= NT_unsigned
17528 && type
!= NT_signed
, BAD_EL_TYPE
);
17532 constraint (elsize
!= size
&& type
!= NT_untyped
, BAD_EL_TYPE
);
17535 int imm
= inst
.relocs
[0].exp
.X_add_number
;
17543 if ((imm
% (size
/ 8) != 0) || imm
> (0x7f << neon_logbits (size
)))
17548 constraint (1, _("immediate must be in the range of +/-[0,127]"));
17551 constraint (1, _("immediate must be a multiple of 2 in the"
17552 " range of +/-[0,254]"));
17555 constraint (1, _("immediate must be a multiple of 4 in the"
17556 " range of +/-[0,508]"));
17561 if (size
!= elsize
)
17563 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
17564 constraint (inst
.operands
[0].reg
> 14,
17565 _("MVE vector register in the range [Q0..Q7] expected"));
17566 inst
.instruction
|= (load
&& type
== NT_unsigned
) << 28;
17567 inst
.instruction
|= (size
== 16) << 19;
17568 inst
.instruction
|= neon_logbits (elsize
) << 7;
17572 if (inst
.operands
[1].reg
== REG_PC
)
17573 as_tsktsk (MVE_BAD_PC
);
17574 else if (inst
.operands
[1].reg
== REG_SP
&& inst
.operands
[1].writeback
)
17575 as_tsktsk (MVE_BAD_SP
);
17576 inst
.instruction
|= 1 << 12;
17577 inst
.instruction
|= neon_logbits (size
) << 7;
17579 inst
.instruction
|= inst
.operands
[1].preind
<< 24;
17580 inst
.instruction
|= add
<< 23;
17581 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17582 inst
.instruction
|= inst
.operands
[1].writeback
<< 21;
17583 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17584 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17585 inst
.instruction
&= 0xffffff80;
17586 inst
.instruction
|= imm
>> neon_logbits (size
);
17591 do_mve_vstr_vldr (void)
17596 if (inst
.cond
> COND_ALWAYS
)
17597 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
17599 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
17601 switch (inst
.instruction
)
17608 /* fall through. */
17614 /* fall through. */
17620 /* fall through. */
17626 /* fall through. */
17631 unsigned elsize
= inst
.vectype
.el
[0].size
;
17633 if (inst
.operands
[1].isquad
)
17635 /* We are dealing with [Q, imm]{!} cases. */
17636 do_mve_vstr_vldr_QI (size
, elsize
, load
);
17640 if (inst
.operands
[1].immisreg
== 2)
17642 /* We are dealing with [R, Q, {UXTW #os}] cases. */
17643 do_mve_vstr_vldr_RQ (size
, elsize
, load
);
17645 else if (!inst
.operands
[1].immisreg
)
17647 /* We are dealing with [R, Imm]{!}/[R], Imm cases. */
17648 do_mve_vstr_vldr_RI (size
, elsize
, load
);
17651 constraint (1, BAD_ADDR_MODE
);
17658 do_mve_vst_vld (void)
17660 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
17663 constraint (!inst
.operands
[1].preind
|| inst
.relocs
[0].exp
.X_add_symbol
!= 0
17664 || inst
.relocs
[0].exp
.X_add_number
!= 0
17665 || inst
.operands
[1].immisreg
!= 0,
17667 constraint (inst
.vectype
.el
[0].size
> 32, BAD_EL_TYPE
);
17668 if (inst
.operands
[1].reg
== REG_PC
)
17669 as_tsktsk (MVE_BAD_PC
);
17670 else if (inst
.operands
[1].reg
== REG_SP
&& inst
.operands
[1].writeback
)
17671 as_tsktsk (MVE_BAD_SP
);
17674 /* These instructions are one of the "exceptions" mentioned in
17675 handle_pred_state. They are MVE instructions that are not VPT compatible
17676 and do not accept a VPT code, thus appending such a code is a syntax
17678 if (inst
.cond
> COND_ALWAYS
)
17679 first_error (BAD_SYNTAX
);
17680 /* If we append a scalar condition code we can set this to
17681 MVE_OUTSIDE_PRED_INSN as it will also lead to a syntax error. */
17682 else if (inst
.cond
< COND_ALWAYS
)
17683 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
17685 inst
.pred_insn_type
= MVE_UNPREDICABLE_INSN
;
17687 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17688 inst
.instruction
|= inst
.operands
[1].writeback
<< 21;
17689 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17690 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17691 inst
.instruction
|= neon_logbits (inst
.vectype
.el
[0].size
) << 7;
17696 do_mve_vaddlv (void)
17698 enum neon_shape rs
= neon_select_shape (NS_RRQ
, NS_NULL
);
17699 struct neon_type_el et
17700 = neon_check_type (3, rs
, N_EQK
, N_EQK
, N_S32
| N_U32
| N_KEY
);
17702 if (et
.type
== NT_invtype
)
17703 first_error (BAD_EL_TYPE
);
17705 if (inst
.cond
> COND_ALWAYS
)
17706 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
17708 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
17710 constraint (inst
.operands
[1].reg
> 14, MVE_BAD_QREG
);
17712 inst
.instruction
|= (et
.type
== NT_unsigned
) << 28;
17713 inst
.instruction
|= inst
.operands
[1].reg
<< 19;
17714 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
17715 inst
.instruction
|= inst
.operands
[2].reg
;
17720 do_neon_dyadic_if_su (void)
17722 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
17723 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
17726 constraint ((inst
.instruction
== ((unsigned) N_MNEM_vmax
)
17727 || inst
.instruction
== ((unsigned) N_MNEM_vmin
))
17728 && et
.type
== NT_float
17729 && !ARM_CPU_HAS_FEATURE (cpu_variant
,fpu_neon_ext_v1
), BAD_FPU
);
17731 if (!check_simd_pred_availability (et
.type
== NT_float
,
17732 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
17735 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
17739 do_neon_addsub_if_i (void)
17741 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1xd
)
17742 && try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
17745 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
17746 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
,
17747 N_EQK
, N_IF_32
| N_I64
| N_KEY
);
17749 constraint (rs
== NS_QQR
&& et
.size
== 64, BAD_FPU
);
17750 /* If we are parsing Q registers and the element types match MVE, which NEON
17751 also supports, then we must check whether this is an instruction that can
17752 be used by both MVE/NEON. This distinction can be made based on whether
17753 they are predicated or not. */
17754 if ((rs
== NS_QQQ
|| rs
== NS_QQR
) && et
.size
!= 64)
17756 if (!check_simd_pred_availability (et
.type
== NT_float
,
17757 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
17762 /* If they are either in a D register or are using an unsupported. */
17764 && vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17768 /* The "untyped" case can't happen. Do this to stop the "U" bit being
17769 affected if we specify unsigned args. */
17770 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
17773 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
17775 V<op> A,B (A is operand 0, B is operand 2)
17780 so handle that case specially. */
17783 neon_exchange_operands (void)
17785 if (inst
.operands
[1].present
)
17787 void *scratch
= xmalloc (sizeof (inst
.operands
[0]));
17789 /* Swap operands[1] and operands[2]. */
17790 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
17791 inst
.operands
[1] = inst
.operands
[2];
17792 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
17797 inst
.operands
[1] = inst
.operands
[2];
17798 inst
.operands
[2] = inst
.operands
[0];
17803 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
17805 if (inst
.operands
[2].isreg
)
17808 neon_exchange_operands ();
17809 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
17813 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
17814 struct neon_type_el et
= neon_check_type (2, rs
,
17815 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
17817 NEON_ENCODE (IMMED
, inst
);
17818 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17819 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17820 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17821 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17822 inst
.instruction
|= neon_quad (rs
) << 6;
17823 inst
.instruction
|= (et
.type
== NT_float
) << 10;
17824 inst
.instruction
|= neon_logbits (et
.size
) << 18;
17826 neon_dp_fixup (&inst
);
17833 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, false);
17837 do_neon_cmp_inv (void)
17839 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, true);
17845 neon_compare (N_IF_32
, N_IF_32
, false);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.

   Dot Product instructions are similar to multiply instructions except elsize
   should always be 32.

   This function translates SCALAR, which is GAS's internal encoding of indexed
   scalar register, to raw encoding.  There is also register and index range
   check based on ELSIZE.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    bad_scalar:
    default:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
17887 /* Encode multiply / multiply-accumulate scalar instructions. */
17890 neon_mul_mac (struct neon_type_el et
, int ubit
)
17894 /* Give a more helpful error message if we have an invalid type. */
17895 if (et
.type
== NT_invtype
)
17898 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
17899 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17900 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17901 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17902 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17903 inst
.instruction
|= LOW4 (scalar
);
17904 inst
.instruction
|= HI1 (scalar
) << 5;
17905 inst
.instruction
|= (et
.type
== NT_float
) << 8;
17906 inst
.instruction
|= neon_logbits (et
.size
) << 20;
17907 inst
.instruction
|= (ubit
!= 0) << 24;
17909 neon_dp_fixup (&inst
);
17913 do_neon_mac_maybe_scalar (void)
17915 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
17918 if (!check_simd_pred_availability (false, NEON_CHECK_CC
| NEON_CHECK_ARCH
))
17921 if (inst
.operands
[2].isscalar
)
17923 constraint (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
), BAD_FPU
);
17924 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
17925 struct neon_type_el et
= neon_check_type (3, rs
,
17926 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F_16_32
| N_KEY
);
17927 NEON_ENCODE (SCALAR
, inst
);
17928 neon_mul_mac (et
, neon_quad (rs
));
17930 else if (!inst
.operands
[2].isvec
)
17932 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
), BAD_FPU
);
17934 enum neon_shape rs
= neon_select_shape (NS_QQR
, NS_NULL
);
17935 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_SU_MVE
| N_KEY
);
17937 neon_dyadic_misc (NT_unsigned
, N_SU_MVE
, 0);
17941 constraint (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
), BAD_FPU
);
17942 /* The "untyped" case can't happen. Do this to stop the "U" bit being
17943 affected if we specify unsigned args. */
17944 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
17949 do_bfloat_vfma (void)
17951 constraint (!mark_feature_used (&fpu_neon_ext_armv8
), _(BAD_FPU
));
17952 constraint (!mark_feature_used (&arm_ext_bf16
), _(BAD_BF16
));
17953 enum neon_shape rs
;
17956 if (inst
.instruction
!= B_MNEM_vfmab
)
17959 inst
.instruction
= B_MNEM_vfmat
;
17962 if (inst
.operands
[2].isscalar
)
17964 rs
= neon_select_shape (NS_QQS
, NS_NULL
);
17965 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_BF16
| N_KEY
);
17967 inst
.instruction
|= (1 << 25);
17968 int idx
= inst
.operands
[2].reg
& 0xf;
17969 constraint (!(idx
< 4), _("index must be in the range 0 to 3"));
17970 inst
.operands
[2].reg
>>= 4;
17971 constraint (!(inst
.operands
[2].reg
< 8),
17972 _("indexed register must be less than 8"));
17973 neon_three_args (t_bit
);
17974 inst
.instruction
|= ((idx
& 1) << 3);
17975 inst
.instruction
|= ((idx
& 2) << 4);
17979 rs
= neon_select_shape (NS_QQQ
, NS_NULL
);
17980 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_BF16
| N_KEY
);
17981 neon_three_args (t_bit
);
17987 do_neon_fmac (void)
17989 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_fma
)
17990 && try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
17993 if (!check_simd_pred_availability (true, NEON_CHECK_CC
| NEON_CHECK_ARCH
))
17996 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_fp_ext
))
17998 enum neon_shape rs
= neon_select_shape (NS_QQQ
, NS_QQR
, NS_NULL
);
17999 struct neon_type_el et
= neon_check_type (3, rs
, N_F_MVE
| N_KEY
, N_EQK
,
18005 if (inst
.operands
[2].reg
== REG_SP
)
18006 as_tsktsk (MVE_BAD_SP
);
18007 else if (inst
.operands
[2].reg
== REG_PC
)
18008 as_tsktsk (MVE_BAD_PC
);
18010 inst
.instruction
= 0xee310e40;
18011 inst
.instruction
|= (et
.size
== 16) << 28;
18012 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18013 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
18014 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18015 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 6;
18016 inst
.instruction
|= inst
.operands
[2].reg
;
18023 constraint (!inst
.operands
[2].isvec
, BAD_FPU
);
18026 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
18032 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_bf16
) &&
18033 inst
.cond
== COND_ALWAYS
)
18035 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
), BAD_FPU
);
18036 inst
.instruction
= N_MNEM_vfma
;
18037 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18039 return do_neon_fmac();
18050 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
18051 struct neon_type_el et
= neon_check_type (3, rs
,
18052 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
18053 neon_three_same (neon_quad (rs
), 0, et
.size
);
18056 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
18057 same types as the MAC equivalents. The polynomial type for this instruction
18058 is encoded the same as the integer type. */
18063 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
18066 if (!check_simd_pred_availability (false, NEON_CHECK_CC
| NEON_CHECK_ARCH
))
18069 if (inst
.operands
[2].isscalar
)
18071 constraint (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
), BAD_FPU
);
18072 do_neon_mac_maybe_scalar ();
18076 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
18078 enum neon_shape rs
= neon_select_shape (NS_QQR
, NS_QQQ
, NS_NULL
);
18079 struct neon_type_el et
18080 = neon_check_type (3, rs
, N_EQK
, N_EQK
, N_I_MVE
| N_F_MVE
| N_KEY
);
18081 if (et
.type
== NT_float
)
18082 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_fp_ext
),
18085 neon_dyadic_misc (NT_float
, N_I_MVE
| N_F_MVE
, 0);
18089 constraint (!inst
.operands
[2].isvec
, BAD_FPU
);
18090 neon_dyadic_misc (NT_poly
,
18091 N_I8
| N_I16
| N_I32
| N_F16
| N_F32
| N_P8
, 0);
18097 do_neon_qdmulh (void)
18099 if (!check_simd_pred_availability (false, NEON_CHECK_ARCH
| NEON_CHECK_CC
))
18102 if (inst
.operands
[2].isscalar
)
18104 constraint (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
), BAD_FPU
);
18105 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
18106 struct neon_type_el et
= neon_check_type (3, rs
,
18107 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
18108 NEON_ENCODE (SCALAR
, inst
);
18109 neon_mul_mac (et
, neon_quad (rs
));
18113 enum neon_shape rs
;
18114 struct neon_type_el et
;
18115 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
18117 rs
= neon_select_shape (NS_QQR
, NS_QQQ
, NS_NULL
);
18118 et
= neon_check_type (3, rs
,
18119 N_EQK
, N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
18123 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
18124 et
= neon_check_type (3, rs
,
18125 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
18128 NEON_ENCODE (INTEGER
, inst
);
18130 mve_encode_qqr (et
.size
, 0, 0);
18132 /* The U bit (rounding) comes from bit mask. */
18133 neon_three_same (neon_quad (rs
), 0, et
.size
);
18138 do_mve_vaddv (void)
18140 enum neon_shape rs
= neon_select_shape (NS_RQ
, NS_NULL
);
18141 struct neon_type_el et
18142 = neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
18144 if (et
.type
== NT_invtype
)
18145 first_error (BAD_EL_TYPE
);
18147 if (inst
.cond
> COND_ALWAYS
)
18148 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18150 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
18152 constraint (inst
.operands
[1].reg
> 14, MVE_BAD_QREG
);
18154 mve_encode_rq (et
.type
== NT_unsigned
, et
.size
);
18158 do_mve_vhcadd (void)
18160 enum neon_shape rs
= neon_select_shape (NS_QQQI
, NS_NULL
);
18161 struct neon_type_el et
18162 = neon_check_type (3, rs
, N_EQK
, N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
18164 if (inst
.cond
> COND_ALWAYS
)
18165 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18167 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
18169 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
18170 constraint (rot
!= 90 && rot
!= 270, _("immediate out of range"));
18172 if (et
.size
== 32 && inst
.operands
[0].reg
== inst
.operands
[2].reg
)
18173 as_tsktsk (_("Warning: 32-bit element size and same first and third "
18174 "operand makes instruction UNPREDICTABLE"));
18176 mve_encode_qqq (0, et
.size
);
18177 inst
.instruction
|= (rot
== 270) << 12;
18182 do_mve_vqdmull (void)
18184 enum neon_shape rs
= neon_select_shape (NS_QQQ
, NS_QQR
, NS_NULL
);
18185 struct neon_type_el et
18186 = neon_check_type (3, rs
, N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
18189 && (inst
.operands
[0].reg
== inst
.operands
[1].reg
18190 || (rs
== NS_QQQ
&& inst
.operands
[0].reg
== inst
.operands
[2].reg
)))
18191 as_tsktsk (BAD_MVE_SRCDEST
);
18193 if (inst
.cond
> COND_ALWAYS
)
18194 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18196 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
18200 mve_encode_qqq (et
.size
== 32, 64);
18201 inst
.instruction
|= 1;
18205 mve_encode_qqr (64, et
.size
== 32, 0);
18206 inst
.instruction
|= 0x3 << 5;
18213 enum neon_shape rs
= neon_select_shape (NS_QQQ
, NS_NULL
);
18214 struct neon_type_el et
18215 = neon_check_type (3, rs
, N_KEY
| N_I32
, N_EQK
, N_EQK
);
18217 if (et
.type
== NT_invtype
)
18218 first_error (BAD_EL_TYPE
);
18220 if (inst
.cond
> COND_ALWAYS
)
18221 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18223 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
18225 mve_encode_qqq (0, 64);
18229 do_mve_vbrsr (void)
18231 enum neon_shape rs
= neon_select_shape (NS_QQR
, NS_NULL
);
18232 struct neon_type_el et
18233 = neon_check_type (3, rs
, N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
18235 if (inst
.cond
> COND_ALWAYS
)
18236 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18238 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
18240 mve_encode_qqr (et
.size
, 0, 0);
18246 neon_check_type (3, NS_QQQ
, N_EQK
, N_EQK
, N_I32
| N_KEY
);
18248 if (inst
.cond
> COND_ALWAYS
)
18249 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18251 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
18253 mve_encode_qqq (1, 64);
18257 do_mve_vmulh (void)
18259 enum neon_shape rs
= neon_select_shape (NS_QQQ
, NS_NULL
);
18260 struct neon_type_el et
18261 = neon_check_type (3, rs
, N_EQK
, N_EQK
, N_SU_MVE
| N_KEY
);
18263 if (inst
.cond
> COND_ALWAYS
)
18264 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18266 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
18268 mve_encode_qqq (et
.type
== NT_unsigned
, et
.size
);
18272 do_mve_vqdmlah (void)
18274 enum neon_shape rs
= neon_select_shape (NS_QQR
, NS_NULL
);
18275 struct neon_type_el et
18276 = neon_check_type (3, rs
, N_EQK
, N_EQK
, N_S_32
| N_KEY
);
18278 if (inst
.cond
> COND_ALWAYS
)
18279 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18281 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
18283 mve_encode_qqr (et
.size
, et
.type
== NT_unsigned
, 0);
18287 do_mve_vqdmladh (void)
18289 enum neon_shape rs
= neon_select_shape (NS_QQQ
, NS_NULL
);
18290 struct neon_type_el et
18291 = neon_check_type (3, rs
, N_EQK
, N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
18293 if (inst
.cond
> COND_ALWAYS
)
18294 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18296 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
18298 mve_encode_qqq (0, et
.size
);
18303 do_mve_vmull (void)
18306 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_DDS
,
18307 NS_QQS
, NS_QQQ
, NS_QQR
, NS_NULL
);
18308 if (inst
.cond
== COND_ALWAYS
18309 && ((unsigned)inst
.instruction
) == M_MNEM_vmullt
)
18314 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
18321 constraint (rs
!= NS_QQQ
, BAD_FPU
);
18322 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18323 N_SU_32
| N_P8
| N_P16
| N_KEY
);
18325 /* We are dealing with MVE's vmullt. */
18327 && (inst
.operands
[0].reg
== inst
.operands
[1].reg
18328 || inst
.operands
[0].reg
== inst
.operands
[2].reg
))
18329 as_tsktsk (BAD_MVE_SRCDEST
);
18331 if (inst
.cond
> COND_ALWAYS
)
18332 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18334 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
18336 if (et
.type
== NT_poly
)
18337 mve_encode_qqq (neon_logbits (et
.size
), 64);
18339 mve_encode_qqq (et
.type
== NT_unsigned
, et
.size
);
18344 inst
.instruction
= N_MNEM_vmul
;
18347 inst
.pred_insn_type
= INSIDE_IT_INSN
;
18352 do_mve_vabav (void)
18354 enum neon_shape rs
= neon_select_shape (NS_RQQ
, NS_NULL
);
18359 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
18362 struct neon_type_el et
= neon_check_type (2, NS_NULL
, N_EQK
, N_KEY
| N_S8
18363 | N_S16
| N_S32
| N_U8
| N_U16
18366 if (inst
.cond
> COND_ALWAYS
)
18367 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18369 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
18371 mve_encode_rqq (et
.type
== NT_unsigned
, et
.size
);
18375 do_mve_vmladav (void)
18377 enum neon_shape rs
= neon_select_shape (NS_RQQ
, NS_NULL
);
18378 struct neon_type_el et
= neon_check_type (3, rs
,
18379 N_EQK
, N_EQK
, N_SU_MVE
| N_KEY
);
18381 if (et
.type
== NT_unsigned
18382 && (inst
.instruction
== M_MNEM_vmladavx
18383 || inst
.instruction
== M_MNEM_vmladavax
18384 || inst
.instruction
== M_MNEM_vmlsdav
18385 || inst
.instruction
== M_MNEM_vmlsdava
18386 || inst
.instruction
== M_MNEM_vmlsdavx
18387 || inst
.instruction
== M_MNEM_vmlsdavax
))
18388 first_error (BAD_SIMD_TYPE
);
18390 constraint (inst
.operands
[2].reg
> 14,
18391 _("MVE vector register in the range [Q0..Q7] expected"));
18393 if (inst
.cond
> COND_ALWAYS
)
18394 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18396 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
18398 if (inst
.instruction
== M_MNEM_vmlsdav
18399 || inst
.instruction
== M_MNEM_vmlsdava
18400 || inst
.instruction
== M_MNEM_vmlsdavx
18401 || inst
.instruction
== M_MNEM_vmlsdavax
)
18402 inst
.instruction
|= (et
.size
== 8) << 28;
18404 inst
.instruction
|= (et
.size
== 8) << 8;
18406 mve_encode_rqq (et
.type
== NT_unsigned
, 64);
18407 inst
.instruction
|= (et
.size
== 32) << 16;
18411 do_mve_vmlaldav (void)
18413 enum neon_shape rs
= neon_select_shape (NS_RRQQ
, NS_NULL
);
18414 struct neon_type_el et
18415 = neon_check_type (4, rs
, N_EQK
, N_EQK
, N_EQK
,
18416 N_S16
| N_S32
| N_U16
| N_U32
| N_KEY
);
18418 if (et
.type
== NT_unsigned
18419 && (inst
.instruction
== M_MNEM_vmlsldav
18420 || inst
.instruction
== M_MNEM_vmlsldava
18421 || inst
.instruction
== M_MNEM_vmlsldavx
18422 || inst
.instruction
== M_MNEM_vmlsldavax
))
18423 first_error (BAD_SIMD_TYPE
);
18425 if (inst
.cond
> COND_ALWAYS
)
18426 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18428 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
18430 mve_encode_rrqq (et
.type
== NT_unsigned
, et
.size
);
18434 do_mve_vrmlaldavh (void)
18436 struct neon_type_el et
;
18437 if (inst
.instruction
== M_MNEM_vrmlsldavh
18438 || inst
.instruction
== M_MNEM_vrmlsldavha
18439 || inst
.instruction
== M_MNEM_vrmlsldavhx
18440 || inst
.instruction
== M_MNEM_vrmlsldavhax
)
18442 et
= neon_check_type (4, NS_RRQQ
, N_EQK
, N_EQK
, N_EQK
, N_S32
| N_KEY
);
18443 if (inst
.operands
[1].reg
== REG_SP
)
18444 as_tsktsk (MVE_BAD_SP
);
18448 if (inst
.instruction
== M_MNEM_vrmlaldavhx
18449 || inst
.instruction
== M_MNEM_vrmlaldavhax
)
18450 et
= neon_check_type (4, NS_RRQQ
, N_EQK
, N_EQK
, N_EQK
, N_S32
| N_KEY
);
18452 et
= neon_check_type (4, NS_RRQQ
, N_EQK
, N_EQK
, N_EQK
,
18453 N_U32
| N_S32
| N_KEY
);
18454 /* vrmlaldavh's encoding with SP as the second, odd, GPR operand may alias
18455 with vmax/min instructions, making the use of SP in assembly really
18456 nonsensical, so instead of issuing a warning like we do for other uses
18457 of SP for the odd register operand we error out. */
18458 constraint (inst
.operands
[1].reg
== REG_SP
, BAD_SP
);
18461 /* Make sure we still check the second operand is an odd one and that PC is
18462 disallowed. This because we are parsing for any GPR operand, to be able
18463 to distinguish between giving a warning or an error for SP as described
18465 constraint ((inst
.operands
[1].reg
% 2) != 1, BAD_EVEN
);
18466 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
18468 if (inst
.cond
> COND_ALWAYS
)
18469 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18471 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
18473 mve_encode_rrqq (et
.type
== NT_unsigned
, 0);
18478 do_mve_vmaxnmv (void)
18480 enum neon_shape rs
= neon_select_shape (NS_RQ
, NS_NULL
);
18481 struct neon_type_el et
18482 = neon_check_type (2, rs
, N_EQK
, N_F_MVE
| N_KEY
);
18484 if (inst
.cond
> COND_ALWAYS
)
18485 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18487 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
18489 if (inst
.operands
[0].reg
== REG_SP
)
18490 as_tsktsk (MVE_BAD_SP
);
18491 else if (inst
.operands
[0].reg
== REG_PC
)
18492 as_tsktsk (MVE_BAD_PC
);
18494 mve_encode_rq (et
.size
== 16, 64);
18498 do_mve_vmaxv (void)
18500 enum neon_shape rs
= neon_select_shape (NS_RQ
, NS_NULL
);
18501 struct neon_type_el et
;
18503 if (inst
.instruction
== M_MNEM_vmaxv
|| inst
.instruction
== M_MNEM_vminv
)
18504 et
= neon_check_type (2, rs
, N_EQK
, N_SU_MVE
| N_KEY
);
18506 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
18508 if (inst
.cond
> COND_ALWAYS
)
18509 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
18511 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
18513 if (inst
.operands
[0].reg
== REG_SP
)
18514 as_tsktsk (MVE_BAD_SP
);
18515 else if (inst
.operands
[0].reg
== REG_PC
)
18516 as_tsktsk (MVE_BAD_PC
);
18518 mve_encode_rq (et
.type
== NT_unsigned
, et
.size
);
18523 do_neon_qrdmlah (void)
18525 if (!check_simd_pred_availability (false, NEON_CHECK_ARCH
| NEON_CHECK_CC
))
18527 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
18529 /* Check we're on the correct architecture. */
18530 if (!mark_feature_used (&fpu_neon_ext_armv8
))
18532 = _("instruction form not available on this architecture.");
18533 else if (!mark_feature_used (&fpu_neon_ext_v8_1
))
18535 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
18536 record_feature_use (&fpu_neon_ext_v8_1
);
18538 if (inst
.operands
[2].isscalar
)
18540 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
18541 struct neon_type_el et
= neon_check_type (3, rs
,
18542 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
18543 NEON_ENCODE (SCALAR
, inst
);
18544 neon_mul_mac (et
, neon_quad (rs
));
18548 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
18549 struct neon_type_el et
= neon_check_type (3, rs
,
18550 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
18551 NEON_ENCODE (INTEGER
, inst
);
18552 /* The U bit (rounding) comes from bit mask. */
18553 neon_three_same (neon_quad (rs
), 0, et
.size
);
18558 enum neon_shape rs
= neon_select_shape (NS_QQR
, NS_NULL
);
18559 struct neon_type_el et
18560 = neon_check_type (3, rs
, N_EQK
, N_EQK
, N_S_32
| N_KEY
);
18562 NEON_ENCODE (INTEGER
, inst
);
18563 mve_encode_qqr (et
.size
, et
.type
== NT_unsigned
, 0);
18568 do_neon_fcmp_absolute (void)
18570 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
18571 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18572 N_F_16_32
| N_KEY
);
18573 /* Size field comes from bit mask. */
18574 neon_three_same (neon_quad (rs
), 1, et
.size
== 16 ? (int) et
.size
: -1);
/* VACLE/VACLT: swap the source operands, then encode as the
   corresponding VACGE/VACGT.  */
static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
18585 do_neon_step (void)
18587 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
18588 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18589 N_F_16_32
| N_KEY
);
18590 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
18594 do_neon_abs_neg (void)
18596 enum neon_shape rs
;
18597 struct neon_type_el et
;
18599 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
18602 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18603 et
= neon_check_type (2, rs
, N_EQK
, N_S_32
| N_F_16_32
| N_KEY
);
18605 if (!check_simd_pred_availability (et
.type
== NT_float
,
18606 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
18609 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18610 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18611 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
18612 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
18613 inst
.instruction
|= neon_quad (rs
) << 6;
18614 inst
.instruction
|= (et
.type
== NT_float
) << 10;
18615 inst
.instruction
|= neon_logbits (et
.size
) << 18;
18617 neon_dp_fixup (&inst
);
18623 if (!check_simd_pred_availability (false, NEON_CHECK_ARCH
| NEON_CHECK_CC
))
18626 enum neon_shape rs
;
18627 struct neon_type_el et
;
18628 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
18630 rs
= neon_select_shape (NS_QQI
, NS_NULL
);
18631 et
= neon_check_type (2, rs
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
18635 rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
18636 et
= neon_check_type (2, rs
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
18640 int imm
= inst
.operands
[2].imm
;
18641 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
18642 _("immediate out of range for insert"));
18643 neon_imm_shift (false, 0, neon_quad (rs
), et
, imm
);
18649 if (!check_simd_pred_availability (false, NEON_CHECK_ARCH
| NEON_CHECK_CC
))
18652 enum neon_shape rs
;
18653 struct neon_type_el et
;
18654 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
18656 rs
= neon_select_shape (NS_QQI
, NS_NULL
);
18657 et
= neon_check_type (2, rs
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
18661 rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
18662 et
= neon_check_type (2, rs
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
18665 int imm
= inst
.operands
[2].imm
;
18666 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
18667 _("immediate out of range for insert"));
18668 neon_imm_shift (false, 0, neon_quad (rs
), et
, et
.size
- imm
);
18672 do_neon_qshlu_imm (void)
18674 if (!check_simd_pred_availability (false, NEON_CHECK_ARCH
| NEON_CHECK_CC
))
18677 enum neon_shape rs
;
18678 struct neon_type_el et
;
18679 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
18681 rs
= neon_select_shape (NS_QQI
, NS_NULL
);
18682 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
18686 rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
18687 et
= neon_check_type (2, rs
, N_EQK
| N_UNS
,
18688 N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
18691 int imm
= inst
.operands
[2].imm
;
18692 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
18693 _("immediate out of range for shift"));
18694 /* Only encodes the 'U present' variant of the instruction.
18695 In this case, signed types have OP (bit 8) set to 0.
18696 Unsigned types have OP set to 1. */
18697 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
18698 /* The rest of the bits are the same as other immediate shifts. */
18699 neon_imm_shift (false, 0, neon_quad (rs
), et
, imm
);
18703 do_neon_qmovn (void)
18705 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
18706 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
18707 /* Saturating move where operands can be signed or unsigned, and the
18708 destination has the same signedness. */
18709 NEON_ENCODE (INTEGER
, inst
);
18710 if (et
.type
== NT_unsigned
)
18711 inst
.instruction
|= 0xc0;
18713 inst
.instruction
|= 0x80;
18714 neon_two_same (0, 1, et
.size
/ 2);
18718 do_neon_qmovun (void)
18720 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
18721 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
18722 /* Saturating move with unsigned results. Operands must be signed. */
18723 NEON_ENCODE (INTEGER
, inst
);
18724 neon_two_same (0, 1, et
.size
/ 2);
18728 do_neon_rshift_sat_narrow (void)
18730 /* FIXME: Types for narrowing. If operands are signed, results can be signed
18731 or unsigned. If operands are unsigned, results must also be unsigned. */
18732 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
18733 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
18734 int imm
= inst
.operands
[2].imm
;
18735 /* This gets the bounds check, size encoding and immediate bits calculation
18739 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
18740 VQMOVN.I<size> <Dd>, <Qm>. */
18743 inst
.operands
[2].present
= 0;
18744 inst
.instruction
= N_MNEM_vqmovn
;
18749 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
18750 _("immediate out of range"));
18751 neon_imm_shift (true, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
18755 do_neon_rshift_sat_narrow_u (void)
18757 /* FIXME: Types for narrowing. If operands are signed, results can be signed
18758 or unsigned. If operands are unsigned, results must also be unsigned. */
18759 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
18760 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
18761 int imm
= inst
.operands
[2].imm
;
18762 /* This gets the bounds check, size encoding and immediate bits calculation
18766 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
18767 VQMOVUN.I<size> <Dd>, <Qm>. */
18770 inst
.operands
[2].present
= 0;
18771 inst
.instruction
= N_MNEM_vqmovun
;
18776 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
18777 _("immediate out of range"));
18778 /* FIXME: The manual is kind of unclear about what value U should have in
18779 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
18781 neon_imm_shift (true, 1, 0, et
, et
.size
- imm
);
18785 do_neon_movn (void)
18787 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
18788 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
18789 NEON_ENCODE (INTEGER
, inst
);
18790 neon_two_same (0, 1, et
.size
/ 2);
18794 do_neon_rshift_narrow (void)
18796 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
18797 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
18798 int imm
= inst
.operands
[2].imm
;
18799 /* This gets the bounds check, size encoding and immediate bits calculation
18803 /* If immediate is zero then we are a pseudo-instruction for
18804 VMOVN.I<size> <Dd>, <Qm> */
18807 inst
.operands
[2].present
= 0;
18808 inst
.instruction
= N_MNEM_vmovn
;
18813 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
18814 _("immediate out of range for narrowing operation"));
18815 neon_imm_shift (false, 0, 0, et
, et
.size
- imm
);
18819 do_neon_shll (void)
18821 /* FIXME: Type checking when lengthening. */
18822 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
18823 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
18824 unsigned imm
= inst
.operands
[2].imm
;
18826 if (imm
== et
.size
)
18828 /* Maximum shift variant. */
18829 NEON_ENCODE (INTEGER
, inst
);
18830 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18831 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18832 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
18833 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
18834 inst
.instruction
|= neon_logbits (et
.size
) << 18;
18836 neon_dp_fixup (&inst
);
18840 /* A more-specific type check for non-max versions. */
18841 et
= neon_check_type (2, NS_QDI
,
18842 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
18843 NEON_ENCODE (IMMED
, inst
);
18844 neon_imm_shift (true, et
.type
== NT_unsigned
, 0, et
, imm
);
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

#define CVT_FLAVOUR_VAR							\
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL)	\
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL)	\
  /* Half-precision conversions.  */					\
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	\
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	\
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	\
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	\
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL)		\
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL)		\
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	\
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */	\
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  CVT_VAR (bf16_f32, N_BF16, N_F32, whole_reg, NULL, NULL, NULL)	\
  /* VFP instructions.  */						\
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL)		\
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL)		\
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */				\
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL)	\
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL)	\
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL)	\
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL)	\
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL)	\
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL)	\
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL)	\
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

/* Expand each flavour to an enumerator name.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
18901 static enum neon_cvt_flavour
18902 get_neon_cvt_flavour (enum neon_shape rs
)
18904 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
18905 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
18906 if (et.type != NT_invtype) \
18908 inst.error = NULL; \
18909 return (neon_cvt_flavour_##C); \
18912 struct neon_type_el et
;
18913 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
18914 || rs
== NS_FF
) ? N_VFP
: 0;
18915 /* The instruction versions which take an immediate take one register
18916 argument, which is extended to the width of the full register. Thus the
18917 "source" and "destination" registers must have the same width. Hack that
18918 here by making the size equal to the key (wider, in this case) operand. */
18919 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
18923 return neon_cvt_flavour_invalid
;
18938 /* Neon-syntax VFP conversions. */
18941 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
18943 const char *opname
= 0;
18945 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
18946 || rs
== NS_FHI
|| rs
== NS_HFI
)
18948 /* Conversions with immediate bitshift. */
18949 const char *enc
[] =
18951 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
18957 if (flavour
< (int) ARRAY_SIZE (enc
))
18959 opname
= enc
[flavour
];
18960 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
18961 _("operands 0 and 1 must be the same register"));
18962 inst
.operands
[1] = inst
.operands
[2];
18963 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
18968 /* Conversions without bitshift. */
18969 const char *enc
[] =
18971 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
18977 if (flavour
< (int) ARRAY_SIZE (enc
))
18978 opname
= enc
[flavour
];
18982 do_vfp_nsyn_opcode (opname
);
18984 /* ARMv8.2 fp16 VCVT instruction. */
18985 if (flavour
== neon_cvt_flavour_s32_f16
18986 || flavour
== neon_cvt_flavour_u32_f16
18987 || flavour
== neon_cvt_flavour_f16_u32
18988 || flavour
== neon_cvt_flavour_f16_s32
)
18989 do_scalar_fp16_v82_encode ();
18993 do_vfp_nsyn_cvtz (void)
18995 enum neon_shape rs
= neon_select_shape (NS_FH
, NS_FF
, NS_FD
, NS_NULL
);
18996 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
18997 const char *enc
[] =
18999 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
19005 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
19006 do_vfp_nsyn_opcode (enc
[flavour
]);
19010 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
19011 enum neon_cvt_mode mode
)
19016 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
19017 D register operands. */
19018 if (flavour
== neon_cvt_flavour_s32_f64
19019 || flavour
== neon_cvt_flavour_u32_f64
)
19020 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
19023 if (flavour
== neon_cvt_flavour_s32_f16
19024 || flavour
== neon_cvt_flavour_u32_f16
)
19025 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
19028 set_pred_insn_type (OUTSIDE_PRED_INSN
);
19032 case neon_cvt_flavour_s32_f64
:
19036 case neon_cvt_flavour_s32_f32
:
19040 case neon_cvt_flavour_s32_f16
:
19044 case neon_cvt_flavour_u32_f64
:
19048 case neon_cvt_flavour_u32_f32
:
19052 case neon_cvt_flavour_u32_f16
:
19057 first_error (_("invalid instruction shape"));
19063 case neon_cvt_mode_a
: rm
= 0; break;
19064 case neon_cvt_mode_n
: rm
= 1; break;
19065 case neon_cvt_mode_p
: rm
= 2; break;
19066 case neon_cvt_mode_m
: rm
= 3; break;
19067 default: first_error (_("invalid rounding mode")); return;
19070 NEON_ENCODE (FPV8
, inst
);
19071 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
19072 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
19073 inst
.instruction
|= sz
<< 8;
19075 /* ARMv8.2 fp16 VCVT instruction. */
19076 if (flavour
== neon_cvt_flavour_s32_f16
19077 ||flavour
== neon_cvt_flavour_u32_f16
)
19078 do_scalar_fp16_v82_encode ();
19079 inst
.instruction
|= op
<< 7;
19080 inst
.instruction
|= rm
<< 16;
19081 inst
.instruction
|= 0xf0000000;
19082 inst
.is_neon
= true;
19086 do_neon_cvt_1 (enum neon_cvt_mode mode
)
19088 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
19089 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
,
19090 NS_FH
, NS_HF
, NS_FHI
, NS_HFI
,
19092 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
19094 if (flavour
== neon_cvt_flavour_invalid
)
19097 /* PR11109: Handle round-to-zero for VCVT conversions. */
19098 if (mode
== neon_cvt_mode_z
19099 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
19100 && (flavour
== neon_cvt_flavour_s16_f16
19101 || flavour
== neon_cvt_flavour_u16_f16
19102 || flavour
== neon_cvt_flavour_s32_f32
19103 || flavour
== neon_cvt_flavour_u32_f32
19104 || flavour
== neon_cvt_flavour_s32_f64
19105 || flavour
== neon_cvt_flavour_u32_f64
)
19106 && (rs
== NS_FD
|| rs
== NS_FF
))
19108 do_vfp_nsyn_cvtz ();
19112 /* ARMv8.2 fp16 VCVT conversions. */
19113 if (mode
== neon_cvt_mode_z
19114 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
)
19115 && (flavour
== neon_cvt_flavour_s32_f16
19116 || flavour
== neon_cvt_flavour_u32_f16
)
19119 do_vfp_nsyn_cvtz ();
19120 do_scalar_fp16_v82_encode ();
19124 if ((rs
== NS_FD
|| rs
== NS_QQI
) && mode
== neon_cvt_mode_n
19125 && ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
19127 /* We are dealing with vcvt with the 'ne' condition. */
19129 inst
.instruction
= N_MNEM_vcvt
;
19130 do_neon_cvt_1 (neon_cvt_mode_z
);
19134 /* VFP rather than Neon conversions. */
19135 if (flavour
>= neon_cvt_flavour_first_fp
)
19137 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
19138 do_vfp_nsyn_cvt (rs
, flavour
);
19140 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
19148 if (mode
== neon_cvt_mode_z
19149 && (flavour
== neon_cvt_flavour_f16_s16
19150 || flavour
== neon_cvt_flavour_f16_u16
19151 || flavour
== neon_cvt_flavour_s16_f16
19152 || flavour
== neon_cvt_flavour_u16_f16
19153 || flavour
== neon_cvt_flavour_f32_u32
19154 || flavour
== neon_cvt_flavour_f32_s32
19155 || flavour
== neon_cvt_flavour_s32_f32
19156 || flavour
== neon_cvt_flavour_u32_f32
))
19158 if (!check_simd_pred_availability (true,
19159 NEON_CHECK_CC
| NEON_CHECK_ARCH
))
19162 /* fall through. */
19166 unsigned enctab
[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
19167 0x0000100, 0x1000100, 0x0, 0x1000000};
19169 if ((rs
!= NS_QQI
|| !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_fp_ext
))
19170 && vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
19173 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_fp_ext
))
19175 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0,
19176 _("immediate value out of range"));
19179 case neon_cvt_flavour_f16_s16
:
19180 case neon_cvt_flavour_f16_u16
:
19181 case neon_cvt_flavour_s16_f16
:
19182 case neon_cvt_flavour_u16_f16
:
19183 constraint (inst
.operands
[2].imm
> 16,
19184 _("immediate value out of range"));
19186 case neon_cvt_flavour_f32_u32
:
19187 case neon_cvt_flavour_f32_s32
:
19188 case neon_cvt_flavour_s32_f32
:
19189 case neon_cvt_flavour_u32_f32
:
19190 constraint (inst
.operands
[2].imm
> 32,
19191 _("immediate value out of range"));
19194 inst
.error
= BAD_FPU
;
19199 /* Fixed-point conversion with #0 immediate is encoded as an
19200 integer conversion. */
19201 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
19203 NEON_ENCODE (IMMED
, inst
);
19204 if (flavour
!= neon_cvt_flavour_invalid
)
19205 inst
.instruction
|= enctab
[flavour
];
19206 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
19207 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
19208 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
19209 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
19210 inst
.instruction
|= neon_quad (rs
) << 6;
19211 inst
.instruction
|= 1 << 21;
19212 if (flavour
< neon_cvt_flavour_s16_f16
)
19214 inst
.instruction
|= 1 << 21;
19215 immbits
= 32 - inst
.operands
[2].imm
;
19216 inst
.instruction
|= immbits
<< 16;
19220 inst
.instruction
|= 3 << 20;
19221 immbits
= 16 - inst
.operands
[2].imm
;
19222 inst
.instruction
|= immbits
<< 16;
19223 inst
.instruction
&= ~(1 << 9);
19226 neon_dp_fixup (&inst
);
19231 if ((mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
19232 || mode
== neon_cvt_mode_m
|| mode
== neon_cvt_mode_p
)
19233 && (flavour
== neon_cvt_flavour_s16_f16
19234 || flavour
== neon_cvt_flavour_u16_f16
19235 || flavour
== neon_cvt_flavour_s32_f32
19236 || flavour
== neon_cvt_flavour_u32_f32
))
19238 if (!check_simd_pred_availability (true,
19239 NEON_CHECK_CC
| NEON_CHECK_ARCH8
))
19242 else if (mode
== neon_cvt_mode_z
19243 && (flavour
== neon_cvt_flavour_f16_s16
19244 || flavour
== neon_cvt_flavour_f16_u16
19245 || flavour
== neon_cvt_flavour_s16_f16
19246 || flavour
== neon_cvt_flavour_u16_f16
19247 || flavour
== neon_cvt_flavour_f32_u32
19248 || flavour
== neon_cvt_flavour_f32_s32
19249 || flavour
== neon_cvt_flavour_s32_f32
19250 || flavour
== neon_cvt_flavour_u32_f32
))
19252 if (!check_simd_pred_availability (true,
19253 NEON_CHECK_CC
| NEON_CHECK_ARCH
))
19256 /* fall through. */
19258 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
19261 NEON_ENCODE (FLOAT
, inst
);
19262 if (!check_simd_pred_availability (true,
19263 NEON_CHECK_CC
| NEON_CHECK_ARCH8
))
19266 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
19267 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
19268 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
19269 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
19270 inst
.instruction
|= neon_quad (rs
) << 6;
19271 inst
.instruction
|= (flavour
== neon_cvt_flavour_u16_f16
19272 || flavour
== neon_cvt_flavour_u32_f32
) << 7;
19273 inst
.instruction
|= mode
<< 8;
19274 if (flavour
== neon_cvt_flavour_u16_f16
19275 || flavour
== neon_cvt_flavour_s16_f16
)
19276 /* Mask off the original size bits and reencode them. */
19277 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff) | (1 << 18));
19280 inst
.instruction
|= 0xfc000000;
19282 inst
.instruction
|= 0xf0000000;
19288 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080,
19289 0x100, 0x180, 0x0, 0x080};
19291 NEON_ENCODE (INTEGER
, inst
);
19293 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_fp_ext
))
19295 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
19299 if (flavour
!= neon_cvt_flavour_invalid
)
19300 inst
.instruction
|= enctab
[flavour
];
19302 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
19303 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
19304 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
19305 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
19306 inst
.instruction
|= neon_quad (rs
) << 6;
19307 if (flavour
>= neon_cvt_flavour_s16_f16
19308 && flavour
<= neon_cvt_flavour_f16_u16
)
19309 /* Half precision. */
19310 inst
.instruction
|= 1 << 18;
19312 inst
.instruction
|= 2 << 18;
19314 neon_dp_fixup (&inst
);
19319 /* Half-precision conversions for Advanced SIMD -- neon. */
19322 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
19326 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
19328 as_bad (_("operand size must match register width"));
19333 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
19335 as_bad (_("operand size must match register width"));
19341 if (flavour
== neon_cvt_flavour_bf16_f32
)
19343 if (vfp_or_neon_is_neon (NEON_CHECK_ARCH8
) == FAIL
)
19345 constraint (!mark_feature_used (&arm_ext_bf16
), _(BAD_BF16
));
19346 /* VCVT.bf16.f32. */
19347 inst
.instruction
= 0x11b60640;
19350 /* VCVT.f16.f32. */
19351 inst
.instruction
= 0x3b60600;
19354 /* VCVT.f32.f16. */
19355 inst
.instruction
= 0x3b60700;
19357 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
19358 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
19359 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
19360 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
19361 neon_dp_fixup (&inst
);
19365 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
19366 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
19367 do_vfp_nsyn_cvt (rs
, flavour
);
19369 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
19374 do_neon_cvtr (void)
19376 do_neon_cvt_1 (neon_cvt_mode_x
);
19382 do_neon_cvt_1 (neon_cvt_mode_z
);
19386 do_neon_cvta (void)
19388 do_neon_cvt_1 (neon_cvt_mode_a
);
19392 do_neon_cvtn (void)
19394 do_neon_cvt_1 (neon_cvt_mode_n
);
19398 do_neon_cvtp (void)
19400 do_neon_cvt_1 (neon_cvt_mode_p
);
19404 do_neon_cvtm (void)
19406 do_neon_cvt_1 (neon_cvt_mode_m
);
19410 do_neon_cvttb_2 (bool t
, bool to
, bool is_double
)
19413 mark_feature_used (&fpu_vfp_ext_armv8
);
19415 encode_arm_vfp_reg (inst
.operands
[0].reg
,
19416 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
19417 encode_arm_vfp_reg (inst
.operands
[1].reg
,
19418 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
19419 inst
.instruction
|= to
? 0x10000 : 0;
19420 inst
.instruction
|= t
? 0x80 : 0;
19421 inst
.instruction
|= is_double
? 0x100 : 0;
19422 do_vfp_cond_or_thumb ();
19426 do_neon_cvttb_1 (bool t
)
19428 enum neon_shape rs
= neon_select_shape (NS_HF
, NS_HD
, NS_FH
, NS_FF
, NS_FD
,
19429 NS_DF
, NS_DH
, NS_QQ
, NS_QQI
, NS_NULL
);
19433 else if (rs
== NS_QQ
|| rs
== NS_QQI
)
19435 int single_to_half
= 0;
19436 if (!check_simd_pred_availability (true, NEON_CHECK_ARCH
))
19439 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
19441 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
19442 && (flavour
== neon_cvt_flavour_u16_f16
19443 || flavour
== neon_cvt_flavour_s16_f16
19444 || flavour
== neon_cvt_flavour_f16_s16
19445 || flavour
== neon_cvt_flavour_f16_u16
19446 || flavour
== neon_cvt_flavour_u32_f32
19447 || flavour
== neon_cvt_flavour_s32_f32
19448 || flavour
== neon_cvt_flavour_f32_s32
19449 || flavour
== neon_cvt_flavour_f32_u32
))
19452 inst
.instruction
= N_MNEM_vcvt
;
19453 set_pred_insn_type (INSIDE_VPT_INSN
);
19454 do_neon_cvt_1 (neon_cvt_mode_z
);
19457 else if (rs
== NS_QQ
&& flavour
== neon_cvt_flavour_f32_f16
)
19458 single_to_half
= 1;
19459 else if (rs
== NS_QQ
&& flavour
!= neon_cvt_flavour_f16_f32
)
19461 first_error (BAD_FPU
);
19465 inst
.instruction
= 0xee3f0e01;
19466 inst
.instruction
|= single_to_half
<< 28;
19467 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
19468 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 13;
19469 inst
.instruction
|= t
<< 12;
19470 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
19471 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 1;
19474 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
19477 do_neon_cvttb_2 (t
, /*to=*/true, /*is_double=*/false);
19479 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
19482 do_neon_cvttb_2 (t
, /*to=*/false, /*is_double=*/false);
19484 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
19486 /* The VCVTB and VCVTT instructions with D-register operands
19487 don't work for SP only targets. */
19488 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
19492 do_neon_cvttb_2 (t
, /*to=*/true, /*is_double=*/true);
19494 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
19496 /* The VCVTB and VCVTT instructions with D-register operands
19497 don't work for SP only targets. */
19498 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
19502 do_neon_cvttb_2 (t
, /*to=*/false, /*is_double=*/true);
19504 else if (neon_check_type (2, rs
, N_BF16
| N_VFP
, N_F32
).type
!= NT_invtype
)
19506 constraint (!mark_feature_used (&arm_ext_bf16
), _(BAD_BF16
));
19508 inst
.instruction
|= (1 << 8);
19509 inst
.instruction
&= ~(1 << 9);
19510 do_neon_cvttb_2 (t
, /*to=*/true, /*is_double=*/false);
19517 do_neon_cvtb (void)
19519 do_neon_cvttb_1 (false);
19524 do_neon_cvtt (void)
19526 do_neon_cvttb_1 (true);
19530 neon_move_immediate (void)
19532 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
19533 struct neon_type_el et
= neon_check_type (2, rs
,
19534 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
19535 unsigned immlo
, immhi
= 0, immbits
;
19536 int op
, cmode
, float_p
;
19538 constraint (et
.type
== NT_invtype
,
19539 _("operand size must be specified for immediate VMOV"));
19541 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
19542 op
= (inst
.instruction
& (1 << 5)) != 0;
19544 immlo
= inst
.operands
[1].imm
;
19545 if (inst
.operands
[1].regisimm
)
19546 immhi
= inst
.operands
[1].reg
;
19548 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
19549 _("immediate has bits set outside the operand size"));
19551 float_p
= inst
.operands
[1].immisfloat
;
19553 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
19554 et
.size
, et
.type
)) == FAIL
)
19556 /* Invert relevant bits only. */
19557 neon_invert_size (&immlo
, &immhi
, et
.size
);
19558 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
19559 with one or the other; those cases are caught by
19560 neon_cmode_for_move_imm. */
19562 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
19563 &op
, et
.size
, et
.type
)) == FAIL
)
19565 first_error (_("immediate out of range"));
19570 inst
.instruction
&= ~(1 << 5);
19571 inst
.instruction
|= op
<< 5;
19573 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
19574 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
19575 inst
.instruction
|= neon_quad (rs
) << 6;
19576 inst
.instruction
|= cmode
<< 8;
19578 neon_write_immbits (immbits
);
19584 if (!check_simd_pred_availability (false, NEON_CHECK_CC
| NEON_CHECK_ARCH
))
19587 if (inst
.operands
[1].isreg
)
19589 enum neon_shape rs
;
19590 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
19591 rs
= neon_select_shape (NS_QQ
, NS_NULL
);
19593 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
19598 NEON_ENCODE (INTEGER
, inst
);
19599 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
19600 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
19601 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
19602 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
19603 inst
.instruction
|= neon_quad (rs
) << 6;
19607 NEON_ENCODE (IMMED
, inst
);
19608 neon_move_immediate ();
19611 neon_dp_fixup (&inst
);
19613 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
19615 constraint (!inst
.operands
[1].isreg
&& !inst
.operands
[0].isquad
, BAD_FPU
);
19619 /* Encode instructions of form:
19621 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
19622 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
19625 neon_mixed_length (struct neon_type_el et
, unsigned size
)
19627 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
19628 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
19629 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
19630 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
19631 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
19632 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
19633 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
19634 inst
.instruction
|= neon_logbits (size
) << 20;
19636 neon_dp_fixup (&inst
);
19640 do_neon_dyadic_long (void)
19642 enum neon_shape rs
= neon_select_shape (NS_QDD
, NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
19645 if (vfp_or_neon_is_neon (NEON_CHECK_ARCH
| NEON_CHECK_CC
) == FAIL
)
19648 NEON_ENCODE (INTEGER
, inst
);
19649 /* FIXME: Type checking for lengthening op. */
19650 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
19651 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
19652 neon_mixed_length (et
, et
.size
);
19654 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
19655 && (inst
.cond
== 0xf || inst
.cond
== 0x10))
19657 /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
19658 in an IT block with le/lt conditions. */
19660 if (inst
.cond
== 0xf)
19662 else if (inst
.cond
== 0x10)
19665 inst
.pred_insn_type
= INSIDE_IT_INSN
;
19667 if (inst
.instruction
== N_MNEM_vaddl
)
19669 inst
.instruction
= N_MNEM_vadd
;
19670 do_neon_addsub_if_i ();
19672 else if (inst
.instruction
== N_MNEM_vsubl
)
19674 inst
.instruction
= N_MNEM_vsub
;
19675 do_neon_addsub_if_i ();
19677 else if (inst
.instruction
== N_MNEM_vabdl
)
19679 inst
.instruction
= N_MNEM_vabd
;
19680 do_neon_dyadic_if_su ();
19684 first_error (BAD_FPU
);
19688 do_neon_abal (void)
19690 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
19691 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
19692 neon_mixed_length (et
, et
.size
);
19696 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
19698 if (inst
.operands
[2].isscalar
)
19700 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
19701 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
19702 NEON_ENCODE (SCALAR
, inst
);
19703 neon_mul_mac (et
, et
.type
== NT_unsigned
);
19707 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
19708 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
19709 NEON_ENCODE (INTEGER
, inst
);
19710 neon_mixed_length (et
, et
.size
);
19715 do_neon_mac_maybe_scalar_long (void)
19717 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
19720 /* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
19721 internal SCALAR. QUAD_P is 1 if it's for Q format, otherwise it's 0. */
19724 neon_scalar_for_fmac_fp16_long (unsigned scalar
, unsigned quad_p
)
19726 unsigned regno
= NEON_SCALAR_REG (scalar
);
19727 unsigned elno
= NEON_SCALAR_INDEX (scalar
);
19731 if (regno
> 7 || elno
> 3)
19734 return ((regno
& 0x7)
19735 | ((elno
& 0x1) << 3)
19736 | (((elno
>> 1) & 0x1) << 5));
19740 if (regno
> 15 || elno
> 1)
19743 return (((regno
& 0x1) << 5)
19744 | ((regno
>> 1) & 0x7)
19745 | ((elno
& 0x1) << 3));
19749 first_error (_("scalar out of range for multiply instruction"));
19754 do_neon_fmac_maybe_scalar_long (int subtype
)
19756 enum neon_shape rs
;
19758 /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding. 'size"
19759 field (bits[21:20]) has different meaning. For scalar index variant, it's
19760 used to differentiate add and subtract, otherwise it's with fixed value
19764 /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
19765 be a scalar index register. */
19766 if (inst
.operands
[2].isscalar
)
19768 high8
= 0xfe000000;
19771 rs
= neon_select_shape (NS_DHS
, NS_QDS
, NS_NULL
);
19775 high8
= 0xfc000000;
19778 inst
.instruction
|= (0x1 << 23);
19779 rs
= neon_select_shape (NS_DHH
, NS_QDD
, NS_NULL
);
19783 if (inst
.cond
!= COND_ALWAYS
)
19784 as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
19785 "behaviour is UNPREDICTABLE"));
19787 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16_fml
),
19790 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
19793 /* "opcode" from template has included "ubit", so simply pass 0 here. Also,
19794 the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
19795 so we simply pass -1 as size. */
19796 unsigned quad_p
= (rs
== NS_QDD
|| rs
== NS_QDS
);
19797 neon_three_same (quad_p
, 0, size
);
19799 /* Undo neon_dp_fixup. Redo the high eight bits. */
19800 inst
.instruction
&= 0x00ffffff;
19801 inst
.instruction
|= high8
;
19803 /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
19804 whether the instruction is in Q form and whether Vm is a scalar indexed
19806 if (inst
.operands
[2].isscalar
)
19809 = neon_scalar_for_fmac_fp16_long (inst
.operands
[2].reg
, quad_p
);
19810 inst
.instruction
&= 0xffffffd0;
19811 inst
.instruction
|= rm
;
19815 /* Redo Rn as well. */
19816 inst
.instruction
&= 0xfff0ff7f;
19817 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
19818 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
19823 /* Redo Rn and Rm. */
19824 inst
.instruction
&= 0xfff0ff50;
19825 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
19826 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
19827 inst
.instruction
|= HI4 (inst
.operands
[2].reg
);
19828 inst
.instruction
|= LOW1 (inst
.operands
[2].reg
) << 5;
19833 do_neon_vfmal (void)
19835 return do_neon_fmac_maybe_scalar_long (0);
19839 do_neon_vfmsl (void)
19841 return do_neon_fmac_maybe_scalar_long (1);
19845 do_neon_dyadic_wide (void)
19847 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
19848 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
19849 neon_mixed_length (et
, et
.size
);
19853 do_neon_dyadic_narrow (void)
19855 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
19856 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
19857 /* Operand sign is unimportant, and the U bit is part of the opcode,
19858 so force the operand type to integer. */
19859 et
.type
= NT_integer
;
19860 neon_mixed_length (et
, et
.size
/ 2);
19864 do_neon_mul_sat_scalar_long (void)
19866 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
19870 do_neon_vmull (void)
19872 if (inst
.operands
[2].isscalar
)
19873 do_neon_mac_maybe_scalar_long ();
19876 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
19877 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
19879 if (et
.type
== NT_poly
)
19880 NEON_ENCODE (POLY
, inst
);
19882 NEON_ENCODE (INTEGER
, inst
);
19884 /* For polynomial encoding the U bit must be zero, and the size must
19885 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
19886 obviously, as 0b10). */
19889 /* Check we're on the correct architecture. */
19890 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
19892 _("Instruction form not available on this architecture.");
19897 neon_mixed_length (et
, et
.size
);
19904 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
19905 struct neon_type_el et
= neon_check_type (3, rs
,
19906 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
19907 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
19909 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
19910 _("shift out of range"));
19911 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
19912 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
19913 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
19914 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
19915 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
19916 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
19917 inst
.instruction
|= neon_quad (rs
) << 6;
19918 inst
.instruction
|= imm
<< 8;
19920 neon_dp_fixup (&inst
);
19926 if (!check_simd_pred_availability (false, NEON_CHECK_ARCH
| NEON_CHECK_CC
))
19929 enum neon_shape rs
;
19930 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
19931 rs
= neon_select_shape (NS_QQ
, NS_NULL
);
19933 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
19935 struct neon_type_el et
= neon_check_type (2, rs
,
19936 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
19938 unsigned op
= (inst
.instruction
>> 7) & 3;
19939 /* N (width of reversed regions) is encoded as part of the bitmask. We
19940 extract it here to check the elements to be reversed are smaller.
19941 Otherwise we'd get a reserved instruction. */
19942 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
19944 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
) && elsize
== 64
19945 && inst
.operands
[0].reg
== inst
.operands
[1].reg
)
19946 as_tsktsk (_("Warning: 64-bit element size and same destination and source"
19947 " operands makes instruction UNPREDICTABLE"));
19949 gas_assert (elsize
!= 0);
19950 constraint (et
.size
>= elsize
,
19951 _("elements must be smaller than reversal region"));
19952 neon_two_same (neon_quad (rs
), 1, et
.size
);
19958 if (inst
.operands
[1].isscalar
)
19960 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
),
19962 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
19963 struct neon_type_el et
= neon_check_type (2, rs
,
19964 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
19965 unsigned sizebits
= et
.size
>> 3;
19966 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
19967 int logsize
= neon_logbits (et
.size
);
19968 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
19970 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
19973 NEON_ENCODE (SCALAR
, inst
);
19974 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
19975 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
19976 inst
.instruction
|= LOW4 (dm
);
19977 inst
.instruction
|= HI1 (dm
) << 5;
19978 inst
.instruction
|= neon_quad (rs
) << 6;
19979 inst
.instruction
|= x
<< 17;
19980 inst
.instruction
|= sizebits
<< 16;
19982 neon_dp_fixup (&inst
);
19986 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
19987 struct neon_type_el et
= neon_check_type (2, rs
,
19988 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
19991 if (!check_simd_pred_availability (false, NEON_CHECK_ARCH
))
19995 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
),
19998 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
20000 if (inst
.operands
[1].reg
== REG_SP
)
20001 as_tsktsk (MVE_BAD_SP
);
20002 else if (inst
.operands
[1].reg
== REG_PC
)
20003 as_tsktsk (MVE_BAD_PC
);
20006 /* Duplicate ARM register to lanes of vector. */
20007 NEON_ENCODE (ARMREG
, inst
);
20010 case 8: inst
.instruction
|= 0x400000; break;
20011 case 16: inst
.instruction
|= 0x000020; break;
20012 case 32: inst
.instruction
|= 0x000000; break;
20015 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
20016 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
20017 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
20018 inst
.instruction
|= neon_quad (rs
) << 21;
20019 /* The encoding for this instruction is identical for the ARM and Thumb
20020 variants, except for the condition field. */
20021 do_vfp_cond_or_thumb ();
20026 do_mve_mov (int toQ
)
20028 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
20030 if (inst
.cond
> COND_ALWAYS
)
20031 inst
.pred_insn_type
= MVE_UNPREDICABLE_INSN
;
20033 unsigned Rt
= 0, Rt2
= 1, Q0
= 2, Q1
= 3;
20042 constraint (inst
.operands
[Q0
].reg
!= inst
.operands
[Q1
].reg
+ 2,
20043 _("Index one must be [2,3] and index two must be two less than"
20045 constraint (!toQ
&& inst
.operands
[Rt
].reg
== inst
.operands
[Rt2
].reg
,
20046 _("Destination registers may not be the same"));
20047 constraint (inst
.operands
[Rt
].reg
== REG_SP
20048 || inst
.operands
[Rt2
].reg
== REG_SP
,
20050 constraint (inst
.operands
[Rt
].reg
== REG_PC
20051 || inst
.operands
[Rt2
].reg
== REG_PC
,
20054 inst
.instruction
= 0xec000f00;
20055 inst
.instruction
|= HI1 (inst
.operands
[Q1
].reg
/ 32) << 23;
20056 inst
.instruction
|= !!toQ
<< 20;
20057 inst
.instruction
|= inst
.operands
[Rt2
].reg
<< 16;
20058 inst
.instruction
|= LOW4 (inst
.operands
[Q1
].reg
/ 32) << 13;
20059 inst
.instruction
|= (inst
.operands
[Q1
].reg
% 4) << 4;
20060 inst
.instruction
|= inst
.operands
[Rt
].reg
;
20066 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
20069 if (inst
.cond
> COND_ALWAYS
)
20070 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
20072 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
20074 struct neon_type_el et
= neon_check_type (2, NS_QQ
, N_EQK
, N_I16
| N_I32
20077 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
20078 inst
.instruction
|= (neon_logbits (et
.size
) - 1) << 18;
20079 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
20080 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
20081 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
20086 /* VMOV has particularly many variations. It can be one of:
20087 0. VMOV<c><q> <Qd>, <Qm>
20088 1. VMOV<c><q> <Dd>, <Dm>
20089 (Register operations, which are VORR with Rm = Rn.)
20090 2. VMOV<c><q>.<dt> <Qd>, #<imm>
20091 3. VMOV<c><q>.<dt> <Dd>, #<imm>
20093 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
20094 (ARM register to scalar.)
20095 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
20096 (Two ARM registers to vector.)
20097 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
20098 (Scalar to ARM register.)
20099 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
20100 (Vector to two ARM registers.)
20101 8. VMOV.F32 <Sd>, <Sm>
20102 9. VMOV.F64 <Dd>, <Dm>
20103 (VFP register moves.)
20104 10. VMOV.F32 <Sd>, #imm
20105 11. VMOV.F64 <Dd>, #imm
20106 (VFP float immediate load.)
20107 12. VMOV <Rd>, <Sm>
20108 (VFP single to ARM reg.)
20109 13. VMOV <Sd>, <Rm>
20110 (ARM reg to VFP single.)
20111 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
20112 (Two ARM regs to two VFP singles.)
20113 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
20114 (Two VFP singles to two ARM regs.)
20115 16. VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]>
20116 17. VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2>
20117 18. VMOV<c>.<dt> <Rt>, <Qn[idx]>
20118 19. VMOV<c>.<dt> <Qd[idx]>, <Rt>
20120 These cases can be disambiguated using neon_select_shape, except cases 1/9
20121 and 3/11 which depend on the operand type too.
20123 All the encoded bits are hardcoded by this function.
20125 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
20126 Cases 5, 7 may be used with VFPv2 and above.
20128 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
20129 can specify a type where it doesn't make sense to, and is ignored). */
20134 enum neon_shape rs
= neon_select_shape (NS_RRSS
, NS_SSRR
, NS_RRFF
, NS_FFRR
,
20135 NS_DRR
, NS_RRD
, NS_QQ
, NS_DD
, NS_QI
,
20136 NS_DI
, NS_SR
, NS_RS
, NS_FF
, NS_FI
,
20137 NS_RF
, NS_FR
, NS_HR
, NS_RH
, NS_HI
,
20139 struct neon_type_el et
;
20140 const char *ldconst
= 0;
20144 case NS_DD
: /* case 1/9. */
20145 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
20146 /* It is not an error here if no type is given. */
20149 /* In MVE we interpret the following instructions as same, so ignoring
20150 the following type (float) and size (64) checks.
20151 a: VMOV<c><q> <Dd>, <Dm>
20152 b: VMOV<c><q>.F64 <Dd>, <Dm>. */
20153 if ((et
.type
== NT_float
&& et
.size
== 64)
20154 || (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)))
20156 do_vfp_nsyn_opcode ("fcpyd");
20159 /* fall through. */
20161 case NS_QQ
: /* case 0/1. */
20163 if (!check_simd_pred_availability (false,
20164 NEON_CHECK_CC
| NEON_CHECK_ARCH
))
20166 /* The architecture manual I have doesn't explicitly state which
20167 value the U bit should have for register->register moves, but
20168 the equivalent VORR instruction has U = 0, so do that. */
20169 inst
.instruction
= 0x0200110;
20170 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
20171 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
20172 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
20173 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
20174 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
20175 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
20176 inst
.instruction
|= neon_quad (rs
) << 6;
20178 neon_dp_fixup (&inst
);
20182 case NS_DI
: /* case 3/11. */
20183 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
20185 if (et
.type
== NT_float
&& et
.size
== 64)
20187 /* case 11 (fconstd). */
20188 ldconst
= "fconstd";
20189 goto encode_fconstd
;
20191 /* fall through. */
20193 case NS_QI
: /* case 2/3. */
20194 if (!check_simd_pred_availability (false,
20195 NEON_CHECK_CC
| NEON_CHECK_ARCH
))
20197 inst
.instruction
= 0x0800010;
20198 neon_move_immediate ();
20199 neon_dp_fixup (&inst
);
20202 case NS_SR
: /* case 4. */
20204 unsigned bcdebits
= 0;
20206 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
20207 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
20209 /* .<size> is optional here, defaulting to .32. */
20210 if (inst
.vectype
.elems
== 0
20211 && inst
.operands
[0].vectype
.type
== NT_invtype
20212 && inst
.operands
[1].vectype
.type
== NT_invtype
)
20214 inst
.vectype
.el
[0].type
= NT_untyped
;
20215 inst
.vectype
.el
[0].size
= 32;
20216 inst
.vectype
.elems
= 1;
20219 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
20220 logsize
= neon_logbits (et
.size
);
20224 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
20225 && vfp_or_neon_is_neon (NEON_CHECK_ARCH
) == FAIL
)
20230 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
)
20231 && !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
),
20235 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
20237 if (inst
.operands
[1].reg
== REG_SP
)
20238 as_tsktsk (MVE_BAD_SP
);
20239 else if (inst
.operands
[1].reg
== REG_PC
)
20240 as_tsktsk (MVE_BAD_PC
);
20242 unsigned size
= inst
.operands
[0].isscalar
== 1 ? 64 : 128;
20244 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
20245 constraint (x
>= size
/ et
.size
, _("scalar index out of range"));
20250 case 8: bcdebits
= 0x8; break;
20251 case 16: bcdebits
= 0x1; break;
20252 case 32: bcdebits
= 0x0; break;
20256 bcdebits
|= (x
& ((1 << (3-logsize
)) - 1)) << logsize
;
20258 inst
.instruction
= 0xe000b10;
20259 do_vfp_cond_or_thumb ();
20260 inst
.instruction
|= LOW4 (dn
) << 16;
20261 inst
.instruction
|= HI1 (dn
) << 7;
20262 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
20263 inst
.instruction
|= (bcdebits
& 3) << 5;
20264 inst
.instruction
|= ((bcdebits
>> 2) & 3) << 21;
20265 inst
.instruction
|= (x
>> (3-logsize
)) << 16;
20269 case NS_DRR
: /* case 5 (fmdrr). */
20270 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
)
20271 && !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
),
20274 inst
.instruction
= 0xc400b10;
20275 do_vfp_cond_or_thumb ();
20276 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
20277 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
20278 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
20279 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
20282 case NS_RS
: /* case 6. */
20285 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
20286 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
20287 unsigned abcdebits
= 0;
20289 /* .<dt> is optional here, defaulting to .32. */
20290 if (inst
.vectype
.elems
== 0
20291 && inst
.operands
[0].vectype
.type
== NT_invtype
20292 && inst
.operands
[1].vectype
.type
== NT_invtype
)
20294 inst
.vectype
.el
[0].type
= NT_untyped
;
20295 inst
.vectype
.el
[0].size
= 32;
20296 inst
.vectype
.elems
= 1;
20299 et
= neon_check_type (2, NS_NULL
,
20300 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
20301 logsize
= neon_logbits (et
.size
);
20305 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
20306 && vfp_or_neon_is_neon (NEON_CHECK_CC
20307 | NEON_CHECK_ARCH
) == FAIL
)
20312 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
)
20313 && !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
),
20317 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
20319 if (inst
.operands
[0].reg
== REG_SP
)
20320 as_tsktsk (MVE_BAD_SP
);
20321 else if (inst
.operands
[0].reg
== REG_PC
)
20322 as_tsktsk (MVE_BAD_PC
);
20325 unsigned size
= inst
.operands
[1].isscalar
== 1 ? 64 : 128;
20327 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
20328 constraint (x
>= size
/ et
.size
, _("scalar index out of range"));
20332 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
20333 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
20334 case 32: abcdebits
= 0x00; break;
20338 abcdebits
|= (x
& ((1 << (3-logsize
)) - 1)) << logsize
;
20339 inst
.instruction
= 0xe100b10;
20340 do_vfp_cond_or_thumb ();
20341 inst
.instruction
|= LOW4 (dn
) << 16;
20342 inst
.instruction
|= HI1 (dn
) << 7;
20343 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
20344 inst
.instruction
|= (abcdebits
& 3) << 5;
20345 inst
.instruction
|= (abcdebits
>> 2) << 21;
20346 inst
.instruction
|= (x
>> (3-logsize
)) << 16;
20350 case NS_RRD
: /* case 7 (fmrrd). */
20351 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
)
20352 && !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
),
20355 inst
.instruction
= 0xc500b10;
20356 do_vfp_cond_or_thumb ();
20357 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
20358 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
20359 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
20360 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
20363 case NS_FF
: /* case 8 (fcpys). */
20364 do_vfp_nsyn_opcode ("fcpys");
20368 case NS_FI
: /* case 10 (fconsts). */
20369 ldconst
= "fconsts";
20371 if (!inst
.operands
[1].immisfloat
)
20374 /* Immediate has to fit in 8 bits so float is enough. */
20375 float imm
= (float) inst
.operands
[1].imm
;
20376 memcpy (&new_imm
, &imm
, sizeof (float));
20377 /* But the assembly may have been written to provide an integer
20378 bit pattern that equates to a float, so check that the
20379 conversion has worked. */
20380 if (is_quarter_float (new_imm
))
20382 if (is_quarter_float (inst
.operands
[1].imm
))
20383 as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));
20385 inst
.operands
[1].imm
= new_imm
;
20386 inst
.operands
[1].immisfloat
= 1;
20390 if (is_quarter_float (inst
.operands
[1].imm
))
20392 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
20393 do_vfp_nsyn_opcode (ldconst
);
20395 /* ARMv8.2 fp16 vmov.f16 instruction. */
20397 do_scalar_fp16_v82_encode ();
20400 first_error (_("immediate out of range"));
20404 case NS_RF
: /* case 12 (fmrs). */
20405 do_vfp_nsyn_opcode ("fmrs");
20406 /* ARMv8.2 fp16 vmov.f16 instruction. */
20408 do_scalar_fp16_v82_encode ();
20412 case NS_FR
: /* case 13 (fmsr). */
20413 do_vfp_nsyn_opcode ("fmsr");
20414 /* ARMv8.2 fp16 vmov.f16 instruction. */
20416 do_scalar_fp16_v82_encode ();
20426 /* The encoders for the fmrrs and fmsrr instructions expect three operands
20427 (one of which is a list), but we have parsed four. Do some fiddling to
20428 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
20430 case NS_RRFF
: /* case 14 (fmrrs). */
20431 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
)
20432 && !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
),
20434 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
20435 _("VFP registers must be adjacent"));
20436 inst
.operands
[2].imm
= 2;
20437 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
20438 do_vfp_nsyn_opcode ("fmrrs");
20441 case NS_FFRR
: /* case 15 (fmsrr). */
20442 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
)
20443 && !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
),
20445 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
20446 _("VFP registers must be adjacent"));
20447 inst
.operands
[1] = inst
.operands
[2];
20448 inst
.operands
[2] = inst
.operands
[3];
20449 inst
.operands
[0].imm
= 2;
20450 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
20451 do_vfp_nsyn_opcode ("fmsrr");
20455 /* neon_select_shape has determined that the instruction
20456 shape is wrong and has already set the error message. */
20467 if (!(inst
.operands
[0].present
&& inst
.operands
[0].isquad
20468 && inst
.operands
[1].present
&& inst
.operands
[1].isquad
20469 && !inst
.operands
[2].present
))
20471 inst
.instruction
= 0;
20474 set_pred_insn_type (INSIDE_IT_INSN
);
20479 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
20482 if (inst
.cond
!= COND_ALWAYS
)
20483 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
20485 struct neon_type_el et
= neon_check_type (2, NS_QQ
, N_EQK
, N_S8
| N_U8
20486 | N_S16
| N_U16
| N_KEY
);
20488 inst
.instruction
|= (et
.type
== NT_unsigned
) << 28;
20489 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
20490 inst
.instruction
|= (neon_logbits (et
.size
) + 1) << 19;
20491 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
20492 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
20493 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
20498 do_neon_rshift_round_imm (void)
20500 if (!check_simd_pred_availability (false, NEON_CHECK_ARCH
| NEON_CHECK_CC
))
20503 enum neon_shape rs
;
20504 struct neon_type_el et
;
20506 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
20508 rs
= neon_select_shape (NS_QQI
, NS_NULL
);
20509 et
= neon_check_type (2, rs
, N_EQK
, N_SU_MVE
| N_KEY
);
20513 rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
20514 et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
20516 int imm
= inst
.operands
[2].imm
;
20518 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
20521 inst
.operands
[2].present
= 0;
20526 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
20527 _("immediate out of range for shift"));
20528 neon_imm_shift (true, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
20533 do_neon_movhf (void)
20535 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_NULL
);
20536 constraint (rs
!= NS_HH
, _("invalid suffix"));
20538 if (inst
.cond
!= COND_ALWAYS
)
20542 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
20543 " the behaviour is UNPREDICTABLE"));
20547 inst
.error
= BAD_COND
;
20552 do_vfp_sp_monadic ();
20555 inst
.instruction
|= 0xf0000000;
20559 do_neon_movl (void)
20561 struct neon_type_el et
= neon_check_type (2, NS_QD
,
20562 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
20563 unsigned sizebits
= et
.size
>> 3;
20564 inst
.instruction
|= sizebits
<< 19;
20565 neon_two_same (0, et
.type
== NT_unsigned
, -1);
20571 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
20572 struct neon_type_el et
= neon_check_type (2, rs
,
20573 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
20574 NEON_ENCODE (INTEGER
, inst
);
20575 neon_two_same (neon_quad (rs
), 1, et
.size
);
20579 do_neon_zip_uzp (void)
20581 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
20582 struct neon_type_el et
= neon_check_type (2, rs
,
20583 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
20584 if (rs
== NS_DD
&& et
.size
== 32)
20586 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
20587 inst
.instruction
= N_MNEM_vtrn
;
20591 neon_two_same (neon_quad (rs
), 1, et
.size
);
20595 do_neon_sat_abs_neg (void)
20597 if (!check_simd_pred_availability (false, NEON_CHECK_CC
| NEON_CHECK_ARCH
))
20600 enum neon_shape rs
;
20601 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
20602 rs
= neon_select_shape (NS_QQ
, NS_NULL
);
20604 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
20605 struct neon_type_el et
= neon_check_type (2, rs
,
20606 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
20607 neon_two_same (neon_quad (rs
), 1, et
.size
);
20611 do_neon_pair_long (void)
20613 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
20614 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
20615 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
20616 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
20617 neon_two_same (neon_quad (rs
), 1, et
.size
);
20621 do_neon_recip_est (void)
20623 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
20624 struct neon_type_el et
= neon_check_type (2, rs
,
20625 N_EQK
| N_FLT
, N_F_16_32
| N_U32
| N_KEY
);
20626 inst
.instruction
|= (et
.type
== NT_float
) << 8;
20627 neon_two_same (neon_quad (rs
), 1, et
.size
);
20633 if (!check_simd_pred_availability (false, NEON_CHECK_ARCH
| NEON_CHECK_CC
))
20636 enum neon_shape rs
;
20637 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
20638 rs
= neon_select_shape (NS_QQ
, NS_NULL
);
20640 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
20642 struct neon_type_el et
= neon_check_type (2, rs
,
20643 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
20644 neon_two_same (neon_quad (rs
), 1, et
.size
);
20650 if (!check_simd_pred_availability (false, NEON_CHECK_ARCH
| NEON_CHECK_CC
))
20653 enum neon_shape rs
;
20654 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
20655 rs
= neon_select_shape (NS_QQ
, NS_NULL
);
20657 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
20659 struct neon_type_el et
= neon_check_type (2, rs
,
20660 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
20661 neon_two_same (neon_quad (rs
), 1, et
.size
);
20667 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
20668 struct neon_type_el et
= neon_check_type (2, rs
,
20669 N_EQK
| N_INT
, N_8
| N_KEY
);
20670 neon_two_same (neon_quad (rs
), 1, et
.size
);
20676 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
20679 neon_two_same (neon_quad (rs
), 1, -1);
20683 do_neon_tbl_tbx (void)
20685 unsigned listlenbits
;
20686 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
20688 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
20690 first_error (_("bad list length for table lookup"));
20694 listlenbits
= inst
.operands
[1].imm
- 1;
20695 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
20696 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
20697 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
20698 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
20699 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
20700 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
20701 inst
.instruction
|= listlenbits
<< 8;
20703 neon_dp_fixup (&inst
);
20707 do_neon_ldm_stm (void)
20709 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1xd
)
20710 && !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
),
20712 /* P, U and L bits are part of bitmask. */
20713 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
20714 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
20716 if (inst
.operands
[1].issingle
)
20718 do_vfp_nsyn_ldm_stm (is_dbmode
);
20722 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
20723 _("writeback (!) must be used for VLDMDB and VSTMDB"));
20725 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
20726 _("register list must contain at least 1 and at most 16 "
20729 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
20730 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
20731 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
20732 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
20734 inst
.instruction
|= offsetbits
;
20736 do_vfp_cond_or_thumb ();
20740 do_vfp_nsyn_push_pop_check (void)
20742 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1xd
), _(BAD_FPU
));
20744 if (inst
.operands
[1].issingle
)
20746 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 32,
20747 _("register list must contain at least 1 and at most 32 registers"));
20751 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
20752 _("register list must contain at least 1 and at most 16 registers"));
20757 do_vfp_nsyn_pop (void)
20761 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
20762 return do_vfp_nsyn_opcode ("vldm");
20764 do_vfp_nsyn_push_pop_check ();
20766 if (inst
.operands
[1].issingle
)
20767 do_vfp_nsyn_opcode ("fldmias");
20769 do_vfp_nsyn_opcode ("fldmiad");
20773 do_vfp_nsyn_push (void)
20777 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
20778 return do_vfp_nsyn_opcode ("vstmdb");
20780 do_vfp_nsyn_push_pop_check ();
20782 if (inst
.operands
[1].issingle
)
20783 do_vfp_nsyn_opcode ("fstmdbs");
20785 do_vfp_nsyn_opcode ("fstmdbd");
20789 do_neon_ldr_str (void)
20791 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
20793 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
20794 And is UNPREDICTABLE in thumb mode. */
20796 && inst
.operands
[1].reg
== REG_PC
20797 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
20800 inst
.error
= _("Use of PC here is UNPREDICTABLE");
20801 else if (warn_on_deprecated
)
20802 as_tsktsk (_("Use of PC here is deprecated"));
20805 if (inst
.operands
[0].issingle
)
20808 do_vfp_nsyn_opcode ("flds");
20810 do_vfp_nsyn_opcode ("fsts");
20812 /* ARMv8.2 vldr.16/vstr.16 instruction. */
20813 if (inst
.vectype
.el
[0].size
== 16)
20814 do_scalar_fp16_v82_encode ();
20819 do_vfp_nsyn_opcode ("fldd");
20821 do_vfp_nsyn_opcode ("fstd");
20826 do_t_vldr_vstr_sysreg (void)
20828 int fp_vldr_bitno
= 20, sysreg_vldr_bitno
= 20;
20829 bool is_vldr
= ((inst
.instruction
& (1 << fp_vldr_bitno
)) != 0);
20831 /* Use of PC is UNPREDICTABLE. */
20832 if (inst
.operands
[1].reg
== REG_PC
)
20833 inst
.error
= _("Use of PC here is UNPREDICTABLE");
20835 if (inst
.operands
[1].immisreg
)
20836 inst
.error
= _("instruction does not accept register index");
20838 if (!inst
.operands
[1].isreg
)
20839 inst
.error
= _("instruction does not accept PC-relative addressing");
20841 if (abs (inst
.operands
[1].imm
) >= (1 << 7))
20842 inst
.error
= _("immediate value out of range");
20844 inst
.instruction
= 0xec000f80;
20846 inst
.instruction
|= 1 << sysreg_vldr_bitno
;
20847 encode_arm_cp_address (1, true, false, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
);
20848 inst
.instruction
|= (inst
.operands
[0].imm
& 0x7) << 13;
20849 inst
.instruction
|= (inst
.operands
[0].imm
& 0x8) << 19;
20853 do_vldr_vstr (void)
20855 bool sysreg_op
= !inst
.operands
[0].isreg
;
20857 /* VLDR/VSTR (System Register). */
20860 if (!mark_feature_used (&arm_ext_v8_1m_main
))
20861 as_bad (_("Instruction not permitted on this architecture"));
20863 do_t_vldr_vstr_sysreg ();
20868 if (!mark_feature_used (&fpu_vfp_ext_v1xd
)
20869 && !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
20870 as_bad (_("Instruction not permitted on this architecture"));
20871 do_neon_ldr_str ();
20875 /* "interleave" version also handles non-interleaving register VLD1/VST1
20879 do_neon_ld_st_interleave (void)
20881 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
20882 N_8
| N_16
| N_32
| N_64
);
20883 unsigned alignbits
= 0;
20885 /* The bits in this table go:
20886 0: register stride of one (0) or two (1)
20887 1,2: register list length, minus one (1, 2, 3, 4).
20888 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
20889 We use -1 for invalid entries. */
20890 const int typetable
[] =
20892 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
20893 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
20894 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
20895 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
20899 if (et
.type
== NT_invtype
)
20902 if (inst
.operands
[1].immisalign
)
20903 switch (inst
.operands
[1].imm
>> 8)
20905 case 64: alignbits
= 1; break;
20907 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
20908 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
20909 goto bad_alignment
;
20913 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
20914 goto bad_alignment
;
20919 first_error (_("bad alignment"));
20923 inst
.instruction
|= alignbits
<< 4;
20924 inst
.instruction
|= neon_logbits (et
.size
) << 6;
20926 /* Bits [4:6] of the immediate in a list specifier encode register stride
20927 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
20928 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
20929 up the right value for "type" in a table based on this value and the given
20930 list style, then stick it back. */
20931 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
20932 | (((inst
.instruction
>> 8) & 3) << 3);
20934 typebits
= typetable
[idx
];
20936 constraint (typebits
== -1, _("bad list type for instruction"));
20937 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
20940 inst
.instruction
&= ~0xf00;
20941 inst
.instruction
|= typebits
<< 8;
20944 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
20945 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
20946 otherwise. The variable arguments are a list of pairs of legal (size, align)
20947 values, terminated with -1. */
20950 neon_alignment_bit (int size
, int align
, int *do_alignment
, ...)
20953 int result
= FAIL
, thissize
, thisalign
;
20955 if (!inst
.operands
[1].immisalign
)
20961 va_start (ap
, do_alignment
);
20965 thissize
= va_arg (ap
, int);
20966 if (thissize
== -1)
20968 thisalign
= va_arg (ap
, int);
20970 if (size
== thissize
&& align
== thisalign
)
20973 while (result
!= SUCCESS
);
20977 if (result
== SUCCESS
)
20980 first_error (_("unsupported alignment for instruction"));
20986 do_neon_ld_st_lane (void)
20988 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
20989 int align_good
, do_alignment
= 0;
20990 int logsize
= neon_logbits (et
.size
);
20991 int align
= inst
.operands
[1].imm
>> 8;
20992 int n
= (inst
.instruction
>> 8) & 3;
20993 int max_el
= 64 / et
.size
;
20995 if (et
.type
== NT_invtype
)
20998 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
20999 _("bad list length"));
21000 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
21001 _("scalar index out of range"));
21002 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
21004 _("stride of 2 unavailable when element size is 8"));
21008 case 0: /* VLD1 / VST1. */
21009 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 16, 16,
21011 if (align_good
== FAIL
)
21015 unsigned alignbits
= 0;
21018 case 16: alignbits
= 0x1; break;
21019 case 32: alignbits
= 0x3; break;
21022 inst
.instruction
|= alignbits
<< 4;
21026 case 1: /* VLD2 / VST2. */
21027 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 16,
21028 16, 32, 32, 64, -1);
21029 if (align_good
== FAIL
)
21032 inst
.instruction
|= 1 << 4;
21035 case 2: /* VLD3 / VST3. */
21036 constraint (inst
.operands
[1].immisalign
,
21037 _("can't use alignment with this instruction"));
21040 case 3: /* VLD4 / VST4. */
21041 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
21042 16, 64, 32, 64, 32, 128, -1);
21043 if (align_good
== FAIL
)
21047 unsigned alignbits
= 0;
21050 case 8: alignbits
= 0x1; break;
21051 case 16: alignbits
= 0x1; break;
21052 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
21055 inst
.instruction
|= alignbits
<< 4;
21062 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
21063 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
21064 inst
.instruction
|= 1 << (4 + logsize
);
21066 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
21067 inst
.instruction
|= logsize
<< 10;
21070 /* Encode single n-element structure to all lanes VLD<n> instructions. */
21073 do_neon_ld_dup (void)
21075 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
21076 int align_good
, do_alignment
= 0;
21078 if (et
.type
== NT_invtype
)
21081 switch ((inst
.instruction
>> 8) & 3)
21083 case 0: /* VLD1. */
21084 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
21085 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
21086 &do_alignment
, 16, 16, 32, 32, -1);
21087 if (align_good
== FAIL
)
21089 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
21092 case 2: inst
.instruction
|= 1 << 5; break;
21093 default: first_error (_("bad list length")); return;
21095 inst
.instruction
|= neon_logbits (et
.size
) << 6;
21098 case 1: /* VLD2. */
21099 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
21100 &do_alignment
, 8, 16, 16, 32, 32, 64,
21102 if (align_good
== FAIL
)
21104 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
21105 _("bad list length"));
21106 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
21107 inst
.instruction
|= 1 << 5;
21108 inst
.instruction
|= neon_logbits (et
.size
) << 6;
21111 case 2: /* VLD3. */
21112 constraint (inst
.operands
[1].immisalign
,
21113 _("can't use alignment with this instruction"));
21114 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
21115 _("bad list length"));
21116 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
21117 inst
.instruction
|= 1 << 5;
21118 inst
.instruction
|= neon_logbits (et
.size
) << 6;
21121 case 3: /* VLD4. */
21123 int align
= inst
.operands
[1].imm
>> 8;
21124 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
21125 16, 64, 32, 64, 32, 128, -1);
21126 if (align_good
== FAIL
)
21128 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
21129 _("bad list length"));
21130 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
21131 inst
.instruction
|= 1 << 5;
21132 if (et
.size
== 32 && align
== 128)
21133 inst
.instruction
|= 0x3 << 6;
21135 inst
.instruction
|= neon_logbits (et
.size
) << 6;
21142 inst
.instruction
|= do_alignment
<< 4;
21145 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
21146 apart from bits [11:4]. */
21149 do_neon_ldx_stx (void)
21151 if (inst
.operands
[1].isreg
)
21152 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
21154 switch (NEON_LANE (inst
.operands
[0].imm
))
21156 case NEON_INTERLEAVE_LANES
:
21157 NEON_ENCODE (INTERLV
, inst
);
21158 do_neon_ld_st_interleave ();
21161 case NEON_ALL_LANES
:
21162 NEON_ENCODE (DUP
, inst
);
21163 if (inst
.instruction
== N_INV
)
21165 first_error ("only loads support such operands");
21172 NEON_ENCODE (LANE
, inst
);
21173 do_neon_ld_st_lane ();
21176 /* L bit comes from bit mask. */
21177 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
21178 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
21179 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
21181 if (inst
.operands
[1].postind
)
21183 int postreg
= inst
.operands
[1].imm
& 0xf;
21184 constraint (!inst
.operands
[1].immisreg
,
21185 _("post-index must be a register"));
21186 constraint (postreg
== 0xd || postreg
== 0xf,
21187 _("bad register for post-index"));
21188 inst
.instruction
|= postreg
;
21192 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
21193 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
21194 || inst
.relocs
[0].exp
.X_add_number
!= 0,
21197 if (inst
.operands
[1].writeback
)
21199 inst
.instruction
|= 0xd;
21202 inst
.instruction
|= 0xf;
21206 inst
.instruction
|= 0xf9000000;
21208 inst
.instruction
|= 0xf4000000;
21213 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
21215 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
21216 D register operands. */
21217 if (neon_shape_class
[rs
] == SC_DOUBLE
)
21218 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
21221 NEON_ENCODE (FPV8
, inst
);
21223 if (rs
== NS_FFF
|| rs
== NS_HHH
)
21225 do_vfp_sp_dyadic ();
21227 /* ARMv8.2 fp16 instruction. */
21229 do_scalar_fp16_v82_encode ();
21232 do_vfp_dp_rd_rn_rm ();
21235 inst
.instruction
|= 0x100;
21237 inst
.instruction
|= 0xf0000000;
21243 set_pred_insn_type (OUTSIDE_PRED_INSN
);
21245 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
21246 first_error (_("invalid instruction shape"));
21252 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
21253 set_pred_insn_type (OUTSIDE_PRED_INSN
);
21255 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
21258 if (!check_simd_pred_availability (true, NEON_CHECK_CC
| NEON_CHECK_ARCH8
))
21261 neon_dyadic_misc (NT_untyped
, N_F_16_32
, 0);
21265 do_vrint_1 (enum neon_cvt_mode mode
)
21267 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
21268 struct neon_type_el et
;
21273 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
21274 D register operands. */
21275 if (neon_shape_class
[rs
] == SC_DOUBLE
)
21276 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
21279 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
21281 if (et
.type
!= NT_invtype
)
21283 /* VFP encodings. */
21284 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
21285 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
21286 set_pred_insn_type (OUTSIDE_PRED_INSN
);
21288 NEON_ENCODE (FPV8
, inst
);
21289 if (rs
== NS_FF
|| rs
== NS_HH
)
21290 do_vfp_sp_monadic ();
21292 do_vfp_dp_rd_rm ();
21296 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
21297 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
21298 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
21299 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
21300 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
21301 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
21302 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
21306 inst
.instruction
|= (rs
== NS_DD
) << 8;
21307 do_vfp_cond_or_thumb ();
21309 /* ARMv8.2 fp16 vrint instruction. */
21311 do_scalar_fp16_v82_encode ();
21315 /* Neon encodings (or something broken...). */
21317 et
= neon_check_type (2, rs
, N_EQK
, N_F_16_32
| N_KEY
);
21319 if (et
.type
== NT_invtype
)
21322 if (!check_simd_pred_availability (true,
21323 NEON_CHECK_CC
| NEON_CHECK_ARCH8
))
21326 NEON_ENCODE (FLOAT
, inst
);
21328 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
21329 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
21330 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
21331 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
21332 inst
.instruction
|= neon_quad (rs
) << 6;
21333 /* Mask off the original size bits and reencode them. */
21334 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff)
21335 | neon_logbits (et
.size
) << 18);
21339 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
21340 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
21341 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
21342 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
21343 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
21344 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
21345 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
21350 inst
.instruction
|= 0xfc000000;
21352 inst
.instruction
|= 0xf0000000;
21359 do_vrint_1 (neon_cvt_mode_x
);
21365 do_vrint_1 (neon_cvt_mode_z
);
21371 do_vrint_1 (neon_cvt_mode_r
);
21377 do_vrint_1 (neon_cvt_mode_a
);
21383 do_vrint_1 (neon_cvt_mode_n
);
21389 do_vrint_1 (neon_cvt_mode_p
);
21395 do_vrint_1 (neon_cvt_mode_m
);
/* Translate a GAS scalar operand (register + index) into the VCMLA scalar
   encoding.  fp16 allows indices 0-1 on D0-D15; fp32 allows only index 0.
   Reports "scalar out of range" otherwise.  */

static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (opnd);
  unsigned elno = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16 && elno < 2 && regno < 16)
    return regno | (elno << 4);
  else if (elsize == 32 && elno == 0)
    return regno;

  first_error (_("scalar out of range"));
  return 0;
}
21416 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_fp_ext
)
21417 && (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
)
21418 || !mark_feature_used (&arm_ext_v8_3
)), (BAD_FPU
));
21419 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
21420 _("expression too complex"));
21421 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
21422 constraint (rot
!= 0 && rot
!= 90 && rot
!= 180 && rot
!= 270,
21423 _("immediate out of range"));
21426 if (!check_simd_pred_availability (true,
21427 NEON_CHECK_ARCH8
| NEON_CHECK_CC
))
21430 if (inst
.operands
[2].isscalar
)
21432 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_fp_ext
))
21433 first_error (_("invalid instruction shape"));
21434 enum neon_shape rs
= neon_select_shape (NS_DDSI
, NS_QQSI
, NS_NULL
);
21435 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
21436 N_KEY
| N_F16
| N_F32
).size
;
21437 unsigned m
= neon_scalar_for_vcmla (inst
.operands
[2].reg
, size
);
21439 inst
.instruction
= 0xfe000800;
21440 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
21441 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
21442 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
21443 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
21444 inst
.instruction
|= LOW4 (m
);
21445 inst
.instruction
|= HI1 (m
) << 5;
21446 inst
.instruction
|= neon_quad (rs
) << 6;
21447 inst
.instruction
|= rot
<< 20;
21448 inst
.instruction
|= (size
== 32) << 23;
21452 enum neon_shape rs
;
21453 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_fp_ext
))
21454 rs
= neon_select_shape (NS_QQQI
, NS_NULL
);
21456 rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
21458 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
21459 N_KEY
| N_F16
| N_F32
).size
;
21460 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_fp_ext
) && size
== 32
21461 && (inst
.operands
[0].reg
== inst
.operands
[1].reg
21462 || inst
.operands
[0].reg
== inst
.operands
[2].reg
))
21463 as_tsktsk (BAD_MVE_SRCDEST
);
21465 neon_three_same (neon_quad (rs
), 0, -1);
21466 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
21467 inst
.instruction
|= 0xfc200800;
21468 inst
.instruction
|= rot
<< 23;
21469 inst
.instruction
|= (size
== 32) << 20;
21476 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
21477 && (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
)
21478 || !mark_feature_used (&arm_ext_v8_3
)), (BAD_FPU
));
21479 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
21480 _("expression too complex"));
21482 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
21483 constraint (rot
!= 90 && rot
!= 270, _("immediate out of range"));
21484 enum neon_shape rs
;
21485 struct neon_type_el et
;
21486 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
21488 rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
21489 et
= neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_F16
| N_F32
);
21493 rs
= neon_select_shape (NS_QQQI
, NS_NULL
);
21494 et
= neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_F16
| N_F32
| N_I8
21496 if (et
.size
== 32 && inst
.operands
[0].reg
== inst
.operands
[2].reg
)
21497 as_tsktsk (_("Warning: 32-bit element size and same first and third "
21498 "operand makes instruction UNPREDICTABLE"));
21501 if (et
.type
== NT_invtype
)
21504 if (!check_simd_pred_availability (et
.type
== NT_float
,
21505 NEON_CHECK_ARCH8
| NEON_CHECK_CC
))
21508 if (et
.type
== NT_float
)
21510 neon_three_same (neon_quad (rs
), 0, -1);
21511 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
21512 inst
.instruction
|= 0xfc800800;
21513 inst
.instruction
|= (rot
== 270) << 24;
21514 inst
.instruction
|= (et
.size
== 32) << 20;
21518 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
), BAD_FPU
);
21519 inst
.instruction
= 0xfe000f00;
21520 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
21521 inst
.instruction
|= neon_logbits (et
.size
) << 20;
21522 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
21523 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
21524 inst
.instruction
|= (rot
== 270) << 12;
21525 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
21526 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
21527 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
21532 /* Dot Product instructions encoding support. */
21535 do_neon_dotproduct (int unsigned_p
)
21537 enum neon_shape rs
;
21538 unsigned scalar_oprd2
= 0;
21541 if (inst
.cond
!= COND_ALWAYS
)
21542 as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
21543 "is UNPREDICTABLE"));
21545 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
21548 /* Dot Product instructions are in three-same D/Q register format or the third
21549 operand can be a scalar index register. */
21550 if (inst
.operands
[2].isscalar
)
21552 scalar_oprd2
= neon_scalar_for_mul (inst
.operands
[2].reg
, 32);
21553 high8
= 0xfe000000;
21554 rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
21558 high8
= 0xfc000000;
21559 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
21563 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_U8
);
21565 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_S8
);
21567 /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
21568 Product instruction, so we pass 0 as the "ubit" parameter. And the
21569 "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter. */
21570 neon_three_same (neon_quad (rs
), 0, 32);
21572 /* Undo neon_dp_fixup. Dot Product instructions are using a slightly
21573 different NEON three-same encoding. */
21574 inst
.instruction
&= 0x00ffffff;
21575 inst
.instruction
|= high8
;
21576 /* Encode 'U' bit which indicates signedness. */
21577 inst
.instruction
|= (unsigned_p
? 1 : 0) << 4;
21578 /* Re-encode operand2 if it's indexed scalar operand. What has been encoded
21579 from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
21580 the instruction encoding. */
21581 if (inst
.operands
[2].isscalar
)
21583 inst
.instruction
&= 0xffffffd0;
21584 inst
.instruction
|= LOW4 (scalar_oprd2
);
21585 inst
.instruction
|= HI1 (scalar_oprd2
) << 5;
/* Dot Product instructions for signed integer.  */

static void
do_neon_dotproduct_s (void)
{
  return do_neon_dotproduct (0);
}

/* Dot Product instructions for unsigned integer.  */

static void
do_neon_dotproduct_u (void)
{
  return do_neon_dotproduct (1);
}
21608 enum neon_shape rs
;
21609 set_pred_insn_type (OUTSIDE_PRED_INSN
);
21610 if (inst
.operands
[2].isscalar
)
21612 rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
21613 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_S8
| N_KEY
);
21615 inst
.instruction
|= (1 << 25);
21616 int idx
= inst
.operands
[2].reg
& 0xf;
21617 constraint ((idx
!= 1 && idx
!= 0), _("index must be 0 or 1"));
21618 inst
.operands
[2].reg
>>= 4;
21619 constraint (!(inst
.operands
[2].reg
< 16),
21620 _("indexed register must be less than 16"));
21621 neon_three_args (rs
== NS_QQS
);
21622 inst
.instruction
|= (idx
<< 5);
21626 inst
.instruction
|= (1 << 21);
21627 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
21628 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_S8
| N_KEY
);
21629 neon_three_args (rs
== NS_QQQ
);
21636 enum neon_shape rs
;
21637 set_pred_insn_type (OUTSIDE_PRED_INSN
);
21638 if (inst
.operands
[2].isscalar
)
21640 rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
21641 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_U8
| N_KEY
);
21643 inst
.instruction
|= (1 << 25);
21644 int idx
= inst
.operands
[2].reg
& 0xf;
21645 constraint ((idx
!= 1 && idx
!= 0), _("index must be 0 or 1"));
21646 inst
.operands
[2].reg
>>= 4;
21647 constraint (!(inst
.operands
[2].reg
< 16),
21648 _("indexed register must be less than 16"));
21649 neon_three_args (rs
== NS_QQS
);
21650 inst
.instruction
|= (idx
<< 5);
21657 enum neon_shape rs
= neon_select_shape (NS_QQQ
, NS_NULL
);
21658 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_S8
| N_KEY
);
21660 set_pred_insn_type (OUTSIDE_PRED_INSN
);
21662 neon_three_args (1);
21669 enum neon_shape rs
= neon_select_shape (NS_QQQ
, NS_NULL
);
21670 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_U8
| N_KEY
);
21672 set_pred_insn_type (OUTSIDE_PRED_INSN
);
21674 neon_three_args (1);
21679 check_cde_operand (size_t idx
, int is_dual
)
21681 unsigned Rx
= inst
.operands
[idx
].reg
;
21682 bool isvec
= inst
.operands
[idx
].isvec
;
21683 if (is_dual
== 0 && thumb_mode
)
21685 !((Rx
<= 14 && Rx
!= 13) || (Rx
== REG_PC
&& isvec
)),
21686 _("Register must be r0-r14 except r13, or APSR_nzcv."));
21688 constraint ( !((Rx
<= 10 && Rx
% 2 == 0 )),
21689 _("Register must be an even register between r0-r10."));
21693 cde_coproc_enabled (unsigned coproc
)
21697 case 0: return mark_feature_used (&arm_ext_cde0
);
21698 case 1: return mark_feature_used (&arm_ext_cde1
);
21699 case 2: return mark_feature_used (&arm_ext_cde2
);
21700 case 3: return mark_feature_used (&arm_ext_cde3
);
21701 case 4: return mark_feature_used (&arm_ext_cde4
);
21702 case 5: return mark_feature_used (&arm_ext_cde5
);
21703 case 6: return mark_feature_used (&arm_ext_cde6
);
21704 case 7: return mark_feature_used (&arm_ext_cde7
);
21705 default: return false;
21709 #define cde_coproc_pos 8
21711 cde_handle_coproc (void)
21713 unsigned coproc
= inst
.operands
[0].reg
;
21714 constraint (coproc
> 7, _("CDE Coprocessor must be in range 0-7"));
21715 constraint (!(cde_coproc_enabled (coproc
)), BAD_CDE_COPROC
);
21716 inst
.instruction
|= coproc
<< cde_coproc_pos
;
21718 #undef cde_coproc_pos
21721 cxn_handle_predication (bool is_accum
)
21723 if (is_accum
&& conditional_insn ())
21724 set_pred_insn_type (INSIDE_IT_INSN
);
21725 else if (conditional_insn ())
21726 /* conditional_insn essentially checks for a suffix, not whether the
21727 instruction is inside an IT block or not.
21728 The non-accumulator versions should not have suffixes. */
21729 inst
.error
= BAD_SYNTAX
;
21731 set_pred_insn_type (OUTSIDE_PRED_INSN
);
21735 do_custom_instruction_1 (int is_dual
, bool is_accum
)
21738 constraint (!mark_feature_used (&arm_ext_cde
), _(BAD_CDE
));
21742 Rd
= inst
.operands
[1].reg
;
21743 check_cde_operand (1, is_dual
);
21747 constraint (inst
.operands
[2].reg
!= Rd
+ 1,
21748 _("cx1d requires consecutive destination registers."));
21749 imm
= inst
.operands
[3].imm
;
21751 else if (is_dual
== 0)
21752 imm
= inst
.operands
[2].imm
;
21756 inst
.instruction
|= Rd
<< 12;
21757 inst
.instruction
|= (imm
& 0x1F80) << 9;
21758 inst
.instruction
|= (imm
& 0x0040) << 1;
21759 inst
.instruction
|= (imm
& 0x003f);
21761 cde_handle_coproc ();
21762 cxn_handle_predication (is_accum
);
21766 do_custom_instruction_2 (int is_dual
, bool is_accum
)
21769 constraint (!mark_feature_used (&arm_ext_cde
), _(BAD_CDE
));
21771 unsigned imm
, Rd
, Rn
;
21773 Rd
= inst
.operands
[1].reg
;
21777 constraint (inst
.operands
[2].reg
!= Rd
+ 1,
21778 _("cx2d requires consecutive destination registers."));
21779 imm
= inst
.operands
[4].imm
;
21780 Rn
= inst
.operands
[3].reg
;
21782 else if (is_dual
== 0)
21784 imm
= inst
.operands
[3].imm
;
21785 Rn
= inst
.operands
[2].reg
;
21790 check_cde_operand (2 + is_dual
, /* is_dual = */0);
21791 check_cde_operand (1, is_dual
);
21793 inst
.instruction
|= Rd
<< 12;
21794 inst
.instruction
|= Rn
<< 16;
21796 inst
.instruction
|= (imm
& 0x0380) << 13;
21797 inst
.instruction
|= (imm
& 0x0040) << 1;
21798 inst
.instruction
|= (imm
& 0x003f);
21800 cde_handle_coproc ();
21801 cxn_handle_predication (is_accum
);
21805 do_custom_instruction_3 (int is_dual
, bool is_accum
)
21808 constraint (!mark_feature_used (&arm_ext_cde
), _(BAD_CDE
));
21810 unsigned imm
, Rd
, Rn
, Rm
;
21812 Rd
= inst
.operands
[1].reg
;
21816 constraint (inst
.operands
[2].reg
!= Rd
+ 1,
21817 _("cx3d requires consecutive destination registers."));
21818 imm
= inst
.operands
[5].imm
;
21819 Rn
= inst
.operands
[3].reg
;
21820 Rm
= inst
.operands
[4].reg
;
21822 else if (is_dual
== 0)
21824 imm
= inst
.operands
[4].imm
;
21825 Rn
= inst
.operands
[2].reg
;
21826 Rm
= inst
.operands
[3].reg
;
21831 check_cde_operand (1, is_dual
);
21832 check_cde_operand (2 + is_dual
, /* is_dual = */0);
21833 check_cde_operand (3 + is_dual
, /* is_dual = */0);
21835 inst
.instruction
|= Rd
;
21836 inst
.instruction
|= Rn
<< 16;
21837 inst
.instruction
|= Rm
<< 12;
21839 inst
.instruction
|= (imm
& 0x0038) << 17;
21840 inst
.instruction
|= (imm
& 0x0004) << 5;
21841 inst
.instruction
|= (imm
& 0x0003) << 4;
21843 cde_handle_coproc ();
21844 cxn_handle_predication (is_accum
);
/* Mnemonic entry points for the CDE custom instructions: (is_dual, is_accum)
   pairs fed to the shared CX1/CX2/CX3 workers.  */

static void
do_cx1 (void)
{
  return do_custom_instruction_1 (0, 0);
}

static void
do_cx1a (void)
{
  return do_custom_instruction_1 (0, 1);
}

static void
do_cx1d (void)
{
  return do_custom_instruction_1 (1, 0);
}

static void
do_cx1da (void)
{
  return do_custom_instruction_1 (1, 1);
}

static void
do_cx2 (void)
{
  return do_custom_instruction_2 (0, 0);
}

static void
do_cx2a (void)
{
  return do_custom_instruction_2 (0, 1);
}

static void
do_cx2d (void)
{
  return do_custom_instruction_2 (1, 0);
}

static void
do_cx2da (void)
{
  return do_custom_instruction_2 (1, 1);
}

static void
do_cx3 (void)
{
  return do_custom_instruction_3 (0, 0);
}

static void
do_cx3a (void)
{
  return do_custom_instruction_3 (0, 1);
}

static void
do_cx3d (void)
{
  return do_custom_instruction_3 (1, 0);
}

static void
do_cx3da (void)
{
  return do_custom_instruction_3 (1, 1);
}
21920 vcx_assign_vec_d (unsigned regnum
)
21922 inst
.instruction
|= HI4 (regnum
) << 12;
21923 inst
.instruction
|= LOW1 (regnum
) << 22;
21927 vcx_assign_vec_m (unsigned regnum
)
21929 inst
.instruction
|= HI4 (regnum
);
21930 inst
.instruction
|= LOW1 (regnum
) << 5;
21934 vcx_assign_vec_n (unsigned regnum
)
21936 inst
.instruction
|= HI4 (regnum
) << 16;
21937 inst
.instruction
|= LOW1 (regnum
) << 7;
21940 enum vcx_reg_type
{
21946 static enum vcx_reg_type
21947 vcx_get_reg_type (enum neon_shape ns
)
21949 gas_assert (ns
== NS_PQI
21957 || ns
== NS_PFFFI
);
21958 if (ns
== NS_PQI
|| ns
== NS_PQQI
|| ns
== NS_PQQQI
)
21960 if (ns
== NS_PDI
|| ns
== NS_PDDI
|| ns
== NS_PDDDI
)
21965 #define vcx_size_pos 24
21966 #define vcx_vec_pos 6
21968 vcx_handle_shape (enum vcx_reg_type reg_type
)
21971 if (reg_type
== q_reg
)
21972 inst
.instruction
|= 1 << vcx_vec_pos
;
21973 else if (reg_type
== d_reg
)
21974 inst
.instruction
|= 1 << vcx_size_pos
;
21978 The documentation says that the Q registers are encoded as 2*N in the D:Vd
21979 bits (or equivalent for N and M registers).
21980 Similarly the D registers are encoded as N in D:Vd bits.
21981 While the S registers are encoded as N in the Vd:D bits.
21983 Taking into account the maximum values of these registers we can see a
21984 nicer pattern for calculation:
21985 Q -> 7, D -> 15, S -> 31
21987 If we say that everything is encoded in the Vd:D bits, then we can say
21988 that Q is encoded as 4*N, and D is encoded as 2*N.
21989 This way the bits will end up the same, and calculation is simpler.
21990 (calculation is now:
21991 1. Multiply by a number determined by the register letter.
21992 2. Encode resulting number in Vd:D bits.)
21994 This is made a little more complicated by automatic handling of 'Q'
21995 registers elsewhere, which means the register number is already 2*N where
21996 N is the number the user wrote after the register letter.
22001 #undef vcx_size_pos
22004 vcx_ensure_register_in_range (unsigned R
, enum vcx_reg_type reg_type
)
22006 if (reg_type
== q_reg
)
22008 gas_assert (R
% 2 == 0);
22009 constraint (R
>= 16, _("'q' register must be in range 0-7"));
22011 else if (reg_type
== d_reg
)
22012 constraint (R
>= 16, _("'d' register must be in range 0-15"));
22014 constraint (R
>= 32, _("'s' register must be in range 0-31"));
22017 static void (*vcx_assign_vec
[3]) (unsigned) = {
22024 vcx_handle_register_arguments (unsigned num_registers
,
22025 enum vcx_reg_type reg_type
)
22028 unsigned reg_mult
= vcx_handle_shape (reg_type
);
22029 for (i
= 0; i
< num_registers
; i
++)
22031 R
= inst
.operands
[i
+1].reg
;
22032 vcx_ensure_register_in_range (R
, reg_type
);
22033 if (num_registers
== 3 && i
> 0)
22036 vcx_assign_vec
[1] (R
* reg_mult
);
22038 vcx_assign_vec
[2] (R
* reg_mult
);
22041 vcx_assign_vec
[i
](R
* reg_mult
);
22046 vcx_handle_insn_block (enum vcx_reg_type reg_type
)
22048 if (reg_type
== q_reg
)
22049 if (inst
.cond
> COND_ALWAYS
)
22050 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
22052 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
22053 else if (inst
.cond
== COND_ALWAYS
)
22054 inst
.pred_insn_type
= OUTSIDE_PRED_INSN
;
22056 inst
.error
= BAD_NOT_IT
;
22060 vcx_handle_common_checks (unsigned num_args
, enum neon_shape rs
)
22062 constraint (!mark_feature_used (&arm_ext_cde
), _(BAD_CDE
));
22063 cde_handle_coproc ();
22064 enum vcx_reg_type reg_type
= vcx_get_reg_type (rs
);
22065 vcx_handle_register_arguments (num_args
, reg_type
);
22066 vcx_handle_insn_block (reg_type
);
22067 if (reg_type
== q_reg
)
22068 constraint (!mark_feature_used (&mve_ext
),
22069 _("vcx instructions with Q registers require MVE"));
22071 constraint (!(ARM_FSET_CPU_SUBSET (armv8m_fp
, cpu_variant
)
22072 && mark_feature_used (&armv8m_fp
))
22073 && !mark_feature_used (&mve_ext
),
22074 _("vcx instructions with S or D registers require either MVE"
22075 " or Armv8-M floating point extension."));
22081 enum neon_shape rs
= neon_select_shape (NS_PQI
, NS_PDI
, NS_PFI
, NS_NULL
);
22082 vcx_handle_common_checks (1, rs
);
22084 unsigned imm
= inst
.operands
[2].imm
;
22085 inst
.instruction
|= (imm
& 0x03f);
22086 inst
.instruction
|= (imm
& 0x040) << 1;
22087 inst
.instruction
|= (imm
& 0x780) << 9;
22089 constraint (imm
>= 2048,
22090 _("vcx1 with S or D registers takes immediate within 0-2047"));
22091 inst
.instruction
|= (imm
& 0x800) << 13;
22097 enum neon_shape rs
= neon_select_shape (NS_PQQI
, NS_PDDI
, NS_PFFI
, NS_NULL
);
22098 vcx_handle_common_checks (2, rs
);
22100 unsigned imm
= inst
.operands
[3].imm
;
22101 inst
.instruction
|= (imm
& 0x01) << 4;
22102 inst
.instruction
|= (imm
& 0x02) << 6;
22103 inst
.instruction
|= (imm
& 0x3c) << 14;
22105 constraint (imm
>= 64,
22106 _("vcx2 with S or D registers takes immediate within 0-63"));
22107 inst
.instruction
|= (imm
& 0x40) << 18;
22113 enum neon_shape rs
= neon_select_shape (NS_PQQQI
, NS_PDDDI
, NS_PFFFI
, NS_NULL
);
22114 vcx_handle_common_checks (3, rs
);
22116 unsigned imm
= inst
.operands
[4].imm
;
22117 inst
.instruction
|= (imm
& 0x1) << 4;
22118 inst
.instruction
|= (imm
& 0x6) << 19;
22119 if (rs
!= NS_PQQQI
)
22120 constraint (imm
>= 8,
22121 _("vcx2 with S or D registers takes immediate within 0-7"));
22122 inst
.instruction
|= (imm
& 0x8) << 21;
22125 /* Crypto v1 instructions. */
22127 do_crypto_2op_1 (unsigned elttype
, int op
)
22129 set_pred_insn_type (OUTSIDE_PRED_INSN
);
22131 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
22137 NEON_ENCODE (INTEGER
, inst
);
22138 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
22139 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
22140 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
22141 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
22143 inst
.instruction
|= op
<< 6;
22146 inst
.instruction
|= 0xfc000000;
22148 inst
.instruction
|= 0xf0000000;
22152 do_crypto_3op_1 (int u
, int op
)
22154 set_pred_insn_type (OUTSIDE_PRED_INSN
);
22156 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
22157 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
22162 NEON_ENCODE (INTEGER
, inst
);
22163 neon_three_same (1, u
, 8 << op
);
22169 do_crypto_2op_1 (N_8
, 0);
22175 do_crypto_2op_1 (N_8
, 1);
22181 do_crypto_2op_1 (N_8
, 2);
22187 do_crypto_2op_1 (N_8
, 3);
22193 do_crypto_3op_1 (0, 0);
22199 do_crypto_3op_1 (0, 1);
22205 do_crypto_3op_1 (0, 2);
22211 do_crypto_3op_1 (0, 3);
22217 do_crypto_3op_1 (1, 0);
22223 do_crypto_3op_1 (1, 1);
22227 do_sha256su1 (void)
22229 do_crypto_3op_1 (1, 2);
22235 do_crypto_2op_1 (N_32
, -1);
22241 do_crypto_2op_1 (N_32
, 0);
22245 do_sha256su0 (void)
22247 do_crypto_2op_1 (N_32
, 1);
22251 do_crc32_1 (unsigned int poly
, unsigned int sz
)
22253 unsigned int Rd
= inst
.operands
[0].reg
;
22254 unsigned int Rn
= inst
.operands
[1].reg
;
22255 unsigned int Rm
= inst
.operands
[2].reg
;
22257 set_pred_insn_type (OUTSIDE_PRED_INSN
);
22258 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
22259 inst
.instruction
|= LOW4 (Rn
) << 16;
22260 inst
.instruction
|= LOW4 (Rm
);
22261 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
22262 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
22264 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
22265 as_warn (UNPRED_REG ("r15"));
22307 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
22309 neon_check_type (2, NS_FD
, N_S32
, N_F64
);
22310 do_vfp_sp_dp_cvt ();
22311 do_vfp_cond_or_thumb ();
22317 enum neon_shape rs
;
22318 constraint (!mark_feature_used (&fpu_neon_ext_armv8
), _(BAD_FPU
));
22319 set_pred_insn_type (OUTSIDE_PRED_INSN
);
22320 if (inst
.operands
[2].isscalar
)
22322 rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
22323 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_BF16
| N_KEY
);
22325 inst
.instruction
|= (1 << 25);
22326 int idx
= inst
.operands
[2].reg
& 0xf;
22327 constraint ((idx
!= 1 && idx
!= 0), _("index must be 0 or 1"));
22328 inst
.operands
[2].reg
>>= 4;
22329 constraint (!(inst
.operands
[2].reg
< 16),
22330 _("indexed register must be less than 16"));
22331 neon_three_args (rs
== NS_QQS
);
22332 inst
.instruction
|= (idx
<< 5);
22336 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
22337 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_BF16
| N_KEY
);
22338 neon_three_args (rs
== NS_QQQ
);
22345 enum neon_shape rs
= neon_select_shape (NS_QQQ
, NS_NULL
);
22346 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_BF16
| N_KEY
);
22348 constraint (!mark_feature_used (&fpu_neon_ext_armv8
), _(BAD_FPU
));
22349 set_pred_insn_type (OUTSIDE_PRED_INSN
);
22351 neon_three_args (1);
22357 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
22361 do_t_pacbti_nonop (void)
22363 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, pacbti_ext
),
22366 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
22367 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
22368 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
22369 inst
.instruction
|= inst
.operands
[2].reg
;
22373 do_t_pacbti_pacg (void)
22375 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, pacbti_ext
),
22378 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
22379 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
22380 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
22381 inst
.instruction
|= inst
.operands
[2].reg
;
22385 /* Overall per-instruction processing. */
22387 /* We need to be able to fix up arbitrary expressions in some statements.
22388 This is so that we can handle symbols that are an arbitrary distance from
22389 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
22390 which returns part of an address in a form which will be valid for
22391 a data instruction. We do this by pushing the expression into a symbol
22392 in the expr_section, and creating a fix for that. */
22395 fix_new_arm (fragS
* frag
,
22409 /* Create an absolute valued symbol, so we have something to
22410 refer to in the object file. Unfortunately for us, gas's
22411 generic expression parsing will already have folded out
22412 any use of .set foo/.type foo %function that may have
22413 been used to set type information of the target location,
22414 that's being specified symbolically. We have to presume
22415 the user knows what they are doing. */
22419 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
22421 symbol
= symbol_find_or_make (name
);
22422 S_SET_SEGMENT (symbol
, absolute_section
);
22423 symbol_set_frag (symbol
, &zero_address_frag
);
22424 S_SET_VALUE (symbol
, exp
->X_add_number
);
22425 exp
->X_op
= O_symbol
;
22426 exp
->X_add_symbol
= symbol
;
22427 exp
->X_add_number
= 0;
22433 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
22434 (enum bfd_reloc_code_real
) reloc
);
22438 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
22439 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
22443 /* Mark whether the fix is to a THUMB instruction, or an ARM
22445 new_fix
->tc_fix_data
= thumb_mode
;
22448 /* Create a frg for an instruction requiring relaxation. */
22450 output_relax_insn (void)
22456 /* The size of the instruction is unknown, so tie the debug info to the
22457 start of the instruction. */
22458 dwarf2_emit_insn (0);
22460 switch (inst
.relocs
[0].exp
.X_op
)
22463 sym
= inst
.relocs
[0].exp
.X_add_symbol
;
22464 offset
= inst
.relocs
[0].exp
.X_add_number
;
22468 offset
= inst
.relocs
[0].exp
.X_add_number
;
22471 sym
= make_expr_symbol (&inst
.relocs
[0].exp
);
22475 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
22476 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
22477 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
22480 /* Write a 32-bit thumb instruction to buf. */
22482 put_thumb32_insn (char * buf
, unsigned long insn
)
22484 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
22485 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
22489 output_inst (const char * str
)
22495 as_bad ("%s -- `%s'", inst
.error
, str
);
22500 output_relax_insn ();
22503 if (inst
.size
== 0)
22506 to
= frag_more (inst
.size
);
22507 /* PR 9814: Record the thumb mode into the current frag so that we know
22508 what type of NOP padding to use, if necessary. We override any previous
22509 setting so that if the mode has changed then the NOPS that we use will
22510 match the encoding of the last instruction in the frag. */
22511 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
22513 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
22515 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
22516 put_thumb32_insn (to
, inst
.instruction
);
22518 else if (inst
.size
> INSN_SIZE
)
22520 gas_assert (inst
.size
== (2 * INSN_SIZE
));
22521 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
22522 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
22525 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
22528 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
22530 if (inst
.relocs
[r
].type
!= BFD_RELOC_UNUSED
)
22531 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
22532 inst
.size
, & inst
.relocs
[r
].exp
, inst
.relocs
[r
].pc_rel
,
22533 inst
.relocs
[r
].type
);
22536 dwarf2_emit_insn (inst
.size
);
22540 output_it_inst (int cond
, int mask
, char * to
)
22542 unsigned long instruction
= 0xbf00;
22545 instruction
|= mask
;
22546 instruction
|= cond
<< 4;
22550 to
= frag_more (2);
22552 dwarf2_emit_insn (2);
22556 md_number_to_chars (to
, instruction
, 2);
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a scalar
			   conditional suffix, others place 0xF where the
			   condition field would be, others take a vector
			   conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
22596 /* Subroutine of md_assemble, responsible for looking up the primary
22597 opcode from the mnemonic the user wrote. STR points to the
22598 beginning of the mnemonic.
22600 This is not simply a hash table lookup, because of conditional
22601 variants. Most instructions have conditional variants, which are
22602 expressed with a _conditional affix_ to the mnemonic. If we were
22603 to encode each conditional variant as a literal string in the opcode
22604 table, it would have approximately 20,000 entries.
22606 Most mnemonics take this affix as a suffix, and in unified syntax,
22607 'most' is upgraded to 'all'. However, in the divided syntax, some
22608 instructions take the affix as an infix, notably the s-variants of
22609 the arithmetic instructions. Of those instructions, all but six
22610 have the infix appear after the third character of the mnemonic.
22612 Accordingly, the algorithm for looking up primary opcodes given
22615 1. Look up the identifier in the opcode table.
22616 If we find a match, go to step U.
22618 2. Look up the last two characters of the identifier in the
22619 conditions table. If we find a match, look up the first N-2
22620 characters of the identifier in the opcode table. If we
22621 find a match, go to step CE.
22623 3. Look up the fourth and fifth characters of the identifier in
22624 the conditions table. If we find a match, extract those
22625 characters from the identifier, and look up the remaining
22626 characters in the opcode table. If we find a match, go
22631 U. Examine the tag field of the opcode structure, in case this is
22632 one of the six instructions with its conditional infix in an
22633 unusual place. If it is, the tag tells us where to find the
22634 infix; look it up in the conditions table and set inst.cond
22635 accordingly. Otherwise, this is an unconditional instruction.
22636 Again set inst.cond accordingly. Return the opcode structure.
22638 CE. Examine the tag field to make sure this is an instruction that
22639 should receive a conditional suffix. If it is not, fail.
22640 Otherwise, set inst.cond from the suffix we already looked up,
22641 and return the opcode structure.
22643 CM. Examine the tag field to make sure this is an instruction that
22644 should receive a conditional infix after the third character.
22645 If it is not, fail. Otherwise, undo the edits to the current
22646 line of input and proceed as for case CE. */
22648 static const struct asm_opcode
*
22649 opcode_lookup (char **str
)
22653 const struct asm_opcode
*opcode
;
22654 const struct asm_cond
*cond
;
22657 /* Scan up to the end of the mnemonic, which must end in white space,
22658 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
22659 for (base
= end
= *str
; *end
!= '\0'; end
++)
22660 if (*end
== ' ' || *end
== '.')
22666 /* Handle a possible width suffix and/or Neon type suffix. */
22671 /* The .w and .n suffixes are only valid if the unified syntax is in
22673 if (unified_syntax
&& end
[1] == 'w')
22675 else if (unified_syntax
&& end
[1] == 'n')
22680 inst
.vectype
.elems
= 0;
22682 *str
= end
+ offset
;
22684 if (end
[offset
] == '.')
22686 /* See if we have a Neon type suffix (possible in either unified or
22687 non-unified ARM syntax mode). */
22688 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
22691 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
22697 /* Look for unaffixed or special-case affixed mnemonic. */
22698 opcode
= (const struct asm_opcode
*) str_hash_find_n (arm_ops_hsh
, base
,
22704 if (opcode
->tag
< OT_odd_infix_0
)
22706 inst
.cond
= COND_ALWAYS
;
22710 if (warn_on_deprecated
&& unified_syntax
)
22711 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
22712 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
22713 cond
= (const struct asm_cond
*) str_hash_find_n (arm_cond_hsh
, affix
, 2);
22716 inst
.cond
= cond
->value
;
22719 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
22721 /* Cannot have a conditional suffix on a mnemonic of less than a character.
22723 if (end
- base
< 2)
22726 cond
= (const struct asm_cond
*) str_hash_find_n (arm_vcond_hsh
, affix
, 1);
22727 opcode
= (const struct asm_opcode
*) str_hash_find_n (arm_ops_hsh
, base
,
22729 /* If this opcode can not be vector predicated then don't accept it with a
22730 vector predication code. */
22731 if (opcode
&& !opcode
->mayBeVecPred
)
22734 if (!opcode
|| !cond
)
22736 /* Cannot have a conditional suffix on a mnemonic of less than two
22738 if (end
- base
< 3)
22741 /* Look for suffixed mnemonic. */
22743 cond
= (const struct asm_cond
*) str_hash_find_n (arm_cond_hsh
, affix
, 2);
22744 opcode
= (const struct asm_opcode
*) str_hash_find_n (arm_ops_hsh
, base
,
22748 if (opcode
&& cond
)
22751 switch (opcode
->tag
)
22753 case OT_cinfix3_legacy
:
22754 /* Ignore conditional suffixes matched on infix only mnemonics. */
22758 case OT_cinfix3_deprecated
:
22759 case OT_odd_infix_unc
:
22760 if (!unified_syntax
)
22762 /* Fall through. */
22766 case OT_csuf_or_in3
:
22767 inst
.cond
= cond
->value
;
22770 case OT_unconditional
:
22771 case OT_unconditionalF
:
22773 inst
.cond
= cond
->value
;
22776 /* Delayed diagnostic. */
22777 inst
.error
= BAD_COND
;
22778 inst
.cond
= COND_ALWAYS
;
22787 /* Cannot have a usual-position infix on a mnemonic of less than
22788 six characters (five would be a suffix). */
22789 if (end
- base
< 6)
22792 /* Look for infixed mnemonic in the usual position. */
22794 cond
= (const struct asm_cond
*) str_hash_find_n (arm_cond_hsh
, affix
, 2);
22798 memcpy (save
, affix
, 2);
22799 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
22800 opcode
= (const struct asm_opcode
*) str_hash_find_n (arm_ops_hsh
, base
,
22802 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
22803 memcpy (affix
, save
, 2);
22806 && (opcode
->tag
== OT_cinfix3
22807 || opcode
->tag
== OT_cinfix3_deprecated
22808 || opcode
->tag
== OT_csuf_or_in3
22809 || opcode
->tag
== OT_cinfix3_legacy
))
22812 if (warn_on_deprecated
&& unified_syntax
22813 && (opcode
->tag
== OT_cinfix3
22814 || opcode
->tag
== OT_cinfix3_deprecated
))
22815 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
22817 inst
.cond
= cond
->value
;
22824 /* This function generates an initial IT instruction, leaving its block
22825 virtually open for the new instructions. Eventually,
22826 the mask will be updated by now_pred_add_mask () each time
22827 a new instruction needs to be included in the IT block.
22828 Finally, the block is closed with close_automatic_it_block ().
22829 The block closure can be requested either from md_assemble (),
22830 a tencode (), or due to a label hook. */
22833 new_automatic_it_block (int cond
)
22835 now_pred
.state
= AUTOMATIC_PRED_BLOCK
;
22836 now_pred
.mask
= 0x18;
22837 now_pred
.cc
= cond
;
22838 now_pred
.block_length
= 1;
22839 mapping_state (MAP_THUMB
);
22840 now_pred
.insn
= output_it_inst (cond
, now_pred
.mask
, NULL
);
22841 now_pred
.warn_deprecated
= false;
22842 now_pred
.insn_cond
= true;
22845 /* Close an automatic IT block.
22846 See comments in new_automatic_it_block (). */
22849 close_automatic_it_block (void)
22851 now_pred
.mask
= 0x10;
22852 now_pred
.block_length
= 0;
22855 /* Update the mask of the current automatically-generated IT
22856 instruction. See comments in new_automatic_it_block (). */
22859 now_pred_add_mask (int cond
)
22861 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
22862 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
22863 | ((bitvalue) << (nbit)))
22864 const int resulting_bit
= (cond
& 1);
22866 now_pred
.mask
&= 0xf;
22867 now_pred
.mask
= SET_BIT_VALUE (now_pred
.mask
,
22869 (5 - now_pred
.block_length
));
22870 now_pred
.mask
= SET_BIT_VALUE (now_pred
.mask
,
22872 ((5 - now_pred
.block_length
) - 1));
22873 output_it_inst (now_pred
.cc
, now_pred
.mask
, now_pred
.insn
);
22876 #undef SET_BIT_VALUE
22879 /* The IT blocks handling machinery is accessed through the these functions:
22880 it_fsm_pre_encode () from md_assemble ()
22881 set_pred_insn_type () optional, from the tencode functions
22882 set_pred_insn_type_last () ditto
22883 in_pred_block () ditto
22884 it_fsm_post_encode () from md_assemble ()
22885 force_automatic_it_block_close () from label handling functions
22888 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
22889 initializing the IT insn type with a generic initial value depending
22890 on the inst.condition.
22891 2) During the tencode function, two things may happen:
22892 a) The tencode function overrides the IT insn type by
22893 calling either set_pred_insn_type (type) or
22894 set_pred_insn_type_last ().
22895 b) The tencode function queries the IT block state by
22896 calling in_pred_block () (i.e. to determine narrow/not narrow mode).
22898 Both set_pred_insn_type and in_pred_block run the internal FSM state
22899 handling function (handle_pred_state), because: a) setting the IT insn
22900 type may incur in an invalid state (exiting the function),
22901 and b) querying the state requires the FSM to be updated.
22902 Specifically we want to avoid creating an IT block for conditional
22903 branches, so it_fsm_pre_encode is actually a guess and we can't
22904 determine whether an IT block is required until the tencode () routine
22905 has decided what type of instruction this actually it.
22906 Because of this, if set_pred_insn_type and in_pred_block have to be
22907 used, set_pred_insn_type has to be called first.
22909 set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
22910 that determines the insn IT type depending on the inst.cond code.
22911 When a tencode () routine encodes an instruction that can be
22912 either outside an IT block, or, in the case of being inside, has to be
22913 the last one, set_pred_insn_type_last () will determine the proper
22914 IT instruction type based on the inst.cond code. Otherwise,
22915 set_pred_insn_type can be called for overriding that logic or
22916 for covering other cases.
22918 Calling handle_pred_state () may not transition the IT block state to
22919 OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
22920 still queried. Instead, if the FSM determines that the state should
22921 be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
22922 after the tencode () function: that's what it_fsm_post_encode () does.
22924 Since in_pred_block () calls the state handling function to get an
22925 updated state, an error may occur (due to invalid insns combination).
22926 In that case, inst.error is set.
22927 Therefore, inst.error has to be checked after the execution of
22928 the tencode () routine.
22930 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
22931 any pending state change (if any) that didn't take place in
22932 handle_pred_state () as explained above. */
22935 it_fsm_pre_encode (void)
22937 if (inst
.cond
!= COND_ALWAYS
)
22938 inst
.pred_insn_type
= INSIDE_IT_INSN
;
22940 inst
.pred_insn_type
= OUTSIDE_PRED_INSN
;
22942 now_pred
.state_handled
= 0;
22945 /* IT state FSM handling function. */
22946 /* MVE instructions and non-MVE instructions are handled differently because of
22947 the introduction of VPT blocks.
22948 Specifications say that any non-MVE instruction inside a VPT block is
22949 UNPREDICTABLE, with the exception of the BKPT instruction. Whereas most MVE
22950 instructions are deemed to be UNPREDICTABLE if inside an IT block. For the
22951 few exceptions we have MVE_UNPREDICABLE_INSN.
22952 The error messages provided depending on the different combinations possible
22953 are described in the cases below:
22954 For 'most' MVE instructions:
22955 1) In an IT block, with an IT code: syntax error
22956 2) In an IT block, with a VPT code: error: must be in a VPT block
22957 3) In an IT block, with no code: warning: UNPREDICTABLE
22958 4) In a VPT block, with an IT code: syntax error
22959 5) In a VPT block, with a VPT code: OK!
22960 6) In a VPT block, with no code: error: missing code
22961 7) Outside a pred block, with an IT code: error: syntax error
22962 8) Outside a pred block, with a VPT code: error: should be in a VPT block
22963 9) Outside a pred block, with no code: OK!
22964 For non-MVE instructions:
22965 10) In an IT block, with an IT code: OK!
22966 11) In an IT block, with a VPT code: syntax error
22967 12) In an IT block, with no code: error: missing code
22968 13) In a VPT block, with an IT code: error: should be in an IT block
22969 14) In a VPT block, with a VPT code: syntax error
22970 15) In a VPT block, with no code: UNPREDICTABLE
22971 16) Outside a pred block, with an IT code: error: should be in an IT block
22972 17) Outside a pred block, with a VPT code: syntax error
22973 18) Outside a pred block, with no code: OK!
22978 handle_pred_state (void)
22980 now_pred
.state_handled
= 1;
22981 now_pred
.insn_cond
= false;
22983 switch (now_pred
.state
)
22985 case OUTSIDE_PRED_BLOCK
:
22986 switch (inst
.pred_insn_type
)
22988 case MVE_UNPREDICABLE_INSN
:
22989 case MVE_OUTSIDE_PRED_INSN
:
22990 if (inst
.cond
< COND_ALWAYS
)
22992 /* Case 7: Outside a pred block, with an IT code: error: syntax
22994 inst
.error
= BAD_SYNTAX
;
22997 /* Case 9: Outside a pred block, with no code: OK! */
22999 case OUTSIDE_PRED_INSN
:
23000 if (inst
.cond
> COND_ALWAYS
)
23002 /* Case 17: Outside a pred block, with a VPT code: syntax error.
23004 inst
.error
= BAD_SYNTAX
;
23007 /* Case 18: Outside a pred block, with no code: OK! */
23010 case INSIDE_VPT_INSN
:
23011 /* Case 8: Outside a pred block, with a VPT code: error: should be in
23013 inst
.error
= BAD_OUT_VPT
;
23016 case INSIDE_IT_INSN
:
23017 case INSIDE_IT_LAST_INSN
:
23018 if (inst
.cond
< COND_ALWAYS
)
23020 /* Case 16: Outside a pred block, with an IT code: error: should
23021 be in an IT block. */
23022 if (thumb_mode
== 0)
23025 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
23026 as_tsktsk (_("Warning: conditional outside an IT block"\
23031 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
23032 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
23034 /* Automatically generate the IT instruction. */
23035 new_automatic_it_block (inst
.cond
);
23036 if (inst
.pred_insn_type
== INSIDE_IT_LAST_INSN
)
23037 close_automatic_it_block ();
23041 inst
.error
= BAD_OUT_IT
;
23047 else if (inst
.cond
> COND_ALWAYS
)
23049 /* Case 17: Outside a pred block, with a VPT code: syntax error.
23051 inst
.error
= BAD_SYNTAX
;
23056 case IF_INSIDE_IT_LAST_INSN
:
23057 case NEUTRAL_IT_INSN
:
23061 if (inst
.cond
!= COND_ALWAYS
)
23062 first_error (BAD_SYNTAX
);
23063 now_pred
.state
= MANUAL_PRED_BLOCK
;
23064 now_pred
.block_length
= 0;
23065 now_pred
.type
= VECTOR_PRED
;
23069 now_pred
.state
= MANUAL_PRED_BLOCK
;
23070 now_pred
.block_length
= 0;
23071 now_pred
.type
= SCALAR_PRED
;
23076 case AUTOMATIC_PRED_BLOCK
:
23077 /* Three things may happen now:
23078 a) We should increment current it block size;
23079 b) We should close current it block (closing insn or 4 insns);
23080 c) We should close current it block and start a new one (due
23081 to incompatible conditions or
23082 4 insns-length block reached). */
23084 switch (inst
.pred_insn_type
)
23086 case INSIDE_VPT_INSN
:
23088 case MVE_UNPREDICABLE_INSN
:
23089 case MVE_OUTSIDE_PRED_INSN
:
23091 case OUTSIDE_PRED_INSN
:
23092 /* The closure of the block shall happen immediately,
23093 so any in_pred_block () call reports the block as closed. */
23094 force_automatic_it_block_close ();
23097 case INSIDE_IT_INSN
:
23098 case INSIDE_IT_LAST_INSN
:
23099 case IF_INSIDE_IT_LAST_INSN
:
23100 now_pred
.block_length
++;
23102 if (now_pred
.block_length
> 4
23103 || !now_pred_compatible (inst
.cond
))
23105 force_automatic_it_block_close ();
23106 if (inst
.pred_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
23107 new_automatic_it_block (inst
.cond
);
23111 now_pred
.insn_cond
= true;
23112 now_pred_add_mask (inst
.cond
);
23115 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
23116 && (inst
.pred_insn_type
== INSIDE_IT_LAST_INSN
23117 || inst
.pred_insn_type
== IF_INSIDE_IT_LAST_INSN
))
23118 close_automatic_it_block ();
23122 case NEUTRAL_IT_INSN
:
23123 now_pred
.block_length
++;
23124 now_pred
.insn_cond
= true;
23126 if (now_pred
.block_length
> 4)
23127 force_automatic_it_block_close ();
23129 now_pred_add_mask (now_pred
.cc
& 1);
23133 close_automatic_it_block ();
23134 now_pred
.state
= MANUAL_PRED_BLOCK
;
23139 case MANUAL_PRED_BLOCK
:
23143 if (now_pred
.type
== SCALAR_PRED
)
23145 /* Check conditional suffixes. */
23146 cond
= now_pred
.cc
^ ((now_pred
.mask
>> 4) & 1) ^ 1;
23147 now_pred
.mask
<<= 1;
23148 now_pred
.mask
&= 0x1f;
23149 is_last
= (now_pred
.mask
== 0x10);
23153 now_pred
.cc
^= (now_pred
.mask
>> 4);
23154 cond
= now_pred
.cc
+ 0xf;
23155 now_pred
.mask
<<= 1;
23156 now_pred
.mask
&= 0x1f;
23157 is_last
= now_pred
.mask
== 0x10;
23159 now_pred
.insn_cond
= true;
23161 switch (inst
.pred_insn_type
)
23163 case OUTSIDE_PRED_INSN
:
23164 if (now_pred
.type
== SCALAR_PRED
)
23166 if (inst
.cond
== COND_ALWAYS
)
23168 /* Case 12: In an IT block, with no code: error: missing
23170 inst
.error
= BAD_NOT_IT
;
23173 else if (inst
.cond
> COND_ALWAYS
)
23175 /* Case 11: In an IT block, with a VPT code: syntax error.
23177 inst
.error
= BAD_SYNTAX
;
23180 else if (thumb_mode
)
23182 /* This is for some special cases where a non-MVE
23183 instruction is not allowed in an IT block, such as cbz,
23184 but are put into one with a condition code.
23185 You could argue this should be a syntax error, but we
23186 gave the 'not allowed in IT block' diagnostic in the
23187 past so we will keep doing so. */
23188 inst
.error
= BAD_NOT_IT
;
23195 /* Case 15: In a VPT block, with no code: UNPREDICTABLE. */
23196 as_tsktsk (MVE_NOT_VPT
);
23199 case MVE_OUTSIDE_PRED_INSN
:
23200 if (now_pred
.type
== SCALAR_PRED
)
23202 if (inst
.cond
== COND_ALWAYS
)
23204 /* Case 3: In an IT block, with no code: warning:
23206 as_tsktsk (MVE_NOT_IT
);
23209 else if (inst
.cond
< COND_ALWAYS
)
23211 /* Case 1: In an IT block, with an IT code: syntax error.
23213 inst
.error
= BAD_SYNTAX
;
23221 if (inst
.cond
< COND_ALWAYS
)
23223 /* Case 4: In a VPT block, with an IT code: syntax error.
23225 inst
.error
= BAD_SYNTAX
;
23228 else if (inst
.cond
== COND_ALWAYS
)
23230 /* Case 6: In a VPT block, with no code: error: missing
23232 inst
.error
= BAD_NOT_VPT
;
23240 case MVE_UNPREDICABLE_INSN
:
23241 as_tsktsk (now_pred
.type
== SCALAR_PRED
? MVE_NOT_IT
: MVE_NOT_VPT
);
23243 case INSIDE_IT_INSN
:
23244 if (inst
.cond
> COND_ALWAYS
)
23246 /* Case 11: In an IT block, with a VPT code: syntax error. */
23247 /* Case 14: In a VPT block, with a VPT code: syntax error. */
23248 inst
.error
= BAD_SYNTAX
;
23251 else if (now_pred
.type
== SCALAR_PRED
)
23253 /* Case 10: In an IT block, with an IT code: OK! */
23254 if (cond
!= inst
.cond
)
23256 inst
.error
= now_pred
.type
== SCALAR_PRED
? BAD_IT_COND
:
23263 /* Case 13: In a VPT block, with an IT code: error: should be
23265 inst
.error
= BAD_OUT_IT
;
23270 case INSIDE_VPT_INSN
:
23271 if (now_pred
.type
== SCALAR_PRED
)
23273 /* Case 2: In an IT block, with a VPT code: error: must be in a
23275 inst
.error
= BAD_OUT_VPT
;
23278 /* Case 5: In a VPT block, with a VPT code: OK! */
23279 else if (cond
!= inst
.cond
)
23281 inst
.error
= BAD_VPT_COND
;
23285 case INSIDE_IT_LAST_INSN
:
23286 case IF_INSIDE_IT_LAST_INSN
:
23287 if (now_pred
.type
== VECTOR_PRED
|| inst
.cond
> COND_ALWAYS
)
23289 /* Case 4: In a VPT block, with an IT code: syntax error. */
23290 /* Case 11: In an IT block, with a VPT code: syntax error. */
23291 inst
.error
= BAD_SYNTAX
;
23294 else if (cond
!= inst
.cond
)
23296 inst
.error
= BAD_IT_COND
;
23301 inst
.error
= BAD_BRANCH
;
23306 case NEUTRAL_IT_INSN
:
23307 /* The BKPT instruction is unconditional even in a IT or VPT
23312 if (now_pred
.type
== SCALAR_PRED
)
23314 inst
.error
= BAD_IT_IT
;
23317 /* fall through. */
23319 if (inst
.cond
== COND_ALWAYS
)
23321 /* Executing a VPT/VPST instruction inside an IT block or a
23322 VPT/VPST/IT instruction inside a VPT block is UNPREDICTABLE.
23324 if (now_pred
.type
== SCALAR_PRED
)
23325 as_tsktsk (MVE_NOT_IT
);
23327 as_tsktsk (MVE_NOT_VPT
);
23332 /* VPT/VPST do not accept condition codes. */
23333 inst
.error
= BAD_SYNTAX
;
23344 struct depr_insn_mask
23346 unsigned long pattern
;
23347 unsigned long mask
;
23348 const char* description
;
23351 /* List of 16-bit instruction patterns deprecated in an IT block in
23353 static const struct depr_insn_mask depr_it_insns
[] = {
23354 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
23355 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
23356 { 0xa000, 0xb800, N_("ADR") },
23357 { 0x4800, 0xf800, N_("Literal loads") },
23358 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
23359 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
23360 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
23361 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
23362 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
/* Run after each instruction is encoded: finish the deferred predication
   FSM update, emit the ARMv8-A/R "performance deprecated in IT block"
   warnings, and close the block state when the last predicated
   instruction has been seen.  */
static void
it_fsm_post_encode (void)
{
  int is_last;

  /* The FSM update may have been deferred by the encoder; do it now.  */
  if (!now_pred.state_handled)
    handle_pred_state ();

  /* IT-block deprecation warnings apply only to conditional instructions
     on ARMv8-A/ARMv8-R (not M-profile), and are emitted at most once per
     block (warn_deprecated latches).  */
  if (now_pred.insn_cond
      && warn_on_restrict_it
      && !now_pred.warn_deprecated
      && warn_on_deprecated
      && (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
          || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8r))
      && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
    {
      /* Any 32-bit Thumb instruction in an IT block is deprecated.  */
      if (inst.instruction >= 0x10000)
	{
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		     "performance deprecated in ARMv8-A and ARMv8-R"));
	  now_pred.warn_deprecated = true;
	}
      else
	{
	  /* 16-bit instruction: check it against the deprecated classes.  */
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb "
			       "instructions of the following class are "
			       "performance deprecated in ARMv8-A and "
			       "ARMv8-R: %s"), p->description);
		  now_pred.warn_deprecated = true;
		  break;
		}

	      ++p;
	    }
	}

      /* More than one conditional instruction per IT block is also
	 deprecated.  */
      if (now_pred.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		     "instruction are performance deprecated in ARMv8-A and "
		     "ARMv8-R"));
	  now_pred.warn_deprecated = true;
	}
    }

  /* mask == 0x10 marks the final slot of the IT/VPT mask encoding.  */
  is_last = (now_pred.mask == 0x10);
  if (is_last)
    {
      now_pred.type = SCALAR_PRED;
      now_pred.state = OUTSIDE_PRED_BLOCK;
    }
}
23426 force_automatic_it_block_close (void)
23428 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
)
23430 close_automatic_it_block ();
23431 now_pred
.state
= OUTSIDE_PRED_BLOCK
;
23437 in_pred_block (void)
23439 if (!now_pred
.state_handled
)
23440 handle_pred_state ();
23442 return now_pred
.state
!= OUTSIDE_PRED_BLOCK
;
/* Whether OPCODE only has T32 encoding.  Since this function is only used by
   t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
   here, hence the "known" in the function name.  */
static bool
known_t32_only_insn (const struct asm_opcode *opcode)
{
  /* Original Thumb-1 wide instruction.  */
  if (opcode->tencode == do_t_blx
      || opcode->tencode == do_t_branch23
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
    return true;

  /* Wide-only instruction added to ARMv8-M Baseline.  */
  if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
    return true;

  return false;
}
/* Whether wide instruction variant can be used if available for a valid OPCODE
   in ARCH.  */
static bool
t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
{
  if (known_t32_only_insn (opcode))
    return true;

  /* Instruction with narrow and wide encoding added to ARMv8-M.  Availability
     of variant T3 of B.W is checked in do_t_branch.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
      && opcode->tencode == do_t_branch)
    return true;

  /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
      && opcode->tencode == do_t_mov_cmp
      /* Make sure CMP instruction is not affected.  */
      && opcode->aencode == do_mov)
    return true;

  /* Wide instruction variants of all instructions with narrow *and* wide
     variants become available with ARMv6t2.  Other opcodes are either
     narrow-only or wide-only and are thus available if OPCODE is valid.  */
  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
    return true;

  /* OPCODE with narrow only instruction variant or wide variant not
     available.  */
  return false;
}
/* Assemble one source line STR.  Looks the mnemonic up, dispatches to the
   Thumb or ARM encoder according to the current mode, runs the IT/VPT
   predication FSM around encoding, and records which architecture
   features the instruction used.  Errors are reported via as_bad and
   leave inst.error set; output_inst emits the bytes (or nothing on
   error).  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction scratch state.  */
  memset (&inst, '\0', sizeof (inst));

  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1u;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/true))
	{
	  /* Prepare the pred_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bool is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/false))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
    }
  output_inst (str);
}
/* Warn if assembly ends while a manually-opened IT or VPT/VPST block is
   still open.  On ELF targets every section's saved predication state is
   checked; otherwise only the current state.  */
void
check_pred_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_pred.state
	== MANUAL_PRED_BLOCK)
      {
	if (now_pred.type == SCALAR_PRED)
	  as_warn (_("section '%s' finished with an open IT block."),
		   sect->name);
	else
	  as_warn (_("section '%s' finished with an open VPT/VPST block."),
		   sect->name);
      }
#else
  if (now_pred.state == MANUAL_PRED_BLOCK)
    {
      if (now_pred.type == SCALAR_PRED)
	as_warn (_("file finished with an open IT block."));
      else
	as_warn (_("file finished with an open VPT/VPST block."));
    }
#endif
}
/* Various frobbings of labels and their addresses.  */

/* Called at the start of each source line: forget the label seen on the
   previous line so md_assemble only re-aligns labels from this line.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
/* Called whenever a label SYM is defined.  Records it for md_assemble's
   label re-alignment, tags it with the current Thumb/interwork state,
   closes any automatic IT block, and marks eligible labels as Thumb
   functions.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any implicit IT block in progress.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_section_flags (now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = false;
    }

  dwarf2_emit_label (sym);
}
23793 arm_data_in_code (void)
23795 if (thumb_mode
&& startswith (input_line_pointer
+ 1, "data:"))
23797 *input_line_pointer
= '/';
23798 input_line_pointer
+= 5;
23799 *input_line_pointer
= 0;
23807 arm_canonicalize_symbol_name (char * name
)
23811 if (thumb_mode
&& (len
= strlen (name
)) > 5
23812 && streq (name
+ len
- 5, "/data"))
23813 *(name
+ len
- 5) = 0;
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.	Some registers
   also have mixed-case names.	*/

/* One reg_entry: stringized name, encoded number, REG_TYPE_* kind,
   builtin flag, and a zero Neon field.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true, 0 }
/* Name built from prefix P and number N, e.g. REGNUM(r,5,RN) -> "r5".  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* As REGNUM but the encoded value is doubled (Neon Q regs map onto D pairs).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Sixteen consecutive registers p0..p15.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* The upper half p16..p31 (VFP-D32 / Neon).  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* Sixteen registers with doubled encodings (Q registers).  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* LR, SP and SPSR of one banked mode, upper and lower case.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
/* Default register name table.  Built almost entirely from the REGDEF /
   REGNUM / REGSET family of macros above; each name carries its encoded
   number and REG_TYPE_* kind.  */
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Defining the new Zero register from ARMv8.1-M.  */
  REGDEF(zr,15,ZR), REGDEF(ZR,15,ZR),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c,  CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),
  REGDEF(fpscr_nzcvqc,2,VFC), REGDEF(FPSCR_nzcvqc,2,VFC),
  REGDEF(vpr,12,VFC), REGDEF(VPR,12,VFC),
  REGDEF(fpcxt_ns,14,VFC), REGDEF(FPCXT_NS,14,VFC),
  REGDEF(fpcxt_s,15,VFC), REGDEF(FPCXT_S,15,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),

  /* DWARF ABI defines RA_AUTH_CODE to 143.  It also reserves 134-142 for future
     expansion.  RA_AUTH_CODE here is given the value 143 % 134 to make it easy
     for tc_arm_regname_to_dw2regnum to translate to DWARF reg number using
     134 + reg_number should the range 134 to 142 be used for more pseudo regs
     in the future.  This also helps fit RA_AUTH_CODE into a bitmask.  */
  REGDEF(ra_auth_code,9,PSEUDO)
};
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Each entry maps an MSR field-mask suffix
   (every ordering of the letters f, s, x, c) to its PSR_* bit set.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"s",	   PSR_s},
  {"x",	   PSR_x},
  {"c",	   PSR_c},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f}
};
/* Table of V7M psr names, mapping each special-register name (lower and
   upper case) to its MRS/MSR SYSm encoding.  The 0x80 bit selects the
   non-secure (_NS) alias.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	   0x0 }, {"APSR",	   0x0 },
  {"iapsr",	   0x1 }, {"IAPSR",	   0x1 },
  {"eapsr",	   0x2 }, {"EAPSR",	   0x2 },
  {"psr",	   0x3 }, {"PSR",	   0x3 },
  {"xpsr",	   0x3 }, {"XPSR",	   0x3 }, {"xPSR",	  3 },
  {"ipsr",	   0x5 }, {"IPSR",	   0x5 },
  {"epsr",	   0x6 }, {"EPSR",	   0x6 },
  {"iepsr",	   0x7 }, {"IEPSR",	   0x7 },
  {"msp",	   0x8 }, {"MSP",	   0x8 },
  {"psp",	   0x9 }, {"PSP",	   0x9 },
  {"msplim",	   0xa }, {"MSPLIM",	   0xa },
  {"psplim",	   0xb }, {"PSPLIM",	   0xb },
  {"primask",	   0x10}, {"PRIMASK",	   0x10},
  {"basepri",	   0x11}, {"BASEPRI",	   0x11},
  {"basepri_max",  0x12}, {"BASEPRI_MAX",  0x12},
  {"faultmask",	   0x13}, {"FAULTMASK",	   0x13},
  {"control",	   0x14}, {"CONTROL",	   0x14},
  {"msp_ns",	   0x88}, {"MSP_NS",	   0x88},
  {"psp_ns",	   0x89}, {"PSP_NS",	   0x89},
  {"msplim_ns",	   0x8a}, {"MSPLIM_NS",	   0x8a},
  {"psplim_ns",	   0x8b}, {"PSPLIM_NS",	   0x8b},
  {"primask_ns",   0x90}, {"PRIMASK_NS",   0x90},
  {"basepri_ns",   0x91}, {"BASEPRI_NS",   0x91},
  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
  {"control_ns",   0x94}, {"CONTROL_NS",   0x94},
  {"sp_ns",	   0x98}, {"SP_NS",	   0x98 }
};
/* Table of all shift-in-operand names.	 "asl" is accepted as a synonym
   for "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX },
  { "uxtw", SHIFT_UXTW}, { "UXTW", SHIFT_UXTW}
};
24105 /* Table of all explicit relocation names. */
24107 static struct reloc_entry reloc_names
[] =
24109 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
24110 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
24111 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
24112 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
24113 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
24114 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
24115 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
24116 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
24117 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
24118 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
24119 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
24120 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
24121 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
24122 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
24123 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
24124 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
24125 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
24126 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
},
24127 { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC
},
24128 { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC
},
24129 { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC
},
24130 { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC
},
24131 { "funcdesc", BFD_RELOC_ARM_FUNCDESC
},
24132 { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC
},
24133 { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC
}, { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC
},
24134 { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC
}, { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC
},
24135 { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC
}, { "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC
},
/* Table of all conditional affixes, mapping each condition-code suffix
   to its 4-bit encoding.  Synonyms ("hs"=="cs", "ul"/"lo"=="cc") share
   a value.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};

/* VPT/IT "then"/"else" affixes; values beyond the 4-bit range mark them
   as vector-predication codes.  */
static const struct asm_cond vconds[] =
{
    {"t",  0xf},
    {"e", 0x10}
};
/* Expand to a lower-case and an upper-case entry for one barrier option,
   both with encoding CODE and gated on architecture feature FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* DMB/DSB/ISB barrier option names and their 4-bit encodings.  The
   load-only variants require ARMv8.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};
24190 /* Table of ARM-format instructions. */
24192 /* Macros for gluing together operand strings. N.B. In all cases
24193 other than OPS0, the trailing OP_stop comes from default
24194 zero-initialization of the unspecified elements of the array. */
24195 #define OPS0() { OP_stop, }
24196 #define OPS1(a) { OP_##a, }
24197 #define OPS2(a,b) { OP_##a,OP_##b, }
24198 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
24199 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
24200 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
24201 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
24203 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
24204 This is useful when mixing operands for ARM and THUMB, i.e. using the
24205 MIX_ARM_THUMB_OPERANDS macro.
24206 In order to use these macros, prefix the number of operands with _
24208 #define OPS_1(a) { a, }
24209 #define OPS_2(a,b) { a,b, }
24210 #define OPS_3(a,b,c) { a,b,c, }
24211 #define OPS_4(a,b,c,d) { a,b,c,d, }
24212 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
24213 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
24215 /* These macros abstract out the exact format of the mnemonic table and
24216 save some repeated characters. */
24218 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
24219 #define TxCE(mnem, op, top, nops, ops, ae, te) \
24220 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
24221 THUMB_VARIANT, do_##ae, do_##te, 0 }
24223 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
24224 a T_MNEM_xyz enumerator. */
24225 #define TCE(mnem, aop, top, nops, ops, ae, te) \
24226 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
24227 #define tCE(mnem, aop, top, nops, ops, ae, te) \
24228 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
24230 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
24231 infix after the third character. */
24232 #define TxC3(mnem, op, top, nops, ops, ae, te) \
24233 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
24234 THUMB_VARIANT, do_##ae, do_##te, 0 }
24235 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
24236 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
24237 THUMB_VARIANT, do_##ae, do_##te, 0 }
24238 #define TC3(mnem, aop, top, nops, ops, ae, te) \
24239 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
24240 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
24241 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
24242 #define tC3(mnem, aop, top, nops, ops, ae, te) \
24243 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
24244 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
24245 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
24247 /* Mnemonic that cannot be conditionalized. The ARM condition-code
24248 field is still 0xE. Many of the Thumb variants can be executed
24249 conditionally, so this is checked separately. */
24250 #define TUE(mnem, op, top, nops, ops, ae, te) \
24251 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
24252 THUMB_VARIANT, do_##ae, do_##te, 0 }
24254 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
24255 Used by mnemonics that have very minimal differences in the encoding for
24256 ARM and Thumb variants and can be handled in a common function. */
24257 #define TUEc(mnem, op, top, nops, ops, en) \
24258 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
24259 THUMB_VARIANT, do_##en, do_##en, 0 }
24261 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
24262 condition code field. */
24263 #define TUF(mnem, op, top, nops, ops, ae, te) \
24264 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
24265 THUMB_VARIANT, do_##ae, do_##te, 0 }
24267 /* ARM-only variants of all the above. */
24268 #define CE(mnem, op, nops, ops, ae) \
24269 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
24271 #define C3(mnem, op, nops, ops, ae) \
24272 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
24274 /* Thumb-only variants of TCE and TUE. */
24275 #define ToC(mnem, top, nops, ops, te) \
24276 { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
24279 #define ToU(mnem, top, nops, ops, te) \
24280 { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
/* T_MNEM_xyz enumerator variants of ToC.  */
/* NOTE(review): restored the dropped continuation line (do_##te encoder
   and mve_p = 0) so these expand to complete initializers like ToC/ToU.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }
/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* mov instructions that are shared between coprocessor and MVE.  */
#define mcCE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##ae, 0 }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Helper for CM below: pastes the (possibly empty) condition code <m2>
   between mnemonic fragments <m1> and <m3>.  sizeof (#m2) == 1 detects
   the empty-condition case (stringized "" has only the NUL).  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
/* Expand one mnemonic into a table entry for every condition-code infix,
   including the synonyms hs (= cs), and ul/lo (= cc).  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1, , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only unconditional mnemonic (no Thumb encoding).  */
#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* As UE, but bears 0xF in the ARM condition code field.  */
#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* MVE mnemonic indexed through the M_MNEM_xyz enumeration; the final 1
   marks the entry as MVE-predicable, as in the other m-prefixed macros.  */
#define mCEF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nCEF but for MVE predicated instructions.  */
#define mnCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* nCE but for MVE predicated instructions.  */
#define mnCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NUF but for potentially MVE predicated instructions.  */
#define MNUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nUF but for potentially MVE predicated instructions.  */
#define mnUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }
/* ToC but for potentially MVE predicated instructions.  */
/* NOTE(review): the extracted text dropped this macro's continuation line;
   restored with mve_p = 1, matching the other m-prefixed MVE macros
   (mCEF/mnCEF/mnCE/MNUF/mnUF all end their initializers with 1).  */
#define mToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 1 }
/* NCE but for MVE predicated instructions.  */
#define MNCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NCEF but for MVE predicated instructions.  */
#define MNCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)
24426 static const struct asm_opcode insns
[] =
24428 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
24429 #define THUMB_VARIANT & arm_ext_v4t
24430 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
24431 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
24432 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
24433 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
24434 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
24435 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
24436 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
24437 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
24438 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
24439 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
24440 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
24441 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
24442 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
24443 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
24444 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
24445 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
24447 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
24448 for setting PSR flag bits. They are obsolete in V6 and do not
24449 have Thumb equivalents. */
24450 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
24451 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
24452 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
24453 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
24454 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
24455 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
24456 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
24457 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
24458 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
24460 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
24461 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
24462 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
24463 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
24465 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
24466 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
24467 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
24469 OP_ADDRGLDR
),ldst
, t_ldst
),
24470 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
24472 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
24473 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
24474 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
24475 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
24476 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
24477 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
24479 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
24480 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
24483 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
24484 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
24485 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
24486 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
24488 /* Thumb-compatibility pseudo ops. */
24489 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
24490 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
24491 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
24492 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
24493 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
24494 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
24495 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
24496 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
24497 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
24498 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
24499 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
24500 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
24502 /* These may simplify to neg. */
24503 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
24504 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
24506 #undef THUMB_VARIANT
24507 #define THUMB_VARIANT & arm_ext_os
24509 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
24510 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
24512 #undef THUMB_VARIANT
24513 #define THUMB_VARIANT & arm_ext_v6
24515 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
24517 /* V1 instructions with no Thumb analogue prior to V6T2. */
24518 #undef THUMB_VARIANT
24519 #define THUMB_VARIANT & arm_ext_v6t2
24521 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
24522 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
24523 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
24525 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
24526 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
24527 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
24528 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
24530 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
24531 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
24533 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
24534 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
24536 /* V1 instructions with no Thumb analogue at all. */
24537 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
24538 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
24540 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
24541 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
24542 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
24543 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
24544 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
24545 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
24546 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
24547 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
24550 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
24551 #undef THUMB_VARIANT
24552 #define THUMB_VARIANT & arm_ext_v4t
24554 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
24555 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
24557 #undef THUMB_VARIANT
24558 #define THUMB_VARIANT & arm_ext_v6t2
24560 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
24561 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
24563 /* Generic coprocessor instructions. */
24564 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
24565 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
24566 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
24567 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
24568 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
24569 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
24570 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
24573 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
24575 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
24576 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
24579 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
24580 #undef THUMB_VARIANT
24581 #define THUMB_VARIANT & arm_ext_msr
24583 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
24584 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
24587 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
24588 #undef THUMB_VARIANT
24589 #define THUMB_VARIANT & arm_ext_v6t2
24591 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
24592 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
24593 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
24594 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
24595 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
24596 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
24597 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
24598 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
24601 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
24602 #undef THUMB_VARIANT
24603 #define THUMB_VARIANT & arm_ext_v4t
24605 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
24606 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
24607 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
24608 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
24609 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
24610 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
24613 #define ARM_VARIANT & arm_ext_v4t_5
24615 /* ARM Architecture 4T. */
24616 /* Note: bx (and blx) are required on V5, even if the processor does
24617 not support Thumb. */
24618 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
24621 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
24622 #undef THUMB_VARIANT
24623 #define THUMB_VARIANT & arm_ext_v5t
24625 /* Note: blx has 2 variants; the .value coded here is for
24626 BLX(2). Only this variant has conditional execution. */
24627 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
24628 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
24630 #undef THUMB_VARIANT
24631 #define THUMB_VARIANT & arm_ext_v6t2
24633 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
24634 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
24635 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
24636 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
24637 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
24638 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
24639 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
24640 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
24643 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
24644 #undef THUMB_VARIANT
24645 #define THUMB_VARIANT & arm_ext_v5exp
24647 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
24648 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
24649 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
24650 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
24652 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
24653 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
24655 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
24656 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
24657 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
24658 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
24660 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
24661 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
24662 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
24663 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
24665 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
24666 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
24668 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
24669 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
24670 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
24671 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
24674 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
24675 #undef THUMB_VARIANT
24676 #define THUMB_VARIANT & arm_ext_v6t2
24678 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
24679 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
24681 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
24682 ADDRGLDRS
), ldrd
, t_ldstd
),
24684 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
24685 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
24688 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
24690 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
24693 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
24694 #undef THUMB_VARIANT
24695 #define THUMB_VARIANT & arm_ext_v6
24697 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
24698 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
24699 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
24700 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
24701 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
24702 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
24703 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
24704 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
24705 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
24706 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
24708 #undef THUMB_VARIANT
24709 #define THUMB_VARIANT & arm_ext_v6t2_v8m
24711 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
24712 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
24714 #undef THUMB_VARIANT
24715 #define THUMB_VARIANT & arm_ext_v6t2
24717 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
24718 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
24720 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
24721 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
24723 /* ARM V6 not included in V7M. */
24724 #undef THUMB_VARIANT
24725 #define THUMB_VARIANT & arm_ext_v6_notm
24726 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
24727 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
24728 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
24729 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
24730 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
24731 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
24732 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
24733 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
24734 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
24735 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
24736 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
24737 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
24738 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
24739 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
24740 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
24741 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
24742 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
24743 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
24744 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
24746 /* ARM V6 not included in V7M (eg. integer SIMD). */
24747 #undef THUMB_VARIANT
24748 #define THUMB_VARIANT & arm_ext_v6_dsp
24749 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
24750 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
24751 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24752 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24753 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24754 /* Old name for QASX. */
24755 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24756 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24757 /* Old name for QSAX. */
24758 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24759 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24760 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24761 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24762 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24763 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24764 /* Old name for SASX. */
24765 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24766 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24767 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24768 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24769 /* Old name for SHASX. */
24770 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24771 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24772 /* Old name for SHSAX. */
24773 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24774 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24775 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24776 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24777 /* Old name for SSAX. */
24778 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24779 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24780 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24781 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24782 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24783 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24784 /* Old name for UASX. */
24785 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24786 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24787 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24788 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24789 /* Old name for UHASX. */
24790 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24791 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24792 /* Old name for UHSAX. */
24793 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24794 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24795 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24796 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24797 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24798 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24799 /* Old name for UQASX. */
24800 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24801 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24802 /* Old name for UQSAX. */
24803 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24804 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24805 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24806 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24807 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24808 /* Old name for USAX. */
24809 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24810 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24811 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
24812 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
24813 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
24814 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
24815 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
24816 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
24817 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
24818 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
24819 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
24820 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
24821 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
24822 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
24823 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
24824 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
24825 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
24826 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
24827 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
24828 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
24829 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
24830 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
24831 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
24832 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
24833 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
24834 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
24835 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
24836 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
24837 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
24838 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
24839 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
24840 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
24841 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
24842 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
24845 #define ARM_VARIANT & arm_ext_v6k_v6t2
24846 #undef THUMB_VARIANT
24847 #define THUMB_VARIANT & arm_ext_v6k_v6t2
24849 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
24850 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
24851 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
24852 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
24854 #undef THUMB_VARIANT
24855 #define THUMB_VARIANT & arm_ext_v6_notm
24856 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
24858 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
24859 RRnpcb
), strexd
, t_strexd
),
24861 #undef THUMB_VARIANT
24862 #define THUMB_VARIANT & arm_ext_v6t2_v8m
24863 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
24865 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
24867 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
24869 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
24871 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
24874 #define ARM_VARIANT & arm_ext_sec
24875 #undef THUMB_VARIANT
24876 #define THUMB_VARIANT & arm_ext_sec
24878 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
24881 #define ARM_VARIANT & arm_ext_virt
24882 #undef THUMB_VARIANT
24883 #define THUMB_VARIANT & arm_ext_virt
24885 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
24886 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
24889 #define ARM_VARIANT & arm_ext_pan
24890 #undef THUMB_VARIANT
24891 #define THUMB_VARIANT & arm_ext_pan
24893 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
24896 #define ARM_VARIANT & arm_ext_v6t2
24897 #undef THUMB_VARIANT
24898 #define THUMB_VARIANT & arm_ext_v6t2
24900 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
24901 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
24902 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
24903 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
24905 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
24906 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
24908 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
24909 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
24910 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
24911 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
24914 #define ARM_VARIANT & arm_ext_v3
24915 #undef THUMB_VARIANT
24916 #define THUMB_VARIANT & arm_ext_v6t2
24918 TUE("csdb", 320f014
, f3af8014
, 0, (), noargs
, t_csdb
),
24919 TUF("ssbb", 57ff040
, f3bf8f40
, 0, (), noargs
, t_csdb
),
24920 TUF("pssbb", 57ff044
, f3bf8f44
, 0, (), noargs
, t_csdb
),
24923 #define ARM_VARIANT & arm_ext_v6t2
24924 #undef THUMB_VARIANT
24925 #define THUMB_VARIANT & arm_ext_v6t2_v8m
24926 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
24927 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
24929 /* Thumb-only instructions. */
24931 #define ARM_VARIANT NULL
24932 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
24933 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
24935 /* ARM does not really have an IT instruction, so always allow it.
24936 The opcode is copied from Thumb in order to allow warnings in
24937 -mimplicit-it=[never | arm] modes. */
24939 #define ARM_VARIANT & arm_ext_v1
24940 #undef THUMB_VARIANT
24941 #define THUMB_VARIANT & arm_ext_v6t2
24943 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
24944 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
24945 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
24946 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
24947 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
24948 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
24949 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
24950 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
24951 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
24952 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
24953 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
24954 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
24955 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
24956 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
24957 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
24958 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
24959 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
24960 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
24962 /* Thumb2 only instructions. */
24964 #define ARM_VARIANT NULL
24966 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
24967 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
24968 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
24969 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
24970 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
24971 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
24973 /* Hardware division instructions. */
24975 #define ARM_VARIANT & arm_ext_adiv
24976 #undef THUMB_VARIANT
24977 #define THUMB_VARIANT & arm_ext_div
24979 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
24980 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
24982 /* ARM V6M/V7 instructions. */
24984 #define ARM_VARIANT & arm_ext_barrier
24985 #undef THUMB_VARIANT
24986 #define THUMB_VARIANT & arm_ext_barrier
24988 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
24989 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
24990 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
24992 /* ARM V7 instructions. */
24994 #define ARM_VARIANT & arm_ext_v7
24995 #undef THUMB_VARIANT
24996 #define THUMB_VARIANT & arm_ext_v7
24998 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
24999 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
25002 #define ARM_VARIANT & arm_ext_mp
25003 #undef THUMB_VARIANT
25004 #define THUMB_VARIANT & arm_ext_mp
25006 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
25008 /* AArchv8 instructions. */
25010 #define ARM_VARIANT & arm_ext_v8
25012 /* Instructions shared between armv8-a and armv8-m. */
25013 #undef THUMB_VARIANT
25014 #define THUMB_VARIANT & arm_ext_atomics
25016 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
25017 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
25018 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
25019 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
25020 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
25021 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
25022 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
25023 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
25024 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
25025 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
25027 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
25029 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
25031 #undef THUMB_VARIANT
25032 #define THUMB_VARIANT & arm_ext_v8
25034 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
25035 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
25037 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
25039 #undef THUMB_VARIANT
25040 #define THUMB_VARIANT & arm_ext_v8r
25042 #define ARM_VARIANT & arm_ext_v8r
25044 /* ARMv8-R instructions. */
25045 TUF("dfb", 57ff04c
, f3bf8f4c
, 0, (), noargs
, noargs
),
25047 /* Defined in V8 but is in undefined encoding space for earlier
25048 architectures. However earlier architectures are required to treat
25049 this instruction as a semihosting trap as well. Hence while not explicitly
25050 defined as such, it is in fact correct to define the instruction for all
25052 #undef THUMB_VARIANT
25053 #define THUMB_VARIANT & arm_ext_v1
25055 #define ARM_VARIANT & arm_ext_v1
25056 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
25058 /* ARMv8 T32 only. */
25060 #define ARM_VARIANT NULL
25061 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
25062 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
25063 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
25065 /* FP for ARMv8. */
25067 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
25068 #undef THUMB_VARIANT
25069 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
25071 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
25072 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
25073 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
25074 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
25075 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
25076 mnCE(vrintz
, _vrintr
, 2, (RNSDQMQ
, oRNSDQMQ
), vrintz
),
25077 mnCE(vrintx
, _vrintr
, 2, (RNSDQMQ
, oRNSDQMQ
), vrintx
),
25078 mnUF(vrinta
, _vrinta
, 2, (RNSDQMQ
, oRNSDQMQ
), vrinta
),
25079 mnUF(vrintn
, _vrinta
, 2, (RNSDQMQ
, oRNSDQMQ
), vrintn
),
25080 mnUF(vrintp
, _vrinta
, 2, (RNSDQMQ
, oRNSDQMQ
), vrintp
),
25081 mnUF(vrintm
, _vrinta
, 2, (RNSDQMQ
, oRNSDQMQ
), vrintm
),
25083 /* Crypto v1 extensions. */
25085 #define ARM_VARIANT & fpu_crypto_ext_armv8
25086 #undef THUMB_VARIANT
25087 #define THUMB_VARIANT & fpu_crypto_ext_armv8
25089 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
25090 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
25091 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
25092 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
25093 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
25094 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
25095 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
25096 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
25097 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
25098 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
25099 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
25100 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
25101 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
25102 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
25105 #define ARM_VARIANT & arm_ext_crc
25106 #undef THUMB_VARIANT
25107 #define THUMB_VARIANT & arm_ext_crc
25108 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
25109 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
25110 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
25111 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
25112 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
25113 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
25115 /* ARMv8.2 RAS extension. */
25117 #define ARM_VARIANT & arm_ext_ras
25118 #undef THUMB_VARIANT
25119 #define THUMB_VARIANT & arm_ext_ras
25120 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
25123 #define ARM_VARIANT & arm_ext_v8_3
25124 #undef THUMB_VARIANT
25125 #define THUMB_VARIANT & arm_ext_v8_3
25126 NCE (vjcvt
, eb90bc0
, 2, (RVS
, RVD
), vjcvt
),
25129 #define ARM_VARIANT & fpu_neon_ext_dotprod
25130 #undef THUMB_VARIANT
25131 #define THUMB_VARIANT & fpu_neon_ext_dotprod
25132 NUF (vsdot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_s
),
25133 NUF (vudot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_u
),
25136 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
25137 #undef THUMB_VARIANT
25138 #define THUMB_VARIANT NULL
25140 cCE("wfs", e200110
, 1, (RR
), rd
),
25141 cCE("rfs", e300110
, 1, (RR
), rd
),
25142 cCE("wfc", e400110
, 1, (RR
), rd
),
25143 cCE("rfc", e500110
, 1, (RR
), rd
),
25145 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
25146 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
25147 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
25148 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
25150 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
25151 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
25152 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
25153 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
25155 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
25156 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
25157 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
25158 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
25159 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
25160 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
25161 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
25162 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
25163 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
25164 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
25165 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
25166 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
25168 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
25169 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
25170 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
25171 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
25172 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
25173 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
25174 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
25175 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
25176 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
25177 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
25178 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
25179 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
25181 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
25182 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
25183 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
25184 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
25185 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
25186 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
25187 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
25188 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
25189 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
25190 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
25191 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
25192 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
25194 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
25195 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
25196 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
25197 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
25198 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
25199 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
25200 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
25201 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
25202 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
25203 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
25204 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
25205 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
25207 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
25208 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
25209 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
25210 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
25211 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
25212 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
25213 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
25214 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
25215 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
25216 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
25217 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
25218 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
25220 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
25221 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
25222 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
25223 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
25224 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
25225 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
25226 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
25227 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
25228 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
25229 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
25230 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
25231 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
25233 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
25234 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
25235 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
25236 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
25237 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
25238 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
25239 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
25240 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
25241 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
25242 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
25243 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
25244 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
25246 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
25247 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
25248 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
25249 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
25250 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
25251 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
25252 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
25253 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
25254 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
25255 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
25256 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
25257 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
25259 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
25260 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
25261 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
25262 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
25263 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
25264 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
25265 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
25266 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
25267 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
25268 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
25269 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
25270 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
25272 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
25273 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
25274 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
25275 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
25276 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
25277 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
25278 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
25279 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
25280 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
25281 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
25282 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
25283 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
25285 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
25286 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
25287 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
25288 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
25289 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
25290 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
25291 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
25292 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
25293 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
25294 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
25295 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
25296 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
25298 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
25299 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
25300 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
25301 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
25302 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
25303 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
25304 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
25305 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
25306 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
25307 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
25308 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
25309 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
25311 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
25312 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
25313 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
25314 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
25315 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
25316 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
25317 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
25318 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
25319 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
25320 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
25321 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
25322 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
25324 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
25325 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
25326 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
25327 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
25328 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
25329 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
25330 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
25331 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
25332 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
25333 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
25334 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
25335 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
25337 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
25338 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
25339 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
25340 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
25341 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
25342 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
25343 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
25344 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
25345 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
25346 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
25347 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
25348 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
25350 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
25351 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
25352 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
25353 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
25354 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
25355 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
25356 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
25357 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
25358 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
25359 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
25360 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
25361 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
25363 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25364 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25365 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25366 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25367 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25368 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25369 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25370 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25371 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25372 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25373 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25374 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25376 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25377 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25378 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25379 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25380 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25381 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25382 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25383 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25384 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25385 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25386 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25387 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25389 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25390 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25391 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25392 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25393 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25394 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25395 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25396 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25397 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25398 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25399 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25400 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25402 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25403 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25404 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25405 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25406 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25407 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25408 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25409 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25410 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25411 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25412 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25413 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25415 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25416 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25417 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25418 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25419 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25420 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25421 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25422 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25423 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25424 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25425 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25426 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25428 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25429 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25430 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25431 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25432 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25433 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25434 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25435 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25436 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25437 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25438 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25439 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25441 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25442 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25443 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25444 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25445 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25446 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25447 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25448 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25449 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25450 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25451 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25452 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25454 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25455 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25456 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25457 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25458 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25459 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25460 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25461 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25462 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25463 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25464 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25465 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25467 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25468 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25469 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25470 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25471 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25472 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25473 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25474 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25475 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25476 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25477 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25478 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25480 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25481 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25482 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25483 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25484 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25485 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25486 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25487 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25488 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25489 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25490 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25491 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25493 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25494 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25495 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25496 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25497 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25498 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25499 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25500 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25501 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25502 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25503 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25504 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25506 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25507 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25508 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25509 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25510 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25511 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25512 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25513 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25514 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25515 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25516 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25517 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25519 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25520 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25521 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25522 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25523 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25524 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25525 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25526 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25527 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25528 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25529 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25530 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
25532 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
25533 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
25534 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
25535 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
25537 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
25538 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
25539 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
25540 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
25541 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
25542 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
25543 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
25544 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
25545 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
25546 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
25547 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
25548 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
25550 /* The implementation of the FIX instruction is broken on some
25551 assemblers, in that it accepts a precision specifier as well as a
25552 rounding specifier, despite the fact that this is meaningless.
25553 To be more compatible, we accept it as well, though of course it
25554 does not set any bits. */
25555 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
25556 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
25557 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
25558 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
25559 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
25560 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
25561 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
25562 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
25563 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
25564 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
25565 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
25566 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
25567 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
25569 /* Instructions that were new with the real FPA, call them V2. */
25571 #define ARM_VARIANT & fpu_fpa_ext_v2
25573 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
25574 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
25575 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
25576 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
25577 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
25578 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
25581 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
25582 #undef THUMB_VARIANT
25583 #define THUMB_VARIANT & arm_ext_v6t2
25584 mcCE(vmrs
, ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
25585 mcCE(vmsr
, ee00a10
, 2, (RVC
, RR
), vmsr
),
25586 mcCE(fldd
, d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
25587 mcCE(fstd
, d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
25588 mcCE(flds
, d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
25589 mcCE(fsts
, d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
25591 /* Memory operations. */
25592 mcCE(fldmias
, c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
25593 mcCE(fldmdbs
, d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
25594 mcCE(fstmias
, c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
25595 mcCE(fstmdbs
, d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
25596 #undef THUMB_VARIANT
25598 /* Moves and type conversions. */
25599 cCE("fmstat", ef1fa10
, 0, (), noargs
),
25600 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
25601 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
25602 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
25603 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
25604 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
25605 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
25606 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
25607 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
25609 /* Memory operations. */
25610 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
25611 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
25612 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
25613 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
25614 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
25615 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
25616 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
25617 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
25618 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
25619 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
25620 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
25621 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
25623 /* Monadic operations. */
25624 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
25625 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
25626 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
25628 /* Dyadic operations. */
25629 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
25630 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
25631 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
25632 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
25633 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
25634 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
25635 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
25636 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
25637 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
25640 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
25641 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
25642 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
25643 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
25645 /* Double precision load/store are still present on single precision
25646 implementations. */
25647 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
25648 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
25649 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
25650 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
25651 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
25652 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
25653 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
25654 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
25657 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
25659 /* Moves and type conversions. */
25660 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
25661 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
25662 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
25663 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
25664 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
25665 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
25666 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
25667 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
25668 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
25669 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
25670 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
25671 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
25673 /* Monadic operations. */
25674 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
25675 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
25676 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
25678 /* Dyadic operations. */
25679 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
25680 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
25681 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
25682 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
25683 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
25684 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
25685 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
25686 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
25687 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
25690 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
25691 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
25692 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
25693 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
25695 /* Instructions which may belong to either the Neon or VFP instruction sets.
25696 Individual encoder functions perform additional architecture checks. */
25698 #define ARM_VARIANT & fpu_vfp_ext_v1xd
25699 #undef THUMB_VARIANT
25700 #define THUMB_VARIANT & arm_ext_v6t2
25702 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
25703 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
25704 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
25705 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
25706 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
25707 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
25709 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
25710 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
25712 #undef THUMB_VARIANT
25713 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
25715 /* These mnemonics are unique to VFP. */
25716 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
25717 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
25718 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
25719 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
25720 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
25721 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
25723 /* Mnemonics shared by Neon and VFP. */
25724 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
25726 mnCEF(vcvt
, _vcvt
, 3, (RNSDQMQ
, RNSDQMQ
, oI32z
), neon_cvt
),
25727 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
25728 MNCEF(vcvtb
, eb20a40
, 3, (RVSDMQ
, RVSDMQ
, oI32b
), neon_cvtb
),
25729 MNCEF(vcvtt
, eb20a40
, 3, (RVSDMQ
, RVSDMQ
, oI32b
), neon_cvtt
),
25732 /* NOTE: All VMOV encoding is special-cased! */
25733 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
25735 #undef THUMB_VARIANT
25736 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
25737 by different feature bits. Since we are setting the Thumb guard, we can
25738 require Thumb-1 which makes it a nop guard and set the right feature bit in
25739 do_vldr_vstr (). */
25740 #define THUMB_VARIANT & arm_ext_v4t
25741 NCE(vldr
, d100b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
25742 NCE(vstr
, d000b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
25745 #define ARM_VARIANT & arm_ext_fp16
25746 #undef THUMB_VARIANT
25747 #define THUMB_VARIANT & arm_ext_fp16
25748 /* New instructions added from v8.2, allowing the extraction and insertion of
25749 the upper 16 bits of a 32-bit vector register. */
25750 NCE (vmovx
, eb00a40
, 2, (RVS
, RVS
), neon_movhf
),
25751 NCE (vins
, eb00ac0
, 2, (RVS
, RVS
), neon_movhf
),
25753 /* New backported fma/fms instructions optional in v8.2. */
25754 NUF (vfmsl
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmsl
),
25755 NUF (vfmal
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmal
),
25757 #undef THUMB_VARIANT
25758 #define THUMB_VARIANT & fpu_neon_ext_v1
25760 #define ARM_VARIANT & fpu_neon_ext_v1
25762 /* Data processing with three registers of the same length. */
25763 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
25764 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
25765 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
25766 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
25767 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
25768 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
25769 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
25770 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
25771 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
25772 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
25773 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
25774 /* If not immediate, fall back to neon_dyadic_i64_su.
25775 shl should accept I8 I16 I32 I64,
25776 qshl should accept S8 S16 S32 S64 U8 U16 U32 U64. */
25777 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl
),
25778 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl
),
25779 /* Logic ops, types optional & ignored. */
25780 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
25781 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
25782 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
25783 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
25784 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
25785 /* Bitfield ops, untyped. */
25786 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
25787 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
25788 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
25789 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
25790 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
25791 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
25792 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
25793 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
25794 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
25795 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
25796 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
25797 back to neon_dyadic_if_su. */
25798 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
25799 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
25800 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
25801 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
25802 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
25803 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
25804 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
25805 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
25806 /* Comparison. Type I8 I16 I32 F32. */
25807 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
25808 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
25809 /* As above, D registers only. */
25810 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
25811 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
25812 /* Int and float variants, signedness unimportant. */
25813 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
25814 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
25815 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
25816 /* Add/sub take types I8 I16 I32 I64 F32. */
25817 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
25818 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
25819 /* vtst takes sizes 8, 16, 32. */
25820 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
25821 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
25822 /* VMUL takes I8 I16 I32 F32 P8. */
25823 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
25824 /* VQD{R}MULH takes S16 S32. */
25825 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
25826 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
25827 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
25828 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
25829 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
25830 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
25831 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
25832 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
25833 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
25834 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
25835 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
25836 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
25837 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
25838 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
25839 /* ARM v8.1 extension. */
25840 nUF (vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
25841 nUF (vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
25842 nUF (vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
25844 /* Two address, int/float. Types S8 S16 S32 F32. */
25845 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
25846 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
25848 /* Data processing with two registers and a shift amount. */
25849 /* Right shifts, and variants with rounding.
25850 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
25851 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
25852 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
25853 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
25854 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
25855 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
25856 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
25857 /* Shift and insert. Sizes accepted 8 16 32 64. */
25858 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
25859 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
25860 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
25861 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
25862 /* Right shift immediate, saturating & narrowing, with rounding variants.
25863 Types accepted S16 S32 S64 U16 U32 U64. */
25864 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
25865 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
25866 /* As above, unsigned. Types accepted S16 S32 S64. */
25867 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
25868 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
25869 /* Right shift narrowing. Types accepted I16 I32 I64. */
25870 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
25871 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
25872 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
25873 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
25874 /* CVT with optional immediate for fixed-point variant. */
25875 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
25877 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
25879 /* Data processing, three registers of different lengths. */
25880 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
25881 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
25882 /* If not scalar, fall back to neon_dyadic_long.
25883 Vector types as above, scalar types S16 S32 U16 U32. */
25884 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
25885 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
25886 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
25887 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
25888 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
25889 /* Dyadic, narrowing insns. Types I16 I32 I64. */
25890 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
25891 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
25892 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
25893 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
25894 /* Saturating doubling multiplies. Types S16 S32. */
25895 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
25896 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
25897 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
25898 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
25899 S16 S32 U16 U32. */
25900 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
25902 /* Extract. Size 8. */
25903 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
25904 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
25906 /* Two registers, miscellaneous. */
25907 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
25908 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
25909 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
25910 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
25911 /* Vector replicate. Sizes 8 16 32. */
25912 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
25913 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
25914 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
25915 /* VMOVN. Types I16 I32 I64. */
25916 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
25917 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
25918 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
25919 /* VQMOVUN. Types S16 S32 S64. */
25920 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
25921 /* VZIP / VUZP. Sizes 8 16 32. */
25922 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
25923 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
25924 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
25925 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
25926 /* VQABS / VQNEG. Types S8 S16 S32. */
25927 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
25928 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
25929 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
25930 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
25931 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
25932 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
25933 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
25934 /* Reciprocal estimates. Types U32 F16 F32. */
25935 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
25936 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
25937 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
25938 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
25939 /* VCLS. Types S8 S16 S32. */
25940 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
25941 /* VCLZ. Types I8 I16 I32. */
25942 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
25943 /* VCNT. Size 8. */
25944 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
25945 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
25946 /* Two address, untyped. */
25947 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
25948 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
25949 /* VTRN. Sizes 8 16 32. */
25950 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
25951 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
25953 /* Table lookup. Size 8. */
25954 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
25955 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
25957 #undef THUMB_VARIANT
25958 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
25960 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
25962 /* Neon element/structure load/store. */
25963 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
25964 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
25965 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
25966 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
25967 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
25968 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
25969 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
25970 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
25972 #undef THUMB_VARIANT
25973 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
25975 #define ARM_VARIANT & fpu_vfp_ext_v3xd
25976 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
25977 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
25978 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
25979 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
25980 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
25981 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
25982 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
25983 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
25984 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
25986 #undef THUMB_VARIANT
25987 #define THUMB_VARIANT & fpu_vfp_ext_v3
25989 #define ARM_VARIANT & fpu_vfp_ext_v3
25991 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
25992 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
25993 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
25994 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
25995 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
25996 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
25997 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
25998 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
25999 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
26002 #define ARM_VARIANT & fpu_vfp_ext_fma
26003 #undef THUMB_VARIANT
26004 #define THUMB_VARIANT & fpu_vfp_ext_fma
26005 /* Mnemonics shared by Neon, VFP, MVE and BF16. These are included in the
26006 VFP FMA variant; NEON and VFP FMA always includes the NEON
26007 FMA instructions. */
26008 mnCEF(vfma
, _vfma
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQMQR
), neon_fmac
),
26009 TUF ("vfmat", c300850
, fc300850
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQ_RNSC_MQ_RR
), mve_vfma
, mve_vfma
),
26010 mnCEF(vfms
, _vfms
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQMQ
), neon_fmac
),
26012 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
26013 the v form should always be used. */
26014 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
26015 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
26016 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
26017 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
26018 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
26019 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
26021 #undef THUMB_VARIANT
26023 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
26025 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
26026 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
26027 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
26028 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
26029 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
26030 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
26031 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
26032 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
26035 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
26037 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
26038 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
26039 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
26040 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
26041 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
26042 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
26043 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
26044 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
26045 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
26046 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
26047 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
26048 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
26049 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
26050 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
26051 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
26052 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
26053 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
26054 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
26055 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
26056 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
26057 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
26058 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
26059 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
26060 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
26061 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
26062 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
26063 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
26064 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
26065 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
26066 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
26067 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
26068 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
26069 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
26070 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
26071 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
26072 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
26073 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
26074 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26075 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26076 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26077 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26078 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26079 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26080 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26081 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26082 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26083 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
26084 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26085 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26086 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26087 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26088 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26089 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26090 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26091 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26092 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26093 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26094 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26095 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26096 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26097 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26098 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26099 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26100 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26101 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26102 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26103 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
26104 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
26105 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
26106 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
26107 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26108 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26109 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26110 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26111 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26112 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26113 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26114 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26115 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26116 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26117 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26118 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26119 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26120 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26121 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26122 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26123 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26124 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26125 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
26126 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26127 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26128 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26129 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26130 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26131 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26132 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26133 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26134 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26135 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26136 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26137 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
26138 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
26139 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
26140 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
26141 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
26142 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
26143 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26144 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26145 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26146 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26147 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
26148 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
26149 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
26150 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
26151 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
26152 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
26153 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
26154 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
26155 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
26156 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
26157 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
26158 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
26159 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
26160 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
26161 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
26162 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
26163 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
26164 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
26165 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
26166 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
26167 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
26168 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
26169 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
26170 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26171 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26172 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26173 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26174 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26175 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26176 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26177 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26178 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26179 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
26180 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
26181 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
26182 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
26183 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
26184 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
26185 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26186 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26187 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26188 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
26189 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
26190 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
26191 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
26192 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
26193 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
26194 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26195 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26196 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26197 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26198 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
26201 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
26203 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
26204 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
26205 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
26206 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
26207 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
26208 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
26209 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26210 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26211 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26212 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26213 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26214 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26215 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26216 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26217 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26218 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26219 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26220 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26221 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26222 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26223 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
26224 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26225 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26226 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26227 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26228 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26229 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26230 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26231 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26232 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26233 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26234 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26235 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26236 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26237 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26238 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26239 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26240 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26241 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26242 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26243 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26244 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26245 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26246 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26247 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26248 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26249 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26250 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26251 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26252 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26253 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26254 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26255 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26256 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26257 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26258 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26259 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
26262 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
26264 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
26265 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
26266 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
26267 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
26268 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
26269 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
26270 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
26271 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
26272 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
26273 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
26274 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
26275 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
26276 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
26277 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
26278 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
26279 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
26280 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
26281 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
26282 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
26283 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
26284 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
26285 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
26286 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
26287 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
26288 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
26289 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
26290 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
26291 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
26292 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
26293 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
26294 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
26295 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
26296 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
26297 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
26298 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
26299 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
26300 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
26301 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
26302 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
26303 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
26304 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
26305 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
26306 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
26307 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
26308 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
26309 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
26310 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
26311 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
26312 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
26313 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
26314 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
26315 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
26316 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
26317 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
26318 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
26319 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
26320 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
26321 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
26322 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
26323 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
26324 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
26325 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
26326 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
26327 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
26328 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
26329 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
26330 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
26331 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
26332 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
26333 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
26334 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
26335 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
26336 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
26337 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
26338 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
26339 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
26341 /* ARMv8.5-A instructions. */
26343 #define ARM_VARIANT & arm_ext_sb
26344 #undef THUMB_VARIANT
26345 #define THUMB_VARIANT & arm_ext_sb
26346 TUF("sb", 57ff070
, f3bf8f70
, 0, (), noargs
, noargs
),
26349 #define ARM_VARIANT & arm_ext_predres
26350 #undef THUMB_VARIANT
26351 #define THUMB_VARIANT & arm_ext_predres
26352 CE("cfprctx", e070f93
, 1, (RRnpc
), rd
),
26353 CE("dvprctx", e070fb3
, 1, (RRnpc
), rd
),
26354 CE("cpprctx", e070ff3
, 1, (RRnpc
), rd
),
26356 /* ARMv8-M instructions. */
26358 #define ARM_VARIANT NULL
26359 #undef THUMB_VARIANT
26360 #define THUMB_VARIANT & arm_ext_v8m
26361 ToU("sg", e97fe97f
, 0, (), noargs
),
26362 ToC("blxns", 4784, 1, (RRnpc
), t_blx
),
26363 ToC("bxns", 4704, 1, (RRnpc
), t_bx
),
26364 ToC("tt", e840f000
, 2, (RRnpc
, RRnpc
), tt
),
26365 ToC("ttt", e840f040
, 2, (RRnpc
, RRnpc
), tt
),
26366 ToC("tta", e840f080
, 2, (RRnpc
, RRnpc
), tt
),
26367 ToC("ttat", e840f0c0
, 2, (RRnpc
, RRnpc
), tt
),
26369 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
26370 instructions behave as nop if no VFP is present. */
26371 #undef THUMB_VARIANT
26372 #define THUMB_VARIANT & arm_ext_v8m_main
26373 ToC("vlldm", ec300a00
, 1, (RRnpc
), rn
),
26374 ToC("vlstm", ec200a00
, 1, (RRnpc
), rn
),
26376 /* Armv8.1-M Mainline instructions. */
26377 #undef THUMB_VARIANT
26378 #define THUMB_VARIANT & arm_ext_v8_1m_main
26379 toU("aut", _aut
, 3, (R12
, LR
, SP
), t_pacbti
),
26380 toU("autg", _autg
, 3, (RR
, RR
, RR
), t_pacbti_nonop
),
26381 ToU("bti", f3af800f
, 0, (), noargs
),
26382 toU("bxaut", _bxaut
, 3, (RR
, RR
, RR
), t_pacbti_nonop
),
26383 toU("pac", _pac
, 3, (R12
, LR
, SP
), t_pacbti
),
26384 toU("pacbti", _pacbti
, 3, (R12
, LR
, SP
), t_pacbti
),
26385 toU("pacg", _pacg
, 3, (RR
, RR
, RR
), t_pacbti_pacg
),
26386 toU("cinc", _cinc
, 3, (RRnpcsp
, RR_ZR
, COND
), t_cond
),
26387 toU("cinv", _cinv
, 3, (RRnpcsp
, RR_ZR
, COND
), t_cond
),
26388 toU("cneg", _cneg
, 3, (RRnpcsp
, RR_ZR
, COND
), t_cond
),
26389 toU("csel", _csel
, 4, (RRnpcsp
, RR_ZR
, RR_ZR
, COND
), t_cond
),
26390 toU("csetm", _csetm
, 2, (RRnpcsp
, COND
), t_cond
),
26391 toU("cset", _cset
, 2, (RRnpcsp
, COND
), t_cond
),
26392 toU("csinc", _csinc
, 4, (RRnpcsp
, RR_ZR
, RR_ZR
, COND
), t_cond
),
26393 toU("csinv", _csinv
, 4, (RRnpcsp
, RR_ZR
, RR_ZR
, COND
), t_cond
),
26394 toU("csneg", _csneg
, 4, (RRnpcsp
, RR_ZR
, RR_ZR
, COND
), t_cond
),
26396 toC("bf", _bf
, 2, (EXPs
, EXPs
), t_branch_future
),
26397 toU("bfcsel", _bfcsel
, 4, (EXPs
, EXPs
, EXPs
, COND
), t_branch_future
),
26398 toC("bfx", _bfx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
26399 toC("bfl", _bfl
, 2, (EXPs
, EXPs
), t_branch_future
),
26400 toC("bflx", _bflx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
26402 toU("dls", _dls
, 2, (LR
, RRnpcsp
), t_loloop
),
26403 toU("wls", _wls
, 3, (LR
, RRnpcsp
, EXP
), t_loloop
),
26404 toU("le", _le
, 2, (oLR
, EXP
), t_loloop
),
26406 ToC("clrm", e89f0000
, 1, (CLRMLST
), t_clrm
),
26407 ToC("vscclrm", ec9f0a00
, 1, (VRSDVLST
), t_vscclrm
),
26409 #undef THUMB_VARIANT
26410 #define THUMB_VARIANT & mve_ext
26411 ToC("lsll", ea50010d
, 3, (RRe
, RRo
, RRnpcsp_I32
), mve_scalar_shift
),
26412 ToC("lsrl", ea50011f
, 3, (RRe
, RRo
, I32
), mve_scalar_shift
),
26413 ToC("asrl", ea50012d
, 3, (RRe
, RRo
, RRnpcsp_I32
), mve_scalar_shift
),
26414 ToC("uqrshll", ea51010d
, 4, (RRe
, RRo
, I48_I64
, RRnpcsp
), mve_scalar_shift1
),
26415 ToC("sqrshrl", ea51012d
, 4, (RRe
, RRo
, I48_I64
, RRnpcsp
), mve_scalar_shift1
),
26416 ToC("uqshll", ea51010f
, 3, (RRe
, RRo
, I32
), mve_scalar_shift
),
26417 ToC("urshrl", ea51011f
, 3, (RRe
, RRo
, I32
), mve_scalar_shift
),
26418 ToC("srshrl", ea51012f
, 3, (RRe
, RRo
, I32
), mve_scalar_shift
),
26419 ToC("sqshll", ea51013f
, 3, (RRe
, RRo
, I32
), mve_scalar_shift
),
26420 ToC("uqrshl", ea500f0d
, 2, (RRnpcsp
, RRnpcsp
), mve_scalar_shift
),
26421 ToC("sqrshr", ea500f2d
, 2, (RRnpcsp
, RRnpcsp
), mve_scalar_shift
),
26422 ToC("uqshl", ea500f0f
, 2, (RRnpcsp
, I32
), mve_scalar_shift
),
26423 ToC("urshr", ea500f1f
, 2, (RRnpcsp
, I32
), mve_scalar_shift
),
26424 ToC("srshr", ea500f2f
, 2, (RRnpcsp
, I32
), mve_scalar_shift
),
26425 ToC("sqshl", ea500f3f
, 2, (RRnpcsp
, I32
), mve_scalar_shift
),
26427 ToC("vpt", ee410f00
, 3, (COND
, RMQ
, RMQRZ
), mve_vpt
),
26428 ToC("vptt", ee018f00
, 3, (COND
, RMQ
, RMQRZ
), mve_vpt
),
26429 ToC("vpte", ee418f00
, 3, (COND
, RMQ
, RMQRZ
), mve_vpt
),
26430 ToC("vpttt", ee014f00
, 3, (COND
, RMQ
, RMQRZ
), mve_vpt
),
26431 ToC("vptte", ee01cf00
, 3, (COND
, RMQ
, RMQRZ
), mve_vpt
),
26432 ToC("vptet", ee41cf00
, 3, (COND
, RMQ
, RMQRZ
), mve_vpt
),
26433 ToC("vptee", ee414f00
, 3, (COND
, RMQ
, RMQRZ
), mve_vpt
),
26434 ToC("vptttt", ee012f00
, 3, (COND
, RMQ
, RMQRZ
), mve_vpt
),
26435 ToC("vpttte", ee016f00
, 3, (COND
, RMQ
, RMQRZ
), mve_vpt
),
26436 ToC("vpttet", ee01ef00
, 3, (COND
, RMQ
, RMQRZ
), mve_vpt
),
26437 ToC("vpttee", ee01af00
, 3, (COND
, RMQ
, RMQRZ
), mve_vpt
),
26438 ToC("vptett", ee41af00
, 3, (COND
, RMQ
, RMQRZ
), mve_vpt
),
26439 ToC("vptete", ee41ef00
, 3, (COND
, RMQ
, RMQRZ
), mve_vpt
),
26440 ToC("vpteet", ee416f00
, 3, (COND
, RMQ
, RMQRZ
), mve_vpt
),
26441 ToC("vpteee", ee412f00
, 3, (COND
, RMQ
, RMQRZ
), mve_vpt
),
26443 ToC("vpst", fe710f4d
, 0, (), mve_vpt
),
26444 ToC("vpstt", fe318f4d
, 0, (), mve_vpt
),
26445 ToC("vpste", fe718f4d
, 0, (), mve_vpt
),
26446 ToC("vpsttt", fe314f4d
, 0, (), mve_vpt
),
26447 ToC("vpstte", fe31cf4d
, 0, (), mve_vpt
),
26448 ToC("vpstet", fe71cf4d
, 0, (), mve_vpt
),
26449 ToC("vpstee", fe714f4d
, 0, (), mve_vpt
),
26450 ToC("vpstttt", fe312f4d
, 0, (), mve_vpt
),
26451 ToC("vpsttte", fe316f4d
, 0, (), mve_vpt
),
26452 ToC("vpsttet", fe31ef4d
, 0, (), mve_vpt
),
26453 ToC("vpsttee", fe31af4d
, 0, (), mve_vpt
),
26454 ToC("vpstett", fe71af4d
, 0, (), mve_vpt
),
26455 ToC("vpstete", fe71ef4d
, 0, (), mve_vpt
),
26456 ToC("vpsteet", fe716f4d
, 0, (), mve_vpt
),
26457 ToC("vpsteee", fe712f4d
, 0, (), mve_vpt
),
26459 /* MVE and MVE FP only. */
26460 mToC("vhcadd", ee000f00
, 4, (RMQ
, RMQ
, RMQ
, EXPi
), mve_vhcadd
),
26461 mCEF(vctp
, _vctp
, 1, (RRnpc
), mve_vctp
),
26462 mCEF(vadc
, _vadc
, 3, (RMQ
, RMQ
, RMQ
), mve_vadc
),
26463 mCEF(vadci
, _vadci
, 3, (RMQ
, RMQ
, RMQ
), mve_vadc
),
26464 mToC("vsbc", fe300f00
, 3, (RMQ
, RMQ
, RMQ
), mve_vsbc
),
26465 mToC("vsbci", fe301f00
, 3, (RMQ
, RMQ
, RMQ
), mve_vsbc
),
26466 mCEF(vmullb
, _vmullb
, 3, (RMQ
, RMQ
, RMQ
), mve_vmull
),
26467 mCEF(vabav
, _vabav
, 3, (RRnpcsp
, RMQ
, RMQ
), mve_vabav
),
26468 mCEF(vmladav
, _vmladav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
26469 mCEF(vmladava
, _vmladava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
26470 mCEF(vmladavx
, _vmladavx
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
26471 mCEF(vmladavax
, _vmladavax
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
26472 mCEF(vmlav
, _vmladav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
26473 mCEF(vmlava
, _vmladava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
26474 mCEF(vmlsdav
, _vmlsdav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
26475 mCEF(vmlsdava
, _vmlsdava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
26476 mCEF(vmlsdavx
, _vmlsdavx
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
26477 mCEF(vmlsdavax
, _vmlsdavax
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
26479 mCEF(vst20
, _vst20
, 2, (MSTRLST2
, ADDRMVE
), mve_vst_vld
),
26480 mCEF(vst21
, _vst21
, 2, (MSTRLST2
, ADDRMVE
), mve_vst_vld
),
26481 mCEF(vst40
, _vst40
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
26482 mCEF(vst41
, _vst41
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
26483 mCEF(vst42
, _vst42
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
26484 mCEF(vst43
, _vst43
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
26485 mCEF(vld20
, _vld20
, 2, (MSTRLST2
, ADDRMVE
), mve_vst_vld
),
26486 mCEF(vld21
, _vld21
, 2, (MSTRLST2
, ADDRMVE
), mve_vst_vld
),
26487 mCEF(vld40
, _vld40
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
26488 mCEF(vld41
, _vld41
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
26489 mCEF(vld42
, _vld42
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
26490 mCEF(vld43
, _vld43
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
26491 mCEF(vstrb
, _vstrb
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
26492 mCEF(vstrh
, _vstrh
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
26493 mCEF(vstrw
, _vstrw
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
26494 mCEF(vstrd
, _vstrd
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
26495 mCEF(vldrb
, _vldrb
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
26496 mCEF(vldrh
, _vldrh
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
26497 mCEF(vldrw
, _vldrw
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
26498 mCEF(vldrd
, _vldrd
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
26500 mCEF(vmovnt
, _vmovnt
, 2, (RMQ
, RMQ
), mve_movn
),
26501 mCEF(vmovnb
, _vmovnb
, 2, (RMQ
, RMQ
), mve_movn
),
26502 mCEF(vbrsr
, _vbrsr
, 3, (RMQ
, RMQ
, RR
), mve_vbrsr
),
26503 mCEF(vaddlv
, _vaddlv
, 3, (RRe
, RRo
, RMQ
), mve_vaddlv
),
26504 mCEF(vaddlva
, _vaddlva
, 3, (RRe
, RRo
, RMQ
), mve_vaddlv
),
26505 mCEF(vaddv
, _vaddv
, 2, (RRe
, RMQ
), mve_vaddv
),
26506 mCEF(vaddva
, _vaddva
, 2, (RRe
, RMQ
), mve_vaddv
),
26507 mCEF(vddup
, _vddup
, 3, (RMQ
, RRe
, EXPi
), mve_viddup
),
26508 mCEF(vdwdup
, _vdwdup
, 4, (RMQ
, RRe
, RR
, EXPi
), mve_viddup
),
26509 mCEF(vidup
, _vidup
, 3, (RMQ
, RRe
, EXPi
), mve_viddup
),
26510 mCEF(viwdup
, _viwdup
, 4, (RMQ
, RRe
, RR
, EXPi
), mve_viddup
),
26511 mToC("vmaxa", ee330e81
, 2, (RMQ
, RMQ
), mve_vmaxa_vmina
),
26512 mToC("vmina", ee331e81
, 2, (RMQ
, RMQ
), mve_vmaxa_vmina
),
26513 mCEF(vmaxv
, _vmaxv
, 2, (RR
, RMQ
), mve_vmaxv
),
26514 mCEF(vmaxav
, _vmaxav
, 2, (RR
, RMQ
), mve_vmaxv
),
26515 mCEF(vminv
, _vminv
, 2, (RR
, RMQ
), mve_vmaxv
),
26516 mCEF(vminav
, _vminav
, 2, (RR
, RMQ
), mve_vmaxv
),
26518 mCEF(vmlaldav
, _vmlaldav
, 4, (RRe
, RRo
, RMQ
, RMQ
), mve_vmlaldav
),
26519 mCEF(vmlaldava
, _vmlaldava
, 4, (RRe
, RRo
, RMQ
, RMQ
), mve_vmlaldav
),
26520 mCEF(vmlaldavx
, _vmlaldavx
, 4, (RRe
, RRo
, RMQ
, RMQ
), mve_vmlaldav
),
26521 mCEF(vmlaldavax
, _vmlaldavax
, 4, (RRe
, RRo
, RMQ
, RMQ
), mve_vmlaldav
),
26522 mCEF(vmlalv
, _vmlaldav
, 4, (RRe
, RRo
, RMQ
, RMQ
), mve_vmlaldav
),
26523 mCEF(vmlalva
, _vmlaldava
, 4, (RRe
, RRo
, RMQ
, RMQ
), mve_vmlaldav
),
26524 mCEF(vmlsldav
, _vmlsldav
, 4, (RRe
, RRo
, RMQ
, RMQ
), mve_vmlaldav
),
26525 mCEF(vmlsldava
, _vmlsldava
, 4, (RRe
, RRo
, RMQ
, RMQ
), mve_vmlaldav
),
26526 mCEF(vmlsldavx
, _vmlsldavx
, 4, (RRe
, RRo
, RMQ
, RMQ
), mve_vmlaldav
),
26527 mCEF(vmlsldavax
, _vmlsldavax
, 4, (RRe
, RRo
, RMQ
, RMQ
), mve_vmlaldav
),
26528 mToC("vrmlaldavh", ee800f00
, 4, (RRe
, RR
, RMQ
, RMQ
), mve_vrmlaldavh
),
26529 mToC("vrmlaldavha",ee800f20
, 4, (RRe
, RR
, RMQ
, RMQ
), mve_vrmlaldavh
),
26530 mCEF(vrmlaldavhx
, _vrmlaldavhx
, 4, (RRe
, RR
, RMQ
, RMQ
), mve_vrmlaldavh
),
26531 mCEF(vrmlaldavhax
, _vrmlaldavhax
, 4, (RRe
, RR
, RMQ
, RMQ
), mve_vrmlaldavh
),
26532 mToC("vrmlalvh", ee800f00
, 4, (RRe
, RR
, RMQ
, RMQ
), mve_vrmlaldavh
),
26533 mToC("vrmlalvha", ee800f20
, 4, (RRe
, RR
, RMQ
, RMQ
), mve_vrmlaldavh
),
26534 mCEF(vrmlsldavh
, _vrmlsldavh
, 4, (RRe
, RR
, RMQ
, RMQ
), mve_vrmlaldavh
),
26535 mCEF(vrmlsldavha
, _vrmlsldavha
, 4, (RRe
, RR
, RMQ
, RMQ
), mve_vrmlaldavh
),
26536 mCEF(vrmlsldavhx
, _vrmlsldavhx
, 4, (RRe
, RR
, RMQ
, RMQ
), mve_vrmlaldavh
),
26537 mCEF(vrmlsldavhax
, _vrmlsldavhax
, 4, (RRe
, RR
, RMQ
, RMQ
), mve_vrmlaldavh
),
26539 mToC("vmlas", ee011e40
, 3, (RMQ
, RMQ
, RR
), mve_vmlas
),
26540 mToC("vmulh", ee010e01
, 3, (RMQ
, RMQ
, RMQ
), mve_vmulh
),
26541 mToC("vrmulh", ee011e01
, 3, (RMQ
, RMQ
, RMQ
), mve_vmulh
),
26542 mToC("vpnot", fe310f4d
, 0, (), mve_vpnot
),
26543 mToC("vpsel", fe310f01
, 3, (RMQ
, RMQ
, RMQ
), mve_vpsel
),
26545 mToC("vqdmladh", ee000e00
, 3, (RMQ
, RMQ
, RMQ
), mve_vqdmladh
),
26546 mToC("vqdmladhx", ee001e00
, 3, (RMQ
, RMQ
, RMQ
), mve_vqdmladh
),
26547 mToC("vqrdmladh", ee000e01
, 3, (RMQ
, RMQ
, RMQ
), mve_vqdmladh
),
26548 mToC("vqrdmladhx",ee001e01
, 3, (RMQ
, RMQ
, RMQ
), mve_vqdmladh
),
26549 mToC("vqdmlsdh", fe000e00
, 3, (RMQ
, RMQ
, RMQ
), mve_vqdmladh
),
26550 mToC("vqdmlsdhx", fe001e00
, 3, (RMQ
, RMQ
, RMQ
), mve_vqdmladh
),
26551 mToC("vqrdmlsdh", fe000e01
, 3, (RMQ
, RMQ
, RMQ
), mve_vqdmladh
),
26552 mToC("vqrdmlsdhx",fe001e01
, 3, (RMQ
, RMQ
, RMQ
), mve_vqdmladh
),
26553 mToC("vqdmlah", ee000e60
, 3, (RMQ
, RMQ
, RR
), mve_vqdmlah
),
26554 mToC("vqdmlash", ee001e60
, 3, (RMQ
, RMQ
, RR
), mve_vqdmlah
),
26555 mToC("vqrdmlash", ee001e40
, 3, (RMQ
, RMQ
, RR
), mve_vqdmlah
),
26556 mToC("vqdmullt", ee301f00
, 3, (RMQ
, RMQ
, RMQRR
), mve_vqdmull
),
26557 mToC("vqdmullb", ee300f00
, 3, (RMQ
, RMQ
, RMQRR
), mve_vqdmull
),
26558 mCEF(vqmovnt
, _vqmovnt
, 2, (RMQ
, RMQ
), mve_vqmovn
),
26559 mCEF(vqmovnb
, _vqmovnb
, 2, (RMQ
, RMQ
), mve_vqmovn
),
26560 mCEF(vqmovunt
, _vqmovunt
, 2, (RMQ
, RMQ
), mve_vqmovn
),
26561 mCEF(vqmovunb
, _vqmovunb
, 2, (RMQ
, RMQ
), mve_vqmovn
),
26563 mCEF(vshrnt
, _vshrnt
, 3, (RMQ
, RMQ
, I32z
), mve_vshrn
),
26564 mCEF(vshrnb
, _vshrnb
, 3, (RMQ
, RMQ
, I32z
), mve_vshrn
),
26565 mCEF(vrshrnt
, _vrshrnt
, 3, (RMQ
, RMQ
, I32z
), mve_vshrn
),
26566 mCEF(vrshrnb
, _vrshrnb
, 3, (RMQ
, RMQ
, I32z
), mve_vshrn
),
26567 mCEF(vqshrnt
, _vqrshrnt
, 3, (RMQ
, RMQ
, I32z
), mve_vshrn
),
26568 mCEF(vqshrnb
, _vqrshrnb
, 3, (RMQ
, RMQ
, I32z
), mve_vshrn
),
26569 mCEF(vqshrunt
, _vqrshrunt
, 3, (RMQ
, RMQ
, I32z
), mve_vshrn
),
26570 mCEF(vqshrunb
, _vqrshrunb
, 3, (RMQ
, RMQ
, I32z
), mve_vshrn
),
26571 mCEF(vqrshrnt
, _vqrshrnt
, 3, (RMQ
, RMQ
, I32z
), mve_vshrn
),
26572 mCEF(vqrshrnb
, _vqrshrnb
, 3, (RMQ
, RMQ
, I32z
), mve_vshrn
),
26573 mCEF(vqrshrunt
, _vqrshrunt
, 3, (RMQ
, RMQ
, I32z
), mve_vshrn
),
26574 mCEF(vqrshrunb
, _vqrshrunb
, 3, (RMQ
, RMQ
, I32z
), mve_vshrn
),
26576 mToC("vshlc", eea00fc0
, 3, (RMQ
, RR
, I32z
), mve_vshlc
),
26577 mToC("vshllt", ee201e00
, 3, (RMQ
, RMQ
, I32
), mve_vshll
),
26578 mToC("vshllb", ee200e00
, 3, (RMQ
, RMQ
, I32
), mve_vshll
),
26580 toU("dlstp", _dlstp
, 2, (LR
, RR
), t_loloop
),
26581 toU("wlstp", _wlstp
, 3, (LR
, RR
, EXP
), t_loloop
),
26582 toU("letp", _letp
, 2, (LR
, EXP
), t_loloop
),
26583 toU("lctp", _lctp
, 0, (), t_loloop
),
26585 #undef THUMB_VARIANT
26586 #define THUMB_VARIANT & mve_fp_ext
26587 mToC("vcmul", ee300e00
, 4, (RMQ
, RMQ
, RMQ
, EXPi
), mve_vcmul
),
26588 mToC("vfmas", ee311e40
, 3, (RMQ
, RMQ
, RR
), mve_vfmas
),
26589 mToC("vmaxnma", ee3f0e81
, 2, (RMQ
, RMQ
), mve_vmaxnma_vminnma
),
26590 mToC("vminnma", ee3f1e81
, 2, (RMQ
, RMQ
), mve_vmaxnma_vminnma
),
26591 mToC("vmaxnmv", eeee0f00
, 2, (RR
, RMQ
), mve_vmaxnmv
),
26592 mToC("vmaxnmav",eeec0f00
, 2, (RR
, RMQ
), mve_vmaxnmv
),
26593 mToC("vminnmv", eeee0f80
, 2, (RR
, RMQ
), mve_vmaxnmv
),
26594 mToC("vminnmav",eeec0f80
, 2, (RR
, RMQ
), mve_vmaxnmv
),
26597 #define ARM_VARIANT & fpu_vfp_ext_v1
26598 #undef THUMB_VARIANT
26599 #define THUMB_VARIANT & arm_ext_v6t2
26601 mcCE(fcpyd
, eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
26604 #define ARM_VARIANT & fpu_vfp_ext_v1xd
26606 mnCEF(vmla
, _vmla
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQ_RNSC_MQ_RR
), neon_mac_maybe_scalar
),
26607 mnCEF(vmul
, _vmul
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQ_RNSC_MQ_RR
), neon_mul
),
26608 MNCE(vmov
, 0, 1, (VMOV
), neon_mov
),
26609 mcCE(fmrs
, e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
26610 mcCE(fmsr
, e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
26611 mcCE(fcpys
, eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
26613 mCEF(vmullt
, _vmullt
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQ_RNSC_MQ
), mve_vmull
),
26614 mnCEF(vadd
, _vadd
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQMQR
), neon_addsub_if_i
),
26615 mnCEF(vsub
, _vsub
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQMQR
), neon_addsub_if_i
),
26617 MNCEF(vabs
, 1b10300
, 2, (RNSDQMQ
, RNSDQMQ
), neon_abs_neg
),
26618 MNCEF(vneg
, 1b10380
, 2, (RNSDQMQ
, RNSDQMQ
), neon_abs_neg
),
26620 mCEF(vmovlt
, _vmovlt
, 1, (VMOV
), mve_movl
),
26621 mCEF(vmovlb
, _vmovlb
, 1, (VMOV
), mve_movl
),
26623 mnCE(vcmp
, _vcmp
, 3, (RVSD_COND
, RSVDMQ_FI0
, oRMQRZ
), vfp_nsyn_cmp
),
26624 mnCE(vcmpe
, _vcmpe
, 3, (RVSD_COND
, RSVDMQ_FI0
, oRMQRZ
), vfp_nsyn_cmp
),
26627 #define ARM_VARIANT & fpu_vfp_ext_v2
26629 mcCE(fmsrr
, c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
26630 mcCE(fmrrs
, c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
26631 mcCE(fmdrr
, c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
26632 mcCE(fmrrd
, c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
26635 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
26636 mnUF(vcvta
, _vcvta
, 2, (RNSDQMQ
, oRNSDQMQ
), neon_cvta
),
26637 mnUF(vcvtp
, _vcvta
, 2, (RNSDQMQ
, oRNSDQMQ
), neon_cvtp
),
26638 mnUF(vcvtn
, _vcvta
, 3, (RNSDQMQ
, oRNSDQMQ
, oI32z
), neon_cvtn
),
26639 mnUF(vcvtm
, _vcvta
, 2, (RNSDQMQ
, oRNSDQMQ
), neon_cvtm
),
26640 mnUF(vmaxnm
, _vmaxnm
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQMQ
), vmaxnm
),
26641 mnUF(vminnm
, _vminnm
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQMQ
), vmaxnm
),
26644 #define ARM_VARIANT & fpu_neon_ext_v1
26645 mnUF(vabd
, _vabd
, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ
), neon_dyadic_if_su
),
26646 mnUF(vabdl
, _vabdl
, 3, (RNQMQ
, RNDMQ
, RNDMQ
), neon_dyadic_long
),
26647 mnUF(vaddl
, _vaddl
, 3, (RNSDQMQ
, oRNSDMQ
, RNSDMQR
), neon_dyadic_long
),
26648 mnUF(vsubl
, _vsubl
, 3, (RNSDQMQ
, oRNSDMQ
, RNSDMQR
), neon_dyadic_long
),
26649 mnUF(vand
, _vand
, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ_Ibig
), neon_logic
),
26650 mnUF(vbic
, _vbic
, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ_Ibig
), neon_logic
),
26651 mnUF(vorr
, _vorr
, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ_Ibig
), neon_logic
),
26652 mnUF(vorn
, _vorn
, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ_Ibig
), neon_logic
),
26653 mnUF(veor
, _veor
, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ
), neon_logic
),
26654 MNUF(vcls
, 1b00400
, 2, (RNDQMQ
, RNDQMQ
), neon_cls
),
26655 MNUF(vclz
, 1b00480
, 2, (RNDQMQ
, RNDQMQ
), neon_clz
),
26656 mnCE(vdup
, _vdup
, 2, (RNDQMQ
, RR_RNSC
), neon_dup
),
26657 MNUF(vhadd
, 00000000, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQR
), neon_dyadic_i_su
),
26658 MNUF(vrhadd
, 00000100, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ
), neon_dyadic_i_su
),
26659 MNUF(vhsub
, 00000200, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQR
), neon_dyadic_i_su
),
26660 mnUF(vmin
, _vmin
, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ
), neon_dyadic_if_su
),
26661 mnUF(vmax
, _vmax
, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ
), neon_dyadic_if_su
),
26662 MNUF(vqadd
, 0000010, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQR
), neon_dyadic_i64_su
),
26663 MNUF(vqsub
, 0000210, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQR
), neon_dyadic_i64_su
),
26664 mnUF(vmvn
, _vmvn
, 2, (RNDQMQ
, RNDQMQ_Ibig
), neon_mvn
),
26665 MNUF(vqabs
, 1b00700
, 2, (RNDQMQ
, RNDQMQ
), neon_sat_abs_neg
),
26666 MNUF(vqneg
, 1b00780
, 2, (RNDQMQ
, RNDQMQ
), neon_sat_abs_neg
),
26667 mnUF(vqrdmlah
, _vqrdmlah
,3, (RNDQMQ
, oRNDQMQ
, RNDQ_RNSC_RR
), neon_qrdmlah
),
26668 mnUF(vqdmulh
, _vqdmulh
, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ_RNSC_RR
), neon_qdmulh
),
26669 mnUF(vqrdmulh
, _vqrdmulh
,3, (RNDQMQ
, oRNDQMQ
, RNDQMQ_RNSC_RR
), neon_qdmulh
),
26670 MNUF(vqrshl
, 0000510, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQR
), neon_rshl
),
26671 MNUF(vrshl
, 0000500, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQR
), neon_rshl
),
26672 MNUF(vshr
, 0800010, 3, (RNDQMQ
, oRNDQMQ
, I64z
), neon_rshift_round_imm
),
26673 MNUF(vrshr
, 0800210, 3, (RNDQMQ
, oRNDQMQ
, I64z
), neon_rshift_round_imm
),
26674 MNUF(vsli
, 1800510, 3, (RNDQMQ
, oRNDQMQ
, I63
), neon_sli
),
26675 MNUF(vsri
, 1800410, 3, (RNDQMQ
, oRNDQMQ
, I64z
), neon_sri
),
26676 MNUF(vrev64
, 1b00000
, 2, (RNDQMQ
, RNDQMQ
), neon_rev
),
26677 MNUF(vrev32
, 1b00080
, 2, (RNDQMQ
, RNDQMQ
), neon_rev
),
26678 MNUF(vrev16
, 1b00100
, 2, (RNDQMQ
, RNDQMQ
), neon_rev
),
26679 mnUF(vshl
, _vshl
, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ_I63b_RR
), neon_shl
),
26680 mnUF(vqshl
, _vqshl
, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ_I63b_RR
), neon_qshl
),
26681 MNUF(vqshlu
, 1800610, 3, (RNDQMQ
, oRNDQMQ
, I63
), neon_qshlu_imm
),
26684 #define ARM_VARIANT & arm_ext_v8_3
26685 #undef THUMB_VARIANT
26686 #define THUMB_VARIANT & arm_ext_v6t2_v8m
26687 MNUF (vcadd
, 0, 4, (RNDQMQ
, RNDQMQ
, RNDQMQ
, EXPi
), vcadd
),
26688 MNUF (vcmla
, 0, 4, (RNDQMQ
, RNDQMQ
, RNDQMQ_RNSC
, EXPi
), vcmla
),
26691 #define ARM_VARIANT &arm_ext_bf16
26692 #undef THUMB_VARIANT
26693 #define THUMB_VARIANT &arm_ext_bf16
26694 TUF ("vdot", c000d00
, fc000d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), vdot
, vdot
),
26695 TUF ("vmmla", c000c40
, fc000c40
, 3, (RNQ
, RNQ
, RNQ
), vmmla
, vmmla
),
26696 TUF ("vfmab", c300810
, fc300810
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), bfloat_vfma
, bfloat_vfma
),
26699 #define ARM_VARIANT &arm_ext_i8mm
26700 #undef THUMB_VARIANT
26701 #define THUMB_VARIANT &arm_ext_i8mm
26702 TUF ("vsmmla", c200c40
, fc200c40
, 3, (RNQ
, RNQ
, RNQ
), vsmmla
, vsmmla
),
26703 TUF ("vummla", c200c50
, fc200c50
, 3, (RNQ
, RNQ
, RNQ
), vummla
, vummla
),
26704 TUF ("vusmmla", ca00c40
, fca00c40
, 3, (RNQ
, RNQ
, RNQ
), vsmmla
, vsmmla
),
26705 TUF ("vusdot", c800d00
, fc800d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), vusdot
, vusdot
),
26706 TUF ("vsudot", c800d10
, fc800d10
, 3, (RNDQ
, RNDQ
, RNSC
), vsudot
, vsudot
),
26709 #undef THUMB_VARIANT
26710 #define THUMB_VARIANT &arm_ext_cde
26711 ToC ("cx1", ee000000
, 3, (RCP
, APSR_RR
, I8191
), cx1
),
26712 ToC ("cx1a", fe000000
, 3, (RCP
, APSR_RR
, I8191
), cx1a
),
26713 ToC ("cx1d", ee000040
, 4, (RCP
, RR
, APSR_RR
, I8191
), cx1d
),
26714 ToC ("cx1da", fe000040
, 4, (RCP
, RR
, APSR_RR
, I8191
), cx1da
),
26716 ToC ("cx2", ee400000
, 4, (RCP
, APSR_RR
, APSR_RR
, I511
), cx2
),
26717 ToC ("cx2a", fe400000
, 4, (RCP
, APSR_RR
, APSR_RR
, I511
), cx2a
),
26718 ToC ("cx2d", ee400040
, 5, (RCP
, RR
, APSR_RR
, APSR_RR
, I511
), cx2d
),
26719 ToC ("cx2da", fe400040
, 5, (RCP
, RR
, APSR_RR
, APSR_RR
, I511
), cx2da
),
26721 ToC ("cx3", ee800000
, 5, (RCP
, APSR_RR
, APSR_RR
, APSR_RR
, I63
), cx3
),
26722 ToC ("cx3a", fe800000
, 5, (RCP
, APSR_RR
, APSR_RR
, APSR_RR
, I63
), cx3a
),
26723 ToC ("cx3d", ee800040
, 6, (RCP
, RR
, APSR_RR
, APSR_RR
, APSR_RR
, I63
), cx3d
),
26724 ToC ("cx3da", fe800040
, 6, (RCP
, RR
, APSR_RR
, APSR_RR
, APSR_RR
, I63
), cx3da
),
26726 mToC ("vcx1", ec200000
, 3, (RCP
, RNSDMQ
, I4095
), vcx1
),
26727 mToC ("vcx1a", fc200000
, 3, (RCP
, RNSDMQ
, I4095
), vcx1
),
26729 mToC ("vcx2", ec300000
, 4, (RCP
, RNSDMQ
, RNSDMQ
, I127
), vcx2
),
26730 mToC ("vcx2a", fc300000
, 4, (RCP
, RNSDMQ
, RNSDMQ
, I127
), vcx2
),
26732 mToC ("vcx3", ec800000
, 5, (RCP
, RNSDMQ
, RNSDMQ
, RNSDMQ
, I15
), vcx3
),
26733 mToC ("vcx3a", fc800000
, 5, (RCP
, RNSDMQ
, RNSDMQ
, RNSDMQ
, I15
), vcx3
),
26737 #undef THUMB_VARIANT
26769 /* MD interface: bits in the object file. */
26771 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
26772 for use in the a.out file, and stores them in the array pointed to by buf.
26773 This knows about the endian-ness of the target machine and does
26774 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
26775 2 (short) and 4 (long) Floating numbers are put out as a series of
26776 LITTLENUMS (shorts, here at least). */
26779 md_number_to_chars (char * buf
, valueT val
, int n
)
26781 if (target_big_endian
)
26782 number_to_chars_bigendian (buf
, val
, n
);
26784 number_to_chars_littleendian (buf
, val
, n
);
26788 md_chars_to_number (char * buf
, int n
)
26791 unsigned char * where
= (unsigned char *) buf
;
26793 if (target_big_endian
)
26798 result
|= (*where
++ & 255);
26806 result
|= (where
[n
] & 255);
26813 /* MD interface: Sections. */
26815 /* Calculate the maximum variable size (i.e., excluding fr_fix)
26816 that an rs_machine_dependent frag may reach. */
26819 arm_frag_max_var (fragS
*fragp
)
26821 /* We only use rs_machine_dependent for variable-size Thumb instructions,
26822 which are either THUMB_SIZE (2) or INSN_SIZE (4).
26824 Note that we generate relaxable instructions even for cases that don't
26825 really need it, like an immediate that's a trivial constant. So we're
26826 overestimating the instruction size for some of those cases. Rather
26827 than putting more intelligence here, it would probably be better to
26828 avoid generating a relaxation frag in the first place when it can be
26829 determined up front that a short instruction will suffice. */
26831 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
26835 /* Estimate the size of a frag before relaxing. Assume everything fits in
26839 md_estimate_size_before_relax (fragS
* fragp
,
26840 segT segtype ATTRIBUTE_UNUSED
)
26846 /* Convert a machine dependent frag. */
26849 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
26851 unsigned long insn
;
26852 unsigned long old_op
;
26860 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
26862 old_op
= bfd_get_16(abfd
, buf
);
26863 if (fragp
->fr_symbol
)
26865 exp
.X_op
= O_symbol
;
26866 exp
.X_add_symbol
= fragp
->fr_symbol
;
26870 exp
.X_op
= O_constant
;
26872 exp
.X_add_number
= fragp
->fr_offset
;
26873 opcode
= fragp
->fr_subtype
;
26876 case T_MNEM_ldr_pc
:
26877 case T_MNEM_ldr_pc2
:
26878 case T_MNEM_ldr_sp
:
26879 case T_MNEM_str_sp
:
26886 if (fragp
->fr_var
== 4)
26888 insn
= THUMB_OP32 (opcode
);
26889 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
26891 insn
|= (old_op
& 0x700) << 4;
26895 insn
|= (old_op
& 7) << 12;
26896 insn
|= (old_op
& 0x38) << 13;
26898 insn
|= 0x00000c00;
26899 put_thumb32_insn (buf
, insn
);
26900 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
26904 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
26906 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
26909 /* Thumb bits should be set in the frag handling so we process them
26910 after all symbols have been seen. PR gas/25235. */
26911 if (exp
.X_op
== O_symbol
26912 && exp
.X_add_symbol
!= NULL
26913 && S_IS_DEFINED (exp
.X_add_symbol
)
26914 && THUMB_IS_FUNC (exp
.X_add_symbol
))
26915 exp
.X_add_number
|= 1;
26917 if (fragp
->fr_var
== 4)
26919 insn
= THUMB_OP32 (opcode
);
26920 insn
|= (old_op
& 0xf0) << 4;
26921 put_thumb32_insn (buf
, insn
);
26922 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
26926 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
26927 exp
.X_add_number
-= 4;
26935 if (fragp
->fr_var
== 4)
26937 int r0off
= (opcode
== T_MNEM_mov
26938 || opcode
== T_MNEM_movs
) ? 0 : 8;
26939 insn
= THUMB_OP32 (opcode
);
26940 insn
= (insn
& 0xe1ffffff) | 0x10000000;
26941 insn
|= (old_op
& 0x700) << r0off
;
26942 put_thumb32_insn (buf
, insn
);
26943 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
26947 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
26952 if (fragp
->fr_var
== 4)
26954 insn
= THUMB_OP32(opcode
);
26955 put_thumb32_insn (buf
, insn
);
26956 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
26959 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
26963 if (fragp
->fr_var
== 4)
26965 insn
= THUMB_OP32(opcode
);
26966 insn
|= (old_op
& 0xf00) << 14;
26967 put_thumb32_insn (buf
, insn
);
26968 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
26971 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
26974 case T_MNEM_add_sp
:
26975 case T_MNEM_add_pc
:
26976 case T_MNEM_inc_sp
:
26977 case T_MNEM_dec_sp
:
26978 if (fragp
->fr_var
== 4)
26980 /* ??? Choose between add and addw. */
26981 insn
= THUMB_OP32 (opcode
);
26982 insn
|= (old_op
& 0xf0) << 4;
26983 put_thumb32_insn (buf
, insn
);
26984 if (opcode
== T_MNEM_add_pc
)
26985 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
26987 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
26990 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
26998 if (fragp
->fr_var
== 4)
27000 insn
= THUMB_OP32 (opcode
);
27001 insn
|= (old_op
& 0xf0) << 4;
27002 insn
|= (old_op
& 0xf) << 16;
27003 put_thumb32_insn (buf
, insn
);
27004 if (insn
& (1 << 20))
27005 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
27007 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
27010 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
27016 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
27017 (enum bfd_reloc_code_real
) reloc_type
);
27018 fixp
->fx_file
= fragp
->fr_file
;
27019 fixp
->fx_line
= fragp
->fr_line
;
27020 fragp
->fr_fix
+= fragp
->fr_var
;
27022 /* Set whether we use thumb-2 ISA based on final relaxation results. */
27023 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
27024 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
27025 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
27028 /* Return the size of a relaxable immediate operand instruction.
27029 SHIFT and SIZE specify the form of the allowable immediate. */
27031 relax_immediate (fragS
*fragp
, int size
, int shift
)
27037 /* ??? Should be able to do better than this. */
27038 if (fragp
->fr_symbol
)
27041 low
= (1 << shift
) - 1;
27042 mask
= (1 << (shift
+ size
)) - (1 << shift
);
27043 offset
= fragp
->fr_offset
;
27044 /* Force misaligned offsets to 32-bit variant. */
27047 if (offset
& ~mask
)
27052 /* Get the address of a symbol during relaxation. */
27054 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
27060 sym
= fragp
->fr_symbol
;
27061 sym_frag
= symbol_get_frag (sym
);
27062 know (S_GET_SEGMENT (sym
) != absolute_section
27063 || sym_frag
== &zero_address_frag
);
27064 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
27066 /* If frag has yet to be reached on this pass, assume it will
27067 move by STRETCH just as we did. If this is not so, it will
27068 be because some frag between grows, and that will force
27072 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
27076 /* Adjust stretch for any alignment frag. Note that if have
27077 been expanding the earlier code, the symbol may be
27078 defined in what appears to be an earlier frag. FIXME:
27079 This doesn't handle the fr_subtype field, which specifies
27080 a maximum number of bytes to skip when doing an
27082 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
27084 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
27087 stretch
= - ((- stretch
)
27088 & ~ ((1 << (int) f
->fr_offset
) - 1));
27090 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
27102 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
27105 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
27110 /* Assume worst case for symbols not known to be in the same section. */
27111 if (fragp
->fr_symbol
== NULL
27112 || !S_IS_DEFINED (fragp
->fr_symbol
)
27113 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
27114 || S_IS_WEAK (fragp
->fr_symbol
)
27115 || THUMB_IS_FUNC (fragp
->fr_symbol
))
27118 val
= relaxed_symbol_addr (fragp
, stretch
);
27119 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
27120 addr
= (addr
+ 4) & ~3;
27121 /* Force misaligned targets to 32-bit variant. */
27125 if (val
< 0 || val
> 1020)
27130 /* Return the size of a relaxable add/sub immediate instruction. */
27132 relax_addsub (fragS
*fragp
, asection
*sec
)
27137 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
27138 op
= bfd_get_16(sec
->owner
, buf
);
27139 if ((op
& 0xf) == ((op
>> 4) & 0xf))
27140 return relax_immediate (fragp
, 8, 0);
27142 return relax_immediate (fragp
, 3, 0);
27145 /* Return TRUE iff the definition of symbol S could be pre-empted
27146 (overridden) at link or load time. */
27148 symbol_preemptible (symbolS
*s
)
27150 /* Weak symbols can always be pre-empted. */
27154 /* Non-global symbols cannot be pre-empted. */
27155 if (! S_IS_EXTERNAL (s
))
27159 /* In ELF, a global symbol can be marked protected, or private. In that
27160 case it can't be pre-empted (other definitions in the same link unit
27161 would violate the ODR). */
27162 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
27166 /* Other global symbols might be pre-empted. */
27170 /* Return the size of a relaxable branch instruction. BITS is the
27171 size of the offset field in the narrow instruction. */
27174 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
27180 /* Assume worst case for symbols not known to be in the same section. */
27181 if (!S_IS_DEFINED (fragp
->fr_symbol
)
27182 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
27183 || S_IS_WEAK (fragp
->fr_symbol
))
27187 /* A branch to a function in ARM state will require interworking. */
27188 if (S_IS_DEFINED (fragp
->fr_symbol
)
27189 && ARM_IS_FUNC (fragp
->fr_symbol
))
27193 if (symbol_preemptible (fragp
->fr_symbol
))
27196 val
= relaxed_symbol_addr (fragp
, stretch
);
27197 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
27200 /* Offset is a signed value *2 */
27202 if (val
>= limit
|| val
< -limit
)
27208 /* Relax a machine dependent frag. This returns the amount by which
27209 the current size of the frag should change. */
27212 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
27217 oldsize
= fragp
->fr_var
;
27218 switch (fragp
->fr_subtype
)
27220 case T_MNEM_ldr_pc2
:
27221 newsize
= relax_adr (fragp
, sec
, stretch
);
27223 case T_MNEM_ldr_pc
:
27224 case T_MNEM_ldr_sp
:
27225 case T_MNEM_str_sp
:
27226 newsize
= relax_immediate (fragp
, 8, 2);
27230 newsize
= relax_immediate (fragp
, 5, 2);
27234 newsize
= relax_immediate (fragp
, 5, 1);
27238 newsize
= relax_immediate (fragp
, 5, 0);
27241 newsize
= relax_adr (fragp
, sec
, stretch
);
27247 newsize
= relax_immediate (fragp
, 8, 0);
27250 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
27253 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
27255 case T_MNEM_add_sp
:
27256 case T_MNEM_add_pc
:
27257 newsize
= relax_immediate (fragp
, 8, 2);
27259 case T_MNEM_inc_sp
:
27260 case T_MNEM_dec_sp
:
27261 newsize
= relax_immediate (fragp
, 7, 2);
27267 newsize
= relax_addsub (fragp
, sec
);
27273 fragp
->fr_var
= newsize
;
27274 /* Freeze wide instructions that are at or before the same location as
27275 in the previous pass. This avoids infinite loops.
27276 Don't freeze them unconditionally because targets may be artificially
27277 misaligned by the expansion of preceding frags. */
27278 if (stretch
<= 0 && newsize
> 2)
27280 md_convert_frag (sec
->owner
, sec
, fragp
);
27284 return newsize
- oldsize
;
27287 /* Round up a section size to the appropriate boundary. */
27290 md_section_align (segT segment ATTRIBUTE_UNUSED
,
27296 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
27297 of an rs_align_code fragment. */
27300 arm_handle_align (fragS
* fragP
)
27302 static unsigned char const arm_noop
[2][2][4] =
27305 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
27306 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
27309 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
27310 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
27313 static unsigned char const thumb_noop
[2][2][2] =
27316 {0xc0, 0x46}, /* LE */
27317 {0x46, 0xc0}, /* BE */
27320 {0x00, 0xbf}, /* LE */
27321 {0xbf, 0x00} /* BE */
27324 static unsigned char const wide_thumb_noop
[2][4] =
27325 { /* Wide Thumb-2 */
27326 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
27327 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
27330 unsigned bytes
, fix
, noop_size
;
27332 const unsigned char * noop
;
27333 const unsigned char *narrow_noop
= NULL
;
27338 if (fragP
->fr_type
!= rs_align_code
)
27341 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
27342 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
27345 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
27346 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
27348 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
27350 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
27352 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
27353 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
27355 narrow_noop
= thumb_noop
[1][target_big_endian
];
27356 noop
= wide_thumb_noop
[target_big_endian
];
27359 noop
= thumb_noop
[0][target_big_endian
];
27367 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
27368 ? selected_cpu
: arm_arch_none
,
27370 [target_big_endian
];
27377 fragP
->fr_var
= noop_size
;
27379 if (bytes
& (noop_size
- 1))
27381 fix
= bytes
& (noop_size
- 1);
27383 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
27385 memset (p
, 0, fix
);
27392 if (bytes
& noop_size
)
27394 /* Insert a narrow noop. */
27395 memcpy (p
, narrow_noop
, noop_size
);
27397 bytes
-= noop_size
;
27401 /* Use wide noops for the remainder */
27405 while (bytes
>= noop_size
)
27407 memcpy (p
, noop
, noop_size
);
27409 bytes
-= noop_size
;
27413 fragP
->fr_fix
+= fix
;
27416 /* Called from md_do_align. Used to create an alignment
27417 frag in a code section. */
27420 arm_frag_align_code (int n
, int max
)
27424 /* We assume that there will never be a requirement
27425 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
27426 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
27431 _("alignments greater than %d bytes not supported in .text sections."),
27432 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
27433 as_fatal ("%s", err_msg
);
27436 p
= frag_var (rs_align_code
,
27437 MAX_MEM_FOR_RS_ALIGN_CODE
,
27439 (relax_substateT
) max
,
27446 /* Perform target specific initialisation of a frag.
27447 Note - despite the name this initialisation is not done when the frag
27448 is created, but only when its type is assigned. A frag can be created
27449 and used a long time before its type is set, so beware of assuming that
27450 this initialisation is performed first. */
27454 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
27456 /* Record whether this frag is in an ARM or a THUMB area. */
27457 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
27460 #else /* OBJ_ELF is defined. */
27462 arm_init_frag (fragS
* fragP
, int max_chars
)
27464 bool frag_thumb_mode
;
27466 /* If the current ARM vs THUMB mode has not already
27467 been recorded into this frag then do so now. */
27468 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
27469 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
27471 /* PR 21809: Do not set a mapping state for debug sections
27472 - it just confuses other tools. */
27473 if (bfd_section_flags (now_seg
) & SEC_DEBUGGING
)
27476 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
27478 /* Record a mapping symbol for alignment frags. We will delete this
27479 later if the alignment ends up empty. */
27480 switch (fragP
->fr_type
)
27483 case rs_align_test
:
27485 mapping_state_2 (MAP_DATA
, max_chars
);
27487 case rs_align_code
:
27488 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
27495 /* When we change sections we need to issue a new mapping symbol. */
27498 arm_elf_change_section (void)
27500 /* Link an unlinked unwind index table section to the .text section. */
27501 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
27502 && elf_linked_to_section (now_seg
) == NULL
)
27503 elf_linked_to_section (now_seg
) = text_section
;
27507 arm_elf_section_type (const char * str
, size_t len
)
27509 if (len
== 5 && startswith (str
, "exidx"))
27510 return SHT_ARM_EXIDX
;
27515 /* Code to deal with unwinding tables. */
27517 static void add_unwind_adjustsp (offsetT
);
27519 /* Generate any deferred unwind frame offset. */
27522 flush_pending_unwind (void)
27526 offset
= unwind
.pending_offset
;
27527 unwind
.pending_offset
= 0;
27529 add_unwind_adjustsp (offset
);
27532 /* Add an opcode to this list for this function. Two-byte opcodes should
27533 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
27537 add_unwind_opcode (valueT op
, int length
)
27539 /* Add any deferred stack adjustment. */
27540 if (unwind
.pending_offset
)
27541 flush_pending_unwind ();
27543 unwind
.sp_restored
= 0;
27545 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
27547 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
27548 if (unwind
.opcodes
)
27549 unwind
.opcodes
= XRESIZEVEC (unsigned char, unwind
.opcodes
,
27550 unwind
.opcode_alloc
);
27552 unwind
.opcodes
= XNEWVEC (unsigned char, unwind
.opcode_alloc
);
27557 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
27559 unwind
.opcode_count
++;
27563 /* Add unwind opcodes to adjust the stack pointer. */
27566 add_unwind_adjustsp (offsetT offset
)
27570 if (offset
> 0x200)
27572 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
27577 /* Long form: 0xb2, uleb128. */
27578 /* This might not fit in a word so add the individual bytes,
27579 remembering the list is built in reverse order. */
27580 o
= (valueT
) ((offset
- 0x204) >> 2);
27582 add_unwind_opcode (0, 1);
27584 /* Calculate the uleb128 encoding of the offset. */
27588 bytes
[n
] = o
& 0x7f;
27594 /* Add the insn. */
27596 add_unwind_opcode (bytes
[n
- 1], 1);
27597 add_unwind_opcode (0xb2, 1);
27599 else if (offset
> 0x100)
27601 /* Two short opcodes. */
27602 add_unwind_opcode (0x3f, 1);
27603 op
= (offset
- 0x104) >> 2;
27604 add_unwind_opcode (op
, 1);
27606 else if (offset
> 0)
27608 /* Short opcode. */
27609 op
= (offset
- 4) >> 2;
27610 add_unwind_opcode (op
, 1);
27612 else if (offset
< 0)
27615 while (offset
> 0x100)
27617 add_unwind_opcode (0x7f, 1);
27620 op
= ((offset
- 4) >> 2) | 0x40;
27621 add_unwind_opcode (op
, 1);
27625 /* Finish the list of unwind opcodes for this function. */
27628 finish_unwind_opcodes (void)
27632 if (unwind
.fp_used
)
27634 /* Adjust sp as necessary. */
27635 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
27636 flush_pending_unwind ();
27638 /* After restoring sp from the frame pointer. */
27639 op
= 0x90 | unwind
.fp_reg
;
27640 add_unwind_opcode (op
, 1);
27643 flush_pending_unwind ();
27647 /* Start an exception table entry. If idx is nonzero this is an index table
27651 start_unwind_section (const segT text_seg
, int idx
)
27653 const char * text_name
;
27654 const char * prefix
;
27655 const char * prefix_once
;
27656 struct elf_section_match match
;
27664 prefix
= ELF_STRING_ARM_unwind
;
27665 prefix_once
= ELF_STRING_ARM_unwind_once
;
27666 type
= SHT_ARM_EXIDX
;
27670 prefix
= ELF_STRING_ARM_unwind_info
;
27671 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
27672 type
= SHT_PROGBITS
;
27675 text_name
= segment_name (text_seg
);
27676 if (streq (text_name
, ".text"))
27679 if (startswith (text_name
, ".gnu.linkonce.t."))
27681 prefix
= prefix_once
;
27682 text_name
+= strlen (".gnu.linkonce.t.");
27685 sec_name
= concat (prefix
, text_name
, (char *) NULL
);
27689 memset (&match
, 0, sizeof (match
));
27691 /* Handle COMDAT group. */
27692 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
27694 match
.group_name
= elf_group_name (text_seg
);
27695 if (match
.group_name
== NULL
)
27697 as_bad (_("Group section `%s' has no group signature"),
27698 segment_name (text_seg
));
27699 ignore_rest_of_line ();
27702 flags
|= SHF_GROUP
;
27706 obj_elf_change_section (sec_name
, type
, flags
, 0, &match
,
27709 /* Set the section link for index tables. */
27711 elf_linked_to_section (now_seg
) = text_seg
;
27715 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
27716 personality routine data. Returns zero, or the index table value for
27717 an inline entry. */
27720 create_unwind_entry (int have_data
)
27725 /* The current word of data. */
27727 /* The number of bytes left in this word. */
27730 finish_unwind_opcodes ();
27732 /* Remember the current text section. */
27733 unwind
.saved_seg
= now_seg
;
27734 unwind
.saved_subseg
= now_subseg
;
27736 start_unwind_section (now_seg
, 0);
27738 if (unwind
.personality_routine
== NULL
)
27740 if (unwind
.personality_index
== -2)
27743 as_bad (_("handlerdata in cantunwind frame"));
27744 return 1; /* EXIDX_CANTUNWIND. */
27747 /* Use a default personality routine if none is specified. */
27748 if (unwind
.personality_index
== -1)
27750 if (unwind
.opcode_count
> 3)
27751 unwind
.personality_index
= 1;
27753 unwind
.personality_index
= 0;
27756 /* Space for the personality routine entry. */
27757 if (unwind
.personality_index
== 0)
27759 if (unwind
.opcode_count
> 3)
27760 as_bad (_("too many unwind opcodes for personality routine 0"));
27764 /* All the data is inline in the index table. */
27767 while (unwind
.opcode_count
> 0)
27769 unwind
.opcode_count
--;
27770 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
27774 /* Pad with "finish" opcodes. */
27776 data
= (data
<< 8) | 0xb0;
27783 /* We get two opcodes "free" in the first word. */
27784 size
= unwind
.opcode_count
- 2;
27788 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
27789 if (unwind
.personality_index
!= -1)
27791 as_bad (_("attempt to recreate an unwind entry"));
27795 /* An extra byte is required for the opcode count. */
27796 size
= unwind
.opcode_count
+ 1;
27799 size
= (size
+ 3) >> 2;
27801 as_bad (_("too many unwind opcodes"));
27803 frag_align (2, 0, 0);
27804 record_alignment (now_seg
, 2);
27805 unwind
.table_entry
= expr_build_dot ();
27807 /* Allocate the table entry. */
27808 ptr
= frag_more ((size
<< 2) + 4);
27809 /* PR 13449: Zero the table entries in case some of them are not used. */
27810 memset (ptr
, 0, (size
<< 2) + 4);
27811 where
= frag_now_fix () - ((size
<< 2) + 4);
27813 switch (unwind
.personality_index
)
27816 /* ??? Should this be a PLT generating relocation? */
27817 /* Custom personality routine. */
27818 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
27819 BFD_RELOC_ARM_PREL31
);
27824 /* Set the first byte to the number of additional words. */
27825 data
= size
> 0 ? size
- 1 : 0;
27829 /* ABI defined personality routines. */
27831 /* Three opcodes bytes are packed into the first word. */
27838 /* The size and first two opcode bytes go in the first word. */
27839 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
27844 /* Should never happen. */
27848 /* Pack the opcodes into words (MSB first), reversing the list at the same
27850 while (unwind
.opcode_count
> 0)
27854 md_number_to_chars (ptr
, data
, 4);
27859 unwind
.opcode_count
--;
27861 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
27864 /* Finish off the last word. */
27867 /* Pad with "finish" opcodes. */
27869 data
= (data
<< 8) | 0xb0;
27871 md_number_to_chars (ptr
, data
, 4);
27876 /* Add an empty descriptor if there is no user-specified data. */
27877 ptr
= frag_more (4);
27878 md_number_to_chars (ptr
, 0, 4);
27885 /* Initialize the DWARF-2 unwind information for this procedure. */
27888 tc_arm_frame_initial_instructions (void)
27890 cfi_add_CFA_def_cfa (REG_SP
, 0);
27892 #endif /* OBJ_ELF */
27894 /* Convert REGNAME to a DWARF-2 register number. */
27897 tc_arm_regname_to_dw2regnum (char *regname
)
27899 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
27903 /* PR 16694: Allow VFP registers as well. */
27904 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
27908 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
27917 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
27921 exp
.X_op
= O_secrel
;
27922 exp
.X_add_symbol
= symbol
;
27923 exp
.X_add_number
= 0;
27924 emit_expr (&exp
, size
);
27928 /* MD interface: Symbol and relocation handling. */
27930 /* Return the address within the segment that a PC-relative fixup is
27931 relative to. For ARM, PC-relative fixups applied to instructions
27932 are generally relative to the location of the fixup plus 8 bytes.
27933 Thumb branches are offset by 4, and Thumb loads relative to PC
27934 require special handling. */
27937 md_pcrel_from_section (fixS
* fixP
, segT seg
)
27939 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
27941 /* If this is pc-relative and we are going to emit a relocation
27942 then we just want to put out any pipeline compensation that the linker
27943 will need. Otherwise we want to use the calculated base.
27944 For WinCE we skip the bias for externals as well, since this
27945 is how the MS ARM-CE assembler behaves and we want to be compatible. */
27947 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
27948 || (arm_force_relocation (fixP
)
27950 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
27956 switch (fixP
->fx_r_type
)
27958 /* PC relative addressing on the Thumb is slightly odd as the
27959 bottom two bits of the PC are forced to zero for the
27960 calculation. This happens *after* application of the
27961 pipeline offset. However, Thumb adrl already adjusts for
27962 this, so we need not do it again. */
27963 case BFD_RELOC_ARM_THUMB_ADD
:
27966 case BFD_RELOC_ARM_THUMB_OFFSET
:
27967 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
27968 case BFD_RELOC_ARM_T32_ADD_PC12
:
27969 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
27970 return (base
+ 4) & ~3;
27972 /* Thumb branches are simply offset by +4. */
27973 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
27974 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
27975 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
27976 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
27977 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
27978 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
27979 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
27980 case BFD_RELOC_ARM_THUMB_BF17
:
27981 case BFD_RELOC_ARM_THUMB_BF19
:
27982 case BFD_RELOC_ARM_THUMB_BF13
:
27983 case BFD_RELOC_ARM_THUMB_LOOP12
:
27986 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
27988 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
27989 && !S_FORCE_RELOC (fixP
->fx_addsy
, true)
27990 && ARM_IS_FUNC (fixP
->fx_addsy
)
27991 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
27992 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
27995 /* BLX is like branches above, but forces the low two bits of PC to
27997 case BFD_RELOC_THUMB_PCREL_BLX
:
27999 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
28000 && !S_FORCE_RELOC (fixP
->fx_addsy
, true)
28001 && THUMB_IS_FUNC (fixP
->fx_addsy
)
28002 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
28003 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
28004 return (base
+ 4) & ~3;
28006 /* ARM mode branches are offset by +8. However, the Windows CE
28007 loader expects the relocation not to take this into account. */
28008 case BFD_RELOC_ARM_PCREL_BLX
:
28010 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
28011 && !S_FORCE_RELOC (fixP
->fx_addsy
, true)
28012 && ARM_IS_FUNC (fixP
->fx_addsy
)
28013 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
28014 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
28017 case BFD_RELOC_ARM_PCREL_CALL
:
28019 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
28020 && !S_FORCE_RELOC (fixP
->fx_addsy
, true)
28021 && THUMB_IS_FUNC (fixP
->fx_addsy
)
28022 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
28023 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
28026 case BFD_RELOC_ARM_PCREL_BRANCH
:
28027 case BFD_RELOC_ARM_PCREL_JUMP
:
28028 case BFD_RELOC_ARM_PLT32
:
28030 /* When handling fixups immediately, because we have already
28031 discovered the value of a symbol, or the address of the frag involved
28032 we must account for the offset by +8, as the OS loader will never see the reloc.
28033 see fixup_segment() in write.c
28034 The S_IS_EXTERNAL test handles the case of global symbols.
28035 Those need the calculated base, not just the pipe compensation the linker will need. */
28037 && fixP
->fx_addsy
!= NULL
28038 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
28039 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
28047 /* ARM mode loads relative to PC are also offset by +8. Unlike
28048 branches, the Windows CE loader *does* expect the relocation
28049 to take this into account. */
28050 case BFD_RELOC_ARM_OFFSET_IMM
:
28051 case BFD_RELOC_ARM_OFFSET_IMM8
:
28052 case BFD_RELOC_ARM_HWLITERAL
:
28053 case BFD_RELOC_ARM_LITERAL
:
28054 case BFD_RELOC_ARM_CP_OFF_IMM
:
28058 /* Other PC-relative relocations are un-offset. */
/* Controls -m[no-]warn-syms: when true, warn about assignments that
   create symbols whose names match ARM instruction mnemonics.  */
static bool flag_warn_syms = true;
28067 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
28069 /* PR 18347 - Warn if the user attempts to create a symbol with the same
28070 name as an ARM instruction. Whilst strictly speaking it is allowed, it
28071 does mean that the resulting code might be very confusing to the reader.
28072 Also this warning can be triggered if the user omits an operand before
28073 an immediate address, eg:
28077 GAS treats this as an assignment of the value of the symbol foo to a
28078 symbol LDR, and so (without this code) it will not issue any kind of
28079 warning or error message.
28081 Note - ARM instructions are case-insensitive but the strings in the hash
28082 table are all stored in lower case, so we must first ensure that name is
28084 if (flag_warn_syms
&& arm_ops_hsh
)
28086 char * nbuf
= strdup (name
);
28089 for (p
= nbuf
; *p
; p
++)
28091 if (str_hash_find (arm_ops_hsh
, nbuf
) != NULL
)
28093 static htab_t already_warned
= NULL
;
28095 if (already_warned
== NULL
)
28096 already_warned
= str_htab_create ();
28097 /* Only warn about the symbol once. To keep the code
28098 simple we let str_hash_insert do the lookup for us. */
28099 if (str_hash_find (already_warned
, nbuf
) == NULL
)
28101 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
28102 str_hash_insert (already_warned
, nbuf
, NULL
, 0);
28112 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
28113 Otherwise we have no need to default values of symbols. */
28116 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
28119 if (name
[0] == '_' && name
[1] == 'G'
28120 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
28124 if (symbol_find (name
))
28125 as_bad (_("GOT already in the symbol table"));
28127 GOT_symbol
= symbol_new (name
, undefined_section
,
28128 &zero_address_frag
, 0);
28138 /* Subroutine of md_apply_fix. Check to see if an immediate can be
28139 computed as two separate immediate values, added together. We
28140 already know that this value cannot be computed by just one ARM
28143 static unsigned int
28144 validate_immediate_twopart (unsigned int val
,
28145 unsigned int * highpart
)
28150 for (i
= 0; i
< 32; i
+= 2)
28151 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
28157 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
28159 else if (a
& 0xff0000)
28161 if (a
& 0xff000000)
28163 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
28167 gas_assert (a
& 0xff000000);
28168 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
28171 return (a
& 0xff) | (i
<< 7);
28178 validate_offset_imm (unsigned int val
, int hwse
)
28180 if ((hwse
&& val
> 255) || val
> 4095)
28185 /* Subroutine of md_apply_fix. Do those data_ops which can take a
28186 negative immediate constant by altering the instruction. A bit of
28191 by inverting the second operand, and
28194 by negating the second operand. */
28197 negate_data_op (unsigned long * instruction
,
28198 unsigned long value
)
28201 unsigned long negated
, inverted
;
28203 negated
= encode_arm_immediate (-value
);
28204 inverted
= encode_arm_immediate (~value
);
28206 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
28209 /* First negates. */
28210 case OPCODE_SUB
: /* ADD <-> SUB */
28211 new_inst
= OPCODE_ADD
;
28216 new_inst
= OPCODE_SUB
;
28220 case OPCODE_CMP
: /* CMP <-> CMN */
28221 new_inst
= OPCODE_CMN
;
28226 new_inst
= OPCODE_CMP
;
28230 /* Now Inverted ops. */
28231 case OPCODE_MOV
: /* MOV <-> MVN */
28232 new_inst
= OPCODE_MVN
;
28237 new_inst
= OPCODE_MOV
;
28241 case OPCODE_AND
: /* AND <-> BIC */
28242 new_inst
= OPCODE_BIC
;
28247 new_inst
= OPCODE_AND
;
28251 case OPCODE_ADC
: /* ADC <-> SBC */
28252 new_inst
= OPCODE_SBC
;
28257 new_inst
= OPCODE_ADC
;
28261 /* We cannot do anything. */
28266 if (value
== (unsigned) FAIL
)
28269 *instruction
&= OPCODE_MASK
;
28270 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
28274 /* Like negate_data_op, but for Thumb-2. */
28276 static unsigned int
28277 thumb32_negate_data_op (valueT
*instruction
, unsigned int value
)
28279 unsigned int op
, new_inst
;
28281 unsigned int negated
, inverted
;
28283 negated
= encode_thumb32_immediate (-value
);
28284 inverted
= encode_thumb32_immediate (~value
);
28286 rd
= (*instruction
>> 8) & 0xf;
28287 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
28290 /* ADD <-> SUB. Includes CMP <-> CMN. */
28291 case T2_OPCODE_SUB
:
28292 new_inst
= T2_OPCODE_ADD
;
28296 case T2_OPCODE_ADD
:
28297 new_inst
= T2_OPCODE_SUB
;
28301 /* ORR <-> ORN. Includes MOV <-> MVN. */
28302 case T2_OPCODE_ORR
:
28303 new_inst
= T2_OPCODE_ORN
;
28307 case T2_OPCODE_ORN
:
28308 new_inst
= T2_OPCODE_ORR
;
28312 /* AND <-> BIC. TST has no inverted equivalent. */
28313 case T2_OPCODE_AND
:
28314 new_inst
= T2_OPCODE_BIC
;
28321 case T2_OPCODE_BIC
:
28322 new_inst
= T2_OPCODE_AND
;
28327 case T2_OPCODE_ADC
:
28328 new_inst
= T2_OPCODE_SBC
;
28332 case T2_OPCODE_SBC
:
28333 new_inst
= T2_OPCODE_ADC
;
28337 /* We cannot do anything. */
28342 if (value
== (unsigned int)FAIL
)
28345 *instruction
&= T2_OPCODE_MASK
;
28346 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
28350 /* Read a 32-bit thumb instruction from buf. */
28352 static unsigned long
28353 get_thumb32_insn (char * buf
)
28355 unsigned long insn
;
28356 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
28357 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
28362 /* We usually want to set the low bit on the address of thumb function
28363 symbols. In particular .word foo - . should have the low bit set.
28364 Generic code tries to fold the difference of two symbols to
28365 a constant. Prevent this and force a relocation when the first symbols
28366 is a thumb function. */
28369 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
28371 if (op
== O_subtract
28372 && l
->X_op
== O_symbol
28373 && r
->X_op
== O_symbol
28374 && THUMB_IS_FUNC (l
->X_add_symbol
))
28376 l
->X_op
= O_subtract
;
28377 l
->X_op_symbol
= r
->X_add_symbol
;
28378 l
->X_add_number
-= r
->X_add_number
;
28382 /* Process as normal. */
28386 /* Encode Thumb2 unconditional branches and calls. The encoding
28387 for the 2 are identical for the immediate values. */
28390 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
28392 #define T2I1I2MASK ((1 << 13) | (1 << 11))
28395 addressT S
, I1
, I2
, lo
, hi
;
28397 S
= (value
>> 24) & 0x01;
28398 I1
= (value
>> 23) & 0x01;
28399 I2
= (value
>> 22) & 0x01;
28400 hi
= (value
>> 12) & 0x3ff;
28401 lo
= (value
>> 1) & 0x7ff;
28402 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
28403 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
28404 newval
|= (S
<< 10) | hi
;
28405 newval2
&= ~T2I1I2MASK
;
28406 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
28407 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
28408 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
28412 md_apply_fix (fixS
* fixP
,
28416 valueT value
= * valP
;
28418 unsigned int newimm
;
28419 unsigned long temp
;
28421 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
28423 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
28425 /* Note whether this will delete the relocation. */
28427 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
28430 /* On a 64-bit host, silently truncate 'value' to 32 bits for
28431 consistency with the behaviour on 32-bit hosts. Remember value
28433 value
&= 0xffffffff;
28434 value
^= 0x80000000;
28435 value
-= 0x80000000;
28438 fixP
->fx_addnumber
= value
;
28440 /* Same treatment for fixP->fx_offset. */
28441 fixP
->fx_offset
&= 0xffffffff;
28442 fixP
->fx_offset
^= 0x80000000;
28443 fixP
->fx_offset
-= 0x80000000;
28445 switch (fixP
->fx_r_type
)
28447 case BFD_RELOC_NONE
:
28448 /* This will need to go in the object file. */
28452 case BFD_RELOC_ARM_IMMEDIATE
:
28453 /* We claim that this fixup has been processed here,
28454 even if in fact we generate an error because we do
28455 not have a reloc for it, so tc_gen_reloc will reject it. */
28458 if (fixP
->fx_addsy
)
28460 const char *msg
= 0;
28462 if (! S_IS_DEFINED (fixP
->fx_addsy
))
28463 msg
= _("undefined symbol %s used as an immediate value");
28464 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
28465 msg
= _("symbol %s is in a different section");
28466 else if (S_IS_WEAK (fixP
->fx_addsy
))
28467 msg
= _("symbol %s is weak and may be overridden later");
28471 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28472 msg
, S_GET_NAME (fixP
->fx_addsy
));
28477 temp
= md_chars_to_number (buf
, INSN_SIZE
);
28479 /* If the offset is negative, we should use encoding A2 for ADR. */
28480 if ((temp
& 0xfff0000) == 0x28f0000 && (offsetT
) value
< 0)
28481 newimm
= negate_data_op (&temp
, value
);
28484 newimm
= encode_arm_immediate (value
);
28486 /* If the instruction will fail, see if we can fix things up by
28487 changing the opcode. */
28488 if (newimm
== (unsigned int) FAIL
)
28489 newimm
= negate_data_op (&temp
, value
);
28490 /* MOV accepts both ARM modified immediate (A1 encoding) and
28491 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
28492 When disassembling, MOV is preferred when there is no encoding
28494 if (newimm
== (unsigned int) FAIL
28495 && ((temp
>> DATA_OP_SHIFT
) & 0xf) == OPCODE_MOV
28496 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
28497 && !((temp
>> SBIT_SHIFT
) & 0x1)
28498 && value
<= 0xffff)
28500 /* Clear bits[23:20] to change encoding from A1 to A2. */
28501 temp
&= 0xff0fffff;
28502 /* Encoding high 4bits imm. Code below will encode the remaining
28504 temp
|= (value
& 0x0000f000) << 4;
28505 newimm
= value
& 0x00000fff;
28509 if (newimm
== (unsigned int) FAIL
)
28511 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28512 _("invalid constant (%lx) after fixup"),
28513 (unsigned long) value
);
28517 newimm
|= (temp
& 0xfffff000);
28518 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
28521 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
28523 unsigned int highpart
= 0;
28524 unsigned int newinsn
= 0xe1a00000; /* nop. */
28526 if (fixP
->fx_addsy
)
28528 const char *msg
= 0;
28530 if (! S_IS_DEFINED (fixP
->fx_addsy
))
28531 msg
= _("undefined symbol %s used as an immediate value");
28532 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
28533 msg
= _("symbol %s is in a different section");
28534 else if (S_IS_WEAK (fixP
->fx_addsy
))
28535 msg
= _("symbol %s is weak and may be overridden later");
28539 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28540 msg
, S_GET_NAME (fixP
->fx_addsy
));
28545 newimm
= encode_arm_immediate (value
);
28546 temp
= md_chars_to_number (buf
, INSN_SIZE
);
28548 /* If the instruction will fail, see if we can fix things up by
28549 changing the opcode. */
28550 if (newimm
== (unsigned int) FAIL
28551 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
28553 /* No ? OK - try using two ADD instructions to generate
28555 newimm
= validate_immediate_twopart (value
, & highpart
);
28557 /* Yes - then make sure that the second instruction is
28559 if (newimm
!= (unsigned int) FAIL
)
28561 /* Still No ? Try using a negated value. */
28562 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
28563 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
28564 /* Otherwise - give up. */
28567 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28568 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
28573 /* Replace the first operand in the 2nd instruction (which
28574 is the PC) with the destination register. We have
28575 already added in the PC in the first instruction and we
28576 do not want to do it again. */
28577 newinsn
&= ~ 0xf0000;
28578 newinsn
|= ((newinsn
& 0x0f000) << 4);
28581 newimm
|= (temp
& 0xfffff000);
28582 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
28584 highpart
|= (newinsn
& 0xfffff000);
28585 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
28589 case BFD_RELOC_ARM_OFFSET_IMM
:
28590 if (!fixP
->fx_done
&& seg
->use_rela_p
)
28592 /* Fall through. */
28594 case BFD_RELOC_ARM_LITERAL
:
28595 sign
= (offsetT
) value
> 0;
28597 if ((offsetT
) value
< 0)
28600 if (validate_offset_imm (value
, 0) == FAIL
)
28602 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
28603 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28604 _("invalid literal constant: pool needs to be closer"));
28606 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28607 _("bad immediate value for offset (%ld)"),
28612 newval
= md_chars_to_number (buf
, INSN_SIZE
);
28614 newval
&= 0xfffff000;
28617 newval
&= 0xff7ff000;
28618 newval
|= value
| (sign
? INDEX_UP
: 0);
28620 md_number_to_chars (buf
, newval
, INSN_SIZE
);
28623 case BFD_RELOC_ARM_OFFSET_IMM8
:
28624 case BFD_RELOC_ARM_HWLITERAL
:
28625 sign
= (offsetT
) value
> 0;
28627 if ((offsetT
) value
< 0)
28630 if (validate_offset_imm (value
, 1) == FAIL
)
28632 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
28633 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28634 _("invalid literal constant: pool needs to be closer"));
28636 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28637 _("bad immediate value for 8-bit offset (%ld)"),
28642 newval
= md_chars_to_number (buf
, INSN_SIZE
);
28644 newval
&= 0xfffff0f0;
28647 newval
&= 0xff7ff0f0;
28648 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
28650 md_number_to_chars (buf
, newval
, INSN_SIZE
);
28653 case BFD_RELOC_ARM_T32_OFFSET_U8
:
28654 if (value
> 1020 || value
% 4 != 0)
28655 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28656 _("bad immediate value for offset (%ld)"), (long) value
);
28659 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
28661 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
28664 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
28665 /* This is a complicated relocation used for all varieties of Thumb32
28666 load/store instruction with immediate offset:
28668 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
28669 *4, optional writeback(W)
28670 (doubleword load/store)
28672 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
28673 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
28674 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
28675 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
28676 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
28678 Uppercase letters indicate bits that are already encoded at
28679 this point. Lowercase letters are our problem. For the
28680 second block of instructions, the secondary opcode nybble
28681 (bits 8..11) is present, and bit 23 is zero, even if this is
28682 a PC-relative operation. */
28683 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
28685 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
28687 if ((newval
& 0xf0000000) == 0xe0000000)
28689 /* Doubleword load/store: 8-bit offset, scaled by 4. */
28690 if ((offsetT
) value
>= 0)
28691 newval
|= (1 << 23);
28694 if (value
% 4 != 0)
28696 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28697 _("offset not a multiple of 4"));
28703 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28704 _("offset out of range"));
28709 else if ((newval
& 0x000f0000) == 0x000f0000)
28711 /* PC-relative, 12-bit offset. */
28712 if ((offsetT
) value
>= 0)
28713 newval
|= (1 << 23);
28718 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28719 _("offset out of range"));
28724 else if ((newval
& 0x00000100) == 0x00000100)
28726 /* Writeback: 8-bit, +/- offset. */
28727 if ((offsetT
) value
>= 0)
28728 newval
|= (1 << 9);
28733 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28734 _("offset out of range"));
28739 else if ((newval
& 0x00000f00) == 0x00000e00)
28741 /* T-instruction: positive 8-bit offset. */
28744 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28745 _("offset out of range"));
28753 /* Positive 12-bit or negative 8-bit offset. */
28754 unsigned int limit
;
28755 if ((offsetT
) value
>= 0)
28757 newval
|= (1 << 23);
28767 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28768 _("offset out of range"));
28775 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
28776 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
28779 case BFD_RELOC_ARM_SHIFT_IMM
:
28780 newval
= md_chars_to_number (buf
, INSN_SIZE
);
28783 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
28785 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28786 _("shift expression is too large"));
28791 /* Shifts of zero must be done as lsl. */
28793 else if (value
== 32)
28795 newval
&= 0xfffff07f;
28796 newval
|= (value
& 0x1f) << 7;
28797 md_number_to_chars (buf
, newval
, INSN_SIZE
);
28800 case BFD_RELOC_ARM_T32_IMMEDIATE
:
28801 case BFD_RELOC_ARM_T32_ADD_IMM
:
28802 case BFD_RELOC_ARM_T32_IMM12
:
28803 case BFD_RELOC_ARM_T32_ADD_PC12
:
28804 /* We claim that this fixup has been processed here,
28805 even if in fact we generate an error because we do
28806 not have a reloc for it, so tc_gen_reloc will reject it. */
28810 && ! S_IS_DEFINED (fixP
->fx_addsy
))
28812 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28813 _("undefined symbol %s used as an immediate value"),
28814 S_GET_NAME (fixP
->fx_addsy
));
28818 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
28820 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
28823 if ((fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
28824 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
28825 Thumb2 modified immediate encoding (T2). */
28826 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
28827 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
28829 newimm
= encode_thumb32_immediate (value
);
28830 if (newimm
== (unsigned int) FAIL
)
28831 newimm
= thumb32_negate_data_op (&newval
, value
);
28833 if (newimm
== (unsigned int) FAIL
)
28835 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
)
28837 /* Turn add/sum into addw/subw. */
28838 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
28839 newval
= (newval
& 0xfeffffff) | 0x02000000;
28840 /* No flat 12-bit imm encoding for addsw/subsw. */
28841 if ((newval
& 0x00100000) == 0)
28843 /* 12 bit immediate for addw/subw. */
28844 if ((offsetT
) value
< 0)
28847 newval
^= 0x00a00000;
28850 newimm
= (unsigned int) FAIL
;
28857 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
28858 UINT16 (T3 encoding), MOVW only accepts UINT16. When
28859 disassembling, MOV is preferred when there is no encoding
28861 if (((newval
>> T2_DATA_OP_SHIFT
) & 0xf) == T2_OPCODE_ORR
28862 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
28863 but with the Rn field [19:16] set to 1111. */
28864 && (((newval
>> 16) & 0xf) == 0xf)
28865 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
)
28866 && !((newval
>> T2_SBIT_SHIFT
) & 0x1)
28867 && value
<= 0xffff)
28869 /* Toggle bit[25] to change encoding from T2 to T3. */
28871 /* Clear bits[19:16]. */
28872 newval
&= 0xfff0ffff;
28873 /* Encoding high 4bits imm. Code below will encode the
28874 remaining low 12bits. */
28875 newval
|= (value
& 0x0000f000) << 4;
28876 newimm
= value
& 0x00000fff;
28881 if (newimm
== (unsigned int)FAIL
)
28883 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28884 _("invalid constant (%lx) after fixup"),
28885 (unsigned long) value
);
28889 newval
|= (newimm
& 0x800) << 15;
28890 newval
|= (newimm
& 0x700) << 4;
28891 newval
|= (newimm
& 0x0ff);
28893 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
28894 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
28897 case BFD_RELOC_ARM_SMC
:
28899 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28900 _("invalid smc expression"));
28902 newval
= md_chars_to_number (buf
, INSN_SIZE
);
28903 newval
|= (value
& 0xf);
28904 md_number_to_chars (buf
, newval
, INSN_SIZE
);
28907 case BFD_RELOC_ARM_HVC
:
28908 if (value
> 0xffff)
28909 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28910 _("invalid hvc expression"));
28911 newval
= md_chars_to_number (buf
, INSN_SIZE
);
28912 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
28913 md_number_to_chars (buf
, newval
, INSN_SIZE
);
28916 case BFD_RELOC_ARM_SWI
:
28917 if (fixP
->tc_fix_data
!= 0)
28920 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28921 _("invalid swi expression"));
28922 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
28924 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
28928 if (value
> 0x00ffffff)
28929 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28930 _("invalid swi expression"));
28931 newval
= md_chars_to_number (buf
, INSN_SIZE
);
28933 md_number_to_chars (buf
, newval
, INSN_SIZE
);
28937 case BFD_RELOC_ARM_MULTI
:
28938 if (value
> 0xffff)
28939 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
28940 _("invalid expression in load/store multiple"));
28941 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
28942 md_number_to_chars (buf
, newval
, INSN_SIZE
);
28946 case BFD_RELOC_ARM_PCREL_CALL
:
28948 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
28950 && !S_FORCE_RELOC (fixP
->fx_addsy
, true)
28951 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
28952 && THUMB_IS_FUNC (fixP
->fx_addsy
))
28953 /* Flip the bl to blx. This is a simple flip
28954 bit here because we generate PCREL_CALL for
28955 unconditional bls. */
28957 newval
= md_chars_to_number (buf
, INSN_SIZE
);
28958 newval
= newval
| 0x10000000;
28959 md_number_to_chars (buf
, newval
, INSN_SIZE
);
28965 goto arm_branch_common
;
28967 case BFD_RELOC_ARM_PCREL_JUMP
:
28968 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
28970 && !S_FORCE_RELOC (fixP
->fx_addsy
, true)
28971 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
28972 && THUMB_IS_FUNC (fixP
->fx_addsy
))
28974 /* This would map to a bl<cond>, b<cond>,
28975 b<always> to a Thumb function. We
28976 need to force a relocation for this particular
28978 newval
= md_chars_to_number (buf
, INSN_SIZE
);
28981 /* Fall through. */
28983 case BFD_RELOC_ARM_PLT32
:
28985 case BFD_RELOC_ARM_PCREL_BRANCH
:
28987 goto arm_branch_common
;
28989 case BFD_RELOC_ARM_PCREL_BLX
:
28992 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
28994 && !S_FORCE_RELOC (fixP
->fx_addsy
, true)
28995 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
28996 && ARM_IS_FUNC (fixP
->fx_addsy
))
28998 /* Flip the blx to a bl and warn. */
28999 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
29000 newval
= 0xeb000000;
29001 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
29002 _("blx to '%s' an ARM ISA state function changed to bl"),
29004 md_number_to_chars (buf
, newval
, INSN_SIZE
);
29010 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
29011 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
29015 /* We are going to store value (shifted right by two) in the
29016 instruction, in a 24 bit, signed field. Bits 26 through 32 either
29017 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
29020 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29021 _("misaligned branch destination"));
29022 if ((value
& 0xfe000000) != 0
29023 && (value
& 0xfe000000) != 0xfe000000)
29024 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
29026 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29028 newval
= md_chars_to_number (buf
, INSN_SIZE
);
29029 newval
|= (value
>> 2) & 0x00ffffff;
29030 /* Set the H bit on BLX instructions. */
29034 newval
|= 0x01000000;
29036 newval
&= ~0x01000000;
29038 md_number_to_chars (buf
, newval
, INSN_SIZE
);
29042 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
29043 /* CBZ can only branch forward. */
29045 /* Attempts to use CBZ to branch to the next instruction
29046 (which, strictly speaking, are prohibited) will be turned into
29049 FIXME: It may be better to remove the instruction completely and
29050 perform relaxation. */
29051 if ((offsetT
) value
== -2)
29053 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
29054 newval
= 0xbf00; /* NOP encoding T1 */
29055 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
29060 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
29062 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29064 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
29065 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
29066 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
29071 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
29072 if (out_of_range_p (value
, 8))
29073 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
29075 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29077 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
29078 newval
|= (value
& 0x1ff) >> 1;
29079 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
29083 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
29084 if (out_of_range_p (value
, 11))
29085 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
29087 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29089 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
29090 newval
|= (value
& 0xfff) >> 1;
29091 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
29095 /* This relocation is misnamed, it should be BRANCH21. */
29096 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
29098 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
29099 && !S_FORCE_RELOC (fixP
->fx_addsy
, true)
29100 && ARM_IS_FUNC (fixP
->fx_addsy
)
29101 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
29103 /* Force a relocation for a branch 20 bits wide. */
29106 if (out_of_range_p (value
, 20))
29107 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29108 _("conditional branch out of range"));
29110 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29113 addressT S
, J1
, J2
, lo
, hi
;
29115 S
= (value
& 0x00100000) >> 20;
29116 J2
= (value
& 0x00080000) >> 19;
29117 J1
= (value
& 0x00040000) >> 18;
29118 hi
= (value
& 0x0003f000) >> 12;
29119 lo
= (value
& 0x00000ffe) >> 1;
29121 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
29122 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
29123 newval
|= (S
<< 10) | hi
;
29124 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
29125 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
29126 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
29130 case BFD_RELOC_THUMB_PCREL_BLX
:
29131 /* If there is a blx from a thumb state function to
29132 another thumb function flip this to a bl and warn
29136 && !S_FORCE_RELOC (fixP
->fx_addsy
, true)
29137 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
29138 && THUMB_IS_FUNC (fixP
->fx_addsy
))
29140 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
29141 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
29142 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
29144 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
29145 newval
= newval
| 0x1000;
29146 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
29147 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
29152 goto thumb_bl_common
;
29154 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
29155 /* A bl from Thumb state ISA to an internal ARM state function
29156 is converted to a blx. */
29158 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
29159 && !S_FORCE_RELOC (fixP
->fx_addsy
, true)
29160 && ARM_IS_FUNC (fixP
->fx_addsy
)
29161 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
29163 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
29164 newval
= newval
& ~0x1000;
29165 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
29166 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
29172 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
29173 /* For a BLX instruction, make sure that the relocation is rounded up
29174 to a word boundary. This follows the semantics of the instruction
29175 which specifies that bit 1 of the target address will come from bit
29176 1 of the base address. */
29177 value
= (value
+ 3) & ~ 3;
29180 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
29181 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
29182 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
29185 if (out_of_range_p (value
, 22))
29187 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
29188 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
29189 else if (out_of_range_p (value
, 24))
29190 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29191 _("Thumb2 branch out of range"));
29194 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29195 encode_thumb2_b_bl_offset (buf
, value
);
29199 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
29200 if (out_of_range_p (value
, 24))
29201 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
29203 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29204 encode_thumb2_b_bl_offset (buf
, value
);
29209 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29214 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29215 md_number_to_chars (buf
, value
, 2);
29219 case BFD_RELOC_ARM_TLS_CALL
:
29220 case BFD_RELOC_ARM_THM_TLS_CALL
:
29221 case BFD_RELOC_ARM_TLS_DESCSEQ
:
29222 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
29223 case BFD_RELOC_ARM_TLS_GOTDESC
:
29224 case BFD_RELOC_ARM_TLS_GD32
:
29225 case BFD_RELOC_ARM_TLS_LE32
:
29226 case BFD_RELOC_ARM_TLS_IE32
:
29227 case BFD_RELOC_ARM_TLS_LDM32
:
29228 case BFD_RELOC_ARM_TLS_LDO32
:
29229 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
29232 /* Same handling as above, but with the arm_fdpic guard. */
29233 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
29234 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
29235 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
29238 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
29242 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29243 _("Relocation supported only in FDPIC mode"));
29247 case BFD_RELOC_ARM_GOT32
:
29248 case BFD_RELOC_ARM_GOTOFF
:
29251 case BFD_RELOC_ARM_GOT_PREL
:
29252 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29253 md_number_to_chars (buf
, value
, 4);
29256 case BFD_RELOC_ARM_TARGET2
:
29257 /* TARGET2 is not partial-inplace, so we need to write the
29258 addend here for REL targets, because it won't be written out
29259 during reloc processing later. */
29260 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29261 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
29264 /* Relocations for FDPIC. */
29265 case BFD_RELOC_ARM_GOTFUNCDESC
:
29266 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
29267 case BFD_RELOC_ARM_FUNCDESC
:
29270 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29271 md_number_to_chars (buf
, 0, 4);
29275 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29276 _("Relocation supported only in FDPIC mode"));
29281 case BFD_RELOC_RVA
:
29283 case BFD_RELOC_ARM_TARGET1
:
29284 case BFD_RELOC_ARM_ROSEGREL32
:
29285 case BFD_RELOC_ARM_SBREL32
:
29286 case BFD_RELOC_32_PCREL
:
29288 case BFD_RELOC_32_SECREL
:
29290 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29292 /* For WinCE we only do this for pcrel fixups. */
29293 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
29295 md_number_to_chars (buf
, value
, 4);
29299 case BFD_RELOC_ARM_PREL31
:
29300 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29302 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
29303 if ((value
^ (value
>> 1)) & 0x40000000)
29305 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29306 _("rel31 relocation overflow"));
29308 newval
|= value
& 0x7fffffff;
29309 md_number_to_chars (buf
, newval
, 4);
29314 case BFD_RELOC_ARM_CP_OFF_IMM
:
29315 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
29316 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
:
29317 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
)
29318 newval
= md_chars_to_number (buf
, INSN_SIZE
);
29320 newval
= get_thumb32_insn (buf
);
29321 if ((newval
& 0x0f200f00) == 0x0d000900)
29323 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
29324 has permitted values that are multiples of 2, in the range -510
29326 if (value
+ 510 > 510 + 510 || (value
& 1))
29327 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29328 _("co-processor offset out of range"));
29330 else if ((newval
& 0xfe001f80) == 0xec000f80)
29332 if (value
+ 511 > 512 + 511 || (value
& 3))
29333 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29334 _("co-processor offset out of range"));
29336 else if (value
+ 1023 > 1023 + 1023 || (value
& 3))
29337 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29338 _("co-processor offset out of range"));
29340 sign
= (offsetT
) value
> 0;
29341 if ((offsetT
) value
< 0)
29343 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
29344 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
29345 newval
= md_chars_to_number (buf
, INSN_SIZE
);
29347 newval
= get_thumb32_insn (buf
);
29350 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
29351 newval
&= 0xffffff80;
29353 newval
&= 0xffffff00;
29357 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
29358 newval
&= 0xff7fff80;
29360 newval
&= 0xff7fff00;
29361 if ((newval
& 0x0f200f00) == 0x0d000900)
29363 /* This is a fp16 vstr/vldr.
29365 It requires the immediate offset in the instruction is shifted
29366 left by 1 to be a half-word offset.
29368 Here, left shift by 1 first, and later right shift by 2
29369 should get the right offset. */
29372 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
29374 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
29375 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
29376 md_number_to_chars (buf
, newval
, INSN_SIZE
);
29378 put_thumb32_insn (buf
, newval
);
29381 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
29382 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
29383 if (value
+ 255 > 255 + 255)
29384 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29385 _("co-processor offset out of range"));
29387 goto cp_off_common
;
29389 case BFD_RELOC_ARM_THUMB_OFFSET
:
29390 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
29391 /* Exactly what ranges, and where the offset is inserted depends
29392 on the type of instruction, we can establish this from the
29394 switch (newval
>> 12)
29396 case 4: /* PC load. */
29397 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
29398 forced to zero for these loads; md_pcrel_from has already
29399 compensated for this. */
29401 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29402 _("invalid offset, target not word aligned (0x%08lX)"),
29403 (((unsigned long) fixP
->fx_frag
->fr_address
29404 + (unsigned long) fixP
->fx_where
) & ~3)
29405 + (unsigned long) value
);
29406 else if (get_recorded_alignment (seg
) < 2)
29407 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
29408 _("section does not have enough alignment to ensure safe PC-relative loads"));
29410 if (value
& ~0x3fc)
29411 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29412 _("invalid offset, value too big (0x%08lX)"),
29415 newval
|= value
>> 2;
29418 case 9: /* SP load/store. */
29419 if (value
& ~0x3fc)
29420 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29421 _("invalid offset, value too big (0x%08lX)"),
29423 newval
|= value
>> 2;
29426 case 6: /* Word load/store. */
29428 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29429 _("invalid offset, value too big (0x%08lX)"),
29431 newval
|= value
<< 4; /* 6 - 2. */
29434 case 7: /* Byte load/store. */
29436 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29437 _("invalid offset, value too big (0x%08lX)"),
29439 newval
|= value
<< 6;
29442 case 8: /* Halfword load/store. */
29444 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29445 _("invalid offset, value too big (0x%08lX)"),
29447 newval
|= value
<< 5; /* 6 - 1. */
29451 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29452 "Unable to process relocation for thumb opcode: %lx",
29453 (unsigned long) newval
);
29456 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
29459 case BFD_RELOC_ARM_THUMB_ADD
:
29460 /* This is a complicated relocation, since we use it for all of
29461 the following immediate relocations:
29465 9bit ADD/SUB SP word-aligned
29466 10bit ADD PC/SP word-aligned
29468 The type of instruction being processed is encoded in the
29475 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
29477 int rd
= (newval
>> 4) & 0xf;
29478 int rs
= newval
& 0xf;
29479 int subtract
= !!(newval
& 0x8000);
29481 /* Check for HI regs, only very restricted cases allowed:
29482 Adjusting SP, and using PC or SP to get an address. */
29483 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
29484 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
29485 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29486 _("invalid Hi register with immediate"));
29488 /* If value is negative, choose the opposite instruction. */
29489 if ((offsetT
) value
< 0)
29492 subtract
= !subtract
;
29493 if ((offsetT
) value
< 0)
29494 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29495 _("immediate value out of range"));
29500 if (value
& ~0x1fc)
29501 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29502 _("invalid immediate for stack address calculation"));
29503 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
29504 newval
|= value
>> 2;
29506 else if (rs
== REG_PC
|| rs
== REG_SP
)
29508 /* PR gas/18541. If the addition is for a defined symbol
29509 within range of an ADR instruction then accept it. */
29512 && fixP
->fx_addsy
!= NULL
)
29516 if (! S_IS_DEFINED (fixP
->fx_addsy
)
29517 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
29518 || S_IS_WEAK (fixP
->fx_addsy
))
29520 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29521 _("address calculation needs a strongly defined nearby symbol"));
29525 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
29527 /* Round up to the next 4-byte boundary. */
29532 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
29536 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29537 _("symbol too far away"));
29547 if (subtract
|| value
& ~0x3fc)
29548 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29549 _("invalid immediate for address calculation (value = 0x%08lX)"),
29550 (unsigned long) (subtract
? - value
: value
));
29551 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
29553 newval
|= value
>> 2;
29558 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29559 _("immediate value out of range"));
29560 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
29561 newval
|= (rd
<< 8) | value
;
29566 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29567 _("immediate value out of range"));
29568 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
29569 newval
|= rd
| (rs
<< 3) | (value
<< 6);
29572 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
29575 case BFD_RELOC_ARM_THUMB_IMM
:
29576 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
29578 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29579 _("invalid immediate: %ld is out of range"),
29582 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
29585 case BFD_RELOC_ARM_THUMB_SHIFT
:
29586 /* 5bit shift value (0..32). LSL cannot take 32. */
29587 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
29588 temp
= newval
& 0xf800;
29589 if (value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
29590 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29591 _("invalid shift value: %ld"), (long) value
);
29592 /* Shifts of zero must be encoded as LSL. */
29594 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
29595 /* Shifts of 32 are encoded as zero. */
29596 else if (value
== 32)
29598 newval
|= value
<< 6;
29599 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
29602 case BFD_RELOC_VTABLE_INHERIT
:
29603 case BFD_RELOC_VTABLE_ENTRY
:
29607 case BFD_RELOC_ARM_MOVW
:
29608 case BFD_RELOC_ARM_MOVT
:
29609 case BFD_RELOC_ARM_THUMB_MOVW
:
29610 case BFD_RELOC_ARM_THUMB_MOVT
:
29611 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29613 /* REL format relocations are limited to a 16-bit addend. */
29614 if (!fixP
->fx_done
)
29616 if (value
+ 0x8000 > 0x7fff + 0x8000)
29617 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29618 _("offset out of range"));
29620 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
29621 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
29626 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
29627 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
29629 newval
= get_thumb32_insn (buf
);
29630 newval
&= 0xfbf08f00;
29631 newval
|= (value
& 0xf000) << 4;
29632 newval
|= (value
& 0x0800) << 15;
29633 newval
|= (value
& 0x0700) << 4;
29634 newval
|= (value
& 0x00ff);
29635 put_thumb32_insn (buf
, newval
);
29639 newval
= md_chars_to_number (buf
, 4);
29640 newval
&= 0xfff0f000;
29641 newval
|= value
& 0x0fff;
29642 newval
|= (value
& 0xf000) << 4;
29643 md_number_to_chars (buf
, newval
, 4);
29648 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
29649 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
29650 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
29651 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
29652 gas_assert (!fixP
->fx_done
);
29656 bfd_vma encoded_addend
= value
;
29658 /* Check that addend can be encoded in instruction. */
29659 if (!seg
->use_rela_p
&& value
> 255)
29660 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29661 _("the offset 0x%08lX is not representable"),
29662 (unsigned long) encoded_addend
);
29664 /* Extract the instruction. */
29665 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
29666 is_mov
= (insn
& 0xf800) == 0x2000;
29671 if (!seg
->use_rela_p
)
29672 insn
|= encoded_addend
;
29678 /* Extract the instruction. */
29679 /* Encoding is the following
29684 /* The following conditions must be true :
29689 rd
= (insn
>> 4) & 0xf;
29691 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
29692 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29693 _("Unable to process relocation for thumb opcode: %lx"),
29694 (unsigned long) insn
);
29696 /* Encode as ADD immediate8 thumb 1 code. */
29697 insn
= 0x3000 | (rd
<< 8);
29699 /* Place the encoded addend into the first 8 bits of the
29701 if (!seg
->use_rela_p
)
29702 insn
|= encoded_addend
;
29705 /* Update the instruction. */
29706 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
29710 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
29711 case BFD_RELOC_ARM_ALU_PC_G0
:
29712 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
29713 case BFD_RELOC_ARM_ALU_PC_G1
:
29714 case BFD_RELOC_ARM_ALU_PC_G2
:
29715 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
29716 case BFD_RELOC_ARM_ALU_SB_G0
:
29717 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
29718 case BFD_RELOC_ARM_ALU_SB_G1
:
29719 case BFD_RELOC_ARM_ALU_SB_G2
:
29720 gas_assert (!fixP
->fx_done
);
29721 if (!seg
->use_rela_p
)
29724 bfd_vma encoded_addend
;
29725 bfd_vma addend_abs
= llabs ((offsetT
) value
);
29727 /* Check that the absolute value of the addend can be
29728 expressed as an 8-bit constant plus a rotation. */
29729 encoded_addend
= encode_arm_immediate (addend_abs
);
29730 if (encoded_addend
== (unsigned int) FAIL
)
29731 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29732 _("the offset 0x%08lX is not representable"),
29733 (unsigned long) addend_abs
);
29735 /* Extract the instruction. */
29736 insn
= md_chars_to_number (buf
, INSN_SIZE
);
29738 /* If the addend is positive, use an ADD instruction.
29739 Otherwise use a SUB. Take care not to destroy the S bit. */
29740 insn
&= 0xff1fffff;
29741 if ((offsetT
) value
< 0)
29746 /* Place the encoded addend into the first 12 bits of the
29748 insn
&= 0xfffff000;
29749 insn
|= encoded_addend
;
29751 /* Update the instruction. */
29752 md_number_to_chars (buf
, insn
, INSN_SIZE
);
29756 case BFD_RELOC_ARM_LDR_PC_G0
:
29757 case BFD_RELOC_ARM_LDR_PC_G1
:
29758 case BFD_RELOC_ARM_LDR_PC_G2
:
29759 case BFD_RELOC_ARM_LDR_SB_G0
:
29760 case BFD_RELOC_ARM_LDR_SB_G1
:
29761 case BFD_RELOC_ARM_LDR_SB_G2
:
29762 gas_assert (!fixP
->fx_done
);
29763 if (!seg
->use_rela_p
)
29766 bfd_vma addend_abs
= llabs ((offsetT
) value
);
29768 /* Check that the absolute value of the addend can be
29769 encoded in 12 bits. */
29770 if (addend_abs
>= 0x1000)
29771 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29772 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
29773 (unsigned long) addend_abs
);
29775 /* Extract the instruction. */
29776 insn
= md_chars_to_number (buf
, INSN_SIZE
);
29778 /* If the addend is negative, clear bit 23 of the instruction.
29779 Otherwise set it. */
29780 if ((offsetT
) value
< 0)
29781 insn
&= ~(1 << 23);
29785 /* Place the absolute value of the addend into the first 12 bits
29786 of the instruction. */
29787 insn
&= 0xfffff000;
29788 insn
|= addend_abs
;
29790 /* Update the instruction. */
29791 md_number_to_chars (buf
, insn
, INSN_SIZE
);
29795 case BFD_RELOC_ARM_LDRS_PC_G0
:
29796 case BFD_RELOC_ARM_LDRS_PC_G1
:
29797 case BFD_RELOC_ARM_LDRS_PC_G2
:
29798 case BFD_RELOC_ARM_LDRS_SB_G0
:
29799 case BFD_RELOC_ARM_LDRS_SB_G1
:
29800 case BFD_RELOC_ARM_LDRS_SB_G2
:
29801 gas_assert (!fixP
->fx_done
);
29802 if (!seg
->use_rela_p
)
29805 bfd_vma addend_abs
= llabs ((offsetT
) value
);
29807 /* Check that the absolute value of the addend can be
29808 encoded in 8 bits. */
29809 if (addend_abs
>= 0x100)
29810 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29811 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
29812 (unsigned long) addend_abs
);
29814 /* Extract the instruction. */
29815 insn
= md_chars_to_number (buf
, INSN_SIZE
);
29817 /* If the addend is negative, clear bit 23 of the instruction.
29818 Otherwise set it. */
29819 if ((offsetT
) value
< 0)
29820 insn
&= ~(1 << 23);
29824 /* Place the first four bits of the absolute value of the addend
29825 into the first 4 bits of the instruction, and the remaining
29826 four into bits 8 .. 11. */
29827 insn
&= 0xfffff0f0;
29828 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
29830 /* Update the instruction. */
29831 md_number_to_chars (buf
, insn
, INSN_SIZE
);
29835 case BFD_RELOC_ARM_LDC_PC_G0
:
29836 case BFD_RELOC_ARM_LDC_PC_G1
:
29837 case BFD_RELOC_ARM_LDC_PC_G2
:
29838 case BFD_RELOC_ARM_LDC_SB_G0
:
29839 case BFD_RELOC_ARM_LDC_SB_G1
:
29840 case BFD_RELOC_ARM_LDC_SB_G2
:
29841 gas_assert (!fixP
->fx_done
);
29842 if (!seg
->use_rela_p
)
29845 bfd_vma addend_abs
= llabs ((offsetT
) value
);
29847 /* Check that the absolute value of the addend is a multiple of
29848 four and, when divided by four, fits in 8 bits. */
29849 if (addend_abs
& 0x3)
29850 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29851 _("bad offset 0x%08lX (must be word-aligned)"),
29852 (unsigned long) addend_abs
);
29854 if ((addend_abs
>> 2) > 0xff)
29855 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29856 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
29857 (unsigned long) addend_abs
);
29859 /* Extract the instruction. */
29860 insn
= md_chars_to_number (buf
, INSN_SIZE
);
29862 /* If the addend is negative, clear bit 23 of the instruction.
29863 Otherwise set it. */
29864 if ((offsetT
) value
< 0)
29865 insn
&= ~(1 << 23);
29869 /* Place the addend (divided by four) into the first eight
29870 bits of the instruction. */
29871 insn
&= 0xfffffff0;
29872 insn
|= addend_abs
>> 2;
29874 /* Update the instruction. */
29875 md_number_to_chars (buf
, insn
, INSN_SIZE
);
29879 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
29881 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
29882 && !S_FORCE_RELOC (fixP
->fx_addsy
, true)
29883 && ARM_IS_FUNC (fixP
->fx_addsy
)
29884 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
29886 /* Force a relocation for a branch 5 bits wide. */
29889 if (v8_1_branch_value_check (value
, 5, false) == FAIL
)
29890 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29893 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29895 addressT boff
= value
>> 1;
29897 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
29898 newval
|= (boff
<< 7);
29899 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
29903 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
29905 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
29906 && !S_FORCE_RELOC (fixP
->fx_addsy
, true)
29907 && ARM_IS_FUNC (fixP
->fx_addsy
)
29908 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
29912 if ((value
& ~0x7f) && ((value
& ~0x3f) != (valueT
) ~0x3f))
29913 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29914 _("branch out of range"));
29916 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29918 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
29920 addressT boff
= ((newval
& 0x0780) >> 7) << 1;
29921 addressT diff
= value
- boff
;
29925 newval
|= 1 << 1; /* T bit. */
29927 else if (diff
!= 2)
29929 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29930 _("out of range label-relative fixup value"));
29932 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
29936 case BFD_RELOC_ARM_THUMB_BF17
:
29938 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
29939 && !S_FORCE_RELOC (fixP
->fx_addsy
, true)
29940 && ARM_IS_FUNC (fixP
->fx_addsy
)
29941 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
29943 /* Force a relocation for a branch 17 bits wide. */
29947 if (v8_1_branch_value_check (value
, 17, true) == FAIL
)
29948 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29951 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29954 addressT immA
, immB
, immC
;
29956 immA
= (value
& 0x0001f000) >> 12;
29957 immB
= (value
& 0x00000ffc) >> 2;
29958 immC
= (value
& 0x00000002) >> 1;
29960 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
29961 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
29963 newval2
|= (immC
<< 11) | (immB
<< 1);
29964 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
29965 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
29969 case BFD_RELOC_ARM_THUMB_BF19
:
29971 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
29972 && !S_FORCE_RELOC (fixP
->fx_addsy
, true)
29973 && ARM_IS_FUNC (fixP
->fx_addsy
)
29974 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
29976 /* Force a relocation for a branch 19 bits wide. */
29980 if (v8_1_branch_value_check (value
, 19, true) == FAIL
)
29981 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
29984 if (fixP
->fx_done
|| !seg
->use_rela_p
)
29987 addressT immA
, immB
, immC
;
29989 immA
= (value
& 0x0007f000) >> 12;
29990 immB
= (value
& 0x00000ffc) >> 2;
29991 immC
= (value
& 0x00000002) >> 1;
29993 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
29994 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
29996 newval2
|= (immC
<< 11) | (immB
<< 1);
29997 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
29998 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
30002 case BFD_RELOC_ARM_THUMB_BF13
:
30004 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
30005 && !S_FORCE_RELOC (fixP
->fx_addsy
, true)
30006 && ARM_IS_FUNC (fixP
->fx_addsy
)
30007 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
30009 /* Force a relocation for a branch 13 bits wide. */
30013 if (v8_1_branch_value_check (value
, 13, true) == FAIL
)
30014 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
30017 if (fixP
->fx_done
|| !seg
->use_rela_p
)
30020 addressT immA
, immB
, immC
;
30022 immA
= (value
& 0x00001000) >> 12;
30023 immB
= (value
& 0x00000ffc) >> 2;
30024 immC
= (value
& 0x00000002) >> 1;
30026 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
30027 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
30029 newval2
|= (immC
<< 11) | (immB
<< 1);
30030 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
30031 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
30035 case BFD_RELOC_ARM_THUMB_LOOP12
:
30037 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
30038 && !S_FORCE_RELOC (fixP
->fx_addsy
, true)
30039 && ARM_IS_FUNC (fixP
->fx_addsy
)
30040 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
30042 /* Force a relocation for a branch 12 bits wide. */
30046 bfd_vma insn
= get_thumb32_insn (buf
);
30047 /* le lr, <label>, le <label> or letp lr, <label> */
30048 if (((insn
& 0xffffffff) == 0xf00fc001)
30049 || ((insn
& 0xffffffff) == 0xf02fc001)
30050 || ((insn
& 0xffffffff) == 0xf01fc001))
30053 if (v8_1_branch_value_check (value
, 12, false) == FAIL
)
30054 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
30056 if (fixP
->fx_done
|| !seg
->use_rela_p
)
30058 addressT imml
, immh
;
30060 immh
= (value
& 0x00000ffc) >> 2;
30061 imml
= (value
& 0x00000002) >> 1;
30063 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
30064 newval
|= (imml
<< 11) | (immh
<< 1);
30065 md_number_to_chars (buf
+ THUMB_SIZE
, newval
, THUMB_SIZE
);
30069 case BFD_RELOC_ARM_V4BX
:
30070 /* This will need to go in the object file. */
30074 case BFD_RELOC_UNUSED
:
30076 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
30077 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
30081 /* Translate internal representation of relocation info to BFD target
30085 tc_gen_reloc (asection
*section
, fixS
*fixp
)
30088 bfd_reloc_code_real_type code
;
30090 reloc
= XNEW (arelent
);
30092 reloc
->sym_ptr_ptr
= XNEW (asymbol
*);
30093 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
30094 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
30096 if (fixp
->fx_pcrel
)
30098 if (section
->use_rela_p
)
30099 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
30101 fixp
->fx_offset
= reloc
->address
;
30103 reloc
->addend
= fixp
->fx_offset
;
30105 switch (fixp
->fx_r_type
)
30108 if (fixp
->fx_pcrel
)
30110 code
= BFD_RELOC_8_PCREL
;
30113 /* Fall through. */
30116 if (fixp
->fx_pcrel
)
30118 code
= BFD_RELOC_16_PCREL
;
30121 /* Fall through. */
30124 if (fixp
->fx_pcrel
)
30126 code
= BFD_RELOC_32_PCREL
;
30129 /* Fall through. */
30131 case BFD_RELOC_ARM_MOVW
:
30132 if (fixp
->fx_pcrel
)
30134 code
= BFD_RELOC_ARM_MOVW_PCREL
;
30137 /* Fall through. */
30139 case BFD_RELOC_ARM_MOVT
:
30140 if (fixp
->fx_pcrel
)
30142 code
= BFD_RELOC_ARM_MOVT_PCREL
;
30145 /* Fall through. */
30147 case BFD_RELOC_ARM_THUMB_MOVW
:
30148 if (fixp
->fx_pcrel
)
30150 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
30153 /* Fall through. */
30155 case BFD_RELOC_ARM_THUMB_MOVT
:
30156 if (fixp
->fx_pcrel
)
30158 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
30161 /* Fall through. */
30163 case BFD_RELOC_NONE
:
30164 case BFD_RELOC_ARM_PCREL_BRANCH
:
30165 case BFD_RELOC_ARM_PCREL_BLX
:
30166 case BFD_RELOC_RVA
:
30167 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
30168 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
30169 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
30170 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
30171 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
30172 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
30173 case BFD_RELOC_VTABLE_ENTRY
:
30174 case BFD_RELOC_VTABLE_INHERIT
:
30176 case BFD_RELOC_32_SECREL
:
30178 code
= fixp
->fx_r_type
;
30181 case BFD_RELOC_THUMB_PCREL_BLX
:
30183 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
30184 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
30187 code
= BFD_RELOC_THUMB_PCREL_BLX
;
30190 case BFD_RELOC_ARM_LITERAL
:
30191 case BFD_RELOC_ARM_HWLITERAL
:
30192 /* If this is called then the a literal has
30193 been referenced across a section boundary. */
30194 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
30195 _("literal referenced across section boundary"));
30199 case BFD_RELOC_ARM_TLS_CALL
:
30200 case BFD_RELOC_ARM_THM_TLS_CALL
:
30201 case BFD_RELOC_ARM_TLS_DESCSEQ
:
30202 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
30203 case BFD_RELOC_ARM_GOT32
:
30204 case BFD_RELOC_ARM_GOTOFF
:
30205 case BFD_RELOC_ARM_GOT_PREL
:
30206 case BFD_RELOC_ARM_PLT32
:
30207 case BFD_RELOC_ARM_TARGET1
:
30208 case BFD_RELOC_ARM_ROSEGREL32
:
30209 case BFD_RELOC_ARM_SBREL32
:
30210 case BFD_RELOC_ARM_PREL31
:
30211 case BFD_RELOC_ARM_TARGET2
:
30212 case BFD_RELOC_ARM_TLS_LDO32
:
30213 case BFD_RELOC_ARM_PCREL_CALL
:
30214 case BFD_RELOC_ARM_PCREL_JUMP
:
30215 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
30216 case BFD_RELOC_ARM_ALU_PC_G0
:
30217 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
30218 case BFD_RELOC_ARM_ALU_PC_G1
:
30219 case BFD_RELOC_ARM_ALU_PC_G2
:
30220 case BFD_RELOC_ARM_LDR_PC_G0
:
30221 case BFD_RELOC_ARM_LDR_PC_G1
:
30222 case BFD_RELOC_ARM_LDR_PC_G2
:
30223 case BFD_RELOC_ARM_LDRS_PC_G0
:
30224 case BFD_RELOC_ARM_LDRS_PC_G1
:
30225 case BFD_RELOC_ARM_LDRS_PC_G2
:
30226 case BFD_RELOC_ARM_LDC_PC_G0
:
30227 case BFD_RELOC_ARM_LDC_PC_G1
:
30228 case BFD_RELOC_ARM_LDC_PC_G2
:
30229 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
30230 case BFD_RELOC_ARM_ALU_SB_G0
:
30231 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
30232 case BFD_RELOC_ARM_ALU_SB_G1
:
30233 case BFD_RELOC_ARM_ALU_SB_G2
:
30234 case BFD_RELOC_ARM_LDR_SB_G0
:
30235 case BFD_RELOC_ARM_LDR_SB_G1
:
30236 case BFD_RELOC_ARM_LDR_SB_G2
:
30237 case BFD_RELOC_ARM_LDRS_SB_G0
:
30238 case BFD_RELOC_ARM_LDRS_SB_G1
:
30239 case BFD_RELOC_ARM_LDRS_SB_G2
:
30240 case BFD_RELOC_ARM_LDC_SB_G0
:
30241 case BFD_RELOC_ARM_LDC_SB_G1
:
30242 case BFD_RELOC_ARM_LDC_SB_G2
:
30243 case BFD_RELOC_ARM_V4BX
:
30244 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
30245 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
30246 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
30247 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
30248 case BFD_RELOC_ARM_GOTFUNCDESC
:
30249 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
30250 case BFD_RELOC_ARM_FUNCDESC
:
30251 case BFD_RELOC_ARM_THUMB_BF17
:
30252 case BFD_RELOC_ARM_THUMB_BF19
:
30253 case BFD_RELOC_ARM_THUMB_BF13
:
30254 code
= fixp
->fx_r_type
;
30257 case BFD_RELOC_ARM_TLS_GOTDESC
:
30258 case BFD_RELOC_ARM_TLS_GD32
:
30259 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
30260 case BFD_RELOC_ARM_TLS_LE32
:
30261 case BFD_RELOC_ARM_TLS_IE32
:
30262 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
30263 case BFD_RELOC_ARM_TLS_LDM32
:
30264 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
30265 /* BFD will include the symbol's address in the addend.
30266 But we don't want that, so subtract it out again here. */
30267 if (!S_IS_COMMON (fixp
->fx_addsy
))
30268 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
30269 code
= fixp
->fx_r_type
;
30273 case BFD_RELOC_ARM_IMMEDIATE
:
30274 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
30275 _("internal relocation (type: IMMEDIATE) not fixed up"));
30278 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
30279 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
30280 _("ADRL used for a symbol not defined in the same file"));
30283 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
30284 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
30285 case BFD_RELOC_ARM_THUMB_LOOP12
:
30286 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
30287 _("%s used for a symbol not defined in the same file"),
30288 bfd_get_reloc_code_name (fixp
->fx_r_type
));
30291 case BFD_RELOC_ARM_OFFSET_IMM
:
30292 if (section
->use_rela_p
)
30294 code
= fixp
->fx_r_type
;
30298 if (fixp
->fx_addsy
!= NULL
30299 && !S_IS_DEFINED (fixp
->fx_addsy
)
30300 && S_IS_LOCAL (fixp
->fx_addsy
))
30302 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
30303 _("undefined local label `%s'"),
30304 S_GET_NAME (fixp
->fx_addsy
));
30308 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
30309 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
30316 switch (fixp
->fx_r_type
)
30318 case BFD_RELOC_NONE
: type
= "NONE"; break;
30319 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
30320 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
30321 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
30322 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
30323 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
30324 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
30325 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
30326 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
30327 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
30328 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
30329 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
30330 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
30331 default: type
= _("<unknown>"); break;
30333 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
30334 _("cannot represent %s relocation in this object file format"),
30341 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
30343 && fixp
->fx_addsy
== GOT_symbol
)
30345 code
= BFD_RELOC_ARM_GOTPC
;
30346 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
30350 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
30352 if (reloc
->howto
== NULL
)
30354 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
30355 _("cannot represent %s relocation in this object file format"),
30356 bfd_get_reloc_code_name (code
));
30360 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
30361 vtable entry to be used in the relocation's section offset. */
30362 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
30363 reloc
->address
= fixp
->fx_offset
;
30368 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
30371 cons_fix_new_arm (fragS
* frag
,
30375 bfd_reloc_code_real_type reloc
)
30380 FIXME: @@ Should look at CPU word size. */
30384 reloc
= BFD_RELOC_8
;
30387 reloc
= BFD_RELOC_16
;
30391 reloc
= BFD_RELOC_32
;
30394 reloc
= BFD_RELOC_64
;
30399 if (exp
->X_op
== O_secrel
)
30401 exp
->X_op
= O_symbol
;
30402 reloc
= BFD_RELOC_32_SECREL
;
30406 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
30409 #if defined (OBJ_COFF)
30411 arm_validate_fix (fixS
* fixP
)
30413 /* If the destination of the branch is a defined symbol which does not have
30414 the THUMB_FUNC attribute, then we must be calling a function which has
30415 the (interfacearm) attribute. We look for the Thumb entry point to that
30416 function and change the branch to refer to that function instead. */
30417 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
30418 && fixP
->fx_addsy
!= NULL
30419 && S_IS_DEFINED (fixP
->fx_addsy
)
30420 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
30422 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
30429 arm_force_relocation (struct fix
* fixp
)
30431 #if defined (OBJ_COFF) && defined (TE_PE)
30432 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
30436 /* In case we have a call or a branch to a function in ARM ISA mode from
30437 a thumb function or vice-versa force the relocation. These relocations
30438 are cleared off for some cores that might have blx and simple transformations
30442 switch (fixp
->fx_r_type
)
30444 case BFD_RELOC_ARM_PCREL_JUMP
:
30445 case BFD_RELOC_ARM_PCREL_CALL
:
30446 case BFD_RELOC_THUMB_PCREL_BLX
:
30447 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
30451 case BFD_RELOC_ARM_PCREL_BLX
:
30452 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
30453 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
30454 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
30455 if (ARM_IS_FUNC (fixp
->fx_addsy
))
30464 /* Resolve these relocations even if the symbol is extern or weak.
30465 Technically this is probably wrong due to symbol preemption.
30466 In practice these relocations do not have enough range to be useful
30467 at dynamic link time, and some code (e.g. in the Linux kernel)
30468 expects these references to be resolved. */
30469 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
30470 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
30471 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
30472 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
30473 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
30474 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
30475 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
30476 || fixp
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH12
30477 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
30478 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
30479 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
30480 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
30481 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
30482 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
30483 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
30486 /* Always leave these relocations for the linker. */
30487 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
30488 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
30489 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
30492 /* Always generate relocations against function symbols. */
30493 if (fixp
->fx_r_type
== BFD_RELOC_32
30495 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
30498 return generic_force_reloc (fixp
);
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not:  Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

/* Return TRUE if the fixup may be adjusted to be section-relative,
   FALSE if the symbol must be preserved in the relocation.  */

bool
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    return 1;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return false;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return false;

  /* We need the symbol name for the VTABLE entries.  */
  if (fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return false;

  /* Don't allow symbols to be discarded on GOT related relocs.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return false;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return false;

  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
    return false;

  /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
     offsets, so keep these symbols.  */
  if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
      && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
    return false;

  return true;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
30583 elf32_arm_target_format (void)
30585 #if defined (TE_VXWORKS)
30586 return (target_big_endian
30587 ? "elf32-bigarm-vxworks"
30588 : "elf32-littlearm-vxworks");
30589 #elif defined (TE_NACL)
30590 return (target_big_endian
30591 ? "elf32-bigarm-nacl"
30592 : "elf32-littlearm-nacl");
30596 if (target_big_endian
)
30597 return "elf32-bigarm-fdpic";
30599 return "elf32-littlearm-fdpic";
30603 if (target_big_endian
)
30604 return "elf32-bigarm";
30606 return "elf32-littlearm";
30612 armelf_frob_symbol (symbolS
* symp
,
30615 elf_frob_symbol (symp
, puntp
);
30619 /* MD interface: Finalization. */
30624 literal_pool
* pool
;
30626 /* Ensure that all the predication blocks are properly closed. */
30627 check_pred_blocks_finished ();
30629 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
30631 /* Put it at the end of the relevant section. */
30632 subseg_set (pool
->section
, pool
->sub_section
);
30634 arm_elf_change_section ();
30641 /* Remove any excess mapping symbols generated for alignment frags in
30642 SEC. We may have created a mapping symbol before a zero byte
30643 alignment; remove it if there's a mapping symbol after the
30646 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
30647 void *dummy ATTRIBUTE_UNUSED
)
30649 segment_info_type
*seginfo
= seg_info (sec
);
30652 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
30655 for (fragp
= seginfo
->frchainP
->frch_root
;
30657 fragp
= fragp
->fr_next
)
30659 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
30660 fragS
*next
= fragp
->fr_next
;
30662 /* Variable-sized frags have been converted to fixed size by
30663 this point. But if this was variable-sized to start with,
30664 there will be a fixed-size frag after it. So don't handle
30666 if (sym
== NULL
|| next
== NULL
)
30669 if (S_GET_VALUE (sym
) < next
->fr_address
)
30670 /* Not at the end of this frag. */
30672 know (S_GET_VALUE (sym
) == next
->fr_address
);
30676 if (next
->tc_frag_data
.first_map
!= NULL
)
30678 /* Next frag starts with a mapping symbol. Discard this
30680 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
30684 if (next
->fr_next
== NULL
)
30686 /* This mapping symbol is at the end of the section. Discard
30688 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
30689 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
30693 /* As long as we have empty frags without any mapping symbols,
30695 /* If the next frag is non-empty and does not start with a
30696 mapping symbol, then this mapping symbol is required. */
30697 if (next
->fr_address
!= next
->fr_next
->fr_address
)
30700 next
= next
->fr_next
;
30702 while (next
!= NULL
);
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   normal ARM symbols: COFF via storage classes, ELF via st_info /
   branch-type annotations.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
30789 /* MD interface: Initialization. */
30792 set_constant_flonums (void)
30796 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
30797 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
30801 /* Auto-select Thumb mode if it's the only available instruction set for the
30802 given architecture. */
30805 autoselect_thumb_from_cpu_variant (void)
30807 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
30808 opcode_select (16);
30817 arm_ops_hsh
= str_htab_create ();
30818 arm_cond_hsh
= str_htab_create ();
30819 arm_vcond_hsh
= str_htab_create ();
30820 arm_shift_hsh
= str_htab_create ();
30821 arm_psr_hsh
= str_htab_create ();
30822 arm_v7m_psr_hsh
= str_htab_create ();
30823 arm_reg_hsh
= str_htab_create ();
30824 arm_reloc_hsh
= str_htab_create ();
30825 arm_barrier_opt_hsh
= str_htab_create ();
30827 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
30828 if (str_hash_find (arm_ops_hsh
, insns
[i
].template_name
) == NULL
)
30829 str_hash_insert (arm_ops_hsh
, insns
[i
].template_name
, insns
+ i
, 0);
30830 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
30831 str_hash_insert (arm_cond_hsh
, conds
[i
].template_name
, conds
+ i
, 0);
30832 for (i
= 0; i
< sizeof (vconds
) / sizeof (struct asm_cond
); i
++)
30833 str_hash_insert (arm_vcond_hsh
, vconds
[i
].template_name
, vconds
+ i
, 0);
30834 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
30835 str_hash_insert (arm_shift_hsh
, shift_names
[i
].name
, shift_names
+ i
, 0);
30836 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
30837 str_hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, psrs
+ i
, 0);
30838 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
30839 str_hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
30841 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
30842 str_hash_insert (arm_reg_hsh
, reg_names
[i
].name
, reg_names
+ i
, 0);
30844 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
30846 str_hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
30847 barrier_opt_names
+ i
, 0);
30849 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
30851 struct reloc_entry
* entry
= reloc_names
+ i
;
30853 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
30854 /* This makes encode_branch() use the EABI versions of this relocation. */
30855 entry
->reloc
= BFD_RELOC_UNUSED
;
30857 str_hash_insert (arm_reloc_hsh
, entry
->name
, entry
, 0);
30861 set_constant_flonums ();
30863 /* Set the cpu variant based on the command-line options. We prefer
30864 -mcpu= over -march= if both are set (as for GCC); and we prefer
30865 -mfpu= over any other way of setting the floating point unit.
30866 Use of legacy options with new options are faulted. */
30869 if (mcpu_cpu_opt
|| march_cpu_opt
)
30870 as_bad (_("use of old and new-style options to set CPU type"));
30872 selected_arch
= *legacy_cpu
;
30874 else if (mcpu_cpu_opt
)
30876 selected_arch
= *mcpu_cpu_opt
;
30877 selected_ext
= *mcpu_ext_opt
;
30879 else if (march_cpu_opt
)
30881 selected_arch
= *march_cpu_opt
;
30882 selected_ext
= *march_ext_opt
;
30884 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
30889 as_bad (_("use of old and new-style options to set FPU type"));
30891 selected_fpu
= *legacy_fpu
;
30894 selected_fpu
= *mfpu_opt
;
30897 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
30898 || defined (TE_NetBSD) || defined (TE_VXWORKS))
30899 /* Some environments specify a default FPU. If they don't, infer it
30900 from the processor. */
30902 selected_fpu
= *mcpu_fpu_opt
;
30903 else if (march_fpu_opt
)
30904 selected_fpu
= *march_fpu_opt
;
30906 selected_fpu
= fpu_default
;
30910 if (ARM_FEATURE_ZERO (selected_fpu
))
30912 if (!no_cpu_selected ())
30913 selected_fpu
= fpu_default
;
30915 selected_fpu
= fpu_arch_fpa
;
30919 if (ARM_FEATURE_ZERO (selected_arch
))
30921 selected_arch
= cpu_default
;
30922 selected_cpu
= selected_arch
;
30924 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
30926 /* Autodection of feature mode: allow all features in cpu_variant but leave
30927 selected_cpu unset. It will be set in aeabi_set_public_attributes ()
30928 after all instruction have been processed and we can decide what CPU
30929 should be selected. */
30930 if (ARM_FEATURE_ZERO (selected_arch
))
30931 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
30933 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
30936 autoselect_thumb_from_cpu_variant ();
30938 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
30940 #if defined OBJ_COFF || defined OBJ_ELF
30942 unsigned int flags
= 0;
30944 #if defined OBJ_ELF
30945 flags
= meabi_flags
;
30947 switch (meabi_flags
)
30949 case EF_ARM_EABI_UNKNOWN
:
30951 /* Set the flags in the private structure. */
30952 if (uses_apcs_26
) flags
|= F_APCS26
;
30953 if (support_interwork
) flags
|= F_INTERWORK
;
30954 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
30955 if (pic_code
) flags
|= F_PIC
;
30956 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
30957 flags
|= F_SOFT_FLOAT
;
30959 switch (mfloat_abi_opt
)
30961 case ARM_FLOAT_ABI_SOFT
:
30962 case ARM_FLOAT_ABI_SOFTFP
:
30963 flags
|= F_SOFT_FLOAT
;
30966 case ARM_FLOAT_ABI_HARD
:
30967 if (flags
& F_SOFT_FLOAT
)
30968 as_bad (_("hard-float conflicts with specified fpu"));
30972 /* Using pure-endian doubles (even if soft-float). */
30973 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
30974 flags
|= F_VFP_FLOAT
;
30976 #if defined OBJ_ELF
30977 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
30978 flags
|= EF_ARM_MAVERICK_FLOAT
;
30981 case EF_ARM_EABI_VER4
:
30982 case EF_ARM_EABI_VER5
:
30983 /* No additional flags to set. */
30990 bfd_set_private_flags (stdoutput
, flags
);
30992 /* We have run out flags in the COFF header to encode the
30993 status of ATPCS support, so instead we create a dummy,
30994 empty, debug section called .arm.atpcs. */
30999 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
31003 bfd_set_section_flags (sec
, SEC_READONLY
| SEC_DEBUGGING
);
31004 bfd_set_section_size (sec
, 0);
31005 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
31011 /* Record the CPU type as well. */
31012 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
31013 mach
= bfd_mach_arm_iWMMXt2
;
31014 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
31015 mach
= bfd_mach_arm_iWMMXt
;
31016 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
31017 mach
= bfd_mach_arm_XScale
;
31018 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
31019 mach
= bfd_mach_arm_ep9312
;
31020 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
31021 mach
= bfd_mach_arm_5TE
;
31022 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
31024 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
31025 mach
= bfd_mach_arm_5T
;
31027 mach
= bfd_mach_arm_5
;
31029 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
31031 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
31032 mach
= bfd_mach_arm_4T
;
31034 mach
= bfd_mach_arm_4
;
31036 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
31037 mach
= bfd_mach_arm_3M
;
31038 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
31039 mach
= bfd_mach_arm_3
;
31040 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
31041 mach
= bfd_mach_arm_2a
;
31042 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
31043 mach
= bfd_mach_arm_2
;
31045 mach
= bfd_mach_arm_unknown
;
31047 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
31050 /* Command line processing. */
31053 Invocation line includes a switch not recognized by the base assembler.
31054 See if it's a processor-specific option.
31056 This routine is somewhat complicated by the need for backwards
31057 compatibility (since older releases of gcc can't be changed).
31058 The new options try to make the interface as compatible as
31061 New options (supported) are:
31063 -mcpu=<cpu name> Assemble for selected processor
31064 -march=<architecture name> Assemble for selected architecture
31065 -mfpu=<fpu architecture> Assemble for selected FPU.
31066 -EB/-mbig-endian Big-endian
31067 -EL/-mlittle-endian Little-endian
31068 -k Generate PIC code
31069 -mthumb Start in Thumb mode
31070 -mthumb-interwork Code supports ARM/Thumb interworking
31072 -m[no-]warn-deprecated Warn about deprecated features
31073 -m[no-]warn-syms Warn when symbols match instructions
31075 For now we will also provide support for:
31077 -mapcs-32 32-bit Program counter
31078 -mapcs-26 26-bit Program counter
31080 -mapcs-float Floats passed in FP registers
31080 -mapcs-reentrant Reentrant code
31082 (sometime these will probably be replaced with -mapcs=<list of options>
31083 and -matpcs=<list of options>)
31085 The remaining options are only supported for back-wards compatibility.
31086 Cpu variants, the arm part is optional:
31087 -m[arm]1 Currently not supported.
31088 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
31089 -m[arm]3 Arm 3 processor
31090 -m[arm]6[xx], Arm 6 processors
31091 -m[arm]7[xx][t][[d]m] Arm 7 processors
31092 -m[arm]8[10] Arm 8 processors
31093 -m[arm]9[20][tdmi] Arm 9 processors
31094 -mstrongarm[110[0]] StrongARM processors
31095 -mxscale XScale processors
31096 -m[arm]v[2345[t[e]]] Arm architectures
31097 -mall All (except the ARM1)
31099 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
31100 -mfpe-old (No float load/store multiples)
31101 -mvfpxd VFP Single precision
31103 -mno-fpu Disable all floating point instructions
31105 The following CPU names are recognized:
31106 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
31107 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
31108 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
31109 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
31110 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
31111 arm10t arm10e, arm1020t, arm1020e, arm10200e,
31112 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
31116 const char * md_shortopts
= "m:k";
31118 #ifdef ARM_BI_ENDIAN
31119 #define OPTION_EB (OPTION_MD_BASE + 0)
31120 #define OPTION_EL (OPTION_MD_BASE + 1)
31122 #if TARGET_BYTES_BIG_ENDIAN
31123 #define OPTION_EB (OPTION_MD_BASE + 0)
31125 #define OPTION_EL (OPTION_MD_BASE + 1)
31128 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
31129 #define OPTION_FDPIC (OPTION_MD_BASE + 3)
31131 struct option md_longopts
[] =
31134 {"EB", no_argument
, NULL
, OPTION_EB
},
31137 {"EL", no_argument
, NULL
, OPTION_EL
},
31139 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
31141 {"fdpic", no_argument
, NULL
, OPTION_FDPIC
},
31143 {NULL
, no_argument
, NULL
, 0}
31146 size_t md_longopts_size
= sizeof (md_longopts
);
/* Table entry describing a simple boolean/int command-line option.  */
struct arm_option_table
{
  const char *  option;		/* Option name to match.  */
  const char *  help;		/* Help information.  */
  int *         var;		/* Variable to change.  */
  int	        value;		/* What to change it to.  */
  const char *  deprecated;	/* If non-null, print this message.  */
};
31157 struct arm_option_table arm_opts
[] =
31159 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
31160 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
31161 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
31162 &support_interwork
, 1, NULL
},
31163 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
31164 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
31165 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
31167 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
31168 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
31169 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
31170 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
31173 /* These are recognized by the assembler, but have no affect on code. */
31174 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
31175 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
31177 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
31178 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
31179 &warn_on_deprecated
, 0, NULL
},
31181 {"mwarn-restrict-it", N_("warn about performance deprecated IT instructions"
31182 " in ARMv8-A and ARMv8-R"), &warn_on_restrict_it
, 1, NULL
},
31183 {"mno-warn-restrict-it", NULL
, &warn_on_restrict_it
, 0, NULL
},
31185 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), true, NULL
},
31186 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), false, NULL
},
31187 {NULL
, NULL
, NULL
, 0, NULL
}
31190 struct arm_legacy_option_table
31192 const char * option
; /* Option name to match. */
31193 const arm_feature_set
** var
; /* Variable to change. */
31194 const arm_feature_set value
; /* What to change it to. */
31195 const char * deprecated
; /* If non-null, print this message. */
31198 const struct arm_legacy_option_table arm_legacy_opts
[] =
31200 /* DON'T add any new processors to this list -- we want the whole list
31201 to go away... Add them to the processors table instead. */
31202 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
31203 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
31204 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
31205 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
31206 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
31207 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
31208 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
31209 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
31210 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
31211 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
31212 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
31213 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
31214 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
31215 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
31216 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
31217 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
31218 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
31219 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
31220 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
31221 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
31222 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
31223 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
31224 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
31225 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
31226 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
31227 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
31228 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
31229 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
31230 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
31231 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
31232 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
31233 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
31234 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
31235 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
31236 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
31237 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
31238 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
31239 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
31240 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
31241 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
31242 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
31243 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
31244 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
31245 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
31246 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
31247 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
31248 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
31249 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
31250 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
31251 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
31252 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
31253 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
31254 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
31255 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
31256 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
31257 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
31258 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
31259 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
31260 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
31261 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
31262 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
31263 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
31264 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
31265 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
31266 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
31267 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
31268 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
31269 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
31270 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
31271 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
31272 N_("use -mcpu=strongarm110")},
31273 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
31274 N_("use -mcpu=strongarm1100")},
31275 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
31276 N_("use -mcpu=strongarm1110")},
31277 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
31278 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
31279 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
31281 /* Architecture variants -- don't add any more to this list either. */
31282 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
31283 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
31284 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
31285 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
31286 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
31287 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
31288 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
31289 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
31290 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
31291 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
31292 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
31293 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
31294 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
31295 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
31296 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
31297 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
31298 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
31299 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
31301 /* Floating point variants -- don't add any more to this list either. */
31302 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
31303 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
31304 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
31305 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
31306 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
31308 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
31311 struct arm_cpu_option_table
31315 const arm_feature_set value
;
31316 const arm_feature_set ext
;
31317 /* For some CPUs we assume an FPU unless the user explicitly sets
31319 const arm_feature_set default_fpu
;
31320 /* The canonical name of the CPU, or NULL to use NAME converted to upper
31322 const char * canonical_name
;
31325 /* This list should, at a minimum, contain all the cpu names
31326 recognized by GCC. */
31327 #define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
31329 static const struct arm_cpu_option_table arm_cpus
[] =
31331 ARM_CPU_OPT ("all", NULL
, ARM_ANY
,
31334 ARM_CPU_OPT ("arm1", NULL
, ARM_ARCH_V1
,
31337 ARM_CPU_OPT ("arm2", NULL
, ARM_ARCH_V2
,
31340 ARM_CPU_OPT ("arm250", NULL
, ARM_ARCH_V2S
,
31343 ARM_CPU_OPT ("arm3", NULL
, ARM_ARCH_V2S
,
31346 ARM_CPU_OPT ("arm6", NULL
, ARM_ARCH_V3
,
31349 ARM_CPU_OPT ("arm60", NULL
, ARM_ARCH_V3
,
31352 ARM_CPU_OPT ("arm600", NULL
, ARM_ARCH_V3
,
31355 ARM_CPU_OPT ("arm610", NULL
, ARM_ARCH_V3
,
31358 ARM_CPU_OPT ("arm620", NULL
, ARM_ARCH_V3
,
31361 ARM_CPU_OPT ("arm7", NULL
, ARM_ARCH_V3
,
31364 ARM_CPU_OPT ("arm7m", NULL
, ARM_ARCH_V3M
,
31367 ARM_CPU_OPT ("arm7d", NULL
, ARM_ARCH_V3
,
31370 ARM_CPU_OPT ("arm7dm", NULL
, ARM_ARCH_V3M
,
31373 ARM_CPU_OPT ("arm7di", NULL
, ARM_ARCH_V3
,
31376 ARM_CPU_OPT ("arm7dmi", NULL
, ARM_ARCH_V3M
,
31379 ARM_CPU_OPT ("arm70", NULL
, ARM_ARCH_V3
,
31382 ARM_CPU_OPT ("arm700", NULL
, ARM_ARCH_V3
,
31385 ARM_CPU_OPT ("arm700i", NULL
, ARM_ARCH_V3
,
31388 ARM_CPU_OPT ("arm710", NULL
, ARM_ARCH_V3
,
31391 ARM_CPU_OPT ("arm710t", NULL
, ARM_ARCH_V4T
,
31394 ARM_CPU_OPT ("arm720", NULL
, ARM_ARCH_V3
,
31397 ARM_CPU_OPT ("arm720t", NULL
, ARM_ARCH_V4T
,
31400 ARM_CPU_OPT ("arm740t", NULL
, ARM_ARCH_V4T
,
31403 ARM_CPU_OPT ("arm710c", NULL
, ARM_ARCH_V3
,
31406 ARM_CPU_OPT ("arm7100", NULL
, ARM_ARCH_V3
,
31409 ARM_CPU_OPT ("arm7500", NULL
, ARM_ARCH_V3
,
31412 ARM_CPU_OPT ("arm7500fe", NULL
, ARM_ARCH_V3
,
31415 ARM_CPU_OPT ("arm7t", NULL
, ARM_ARCH_V4T
,
31418 ARM_CPU_OPT ("arm7tdmi", NULL
, ARM_ARCH_V4T
,
31421 ARM_CPU_OPT ("arm7tdmi-s", NULL
, ARM_ARCH_V4T
,
31424 ARM_CPU_OPT ("arm8", NULL
, ARM_ARCH_V4
,
31427 ARM_CPU_OPT ("arm810", NULL
, ARM_ARCH_V4
,
31430 ARM_CPU_OPT ("strongarm", NULL
, ARM_ARCH_V4
,
31433 ARM_CPU_OPT ("strongarm1", NULL
, ARM_ARCH_V4
,
31436 ARM_CPU_OPT ("strongarm110", NULL
, ARM_ARCH_V4
,
31439 ARM_CPU_OPT ("strongarm1100", NULL
, ARM_ARCH_V4
,
31442 ARM_CPU_OPT ("strongarm1110", NULL
, ARM_ARCH_V4
,
31445 ARM_CPU_OPT ("arm9", NULL
, ARM_ARCH_V4T
,
31448 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T
,
31451 ARM_CPU_OPT ("arm920t", NULL
, ARM_ARCH_V4T
,
31454 ARM_CPU_OPT ("arm922t", NULL
, ARM_ARCH_V4T
,
31457 ARM_CPU_OPT ("arm940t", NULL
, ARM_ARCH_V4T
,
31460 ARM_CPU_OPT ("arm9tdmi", NULL
, ARM_ARCH_V4T
,
31463 ARM_CPU_OPT ("fa526", NULL
, ARM_ARCH_V4
,
31466 ARM_CPU_OPT ("fa626", NULL
, ARM_ARCH_V4
,
31470 /* For V5 or later processors we default to using VFP; but the user
31471 should really set the FPU type explicitly. */
31472 ARM_CPU_OPT ("arm9e-r0", NULL
, ARM_ARCH_V5TExP
,
31475 ARM_CPU_OPT ("arm9e", NULL
, ARM_ARCH_V5TE
,
31478 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
31481 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
31484 ARM_CPU_OPT ("arm926ej-s", NULL
, ARM_ARCH_V5TEJ
,
31487 ARM_CPU_OPT ("arm946e-r0", NULL
, ARM_ARCH_V5TExP
,
31490 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE
,
31493 ARM_CPU_OPT ("arm946e-s", NULL
, ARM_ARCH_V5TE
,
31496 ARM_CPU_OPT ("arm966e-r0", NULL
, ARM_ARCH_V5TExP
,
31499 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE
,
31502 ARM_CPU_OPT ("arm966e-s", NULL
, ARM_ARCH_V5TE
,
31505 ARM_CPU_OPT ("arm968e-s", NULL
, ARM_ARCH_V5TE
,
31508 ARM_CPU_OPT ("arm10t", NULL
, ARM_ARCH_V5T
,
31511 ARM_CPU_OPT ("arm10tdmi", NULL
, ARM_ARCH_V5T
,
31514 ARM_CPU_OPT ("arm10e", NULL
, ARM_ARCH_V5TE
,
31517 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE
,
31520 ARM_CPU_OPT ("arm1020t", NULL
, ARM_ARCH_V5T
,
31523 ARM_CPU_OPT ("arm1020e", NULL
, ARM_ARCH_V5TE
,
31526 ARM_CPU_OPT ("arm1022e", NULL
, ARM_ARCH_V5TE
,
31529 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ
,
31532 ARM_CPU_OPT ("arm1026ej-s", NULL
, ARM_ARCH_V5TEJ
,
31535 ARM_CPU_OPT ("fa606te", NULL
, ARM_ARCH_V5TE
,
31538 ARM_CPU_OPT ("fa616te", NULL
, ARM_ARCH_V5TE
,
31541 ARM_CPU_OPT ("fa626te", NULL
, ARM_ARCH_V5TE
,
31544 ARM_CPU_OPT ("fmp626", NULL
, ARM_ARCH_V5TE
,
31547 ARM_CPU_OPT ("fa726te", NULL
, ARM_ARCH_V5TE
,
31550 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6
,
31553 ARM_CPU_OPT ("arm1136j-s", NULL
, ARM_ARCH_V6
,
31556 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6
,
31559 ARM_CPU_OPT ("arm1136jf-s", NULL
, ARM_ARCH_V6
,
31562 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K
,
31565 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K
,
31568 ARM_CPU_OPT ("arm1156t2-s", NULL
, ARM_ARCH_V6T2
,
31571 ARM_CPU_OPT ("arm1156t2f-s", NULL
, ARM_ARCH_V6T2
,
31574 ARM_CPU_OPT ("arm1176jz-s", NULL
, ARM_ARCH_V6KZ
,
31577 ARM_CPU_OPT ("arm1176jzf-s", NULL
, ARM_ARCH_V6KZ
,
31580 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A
,
31581 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
31583 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE
,
31585 FPU_ARCH_NEON_VFP_V4
),
31586 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A
,
31587 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
31588 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
31589 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A
,
31590 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
31591 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
31592 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE
,
31594 FPU_ARCH_NEON_VFP_V4
),
31595 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE
,
31597 FPU_ARCH_NEON_VFP_V4
),
31598 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE
,
31600 FPU_ARCH_NEON_VFP_V4
),
31601 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A
,
31602 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC
),
31603 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
31604 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A
,
31605 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC
),
31606 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
31607 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A
,
31608 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC
),
31609 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
31610 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A
,
31611 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
31612 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
31613 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A
,
31614 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC
),
31615 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
31616 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A
,
31617 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC
),
31618 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
31619 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A
,
31620 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC
),
31621 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
31622 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A
,
31623 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
31624 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
31625 ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A
,
31626 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
31627 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
31628 ARM_CPU_OPT ("cortex-a76ae", "Cortex-A76AE", ARM_ARCH_V8_2A
,
31629 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
31630 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
31631 ARM_CPU_OPT ("cortex-a77", "Cortex-A77", ARM_ARCH_V8_2A
,
31632 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
31633 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
31634 ARM_CPU_OPT ("cortex-a78", "Cortex-A78", ARM_ARCH_V8_2A
,
31635 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
| ARM_EXT2_SB
),
31636 FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
31637 ARM_CPU_OPT ("cortex-a78ae", "Cortex-A78AE", ARM_ARCH_V8_2A
,
31638 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
| ARM_EXT2_SB
),
31639 FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
31640 ARM_CPU_OPT ("cortex-a78c", "Cortex-A78C", ARM_ARCH_V8_2A
,
31641 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
| ARM_EXT2_SB
),
31642 FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
31643 ARM_CPU_OPT ("cortex-a710", "Cortex-A710", ARM_ARCH_V9A
,
31644 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
31647 FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
31648 ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A
,
31649 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
31650 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
31651 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R
,
31654 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R
,
31656 FPU_ARCH_VFP_V3D16
),
31657 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R
,
31658 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
31660 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R
,
31661 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
31662 FPU_ARCH_VFP_V3D16
),
31663 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R
,
31664 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
31665 FPU_ARCH_VFP_V3D16
),
31666 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R
,
31667 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC
),
31668 FPU_ARCH_NEON_VFP_ARMV8
),
31669 ARM_CPU_OPT ("cortex-r52plus", "Cortex-R52+", ARM_ARCH_V8R
,
31670 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC
),
31671 FPU_ARCH_NEON_VFP_ARMV8
),
31672 ARM_CPU_OPT ("cortex-m35p", "Cortex-M35P", ARM_ARCH_V8M_MAIN
,
31673 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
31675 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN
,
31676 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
31678 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE
,
31681 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM
,
31684 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM
,
31687 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M
,
31690 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM
,
31693 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM
,
31696 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM
,
31699 ARM_CPU_OPT ("cortex-x1", "Cortex-X1", ARM_ARCH_V8_2A
,
31700 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
| ARM_EXT2_SB
),
31701 FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
31702 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A
,
31703 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC
),
31704 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
31705 ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A
,
31706 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
31707 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
31708 ARM_CPU_OPT ("neoverse-n2", "Neoverse N2", ARM_ARCH_V8_5A
,
31709 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
31712 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
),
31713 ARM_CPU_OPT ("neoverse-v1", "Neoverse V1", ARM_ARCH_V8_4A
,
31714 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
31717 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
),
31718 /* ??? XSCALE is really an architecture. */
31719 ARM_CPU_OPT ("xscale", NULL
, ARM_ARCH_XSCALE
,
31723 /* ??? iwmmxt is not a processor. */
31724 ARM_CPU_OPT ("iwmmxt", NULL
, ARM_ARCH_IWMMXT
,
31727 ARM_CPU_OPT ("iwmmxt2", NULL
, ARM_ARCH_IWMMXT2
,
31730 ARM_CPU_OPT ("i80200", NULL
, ARM_ARCH_XSCALE
,
31735 ARM_CPU_OPT ("ep9312", "ARM920T",
31736 ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
31737 ARM_ARCH_NONE
, FPU_ARCH_MAVERICK
),
31739 /* Marvell processors. */
31740 ARM_CPU_OPT ("marvell-pj4", NULL
, ARM_ARCH_V7A
,
31741 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
31742 FPU_ARCH_VFP_V3D16
),
31743 ARM_CPU_OPT ("marvell-whitney", NULL
, ARM_ARCH_V7A
,
31744 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
31745 FPU_ARCH_NEON_VFP_V4
),
31747 /* APM X-Gene family. */
31748 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A
,
31750 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
31751 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A
,
31752 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC
),
31753 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
31755 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
31759 struct arm_ext_table
31763 const arm_feature_set merge
;
31764 const arm_feature_set clear
;
31767 struct arm_arch_option_table
31771 const arm_feature_set value
;
31772 const arm_feature_set default_fpu
;
31773 const struct arm_ext_table
* ext_table
;
/* Used to add support for +E and +noE extension.  */
#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
/* Used to add support for a +E extension.  */
#define ARM_ADD(E, M) { E, sizeof (E) - 1, M, ARM_ARCH_NONE }
/* Used to add support for a +noE extension.  */
#define ARM_REMOVE(E, C) { E, sizeof (E) - 1, ARM_ARCH_NONE, C }

/* Every floating-point feature bit, used by ARM_REMOVE ("fp", ALL_FP)
   entries to strip all FP support (endianness bits excluded).  */
#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
			    ~0 & ~FPU_ENDIAN_PURE)
31786 static const struct arm_ext_table armv5te_ext_table
[] =
31788 ARM_EXT ("fp", FPU_ARCH_VFP_V2
, ALL_FP
),
31789 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
31792 static const struct arm_ext_table armv7_ext_table
[] =
31794 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
31795 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
31798 static const struct arm_ext_table armv7ve_ext_table
[] =
31800 ARM_EXT ("fp", FPU_ARCH_VFP_V4D16
, ALL_FP
),
31801 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
),
31802 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3
),
31803 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
31804 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
),
31805 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16
), /* Alias for +fp. */
31806 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4
),
31808 ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4
,
31809 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_NEON_EXT_FMA
)),
31811 /* Aliases for +simd. */
31812 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4
),
31814 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
31815 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
31816 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16
),
31818 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
31821 static const struct arm_ext_table armv7a_ext_table
[] =
31823 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
31824 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
31825 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3
),
31826 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
31827 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
),
31828 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16
),
31829 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4
),
31831 ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1
,
31832 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_NEON_EXT_FMA
)),
31834 /* Aliases for +simd. */
31835 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
31836 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
31838 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16
),
31839 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4
),
31841 ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
)),
31842 ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
)),
31843 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
31846 static const struct arm_ext_table armv7r_ext_table
[] =
31848 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD
),
31849 ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD
), /* Alias for +fp.sp. */
31850 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
31851 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
31852 ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
),
31853 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
31854 ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
31855 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
)),
31856 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
31859 static const struct arm_ext_table armv7em_ext_table
[] =
31861 ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16
, ALL_FP
),
31862 /* Alias for +fp, used to be known as fpv4-sp-d16. */
31863 ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
),
31864 ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16
),
31865 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
31866 ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16
),
31867 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
31870 static const struct arm_ext_table armv8a_ext_table
[] =
31872 ARM_ADD ("crc", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC
)),
31873 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
31874 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
31875 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
31877 /* Armv8-a does not allow an FP implementation without SIMD, so the user
31878 should use the +simd option to turn on FP. */
31879 ARM_REMOVE ("fp", ALL_FP
),
31880 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
31881 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
31882 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
31886 static const struct arm_ext_table armv81a_ext_table
[] =
31888 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
31889 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
31890 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
31892 /* Armv8-a does not allow an FP implementation without SIMD, so the user
31893 should use the +simd option to turn on FP. */
31894 ARM_REMOVE ("fp", ALL_FP
),
31895 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
31896 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
31897 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
31900 static const struct arm_ext_table armv82a_ext_table
[] =
31902 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
31903 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16
),
31904 ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML
),
31905 ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16
)),
31906 ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM
)),
31907 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
31908 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
31909 ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
31911 /* Armv8-a does not allow an FP implementation without SIMD, so the user
31912 should use the +simd option to turn on FP. */
31913 ARM_REMOVE ("fp", ALL_FP
),
31914 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
31915 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
31916 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
31919 static const struct arm_ext_table armv84a_ext_table
[] =
31921 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
31922 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
31923 ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16
)),
31924 ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM
)),
31925 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
31926 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
31928 /* Armv8-a does not allow an FP implementation without SIMD, so the user
31929 should use the +simd option to turn on FP. */
31930 ARM_REMOVE ("fp", ALL_FP
),
31931 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
31932 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
31933 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
31936 static const struct arm_ext_table armv85a_ext_table
[] =
31938 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
31939 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
31940 ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16
)),
31941 ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM
)),
31942 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
31943 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
31945 /* Armv8-a does not allow an FP implementation without SIMD, so the user
31946 should use the +simd option to turn on FP. */
31947 ARM_REMOVE ("fp", ALL_FP
),
31948 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
31951 static const struct arm_ext_table armv86a_ext_table
[] =
31953 ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM
)),
31954 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
31957 #define armv87a_ext_table armv86a_ext_table
31958 #define armv88a_ext_table armv87a_ext_table
31960 static const struct arm_ext_table armv9a_ext_table
[] =
31962 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
31963 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
31964 ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16
)),
31965 ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM
)),
31966 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
31967 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
31969 /* Armv9-a does not allow an FP implementation without SIMD, so the user
31970 should use the +simd option to turn on FP. */
31971 ARM_REMOVE ("fp", ALL_FP
),
31972 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
31975 #define armv91a_ext_table armv86a_ext_table
31976 #define armv92a_ext_table armv91a_ext_table
31977 #define armv93a_ext_table armv92a_ext_table
/* Custom Datapath Extension coprocessor slots 0-7; each +cdecpN enables the
   generic CDE feature plus the bit for that coprocessor.  Spliced into the
   M-profile extension tables below.  */
#define CDE_EXTENSIONS \
  ARM_ADD ("cdecp0", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE0)), \
  ARM_ADD ("cdecp1", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE1)), \
  ARM_ADD ("cdecp2", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE2)), \
  ARM_ADD ("cdecp3", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE3)), \
  ARM_ADD ("cdecp4", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE4)), \
  ARM_ADD ("cdecp5", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE5)), \
  ARM_ADD ("cdecp6", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE6)), \
  ARM_ADD ("cdecp7", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE7))
31989 static const struct arm_ext_table armv8m_main_ext_table
[] =
31991 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP
),
31992 ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP
)),
31993 ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16
, ALL_FP
),
31994 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
31996 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
32000 static const struct arm_ext_table armv8_1m_main_ext_table
[] =
32002 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP
),
32003 ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP
)),
32005 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
32006 FPU_VFP_V5_SP_D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
),
32009 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
32010 FPU_VFP_V5D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
32011 ARM_EXT ("mve", ARM_FEATURE (ARM_AEXT_V8M_MAIN_DSP
, ARM_EXT2_MVE
, 0),
32012 ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE
| ARM_EXT2_MVE_FP
)),
32014 ARM_FEATURE (ARM_AEXT_V8M_MAIN_DSP
,
32015 ARM_EXT2_FP16_INST
| ARM_EXT2_MVE
| ARM_EXT2_MVE_FP
,
32016 FPU_VFP_V5_SP_D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
32018 ARM_ADD ("pacbti", ARM_FEATURE_CORE_HIGH_HIGH (ARM_AEXT3_V8_1M_MAIN_PACBTI
)),
32019 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
32022 #undef CDE_EXTENSIONS
32024 static const struct arm_ext_table armv8r_ext_table
[] =
32026 ARM_ADD ("crc", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC
)),
32027 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
32028 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
32029 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
32030 ARM_REMOVE ("fp", ALL_FP
),
32031 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16
),
32032 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
/* As above, but with a per-architecture extension table.  */
#define ARM_ARCH_OPT2(N, V, DF, ext) \
  { N, sizeof (N) - 1, V, DF, ext##_ext_table }
32041 static const struct arm_arch_option_table arm_archs
[] =
32043 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
32044 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
32045 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
32046 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
32047 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
32048 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
32049 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
32050 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
32051 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
32052 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
32053 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
32054 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
32055 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
32056 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
32057 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
, armv5te
),
32058 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
, armv5te
),
32059 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
, armv5te
),
32060 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
32061 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
32062 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
, armv5te
),
32063 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
, armv5te
),
32064 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
32065 kept to preserve existing behaviour. */
32066 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
32067 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
32068 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
, armv5te
),
32069 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
, armv5te
),
32070 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
, armv5te
),
32071 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
32072 kept to preserve existing behaviour. */
32073 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
32074 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
32075 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
32076 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
32077 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
, armv7
),
32078 /* The official spelling of the ARMv7 profile variants is the dashed form.
32079 Accept the non-dashed form for compatibility with old toolchains. */
32080 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
32081 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
, armv7ve
),
32082 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
32083 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
32084 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
32085 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
32086 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
32087 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
, armv7em
),
32088 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
32089 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
,
32091 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN
, FPU_ARCH_VFP
,
32093 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
, armv8a
),
32094 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
, armv81a
),
32095 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
, armv82a
),
32096 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A
, FPU_ARCH_VFP
, armv82a
),
32097 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R
, FPU_ARCH_VFP
, armv8r
),
32098 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A
, FPU_ARCH_VFP
, armv84a
),
32099 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A
, FPU_ARCH_VFP
, armv85a
),
32100 ARM_ARCH_OPT2 ("armv8.6-a", ARM_ARCH_V8_6A
, FPU_ARCH_VFP
, armv86a
),
32101 ARM_ARCH_OPT2 ("armv8.7-a", ARM_ARCH_V8_7A
, FPU_ARCH_VFP
, armv87a
),
32102 ARM_ARCH_OPT2 ("armv8.8-a", ARM_ARCH_V8_8A
, FPU_ARCH_VFP
, armv88a
),
32103 ARM_ARCH_OPT2 ("armv9-a", ARM_ARCH_V9A
, FPU_ARCH_VFP
, armv9a
),
32104 ARM_ARCH_OPT2 ("armv9.1-a", ARM_ARCH_V9_1A
, FPU_ARCH_VFP
, armv91a
),
32105 ARM_ARCH_OPT2 ("armv9.2-a", ARM_ARCH_V9_2A
, FPU_ARCH_VFP
, armv92a
),
32106 ARM_ARCH_OPT2 ("armv9.3-a", ARM_ARCH_V9_2A
, FPU_ARCH_VFP
, armv93a
),
32107 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
32108 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
32109 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
, FPU_ARCH_VFP
),
32110 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
32112 #undef ARM_ARCH_OPT
32114 /* ISA extensions in the co-processor and main instruction set space. */
32116 struct arm_option_extension_value_table
32120 const arm_feature_set merge_value
;
32121 const arm_feature_set clear_value
;
32122 /* List of architectures for which an extension is available. ARM_ARCH_NONE
32123 indicates that an extension is available for all architectures while
32124 ARM_ANY marks an empty entry. */
32125 const arm_feature_set allowed_archs
[2];
32128 /* The following table must be in alphabetical order with a NULL last entry. */
32130 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
32131 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
32133 /* DEPRECATED: Refrain from using this table to add any new extensions, instead
32134 use the context sensitive approach using arm_ext_table's. */
32135 static const struct arm_option_extension_value_table arm_extensions
[] =
32137 ARM_EXT_OPT ("crc", ARM_FEATURE_CORE_HIGH(ARM_EXT2_CRC
),
32138 ARM_FEATURE_CORE_HIGH(ARM_EXT2_CRC
),
32139 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
32140 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
32141 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
32142 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
32143 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
,
32144 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
),
32146 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
32147 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
32148 ARM_FEATURE_CORE (ARM_EXT_V7M
, ARM_EXT2_V8M
)),
32149 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
32150 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
32151 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
32152 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
32154 ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
32155 | ARM_EXT2_FP16_FML
),
32156 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
32157 | ARM_EXT2_FP16_FML
),
32159 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
32160 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
32161 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
32162 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
32163 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
32164 Thumb divide instruction. Due to this having the same name as the
32165 previous entry, this will be ignored when doing command-line parsing and
32166 only considered by build attribute selection code. */
32167 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
32168 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
32169 ARM_FEATURE_CORE_LOW (ARM_EXT_V7
)),
32170 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
32171 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ARCH_NONE
),
32172 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
32173 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ARCH_NONE
),
32174 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
32175 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ARCH_NONE
),
32176 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
32177 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
32178 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
32179 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
32180 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
32181 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
32182 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
32183 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
32184 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
32185 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
32186 ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
32187 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
32189 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
),
32190 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_RAS
, 0),
32191 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
32192 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1
,
32193 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
32194 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
32195 ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
32196 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
32198 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
32199 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
32200 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
),
32201 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
32202 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
32203 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
32204 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
32205 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
32207 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
32208 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
32209 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
32210 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ARCH_NONE
),
32211 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, { ARM_ARCH_NONE
, ARM_ARCH_NONE
} }
32215 /* ISA floating-point and Advanced SIMD extensions. */
32216 struct arm_option_fpu_value_table
32219 const arm_feature_set value
;
32222 /* This list should, at a minimum, contain all the fpu names
32223 recognized by GCC. */
32224 static const struct arm_option_fpu_value_table arm_fpus
[] =
32226 {"softfpa", FPU_NONE
},
32227 {"fpe", FPU_ARCH_FPE
},
32228 {"fpe2", FPU_ARCH_FPE
},
32229 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
32230 {"fpa", FPU_ARCH_FPA
},
32231 {"fpa10", FPU_ARCH_FPA
},
32232 {"fpa11", FPU_ARCH_FPA
},
32233 {"arm7500fe", FPU_ARCH_FPA
},
32234 {"softvfp", FPU_ARCH_VFP
},
32235 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
32236 {"vfp", FPU_ARCH_VFP_V2
},
32237 {"vfp9", FPU_ARCH_VFP_V2
},
32238 {"vfp3", FPU_ARCH_VFP_V3
}, /* Undocumented, use vfpv3. */
32239 {"vfp10", FPU_ARCH_VFP_V2
},
32240 {"vfp10-r0", FPU_ARCH_VFP_V1
},
32241 {"vfpxd", FPU_ARCH_VFP_V1xD
},
32242 {"vfpv2", FPU_ARCH_VFP_V2
},
32243 {"vfpv3", FPU_ARCH_VFP_V3
},
32244 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
32245 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
32246 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
32247 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
32248 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
32249 {"arm1020t", FPU_ARCH_VFP_V1
},
32250 {"arm1020e", FPU_ARCH_VFP_V2
},
32251 {"arm1136jfs", FPU_ARCH_VFP_V2
}, /* Undocumented, use arm1136jf-s. */
32252 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
32253 {"maverick", FPU_ARCH_MAVERICK
},
32254 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
32255 {"neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
32256 {"neon-fp16", FPU_ARCH_NEON_FP16
},
32257 {"vfpv4", FPU_ARCH_VFP_V4
},
32258 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
32259 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
32260 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
32261 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
32262 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
32263 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
32264 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
32265 {"crypto-neon-fp-armv8",
32266 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
32267 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
32268 {"crypto-neon-fp-armv8.1",
32269 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
32270 {NULL
, ARM_ARCH_NONE
}
32273 struct arm_option_value_table
32279 static const struct arm_option_value_table arm_float_abis
[] =
32281 {"hard", ARM_FLOAT_ABI_HARD
},
32282 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
32283 {"soft", ARM_FLOAT_ABI_SOFT
},
32288 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
32289 static const struct arm_option_value_table arm_eabis
[] =
32291 {"gnu", EF_ARM_EABI_UNKNOWN
},
32292 {"4", EF_ARM_EABI_VER4
},
32293 {"5", EF_ARM_EABI_VER5
},
32298 struct arm_long_option_table
32300 const char *option
; /* Substring to match. */
32301 const char *help
; /* Help information. */
32302 bool (*func
) (const char *subopt
); /* Function to decode sub-option. */
32303 const char *deprecated
; /* If non-null, print this message. */
32307 arm_parse_extension (const char *str
, const arm_feature_set
*opt_set
,
32308 arm_feature_set
*ext_set
,
32309 const struct arm_ext_table
*ext_table
)
32311 /* We insist on extensions being specified in alphabetical order, and with
32312 extensions being added before being removed. We achieve this by having
32313 the global ARM_EXTENSIONS table in alphabetical order, and using the
32314 ADDING_VALUE variable to indicate whether we are adding an extension (1)
32315 or removing it (0) and only allowing it to change in the order
32317 const struct arm_option_extension_value_table
* opt
= NULL
;
32318 const arm_feature_set arm_any
= ARM_ANY
;
32319 int adding_value
= -1;
32321 while (str
!= NULL
&& *str
!= 0)
32328 as_bad (_("invalid architectural extension"));
32333 ext
= strchr (str
, '+');
32338 len
= strlen (str
);
32340 if (len
>= 2 && startswith (str
, "no"))
32342 if (adding_value
!= 0)
32345 opt
= arm_extensions
;
32353 if (adding_value
== -1)
32356 opt
= arm_extensions
;
32358 else if (adding_value
!= 1)
32360 as_bad (_("must specify extensions to add before specifying "
32361 "those to remove"));
32368 as_bad (_("missing architectural extension"));
32372 gas_assert (adding_value
!= -1);
32373 gas_assert (opt
!= NULL
);
32375 if (ext_table
!= NULL
)
32377 const struct arm_ext_table
* ext_opt
= ext_table
;
32378 bool found
= false;
32379 for (; ext_opt
->name
!= NULL
; ext_opt
++)
32380 if (ext_opt
->name_len
== len
32381 && strncmp (ext_opt
->name
, str
, len
) == 0)
32385 if (ARM_FEATURE_ZERO (ext_opt
->merge
))
32386 /* TODO: Option not supported. When we remove the
32387 legacy table this case should error out. */
32390 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, ext_opt
->merge
);
32394 if (ARM_FEATURE_ZERO (ext_opt
->clear
))
32395 /* TODO: Option not supported. When we remove the
32396 legacy table this case should error out. */
32398 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, ext_opt
->clear
);
32410 /* Scan over the options table trying to find an exact match. */
32411 for (; opt
->name
!= NULL
; opt
++)
32412 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
32414 int i
, nb_allowed_archs
=
32415 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
32416 /* Check we can apply the extension to this architecture. */
32417 for (i
= 0; i
< nb_allowed_archs
; i
++)
32420 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
32422 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *opt_set
))
32425 if (i
== nb_allowed_archs
)
32427 as_bad (_("extension does not apply to the base architecture"));
32431 /* Add or remove the extension. */
32433 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
32435 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
32437 /* Allowing Thumb division instructions for ARMv7 in autodetection
32438 rely on this break so that duplicate extensions (extensions
32439 with the same name as a previous extension in the list) are not
32440 considered for command-line parsing. */
32444 if (opt
->name
== NULL
)
32446 /* Did we fail to find an extension because it wasn't specified in
32447 alphabetical order, or because it does not exist? */
32449 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
32450 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
32453 if (opt
->name
== NULL
)
32454 as_bad (_("unknown architectural extension `%s'"), str
);
32456 as_bad (_("architectural extensions must be specified in "
32457 "alphabetical order"));
32463 /* We should skip the extension we've just matched the next time
32475 arm_parse_fp16_opt (const char *str
)
32477 if (strcasecmp (str
, "ieee") == 0)
32478 fp16_format
= ARM_FP16_FORMAT_IEEE
;
32479 else if (strcasecmp (str
, "alternative") == 0)
32480 fp16_format
= ARM_FP16_FORMAT_ALTERNATIVE
;
32483 as_bad (_("unrecognised float16 format \"%s\""), str
);
32491 arm_parse_cpu (const char *str
)
32493 const struct arm_cpu_option_table
*opt
;
32494 const char *ext
= strchr (str
, '+');
32500 len
= strlen (str
);
32504 as_bad (_("missing cpu name `%s'"), str
);
32508 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
32509 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
32511 mcpu_cpu_opt
= &opt
->value
;
32512 if (mcpu_ext_opt
== NULL
)
32513 mcpu_ext_opt
= XNEW (arm_feature_set
);
32514 *mcpu_ext_opt
= opt
->ext
;
32515 mcpu_fpu_opt
= &opt
->default_fpu
;
32516 if (opt
->canonical_name
)
32518 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
32519 strcpy (selected_cpu_name
, opt
->canonical_name
);
32525 if (len
>= sizeof selected_cpu_name
)
32526 len
= (sizeof selected_cpu_name
) - 1;
32528 for (i
= 0; i
< len
; i
++)
32529 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
32530 selected_cpu_name
[i
] = 0;
32534 return arm_parse_extension (ext
, mcpu_cpu_opt
, mcpu_ext_opt
, NULL
);
32539 as_bad (_("unknown cpu `%s'"), str
);
32544 arm_parse_arch (const char *str
)
32546 const struct arm_arch_option_table
*opt
;
32547 const char *ext
= strchr (str
, '+');
32553 len
= strlen (str
);
32557 as_bad (_("missing architecture name `%s'"), str
);
32561 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
32562 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
32564 march_cpu_opt
= &opt
->value
;
32565 if (march_ext_opt
== NULL
)
32566 march_ext_opt
= XNEW (arm_feature_set
);
32567 *march_ext_opt
= arm_arch_none
;
32568 march_fpu_opt
= &opt
->default_fpu
;
32569 selected_ctx_ext_table
= opt
->ext_table
;
32570 strcpy (selected_cpu_name
, opt
->name
);
32573 return arm_parse_extension (ext
, march_cpu_opt
, march_ext_opt
,
32579 as_bad (_("unknown architecture `%s'\n"), str
);
32584 arm_parse_fpu (const char * str
)
32586 const struct arm_option_fpu_value_table
* opt
;
32588 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
32589 if (streq (opt
->name
, str
))
32591 mfpu_opt
= &opt
->value
;
32595 as_bad (_("unknown floating point format `%s'\n"), str
);
32600 arm_parse_float_abi (const char * str
)
32602 const struct arm_option_value_table
* opt
;
32604 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
32605 if (streq (opt
->name
, str
))
32607 mfloat_abi_opt
= opt
->value
;
32611 as_bad (_("unknown floating point abi `%s'\n"), str
);
32617 arm_parse_eabi (const char * str
)
32619 const struct arm_option_value_table
*opt
;
32621 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
32622 if (streq (opt
->name
, str
))
32624 meabi_flags
= opt
->value
;
32627 as_bad (_("unknown EABI `%s'\n"), str
);
32633 arm_parse_it_mode (const char * str
)
32637 if (streq ("arm", str
))
32638 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
32639 else if (streq ("thumb", str
))
32640 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
32641 else if (streq ("always", str
))
32642 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
32643 else if (streq ("never", str
))
32644 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
32647 as_bad (_("unknown implicit IT mode `%s', should be "\
32648 "arm, thumb, always, or never."), str
);
32656 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED
)
32658 codecomposer_syntax
= true;
32659 arm_comment_chars
[0] = ';';
32660 arm_line_separator_chars
[0] = 0;
32664 struct arm_long_option_table arm_long_opts
[] =
32666 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
32667 arm_parse_cpu
, NULL
},
32668 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
32669 arm_parse_arch
, NULL
},
32670 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
32671 arm_parse_fpu
, NULL
},
32672 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
32673 arm_parse_float_abi
, NULL
},
32675 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
32676 arm_parse_eabi
, NULL
},
32678 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
32679 arm_parse_it_mode
, NULL
},
32680 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
32681 arm_ccs_mode
, NULL
},
32683 N_("[ieee|alternative]\n\
32684 set the encoding for half precision floating point "
32685 "numbers to IEEE\n\
32686 or Arm alternative format."),
32687 arm_parse_fp16_opt
, NULL
},
32688 {NULL
, NULL
, 0, NULL
}
32692 md_parse_option (int c
, const char * arg
)
32694 struct arm_option_table
*opt
;
32695 const struct arm_legacy_option_table
*fopt
;
32696 struct arm_long_option_table
*lopt
;
32702 target_big_endian
= 1;
32708 target_big_endian
= 0;
32712 case OPTION_FIX_V4BX
:
32720 #endif /* OBJ_ELF */
32723 /* Listing option. Just ignore these, we don't support additional
32728 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
32730 if (c
== opt
->option
[0]
32731 && ((arg
== NULL
&& opt
->option
[1] == 0)
32732 || streq (arg
, opt
->option
+ 1)))
32734 /* If the option is deprecated, tell the user. */
32735 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
32736 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
32737 arg
? arg
: "", _(opt
->deprecated
));
32739 if (opt
->var
!= NULL
)
32740 *opt
->var
= opt
->value
;
32746 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
32748 if (c
== fopt
->option
[0]
32749 && ((arg
== NULL
&& fopt
->option
[1] == 0)
32750 || streq (arg
, fopt
->option
+ 1)))
32752 /* If the option is deprecated, tell the user. */
32753 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
32754 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
32755 arg
? arg
: "", _(fopt
->deprecated
));
32757 if (fopt
->var
!= NULL
)
32758 *fopt
->var
= &fopt
->value
;
32764 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
32766 /* These options are expected to have an argument. */
32767 if (c
== lopt
->option
[0]
32769 && strncmp (arg
, lopt
->option
+ 1,
32770 strlen (lopt
->option
+ 1)) == 0)
32772 /* If the option is deprecated, tell the user. */
32773 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
32774 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
32775 _(lopt
->deprecated
));
32777 /* Call the sup-option parser. */
32778 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
32789 md_show_usage (FILE * fp
)
32791 struct arm_option_table
*opt
;
32792 struct arm_long_option_table
*lopt
;
32794 fprintf (fp
, _(" ARM-specific assembler options:\n"));
32796 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
32797 if (opt
->help
!= NULL
)
32798 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
32800 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
32801 if (lopt
->help
!= NULL
)
32802 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
32806 -EB assemble code for a big-endian cpu\n"));
32811 -EL assemble code for a little-endian cpu\n"));
32815 --fix-v4bx Allow BX in ARMv4 code\n"));
32819 --fdpic generate an FDPIC object file\n"));
32820 #endif /* OBJ_ELF */
32828 arm_feature_set flags
;
32829 } cpu_arch_ver_table
;
32831 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
32832 chronologically for architectures, with an exception for ARMv6-M and
32833 ARMv6S-M due to legacy reasons. No new architecture should have a
32834 special case. This allows for build attribute selection results to be
32835 stable when new architectures are added. */
32836 static const cpu_arch_ver_table cpu_arch_ver
[] =
32838 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V1
},
32839 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2
},
32840 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2S
},
32841 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3
},
32842 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3M
},
32843 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4xM
},
32844 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4
},
32845 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4TxM
},
32846 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4T
},
32847 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5xM
},
32848 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5
},
32849 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5TxM
},
32850 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5T
},
32851 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TExP
},
32852 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TE
},
32853 {TAG_CPU_ARCH_V5TEJ
, ARM_ARCH_V5TEJ
},
32854 {TAG_CPU_ARCH_V6
, ARM_ARCH_V6
},
32855 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6Z
},
32856 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6KZ
},
32857 {TAG_CPU_ARCH_V6K
, ARM_ARCH_V6K
},
32858 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6T2
},
32859 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KT2
},
32860 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6ZT2
},
32861 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KZT2
},
32863 /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
32864 always selected build attributes to match those of ARMv6-M
32865 (resp. ARMv6S-M). However, due to these architectures being a strict
32866 subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
32867 would be selected when fully respecting chronology of architectures.
32868 It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
32869 move them before ARMv7 architectures. */
32870 {TAG_CPU_ARCH_V6_M
, ARM_ARCH_V6M
},
32871 {TAG_CPU_ARCH_V6S_M
, ARM_ARCH_V6SM
},
32873 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7
},
32874 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7A
},
32875 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7R
},
32876 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7M
},
32877 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7VE
},
32878 {TAG_CPU_ARCH_V7E_M
, ARM_ARCH_V7EM
},
32879 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8A
},
32880 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_1A
},
32881 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_2A
},
32882 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_3A
},
32883 {TAG_CPU_ARCH_V8M_BASE
, ARM_ARCH_V8M_BASE
},
32884 {TAG_CPU_ARCH_V8M_MAIN
, ARM_ARCH_V8M_MAIN
},
32885 {TAG_CPU_ARCH_V8R
, ARM_ARCH_V8R
},
32886 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_4A
},
32887 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_5A
},
32888 {TAG_CPU_ARCH_V8_1M_MAIN
, ARM_ARCH_V8_1M_MAIN
},
32889 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_6A
},
32890 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_7A
},
32891 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_8A
},
32892 {TAG_CPU_ARCH_V9
, ARM_ARCH_V9A
},
32893 {TAG_CPU_ARCH_V9
, ARM_ARCH_V9_1A
},
32894 {TAG_CPU_ARCH_V9
, ARM_ARCH_V9_2A
},
32895 {TAG_CPU_ARCH_V9
, ARM_ARCH_V9_3A
},
32896 {-1, ARM_ARCH_NONE
}
32899 /* Set an attribute if it has not already been set by the user. */
32902 aeabi_set_attribute_int (int tag
, int value
)
32905 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
32906 || !attributes_set_explicitly
[tag
])
32907 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
32911 aeabi_set_attribute_string (int tag
, const char *value
)
32914 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
32915 || !attributes_set_explicitly
[tag
])
32916 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
32919 /* Return whether features in the *NEEDED feature set are available via
32920 extensions for the architecture whose feature set is *ARCH_FSET. */
32923 have_ext_for_needed_feat_p (const arm_feature_set
*arch_fset
,
32924 const arm_feature_set
*needed
)
32926 int i
, nb_allowed_archs
;
32927 arm_feature_set ext_fset
;
32928 const struct arm_option_extension_value_table
*opt
;
32930 ext_fset
= arm_arch_none
;
32931 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
32933 /* Extension does not provide any feature we need. */
32934 if (!ARM_CPU_HAS_FEATURE (*needed
, opt
->merge_value
))
32938 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
32939 for (i
= 0; i
< nb_allowed_archs
; i
++)
32942 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_arch_any
))
32945 /* Extension is available, add it. */
32946 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *arch_fset
))
32947 ARM_MERGE_FEATURE_SETS (ext_fset
, ext_fset
, opt
->merge_value
);
32951 /* Can we enable all features in *needed? */
32952 return ARM_FSET_CPU_SUBSET (*needed
, ext_fset
);
32955 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
32956 a given architecture feature set *ARCH_EXT_FSET including extension feature
32957 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
32958 - if true, check for an exact match of the architecture modulo extensions;
32959 - otherwise, select build attribute value of the first superset
32960 architecture released so that results remains stable when new architectures
32962 For -march/-mcpu=all the build attribute value of the most featureful
32963 architecture is returned. Tag_CPU_arch_profile result is returned in
32967 get_aeabi_cpu_arch_from_fset (const arm_feature_set
*arch_ext_fset
,
32968 const arm_feature_set
*ext_fset
,
32969 char *profile
, int exact_match
)
32971 arm_feature_set arch_fset
;
32972 const cpu_arch_ver_table
*p_ver
, *p_ver_ret
= NULL
;
32974 /* Select most featureful architecture with all its extensions if building
32975 for -march=all as the feature sets used to set build attributes. */
32976 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, arm_arch_any
))
32978 /* Force revisiting of decision for each new architecture. */
32979 gas_assert (MAX_TAG_CPU_ARCH
<= TAG_CPU_ARCH_V9
);
32981 return TAG_CPU_ARCH_V9
;
32984 ARM_CLEAR_FEATURE (arch_fset
, *arch_ext_fset
, *ext_fset
);
32986 for (p_ver
= cpu_arch_ver
; p_ver
->val
!= -1; p_ver
++)
32988 arm_feature_set known_arch_fset
;
32990 ARM_CLEAR_FEATURE (known_arch_fset
, p_ver
->flags
, fpu_any
);
32993 /* Base architecture match user-specified architecture and
32994 extensions, eg. ARMv6S-M matching -march=armv6-m+os. */
32995 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, known_arch_fset
))
33000 /* Base architecture match user-specified architecture only
33001 (eg. ARMv6-M in the same case as above). Record it in case we
33002 find a match with above condition. */
33003 else if (p_ver_ret
== NULL
33004 && ARM_FEATURE_EQUAL (arch_fset
, known_arch_fset
))
33010 /* Architecture has all features wanted. */
33011 if (ARM_FSET_CPU_SUBSET (arch_fset
, known_arch_fset
))
33013 arm_feature_set added_fset
;
33015 /* Compute features added by this architecture over the one
33016 recorded in p_ver_ret. */
33017 if (p_ver_ret
!= NULL
)
33018 ARM_CLEAR_FEATURE (added_fset
, known_arch_fset
,
33020 /* First architecture that match incl. with extensions, or the
33021 only difference in features over the recorded match is
33022 features that were optional and are now mandatory. */
33023 if (p_ver_ret
== NULL
33024 || ARM_FSET_CPU_SUBSET (added_fset
, arch_fset
))
33030 else if (p_ver_ret
== NULL
)
33032 arm_feature_set needed_ext_fset
;
33034 ARM_CLEAR_FEATURE (needed_ext_fset
, arch_fset
, known_arch_fset
);
33036 /* Architecture has all features needed when using some
33037 extensions. Record it and continue searching in case there
33038 exist an architecture providing all needed features without
33039 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
33041 if (have_ext_for_needed_feat_p (&known_arch_fset
,
33048 if (p_ver_ret
== NULL
)
33052 /* Tag_CPU_arch_profile. */
33053 if (!ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8r
)
33054 && (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7a
)
33055 || ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8
)
33056 || (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_atomics
)
33057 && !ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8m_m_only
))))
33059 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7r
)
33060 || ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8r
))
33062 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_m
))
33066 return p_ver_ret
->val
;
33069 /* Set the public EABI object attributes. */
33072 aeabi_set_public_attributes (void)
33074 char profile
= '\0';
33077 int fp16_optional
= 0;
33078 int skip_exact_match
= 0;
33079 arm_feature_set flags
, flags_arch
, flags_ext
;
33081 /* Autodetection mode, choose the architecture based the instructions
33083 if (no_cpu_selected ())
33085 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
33087 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
33088 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
33090 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
33091 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
33093 /* Code run during relaxation relies on selected_cpu being set. */
33094 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
33095 flags_ext
= arm_arch_none
;
33096 ARM_CLEAR_FEATURE (selected_arch
, flags_arch
, flags_ext
);
33097 selected_ext
= flags_ext
;
33098 selected_cpu
= flags
;
33100 /* Otherwise, choose the architecture based on the capabilities of the
33104 ARM_MERGE_FEATURE_SETS (flags_arch
, selected_arch
, selected_ext
);
33105 ARM_CLEAR_FEATURE (flags_arch
, flags_arch
, fpu_any
);
33106 flags_ext
= selected_ext
;
33107 flags
= selected_cpu
;
33109 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_fpu
);
33111 /* Allow the user to override the reported architecture. */
33112 if (!ARM_FEATURE_ZERO (selected_object_arch
))
33114 ARM_CLEAR_FEATURE (flags_arch
, selected_object_arch
, fpu_any
);
33115 flags_ext
= arm_arch_none
;
33118 skip_exact_match
= ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_any
);
33120 /* When this function is run again after relaxation has happened there is no
33121 way to determine whether an architecture or CPU was specified by the user:
33122 - selected_cpu is set above for relaxation to work;
33123 - march_cpu_opt is not set if only -mcpu or .cpu is used;
33124 - mcpu_cpu_opt is set to arm_arch_any for autodetection.
33125 Therefore, if not in -march=all case we first try an exact match and fall
33126 back to autodetection. */
33127 if (!skip_exact_match
)
33128 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 1);
33130 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 0);
33132 as_bad (_("no architecture contains all the instructions used\n"));
33134 /* Tag_CPU_name. */
33135 if (selected_cpu_name
[0])
33139 q
= selected_cpu_name
;
33140 if (startswith (q
, "armv"))
33145 for (i
= 0; q
[i
]; i
++)
33146 q
[i
] = TOUPPER (q
[i
]);
33148 aeabi_set_attribute_string (Tag_CPU_name
, q
);
33151 /* Tag_CPU_arch. */
33152 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
33154 /* Tag_CPU_arch_profile. */
33155 if (profile
!= '\0')
33156 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
33158 /* Tag_DSP_extension. */
33159 if (ARM_CPU_HAS_FEATURE (selected_ext
, arm_ext_dsp
))
33160 aeabi_set_attribute_int (Tag_DSP_extension
, 1);
33162 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
33163 /* Tag_ARM_ISA_use. */
33164 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
33165 || ARM_FEATURE_ZERO (flags_arch
))
33166 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
33168 /* Tag_THUMB_ISA_use. */
33169 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
33170 || ARM_FEATURE_ZERO (flags_arch
))
33174 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
33175 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
))
33177 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
33181 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
33184 /* Tag_VFP_arch. */
33185 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
33186 aeabi_set_attribute_int (Tag_VFP_arch
,
33187 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
33189 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
33190 aeabi_set_attribute_int (Tag_VFP_arch
,
33191 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
33193 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
33196 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
33198 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
33200 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
33203 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
33204 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
33205 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
33206 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
33207 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
33209 /* Tag_ABI_HardFP_use. */
33210 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
33211 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
33212 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
33214 /* Tag_WMMX_arch. */
33215 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
33216 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
33217 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
33218 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
33220 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
33221 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v8_1
))
33222 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 4);
33223 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
33224 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
33225 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
33227 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
33229 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
33233 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
33238 if (ARM_CPU_HAS_FEATURE (flags
, mve_fp_ext
))
33239 aeabi_set_attribute_int (Tag_MVE_arch
, 2);
33240 else if (ARM_CPU_HAS_FEATURE (flags
, mve_ext
))
33241 aeabi_set_attribute_int (Tag_MVE_arch
, 1);
33243 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
33244 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
33245 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
33249 We set Tag_DIV_use to two when integer divide instructions have been used
33250 in ARM state, or when Thumb integer divide instructions have been used,
33251 but we have no architecture profile set, nor have we any ARM instructions.
33253 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
33254 by the base architecture.
33256 For new architectures we will have to check these tests. */
33257 gas_assert (arch
<= TAG_CPU_ARCH_V9
);
33258 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
33259 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
33260 aeabi_set_attribute_int (Tag_DIV_use
, 0);
33261 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
33262 || (profile
== '\0'
33263 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
33264 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
33265 aeabi_set_attribute_int (Tag_DIV_use
, 2);
33267 /* Tag_MP_extension_use. */
33268 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
33269 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
33271 /* Tag Virtualization_use. */
33272 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
33274 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
33277 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
33279 if (fp16_format
!= ARM_FP16_FORMAT_DEFAULT
)
33280 aeabi_set_attribute_int (Tag_ABI_FP_16bit_format
, fp16_format
);
33283 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
33284 finished and free extension feature bits which will not be used anymore. */
33287 arm_md_post_relax (void)
33289 aeabi_set_public_attributes ();
33290 XDELETE (mcpu_ext_opt
);
33291 mcpu_ext_opt
= NULL
;
33292 XDELETE (march_ext_opt
);
33293 march_ext_opt
= NULL
;
33296 /* Add the default contents for the .ARM.attributes section. */
33301 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
33304 aeabi_set_public_attributes ();
33306 #endif /* OBJ_ELF */
33308 /* Parse a .cpu directive. */
33311 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
33313 const struct arm_cpu_option_table
*opt
;
33317 name
= input_line_pointer
;
33318 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
33319 input_line_pointer
++;
33320 saved_char
= *input_line_pointer
;
33321 *input_line_pointer
= 0;
33323 /* Skip the first "all" entry. */
33324 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
33325 if (streq (opt
->name
, name
))
33327 selected_arch
= opt
->value
;
33328 selected_ext
= opt
->ext
;
33329 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
33330 if (opt
->canonical_name
)
33331 strcpy (selected_cpu_name
, opt
->canonical_name
);
33335 for (i
= 0; opt
->name
[i
]; i
++)
33336 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
33338 selected_cpu_name
[i
] = 0;
33340 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
33342 *input_line_pointer
= saved_char
;
33343 demand_empty_rest_of_line ();
33346 as_bad (_("unknown cpu `%s'"), name
);
33347 *input_line_pointer
= saved_char
;
33348 ignore_rest_of_line ();
33351 /* Parse a .arch directive. */
33354 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
33356 const struct arm_arch_option_table
*opt
;
33360 name
= input_line_pointer
;
33361 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
33362 input_line_pointer
++;
33363 saved_char
= *input_line_pointer
;
33364 *input_line_pointer
= 0;
33366 /* Skip the first "all" entry. */
33367 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
33368 if (streq (opt
->name
, name
))
33370 selected_arch
= opt
->value
;
33371 selected_ctx_ext_table
= opt
->ext_table
;
33372 selected_ext
= arm_arch_none
;
33373 selected_cpu
= selected_arch
;
33374 strcpy (selected_cpu_name
, opt
->name
);
33375 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
33376 *input_line_pointer
= saved_char
;
33377 demand_empty_rest_of_line ();
33381 as_bad (_("unknown architecture `%s'\n"), name
);
33382 *input_line_pointer
= saved_char
;
33383 ignore_rest_of_line ();
33386 /* Parse a .object_arch directive. */
33389 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
33391 const struct arm_arch_option_table
*opt
;
33395 name
= input_line_pointer
;
33396 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
33397 input_line_pointer
++;
33398 saved_char
= *input_line_pointer
;
33399 *input_line_pointer
= 0;
33401 /* Skip the first "all" entry. */
33402 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
33403 if (streq (opt
->name
, name
))
33405 selected_object_arch
= opt
->value
;
33406 *input_line_pointer
= saved_char
;
33407 demand_empty_rest_of_line ();
33411 as_bad (_("unknown architecture `%s'\n"), name
);
33412 *input_line_pointer
= saved_char
;
33413 ignore_rest_of_line ();
33416 /* Parse a .arch_extension directive. */
33419 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
33421 const struct arm_option_extension_value_table
*opt
;
33424 int adding_value
= 1;
33426 name
= input_line_pointer
;
33427 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
33428 input_line_pointer
++;
33429 saved_char
= *input_line_pointer
;
33430 *input_line_pointer
= 0;
33432 if (strlen (name
) >= 2
33433 && startswith (name
, "no"))
33439 /* Check the context specific extension table */
33440 if (selected_ctx_ext_table
)
33442 const struct arm_ext_table
* ext_opt
;
33443 for (ext_opt
= selected_ctx_ext_table
; ext_opt
->name
!= NULL
; ext_opt
++)
33445 if (streq (ext_opt
->name
, name
))
33449 if (ARM_FEATURE_ZERO (ext_opt
->merge
))
33450 /* TODO: Option not supported. When we remove the
33451 legacy table this case should error out. */
33453 ARM_MERGE_FEATURE_SETS (selected_ext
, selected_ext
,
33457 ARM_CLEAR_FEATURE (selected_ext
, selected_ext
, ext_opt
->clear
);
33459 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
33460 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
33461 *input_line_pointer
= saved_char
;
33462 demand_empty_rest_of_line ();
33468 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
33469 if (streq (opt
->name
, name
))
33471 int i
, nb_allowed_archs
=
33472 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[i
]);
33473 for (i
= 0; i
< nb_allowed_archs
; i
++)
33476 if (ARM_CPU_IS_ANY (opt
->allowed_archs
[i
]))
33478 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], selected_arch
))
33482 if (i
== nb_allowed_archs
)
33484 as_bad (_("architectural extension `%s' is not allowed for the "
33485 "current base architecture"), name
);
33490 ARM_MERGE_FEATURE_SETS (selected_ext
, selected_ext
,
33493 ARM_CLEAR_FEATURE (selected_ext
, selected_ext
, opt
->clear_value
);
33495 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
33496 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
33497 *input_line_pointer
= saved_char
;
33498 demand_empty_rest_of_line ();
33499 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
33500 on this return so that duplicate extensions (extensions with the
33501 same name as a previous extension in the list) are not considered
33502 for command-line parsing. */
33506 if (opt
->name
== NULL
)
33507 as_bad (_("unknown architecture extension `%s'\n"), name
);
33509 *input_line_pointer
= saved_char
;
33510 ignore_rest_of_line ();
33513 /* Parse a .fpu directive. */
33516 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
33518 const struct arm_option_fpu_value_table
*opt
;
33522 name
= input_line_pointer
;
33523 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
33524 input_line_pointer
++;
33525 saved_char
= *input_line_pointer
;
33526 *input_line_pointer
= 0;
33528 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
33529 if (streq (opt
->name
, name
))
33531 selected_fpu
= opt
->value
;
33532 ARM_CLEAR_FEATURE (selected_cpu
, selected_cpu
, fpu_any
);
33533 #ifndef CPU_DEFAULT
33534 if (no_cpu_selected ())
33535 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
33538 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
33539 *input_line_pointer
= saved_char
;
33540 demand_empty_rest_of_line ();
33544 as_bad (_("unknown floating point format `%s'\n"), name
);
33545 *input_line_pointer
= saved_char
;
33546 ignore_rest_of_line ();
33549 /* Copy symbol information. */
33552 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
33554 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
33558 /* Given a symbolic attribute NAME, return the proper integer value.
33559 Returns -1 if the attribute is not known. */
33562 arm_convert_symbolic_attribute (const char *name
)
33564 static const struct
33569 attribute_table
[] =
33571 /* When you modify this table you should
33572 also modify the list in doc/c-arm.texi. */
33573 #define T(tag) {#tag, tag}
33574 T (Tag_CPU_raw_name
),
33577 T (Tag_CPU_arch_profile
),
33578 T (Tag_ARM_ISA_use
),
33579 T (Tag_THUMB_ISA_use
),
33583 T (Tag_Advanced_SIMD_arch
),
33584 T (Tag_PCS_config
),
33585 T (Tag_ABI_PCS_R9_use
),
33586 T (Tag_ABI_PCS_RW_data
),
33587 T (Tag_ABI_PCS_RO_data
),
33588 T (Tag_ABI_PCS_GOT_use
),
33589 T (Tag_ABI_PCS_wchar_t
),
33590 T (Tag_ABI_FP_rounding
),
33591 T (Tag_ABI_FP_denormal
),
33592 T (Tag_ABI_FP_exceptions
),
33593 T (Tag_ABI_FP_user_exceptions
),
33594 T (Tag_ABI_FP_number_model
),
33595 T (Tag_ABI_align_needed
),
33596 T (Tag_ABI_align8_needed
),
33597 T (Tag_ABI_align_preserved
),
33598 T (Tag_ABI_align8_preserved
),
33599 T (Tag_ABI_enum_size
),
33600 T (Tag_ABI_HardFP_use
),
33601 T (Tag_ABI_VFP_args
),
33602 T (Tag_ABI_WMMX_args
),
33603 T (Tag_ABI_optimization_goals
),
33604 T (Tag_ABI_FP_optimization_goals
),
33605 T (Tag_compatibility
),
33606 T (Tag_CPU_unaligned_access
),
33607 T (Tag_FP_HP_extension
),
33608 T (Tag_VFP_HP_extension
),
33609 T (Tag_ABI_FP_16bit_format
),
33610 T (Tag_MPextension_use
),
33612 T (Tag_nodefaults
),
33613 T (Tag_also_compatible_with
),
33614 T (Tag_conformance
),
33616 T (Tag_Virtualization_use
),
33617 T (Tag_DSP_extension
),
33619 T (Tag_PAC_extension
),
33620 T (Tag_BTI_extension
),
33622 T (Tag_PACRET_use
),
33623 /* We deliberately do not include Tag_MPextension_use_legacy. */
33631 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
33632 if (streq (name
, attribute_table
[i
].name
))
33633 return attribute_table
[i
].tag
;
33638 /* Apply sym value for relocations only in the case that they are for
33639 local symbols in the same segment as the fixup and you have the
33640 respective architectural feature for blx and simple switches. */
33643 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
33646 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
33647 /* PR 17444: If the local symbol is in a different section then a reloc
33648 will always be generated for it, so applying the symbol value now
33649 will result in a double offset being stored in the relocation. */
33650 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
33651 && !S_FORCE_RELOC (fixP
->fx_addsy
, true))
33653 switch (fixP
->fx_r_type
)
33655 case BFD_RELOC_ARM_PCREL_BLX
:
33656 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
33657 if (ARM_IS_FUNC (fixP
->fx_addsy
))
33661 case BFD_RELOC_ARM_PCREL_CALL
:
33662 case BFD_RELOC_THUMB_PCREL_BLX
:
33663 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
33674 #endif /* OBJ_ELF */