]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gas/config/tc-arm.c
Update year range in copyright notice of binutils files
[thirdparty/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2022 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35 #include "cpu-arm.h"
36
37 #ifdef OBJ_ELF
38 #include "elf/arm.h"
39 #include "dw2gencfi.h"
40 #endif
41
42 #include "dwarf2dbg.h"
43
44 #ifdef OBJ_ELF
45 /* Must be at least the size of the largest unwind opcode (currently two). */
46 #define ARM_OPCODE_CHUNK_SIZE 8
47
/* This structure holds the unwinding state.  */

static struct
{
  /* Symbol marking the start of the function being unwound (presumably
     recorded by the .fnstart handling -- not visible in this chunk).  */
  symbolS * proc_start;
  /* Symbol for this function's entry in the unwind table.  */
  symbolS * table_entry;
  /* Personality routine symbol, when one is given explicitly.  */
  symbolS * personality_routine;
  /* Which pre-defined personality routine to use; exact sentinel/values
     are defined by the unwind-emission code elsewhere in this file.  */
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Used and allocated size of the OPCODES buffer.  */
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
78
79 /* Whether --fdpic was given. */
80 static int arm_fdpic;
81
82 #endif /* OBJ_ELF */
83
/* Results from operand parsing worker functions.  */

typedef enum
{
  /* Operand parsed successfully.  */
  PARSE_OPERAND_SUCCESS,
  /* Operand did not parse; an alternative interpretation may be tried.  */
  PARSE_OPERAND_FAIL,
  /* Operand did not parse, and no backtracking to an alternative
     interpretation should be attempted (semantics per the name; see the
     callers elsewhere in this file).  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
92
/* Floating-point ABI variants.  Conventionally HARD passes FP values in
   FP registers, SOFTFP uses FP hardware with the integer calling
   convention, and SOFT uses no FP hardware at all -- standard
   -mfloat-abi semantics, not derivable from this chunk alone.  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
99
100 /* Types of processor to assemble for. */
101 #ifndef CPU_DEFAULT
102 /* The code that was here used to select a default CPU depending on compiler
103 pre-defines which were only present when doing native builds, thus
104 changing gas' default behaviour depending upon the build host.
105
   If you have a target that requires a default CPU option then you
   should define CPU_DEFAULT here.  */
108 #endif
109
110 /* Perform range checks on positive and negative overflows by checking if the
111 VALUE given fits within the range of an BITS sized immediate. */
112 static bool out_of_range_p (offsetT value, offsetT bits)
113 {
114 gas_assert (bits < (offsetT)(sizeof (value) * 8));
115 return (value & ~((1 << bits)-1))
116 && ((value & ~((1 << bits)-1)) != ~((1 << bits)-1));
117 }
118
119 #ifndef FPU_DEFAULT
120 # ifdef TE_LINUX
121 # define FPU_DEFAULT FPU_ARCH_FPA
122 # elif defined (TE_NetBSD)
123 # ifdef OBJ_ELF
124 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
125 # else
126 /* Legacy a.out format. */
127 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
128 # endif
129 # elif defined (TE_VXWORKS)
130 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
131 # else
132 /* For backwards compatibility, default to FPA. */
133 # define FPU_DEFAULT FPU_ARCH_FPA
134 # endif
135 #endif /* ifndef FPU_DEFAULT */
136
137 #define streq(a, b) (strcmp (a, b) == 0)
138
139 /* Current set of feature bits available (CPU+FPU). Different from
140 selected_cpu + selected_fpu in case of autodetection since the CPU
141 feature bits are then all set. */
142 static arm_feature_set cpu_variant;
143 /* Feature bits used in each execution state. Used to set build attribute
144 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
145 static arm_feature_set arm_arch_used;
146 static arm_feature_set thumb_arch_used;
147
148 /* Flags stored in private area of BFD structure. */
149 static int uses_apcs_26 = false;
150 static int atpcs = false;
151 static int support_interwork = false;
152 static int uses_apcs_float = false;
153 static int pic_code = false;
154 static int fix_v4bx = false;
155 /* Warn on using deprecated features. */
156 static int warn_on_deprecated = true;
157 static int warn_on_restrict_it = false;
158
159 /* Understand CodeComposer Studio assembly syntax. */
160 bool codecomposer_syntax = false;
161
162 /* Variables that we set while parsing command-line options. Once all
163 options have been read we re-process these values to set the real
164 assembly flags. */
165
166 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
167 instead of -mcpu=arm1). */
168 static const arm_feature_set *legacy_cpu = NULL;
169 static const arm_feature_set *legacy_fpu = NULL;
170
171 /* CPU, extension and FPU feature bits selected by -mcpu. */
172 static const arm_feature_set *mcpu_cpu_opt = NULL;
173 static arm_feature_set *mcpu_ext_opt = NULL;
174 static const arm_feature_set *mcpu_fpu_opt = NULL;
175
176 /* CPU, extension and FPU feature bits selected by -march. */
177 static const arm_feature_set *march_cpu_opt = NULL;
178 static arm_feature_set *march_ext_opt = NULL;
179 static const arm_feature_set *march_fpu_opt = NULL;
180
181 /* Feature bits selected by -mfpu. */
182 static const arm_feature_set *mfpu_opt = NULL;
183
184 /* Constants for known architecture features. */
185 static const arm_feature_set fpu_default = FPU_DEFAULT;
186 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
187 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
188 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
189 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
190 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
191 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
192 #ifdef OBJ_ELF
193 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
194 #endif
195 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
196
197 #ifdef CPU_DEFAULT
198 static const arm_feature_set cpu_default = CPU_DEFAULT;
199 #endif
200
201 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
202 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
203 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
204 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
205 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
206 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
207 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
208 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
209 static const arm_feature_set arm_ext_v4t_5 =
210 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
211 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
212 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
213 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
214 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
215 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
216 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
217 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
/* Only for compatibility of hint instructions.  */
219 static const arm_feature_set arm_ext_v6k_v6t2 =
220 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
221 static const arm_feature_set arm_ext_v6_notm =
222 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
223 static const arm_feature_set arm_ext_v6_dsp =
224 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
225 static const arm_feature_set arm_ext_barrier =
226 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
227 static const arm_feature_set arm_ext_msr =
228 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
229 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
230 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
231 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
232 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
233 static const arm_feature_set arm_ext_v8r = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8R);
234 #ifdef OBJ_ELF
235 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
236 #endif
237 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
238 static const arm_feature_set arm_ext_m =
239 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
240 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
241 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
242 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
243 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
244 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
245 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
246 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
247 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
248 static const arm_feature_set arm_ext_v8m_main =
249 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
250 static const arm_feature_set arm_ext_v8_1m_main =
251 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
252 /* Instructions in ARMv8-M only found in M profile architectures. */
253 static const arm_feature_set arm_ext_v8m_m_only =
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
255 static const arm_feature_set arm_ext_v6t2_v8m =
256 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
257 /* Instructions shared between ARMv8-A and ARMv8-M. */
258 static const arm_feature_set arm_ext_atomics =
259 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
260 #ifdef OBJ_ELF
261 /* DSP instructions Tag_DSP_extension refers to. */
262 static const arm_feature_set arm_ext_dsp =
263 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
264 #endif
265 static const arm_feature_set arm_ext_ras =
266 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
267 /* FP16 instructions. */
268 static const arm_feature_set arm_ext_fp16 =
269 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
270 static const arm_feature_set arm_ext_fp16_fml =
271 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
272 static const arm_feature_set arm_ext_v8_2 =
273 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
274 static const arm_feature_set arm_ext_v8_3 =
275 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
276 static const arm_feature_set arm_ext_sb =
277 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
278 static const arm_feature_set arm_ext_predres =
279 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
280 static const arm_feature_set arm_ext_bf16 =
281 ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16);
282 static const arm_feature_set arm_ext_i8mm =
283 ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM);
284 static const arm_feature_set arm_ext_crc =
285 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC);
286 static const arm_feature_set arm_ext_cde =
287 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE);
288 static const arm_feature_set arm_ext_cde0 =
289 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE0);
290 static const arm_feature_set arm_ext_cde1 =
291 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE1);
292 static const arm_feature_set arm_ext_cde2 =
293 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE2);
294 static const arm_feature_set arm_ext_cde3 =
295 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE3);
296 static const arm_feature_set arm_ext_cde4 =
297 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE4);
298 static const arm_feature_set arm_ext_cde5 =
299 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE5);
300 static const arm_feature_set arm_ext_cde6 =
301 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE6);
302 static const arm_feature_set arm_ext_cde7 =
303 ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE7);
304
305 static const arm_feature_set arm_arch_any = ARM_ANY;
306 static const arm_feature_set fpu_any = FPU_ANY;
307 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
308 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
309 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
310
311 static const arm_feature_set arm_cext_iwmmxt2 =
312 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
313 static const arm_feature_set arm_cext_iwmmxt =
314 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
315 static const arm_feature_set arm_cext_xscale =
316 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
317 static const arm_feature_set arm_cext_maverick =
318 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
319 static const arm_feature_set fpu_fpa_ext_v1 =
320 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
321 static const arm_feature_set fpu_fpa_ext_v2 =
322 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
323 static const arm_feature_set fpu_vfp_ext_v1xd =
324 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
325 static const arm_feature_set fpu_vfp_ext_v1 =
326 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
327 static const arm_feature_set fpu_vfp_ext_v2 =
328 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
329 static const arm_feature_set fpu_vfp_ext_v3xd =
330 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
331 static const arm_feature_set fpu_vfp_ext_v3 =
332 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
333 static const arm_feature_set fpu_vfp_ext_d32 =
334 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
335 static const arm_feature_set fpu_neon_ext_v1 =
336 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
337 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
338 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
339 static const arm_feature_set mve_ext =
340 ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE);
341 static const arm_feature_set mve_fp_ext =
342 ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP);
343 /* Note: This has more than one bit set, which means using it with
344 mark_feature_used (which returns if *any* of the bits are set in the current
345 cpu variant) can give surprising results. */
346 static const arm_feature_set armv8m_fp =
347 ARM_FEATURE_COPROC (FPU_VFP_V5_SP_D16);
348 #ifdef OBJ_ELF
349 static const arm_feature_set fpu_vfp_fp16 =
350 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
351 static const arm_feature_set fpu_neon_ext_fma =
352 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
353 #endif
354 static const arm_feature_set fpu_vfp_ext_fma =
355 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
356 static const arm_feature_set fpu_vfp_ext_armv8 =
357 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
358 static const arm_feature_set fpu_vfp_ext_armv8xd =
359 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
360 static const arm_feature_set fpu_neon_ext_armv8 =
361 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
362 static const arm_feature_set fpu_crypto_ext_armv8 =
363 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
364 static const arm_feature_set fpu_neon_ext_v8_1 =
365 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
366 static const arm_feature_set fpu_neon_ext_dotprod =
367 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
368 static const arm_feature_set pacbti_ext =
369 ARM_FEATURE_CORE_HIGH_HIGH (ARM_EXT3_PACBTI);
370
371 static int mfloat_abi_opt = -1;
372 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
373 directive. */
374 static arm_feature_set selected_arch = ARM_ARCH_NONE;
375 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
376 directive. */
377 static arm_feature_set selected_ext = ARM_ARCH_NONE;
378 /* Feature bits selected by the last -mcpu/-march or by the combination of the
379 last .cpu/.arch directive .arch_extension directives since that
380 directive. */
381 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
382 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
383 static arm_feature_set selected_fpu = FPU_NONE;
384 /* Feature bits selected by the last .object_arch directive. */
385 static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
386 /* Must be long enough to hold any of the names in arm_cpus. */
387 static const struct arm_ext_table * selected_ctx_ext_table = NULL;
388 static char selected_cpu_name[20];
389
390 extern FLONUM_TYPE generic_floating_point_number;
391
392 /* Return if no cpu was selected on command-line. */
393 static bool
394 no_cpu_selected (void)
395 {
396 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
397 }
398
399 #ifdef OBJ_ELF
400 # ifdef EABI_DEFAULT
401 static int meabi_flags = EABI_DEFAULT;
402 # else
403 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
404 # endif
405
406 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
407
408 bool
409 arm_is_eabi (void)
410 {
411 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
412 }
413 #endif
414
415 #ifdef OBJ_ELF
416 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
417 symbolS * GOT_symbol;
418 #endif
419
420 /* 0: assemble for ARM,
421 1: assemble for Thumb,
422 2: assemble for Thumb even though target CPU does not support thumb
423 instructions. */
424 static int thumb_mode = 0;
425 /* A value distinct from the possible values for thumb_mode that we
426 can use to record whether thumb_mode has been copied into the
427 tc_frag_data field of a frag. */
428 #define MODE_RECORDED (1 << 4)
429
430 /* Specifies the intrinsic IT insn behavior mode. */
431 enum implicit_it_mode
432 {
433 IMPLICIT_IT_MODE_NEVER = 0x00,
434 IMPLICIT_IT_MODE_ARM = 0x01,
435 IMPLICIT_IT_MODE_THUMB = 0x02,
436 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
437 };
438 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
439
440 /* If unified_syntax is true, we are processing the new unified
441 ARM/Thumb syntax. Important differences from the old ARM mode:
442
443 - Immediate operands do not require a # prefix.
444 - Conditional affixes always appear at the end of the
445 instruction. (For backward compatibility, those instructions
446 that formerly had them in the middle, continue to accept them
447 there.)
448 - The IT instruction may appear, and if it does is validated
449 against subsequent conditional affixes. It does not generate
450 machine code.
451
452 Important differences from the old Thumb mode:
453
454 - Immediate operands do not require a # prefix.
455 - Most of the V6T2 instructions are only available in unified mode.
456 - The .N and .W suffixes are recognized and honored (it is an error
457 if they cannot be honored).
458 - All instructions set the flags if and only if they have an 's' affix.
459 - Conditional affixes may be used. They are validated against
460 preceding IT instructions. Unlike ARM mode, you cannot use a
461 conditional affix except in the scope of an IT instruction. */
462
463 static bool unified_syntax = false;
464
465 /* An immediate operand can start with #, and ld*, st*, pld operands
466 can contain [ and ]. We need to tell APP not to elide whitespace
467 before a [, which can appear as the first operand for pld.
468 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
469 const char arm_symbol_chars[] = "#[]{}";
470
471 enum neon_el_type
472 {
473 NT_invtype,
474 NT_untyped,
475 NT_integer,
476 NT_float,
477 NT_poly,
478 NT_signed,
479 NT_bfloat,
480 NT_unsigned
481 };
482
483 struct neon_type_el
484 {
485 enum neon_el_type type;
486 unsigned size;
487 };
488
489 #define NEON_MAX_TYPE_ELS 5
490
491 struct neon_type
492 {
493 struct neon_type_el el[NEON_MAX_TYPE_ELS];
494 unsigned elems;
495 };
496
497 enum pred_instruction_type
498 {
499 OUTSIDE_PRED_INSN,
500 INSIDE_VPT_INSN,
501 INSIDE_IT_INSN,
502 INSIDE_IT_LAST_INSN,
503 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
504 if inside, should be the last one. */
505 NEUTRAL_IT_INSN, /* This could be either inside or outside,
506 i.e. BKPT and NOP. */
507 IT_INSN, /* The IT insn has been parsed. */
508 VPT_INSN, /* The VPT/VPST insn has been parsed. */
509 MVE_OUTSIDE_PRED_INSN , /* Instruction to indicate a MVE instruction without
510 a predication code. */
511 MVE_UNPREDICABLE_INSN, /* MVE instruction that is non-predicable. */
512 };
513
514 /* The maximum number of operands we need. */
515 #define ARM_IT_MAX_OPERANDS 6
516 #define ARM_IT_MAX_RELOCS 3
517
/* Parser and encoder state for a single instruction (see the file-scope
   INST variable defined just below this struct).  */
struct arm_it
{
  /* Diagnostic to report when assembly of this instruction fails
     (presumably NULL on success -- convention not visible here).  */
  const char * error;
  /* The instruction encoding accumulated so far.  */
  unsigned long instruction;
  /* Size of the encoded instruction, in bytes.  */
  unsigned int size;
  /* Size demanded by an explicit width suffix, if any
     -- NOTE(review): assumed from the name; confirm against the parser.  */
  unsigned int size_req;
  /* Condition-code field for this instruction.  */
  unsigned int cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1u if nothing is
     appropriate.  */
  unsigned int uncond_value;
  /* Neon-style element types parsed from the mnemonic's type suffixes.  */
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  /* Relocations to emit for this instruction.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS exp;
    int pc_rel;
  } relocs[ARM_IT_MAX_RELOCS];

  /* Relationship of this instruction to any enclosing IT/VPT block
     (see enum pred_instruction_type above).  */
  enum pred_instruction_type pred_insn_type;

  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present : 1;	/* Operand present.  */
    unsigned isreg : 1;		/* Operand was a register.  */
    unsigned immisreg : 2;	/* .imm field is a second register.
				   0: imm, 1: gpr, 2: MVE Q-register.  */
    unsigned isscalar : 2;	/* Operand is a (SIMD) scalar:
				   0) not scalar,
				   1) Neon scalar,
				   2) MVE scalar.  */
    unsigned immisalign : 1;	/* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;	/* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm : 1;	/* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec : 1;		/* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad : 1;	/* Operand is SIMD quad register.  */
    unsigned issingle : 1;	/* Operand is VFP single-precision register.  */
    unsigned iszr : 1;		/* Operand is ZR register.  */
    unsigned hasreloc : 1;	/* Operand has relocation suffix.  */
    unsigned writeback : 1;	/* Operand has trailing !  */
    unsigned preind : 1;	/* Preindexed address.  */
    unsigned postind : 1;	/* Postindexed address.  */
    unsigned negative : 1;	/* Index register was negated.  */
    unsigned shifted : 1;	/* Shift applied to operation.  */
    unsigned shift_kind : 3;	/* Shift operation (enum shift_kind).  */
  } operands[ARM_IT_MAX_OPERANDS];
};
576
577 static struct arm_it inst;
578
579 #define NUM_FLOAT_VALS 8
580
581 const char * fp_const[] =
582 {
583 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
584 };
585
586 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
587
588 #define FAIL (-1)
589 #define SUCCESS (0)
590
591 #define SUFF_S 1
592 #define SUFF_D 2
593 #define SUFF_E 3
594 #define SUFF_P 4
595
596 #define CP_T_X 0x00008000
597 #define CP_T_Y 0x00400000
598
599 #define CONDS_BIT 0x00100000
600 #define LOAD_BIT 0x00100000
601
602 #define DOUBLE_LOAD_FLAG 0x00000001
603
604 struct asm_cond
605 {
606 const char * template_name;
607 unsigned long value;
608 };
609
610 #define COND_ALWAYS 0xE
611
612 struct asm_psr
613 {
614 const char * template_name;
615 unsigned long field;
616 };
617
618 struct asm_barrier_opt
619 {
620 const char * template_name;
621 unsigned long value;
622 const arm_feature_set arch;
623 };
624
625 /* The bit that distinguishes CPSR and SPSR. */
626 #define SPSR_BIT (1 << 22)
627
628 /* The individual PSR flag bits. */
629 #define PSR_c (1 << 16)
630 #define PSR_x (1 << 17)
631 #define PSR_s (1 << 18)
632 #define PSR_f (1 << 19)
633
634 struct reloc_entry
635 {
636 const char * name;
637 bfd_reloc_code_real_type reloc;
638 };
639
640 enum vfp_reg_pos
641 {
642 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
643 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
644 };
645
646 enum vfp_ldstm_type
647 {
648 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
649 };
650
651 /* Bits for DEFINED field in neon_typed_alias. */
652 #define NTA_HASTYPE 1
653 #define NTA_HASINDEX 2
654
655 struct neon_typed_alias
656 {
657 unsigned char defined;
658 unsigned char index;
659 struct neon_type_el eltype;
660 };
661
662 /* ARM register categories. This includes coprocessor numbers and various
663 architecture extensions' registers. Each entry should have an error message
664 in reg_expected_msgs below. */
665 enum arm_reg_type
666 {
667 REG_TYPE_RN,
668 REG_TYPE_CP,
669 REG_TYPE_CN,
670 REG_TYPE_FN,
671 REG_TYPE_VFS,
672 REG_TYPE_VFD,
673 REG_TYPE_NQ,
674 REG_TYPE_VFSD,
675 REG_TYPE_NDQ,
676 REG_TYPE_NSD,
677 REG_TYPE_NSDQ,
678 REG_TYPE_VFC,
679 REG_TYPE_MVF,
680 REG_TYPE_MVD,
681 REG_TYPE_MVFX,
682 REG_TYPE_MVDX,
683 REG_TYPE_MVAX,
684 REG_TYPE_MQ,
685 REG_TYPE_DSPSC,
686 REG_TYPE_MMXWR,
687 REG_TYPE_MMXWC,
688 REG_TYPE_MMXWCG,
689 REG_TYPE_XSCALE,
690 REG_TYPE_RNB,
691 REG_TYPE_ZR,
692 REG_TYPE_PSEUDO
693 };
694
695 /* Structure for a hash table entry for a register.
696 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
697 information which states whether a vector type or index is specified (for a
698 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
699 struct reg_entry
700 {
701 const char * name;
702 unsigned int number;
703 unsigned char type;
704 unsigned char builtin;
705 struct neon_typed_alias * neon;
706 };
707
708 /* Diagnostics used when we don't get a register of the expected type. */
709 const char * const reg_expected_msgs[] =
710 {
711 [REG_TYPE_RN] = N_("ARM register expected"),
712 [REG_TYPE_CP] = N_("bad or missing co-processor number"),
713 [REG_TYPE_CN] = N_("co-processor register expected"),
714 [REG_TYPE_FN] = N_("FPA register expected"),
715 [REG_TYPE_VFS] = N_("VFP single precision register expected"),
716 [REG_TYPE_VFD] = N_("VFP/Neon double precision register expected"),
717 [REG_TYPE_NQ] = N_("Neon quad precision register expected"),
718 [REG_TYPE_VFSD] = N_("VFP single or double precision register expected"),
719 [REG_TYPE_NDQ] = N_("Neon double or quad precision register expected"),
720 [REG_TYPE_NSD] = N_("Neon single or double precision register expected"),
721 [REG_TYPE_NSDQ] = N_("VFP single, double or Neon quad precision register"
722 " expected"),
723 [REG_TYPE_VFC] = N_("VFP system register expected"),
724 [REG_TYPE_MVF] = N_("Maverick MVF register expected"),
725 [REG_TYPE_MVD] = N_("Maverick MVD register expected"),
726 [REG_TYPE_MVFX] = N_("Maverick MVFX register expected"),
727 [REG_TYPE_MVDX] = N_("Maverick MVDX register expected"),
728 [REG_TYPE_MVAX] = N_("Maverick MVAX register expected"),
729 [REG_TYPE_DSPSC] = N_("Maverick DSPSC register expected"),
730 [REG_TYPE_MMXWR] = N_("iWMMXt data register expected"),
731 [REG_TYPE_MMXWC] = N_("iWMMXt control register expected"),
732 [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
733 [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
734 [REG_TYPE_MQ] = N_("MVE vector register expected"),
735 [REG_TYPE_RNB] = "",
736 [REG_TYPE_ZR] = N_("ZR register expected"),
737 [REG_TYPE_PSEUDO] = N_("Pseudo register expected"),
738 };
739
740 /* Some well known registers that we refer to directly elsewhere. */
741 #define REG_R12 12
742 #define REG_SP 13
743 #define REG_LR 14
744 #define REG_PC 15
745
746 /* ARM instructions take 4bytes in the object file, Thumb instructions
747 take 2: */
748 #define INSN_SIZE 4
749
/* One entry in the assembler's opcode table: the mnemonic template, its
   operand list, the ARM and Thumb base encodings, the architecture
   variants that provide it, and the per-state encoder callbacks.  */
struct asm_opcode
{
  /* Basic string to match.  */
  const char * template_name;

  /* Parameters to instruction.  */
  unsigned int operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue;

  /* Thumb-format instruction code.  */
  unsigned int tvalue;

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set * avariant;
  const arm_feature_set * tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);

  /* Indicates whether this instruction may be vector predicated.  */
  unsigned int mayBeVecPred : 1;
};
780
781 /* Defines for various bits that we will want to toggle. */
782 #define INST_IMMEDIATE 0x02000000
783 #define OFFSET_REG 0x02000000
784 #define HWOFFSET_IMM 0x00400000
785 #define SHIFT_BY_REG 0x00000010
786 #define PRE_INDEX 0x01000000
787 #define INDEX_UP 0x00800000
788 #define WRITE_BACK 0x00200000
789 #define LDM_TYPE_2_OR_3 0x00400000
790 #define CPSI_MMOD 0x00020000
791
792 #define LITERAL_MASK 0xf000f000
793 #define OPCODE_MASK 0xfe1fffff
794 #define V4_STR_BIT 0x00000020
795 #define VLDR_VMOV_SAME 0x0040f000
796
797 #define T2_SUBS_PC_LR 0xf3de8f00
798
799 #define DATA_OP_SHIFT 21
800 #define SBIT_SHIFT 20
801
802 #define T2_OPCODE_MASK 0xfe1fffff
803 #define T2_DATA_OP_SHIFT 21
804 #define T2_SBIT_SHIFT 20
805
806 #define A_COND_MASK 0xf0000000
807 #define A_PUSH_POP_OP_MASK 0x0fff0000
808
/* Opcodes for pushing/popping registers to/from the stack.  */
810 #define A1_OPCODE_PUSH 0x092d0000
811 #define A2_OPCODE_PUSH 0x052d0004
812 #define A2_OPCODE_POP 0x049d0004
813
814 /* Codes to distinguish the arithmetic instructions. */
815 #define OPCODE_AND 0
816 #define OPCODE_EOR 1
817 #define OPCODE_SUB 2
818 #define OPCODE_RSB 3
819 #define OPCODE_ADD 4
820 #define OPCODE_ADC 5
821 #define OPCODE_SBC 6
822 #define OPCODE_RSC 7
823 #define OPCODE_TST 8
824 #define OPCODE_TEQ 9
825 #define OPCODE_CMP 10
826 #define OPCODE_CMN 11
827 #define OPCODE_ORR 12
828 #define OPCODE_MOV 13
829 #define OPCODE_BIC 14
830 #define OPCODE_MVN 15
831
832 #define T2_OPCODE_AND 0
833 #define T2_OPCODE_BIC 1
834 #define T2_OPCODE_ORR 2
835 #define T2_OPCODE_ORN 3
836 #define T2_OPCODE_EOR 4
837 #define T2_OPCODE_ADD 8
838 #define T2_OPCODE_ADC 10
839 #define T2_OPCODE_SBC 11
840 #define T2_OPCODE_SUB 13
841 #define T2_OPCODE_RSB 14
842
843 #define T_OPCODE_MUL 0x4340
844 #define T_OPCODE_TST 0x4200
845 #define T_OPCODE_CMN 0x42c0
846 #define T_OPCODE_NEG 0x4240
847 #define T_OPCODE_MVN 0x43c0
848
849 #define T_OPCODE_ADD_R3 0x1800
850 #define T_OPCODE_SUB_R3 0x1a00
851 #define T_OPCODE_ADD_HI 0x4400
852 #define T_OPCODE_ADD_ST 0xb000
853 #define T_OPCODE_SUB_ST 0xb080
854 #define T_OPCODE_ADD_SP 0xa800
855 #define T_OPCODE_ADD_PC 0xa000
856 #define T_OPCODE_ADD_I8 0x3000
857 #define T_OPCODE_SUB_I8 0x3800
858 #define T_OPCODE_ADD_I3 0x1c00
859 #define T_OPCODE_SUB_I3 0x1e00
860
861 #define T_OPCODE_ASR_R 0x4100
862 #define T_OPCODE_LSL_R 0x4080
863 #define T_OPCODE_LSR_R 0x40c0
864 #define T_OPCODE_ROR_R 0x41c0
865 #define T_OPCODE_ASR_I 0x1000
866 #define T_OPCODE_LSL_I 0x0000
867 #define T_OPCODE_LSR_I 0x0800
868
869 #define T_OPCODE_MOV_I8 0x2000
870 #define T_OPCODE_CMP_I8 0x2800
871 #define T_OPCODE_CMP_LR 0x4280
872 #define T_OPCODE_MOV_HR 0x4600
873 #define T_OPCODE_CMP_HR 0x4500
874
875 #define T_OPCODE_LDR_PC 0x4800
876 #define T_OPCODE_LDR_SP 0x9800
877 #define T_OPCODE_STR_SP 0x9000
878 #define T_OPCODE_LDR_IW 0x6800
879 #define T_OPCODE_STR_IW 0x6000
880 #define T_OPCODE_LDR_IH 0x8800
881 #define T_OPCODE_STR_IH 0x8000
882 #define T_OPCODE_LDR_IB 0x7800
883 #define T_OPCODE_STR_IB 0x7000
884 #define T_OPCODE_LDR_RW 0x5800
885 #define T_OPCODE_STR_RW 0x5000
886 #define T_OPCODE_LDR_RH 0x5a00
887 #define T_OPCODE_STR_RH 0x5200
888 #define T_OPCODE_LDR_RB 0x5c00
889 #define T_OPCODE_STR_RB 0x5400
890
891 #define T_OPCODE_PUSH 0xb400
892 #define T_OPCODE_POP 0xbc00
893
894 #define T_OPCODE_BRANCH 0xe000
895
896 #define THUMB_SIZE 2 /* Size of thumb instruction. */
897 #define THUMB_PP_PC_LR 0x0100
898 #define THUMB_LOAD_BIT 0x0800
899 #define THUMB2_LOAD_BIT 0x00100000
900
901 #define BAD_SYNTAX _("syntax error")
902 #define BAD_ARGS _("bad arguments to instruction")
903 #define BAD_SP _("r13 not allowed here")
904 #define BAD_PC _("r15 not allowed here")
905 #define BAD_ODD _("Odd register not allowed here")
906 #define BAD_EVEN _("Even register not allowed here")
907 #define BAD_COND _("instruction cannot be conditional")
908 #define BAD_OVERLAP _("registers may not be the same")
909 #define BAD_HIREG _("lo register required")
910 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
911 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
912 #define BAD_BRANCH _("branch must be last instruction in IT block")
913 #define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
914 #define BAD_NO_VPT _("instruction not allowed in VPT block")
915 #define BAD_NOT_IT _("instruction not allowed in IT block")
916 #define BAD_NOT_VPT _("instruction missing MVE vector predication code")
917 #define BAD_FPU _("selected FPU does not support instruction")
918 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
919 #define BAD_OUT_VPT \
920 _("vector predicated instruction should be in VPT/VPST block")
921 #define BAD_IT_COND _("incorrect condition in IT block")
922 #define BAD_VPT_COND _("incorrect condition in VPT/VPST block")
923 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
924 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
925 #define BAD_PC_ADDRESSING \
926 _("cannot use register index with PC-relative addressing")
927 #define BAD_PC_WRITEBACK \
928 _("cannot use writeback with PC-relative addressing")
929 #define BAD_RANGE _("branch out of range")
930 #define BAD_FP16 _("selected processor does not support fp16 instruction")
931 #define BAD_BF16 _("selected processor does not support bf16 instruction")
932 #define BAD_CDE _("selected processor does not support cde instruction")
933 #define BAD_CDE_COPROC _("coprocessor for insn is not enabled for cde")
934 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
935 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
936 #define MVE_NOT_IT _("Warning: instruction is UNPREDICTABLE in an IT " \
937 "block")
938 #define MVE_NOT_VPT _("Warning: instruction is UNPREDICTABLE in a VPT " \
939 "block")
940 #define MVE_BAD_PC _("Warning: instruction is UNPREDICTABLE with PC" \
941 " operand")
942 #define MVE_BAD_SP _("Warning: instruction is UNPREDICTABLE with SP" \
943 " operand")
944 #define BAD_SIMD_TYPE _("bad type in SIMD instruction")
945 #define BAD_MVE_AUTO \
946 _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
947 " use a valid -march or -mcpu option.")
948 #define BAD_MVE_SRCDEST _("Warning: 32-bit element size and same destination "\
949 "and source operands makes instruction UNPREDICTABLE")
950 #define BAD_EL_TYPE _("bad element type for instruction")
951 #define MVE_BAD_QREG _("MVE vector register Q[0..7] expected")
#define BAD_PACBTI _("selected processor does not support PACBTI extension")
953
954 static htab_t arm_ops_hsh;
955 static htab_t arm_cond_hsh;
956 static htab_t arm_vcond_hsh;
957 static htab_t arm_shift_hsh;
958 static htab_t arm_psr_hsh;
959 static htab_t arm_v7m_psr_hsh;
960 static htab_t arm_reg_hsh;
961 static htab_t arm_reloc_hsh;
962 static htab_t arm_barrier_opt_hsh;
963
964 /* Stuff needed to resolve the label ambiguity
965 As:
966 ...
967 label: <insn>
968 may differ from:
969 ...
970 label:
971 <insn> */
972
973 symbolS * last_label_seen;
974 static int label_is_thumb_function_name = false;
975
976 /* Literal pool structure. Held on a per-section
977 and per-sub-section basis. */
978
979 #define MAX_LITERAL_POOL_SIZE 1024
980 typedef struct literal_pool
981 {
982 expressionS literals [MAX_LITERAL_POOL_SIZE];
983 unsigned int next_free_entry;
984 unsigned int id;
985 symbolS * symbol;
986 segT section;
987 subsegT sub_section;
988 #ifdef OBJ_ELF
989 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
990 #endif
991 struct literal_pool * next;
992 unsigned int alignment;
993 } literal_pool;
994
995 /* Pointer to a linked list of literal pools. */
996 literal_pool * list_of_pools = NULL;
997
998 typedef enum asmfunc_states
999 {
1000 OUTSIDE_ASMFUNC,
1001 WAITING_ASMFUNC_NAME,
1002 WAITING_ENDASMFUNC
1003 } asmfunc_states;
1004
1005 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
1006
1007 #ifdef OBJ_ELF
1008 # define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
1009 #else
1010 static struct current_pred now_pred;
1011 #endif
1012
1013 static inline int
1014 now_pred_compatible (int cond)
1015 {
1016 return (cond & ~1) == (now_pred.cc & ~1);
1017 }
1018
1019 static inline int
1020 conditional_insn (void)
1021 {
1022 return inst.cond != COND_ALWAYS;
1023 }
1024
1025 static int in_pred_block (void);
1026
1027 static int handle_pred_state (void);
1028
1029 static void force_automatic_it_block_close (void);
1030
1031 static void it_fsm_post_encode (void);
1032
1033 #define set_pred_insn_type(type) \
1034 do \
1035 { \
1036 inst.pred_insn_type = type; \
1037 if (handle_pred_state () == FAIL) \
1038 return; \
1039 } \
1040 while (0)
1041
1042 #define set_pred_insn_type_nonvoid(type, failret) \
1043 do \
1044 { \
1045 inst.pred_insn_type = type; \
1046 if (handle_pred_state () == FAIL) \
1047 return failret; \
1048 } \
1049 while(0)
1050
1051 #define set_pred_insn_type_last() \
1052 do \
1053 { \
1054 if (inst.cond == COND_ALWAYS) \
1055 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN); \
1056 else \
1057 set_pred_insn_type (INSIDE_IT_LAST_INSN); \
1058 } \
1059 while (0)
1060
1061 /* Toggle value[pos]. */
1062 #define TOGGLE_BIT(value, pos) (value ^ (1 << pos))
1063
1064 /* Pure syntax. */
1065
1066 /* This array holds the chars that always start a comment. If the
1067 pre-processor is disabled, these aren't very useful. */
1068 char arm_comment_chars[] = "@";
1069
1070 /* This array holds the chars that only start a comment at the beginning of
1071 a line. If the line seems to have the form '# 123 filename'
1072 .line and .file directives will appear in the pre-processed output. */
1073 /* Note that input_file.c hand checks for '#' at the beginning of the
1074 first line of the input file. This is because the compiler outputs
1075 #NO_APP at the beginning of its output. */
1076 /* Also note that comments like this one will always work. */
1077 const char line_comment_chars[] = "#";
1078
1079 char arm_line_separator_chars[] = ";";
1080
1081 /* Chars that can be used to separate mant
1082 from exp in floating point numbers. */
1083 const char EXP_CHARS[] = "eE";
1084
1085 /* Chars that mean this number is a floating point constant. */
1086 /* As in 0f12.456 */
1087 /* or 0d1.2345e12 */
1088
1089 const char FLT_CHARS[] = "rRsSfFdDxXeEpPHh";
1090
1091 /* Prefix characters that indicate the start of an immediate
1092 value. */
1093 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
1094
1095 /* Separator character handling. */
1096
1097 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
1098
1099 enum fp_16bit_format
1100 {
1101 ARM_FP16_FORMAT_IEEE = 0x1,
1102 ARM_FP16_FORMAT_ALTERNATIVE = 0x2,
1103 ARM_FP16_FORMAT_DEFAULT = 0x3
1104 };
1105
1106 static enum fp_16bit_format fp16_format = ARM_FP16_FORMAT_DEFAULT;
1107
1108
1109 static inline int
1110 skip_past_char (char ** str, char c)
1111 {
1112 /* PR gas/14987: Allow for whitespace before the expected character. */
1113 skip_whitespace (*str);
1114
1115 if (**str == c)
1116 {
1117 (*str)++;
1118 return SUCCESS;
1119 }
1120 else
1121 return FAIL;
1122 }
1123
1124 #define skip_past_comma(str) skip_past_char (str, ',')
1125
1126 /* Arithmetic expressions (possibly involving symbols). */
1127
1128 /* Return TRUE if anything in the expression is a bignum. */
1129
1130 static bool
1131 walk_no_bignums (symbolS * sp)
1132 {
1133 if (symbol_get_value_expression (sp)->X_op == O_big)
1134 return true;
1135
1136 if (symbol_get_value_expression (sp)->X_add_symbol)
1137 {
1138 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1139 || (symbol_get_value_expression (sp)->X_op_symbol
1140 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1141 }
1142
1143 return false;
1144 }
1145
1146 static bool in_my_get_expression = false;
1147
1148 /* Third argument to my_get_expression. */
1149 #define GE_NO_PREFIX 0
1150 #define GE_IMM_PREFIX 1
1151 #define GE_OPT_PREFIX 2
1152 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1153 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1154 #define GE_OPT_PREFIX_BIG 3
1155
/* Parse an expression at *STR into EP, enforcing the immediate-prefix
   policy selected by PREFIX_MODE (one of the GE_* values above).  On
   success, advance *STR past the expression and return SUCCESS.  On
   failure, record a message in inst.error (unless one is already set)
   and return a non-zero value.
   NOTE(review): the failure paths return FAIL in one place but the
   literal 1 elsewhere; callers appear to test only for non-zero --
   confirm before relying on the exact failure value.  */

static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  /* Enforce (or skip) the '#'/'$' immediate prefix as requested.  */
  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Temporarily point the global input pointer at *STR so the generic
     expression parser reads from it; md_operand uses the
     in_my_get_expression flag to route errors back to us.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = true;
  expression (ep);
  in_my_get_expression = false;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  /* Success: hand the advanced position back and restore the global
     input pointer.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1225
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represented in the normal way on the ARM.
   In big endian mode, things are as expected.  However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */

const char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;			/* Number of LITTLENUMs in the constant.  */
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  /* Map the type letter to a precision, in LITTLENUMs.  */
  switch (type)
    {
    case 'H':
    case 'h':
    /* bfloat16, despite not being part of the IEEE specification, can also
       be handled by atof_ieee().  */
    case 'b':
      prec = 1;
      break;

    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    case 'x':
    case 'X':
      prec = 5;
      break;

    /* 'p'/'P' are treated exactly like 'x'/'X' here.  */
    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  /* Parse the constant; atof_ieee returns the updated input position
     on success.  */
  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  /* Big-endian targets (or single-LITTLENUM values): emit in order.  */
  if (target_big_endian || prec == 1)
    for (i = 0; i < prec; i++)
      {
	md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += sizeof (LITTLENUM_TYPE);
      }
  /* Pure little-endian FP formats: emit the LITTLENUMs reversed.  */
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
    for (i = prec - 1; i >= 0; i--)
      {
	md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += sizeof (LITTLENUM_TYPE);
      }
  else
    /* For a 4 byte float the order of elements in `words' is 1 0.
       For an 8 byte float the order is 1 0 3 2.  */
    for (i = 0; i < prec; i += 2)
      {
	md_number_to_chars (litP, (valueT) words[i + 1],
			    sizeof (LITTLENUM_TYPE));
	md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
			    (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += 2 * sizeof (LITTLENUM_TYPE);
      }

  return NULL;
}
1318
1319 /* We handle all bad expressions here, so that we can report the faulty
1320 instruction in the error message. */
1321
1322 void
1323 md_operand (expressionS * exp)
1324 {
1325 if (in_my_get_expression)
1326 exp->X_op = O_illegal;
1327 }
1328
1329 /* Immediate values. */
1330
1331 #ifdef OBJ_ELF
1332 /* Generic immediate-value read function for use in directives.
1333 Accepts anything that 'expression' can fold to a constant.
1334 *val receives the number. */
1335
1336 static int
1337 immediate_for_directive (int *val)
1338 {
1339 expressionS exp;
1340 exp.X_op = O_illegal;
1341
1342 if (is_immediate_prefix (*input_line_pointer))
1343 {
1344 input_line_pointer++;
1345 expression (&exp);
1346 }
1347
1348 if (exp.X_op != O_constant)
1349 {
1350 as_bad (_("expected #constant"));
1351 ignore_rest_of_line ();
1352 return FAIL;
1353 }
1354 *val = exp.X_add_number;
1355 return SUCCESS;
1356 }
1357 #endif
1358
1359 /* Register parsing. */
1360
1361 /* Generic register parser. CCP points to what should be the
1362 beginning of a register name. If it is indeed a valid register
1363 name, advance CCP over it and return the reg_entry structure;
1364 otherwise return NULL. Does not issue diagnostics. */
1365
1366 static struct reg_entry *
1367 arm_reg_parse_multi (char **ccp)
1368 {
1369 char *start = *ccp;
1370 char *p;
1371 struct reg_entry *reg;
1372
1373 skip_whitespace (start);
1374
1375 #ifdef REGISTER_PREFIX
1376 if (*start != REGISTER_PREFIX)
1377 return NULL;
1378 start++;
1379 #endif
1380 #ifdef OPTIONAL_REGISTER_PREFIX
1381 if (*start == OPTIONAL_REGISTER_PREFIX)
1382 start++;
1383 #endif
1384
1385 p = start;
1386 if (!ISALPHA (*p) || !is_name_beginner (*p))
1387 return NULL;
1388
1389 do
1390 p++;
1391 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1392
1393 reg = (struct reg_entry *) str_hash_find_n (arm_reg_hsh, start, p - start);
1394
1395 if (!reg)
1396 return NULL;
1397
1398 *ccp = p;
1399 return reg;
1400 }
1401
1402 static int
1403 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1404 enum arm_reg_type type)
1405 {
1406 /* Alternative syntaxes are accepted for a few register classes. */
1407 switch (type)
1408 {
1409 case REG_TYPE_MVF:
1410 case REG_TYPE_MVD:
1411 case REG_TYPE_MVFX:
1412 case REG_TYPE_MVDX:
1413 /* Generic coprocessor register names are allowed for these. */
1414 if (reg && reg->type == REG_TYPE_CN)
1415 return reg->number;
1416 break;
1417
1418 case REG_TYPE_CP:
1419 /* For backward compatibility, a bare number is valid here. */
1420 {
1421 unsigned long processor = strtoul (start, ccp, 10);
1422 if (*ccp != start && processor <= 15)
1423 return processor;
1424 }
1425 /* Fall through. */
1426
1427 case REG_TYPE_MMXWC:
1428 /* WC includes WCG. ??? I'm not sure this is true for all
1429 instructions that take WC registers. */
1430 if (reg && reg->type == REG_TYPE_MMXWCG)
1431 return reg->number;
1432 break;
1433
1434 default:
1435 break;
1436 }
1437
1438 return FAIL;
1439 }
1440
1441 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1442 return value is the register number or FAIL. */
1443
1444 static int
1445 arm_reg_parse (char **ccp, enum arm_reg_type type)
1446 {
1447 char *start = *ccp;
1448 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1449 int ret;
1450
1451 /* Do not allow a scalar (reg+index) to parse as a register. */
1452 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1453 return FAIL;
1454
1455 if (reg && reg->type == type)
1456 return reg->number;
1457
1458 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1459 return ret;
1460
1461 *ccp = start;
1462 return FAIL;
1463 }
1464
1465 /* Parse a Neon type specifier. *STR should point at the leading '.'
1466 character. Does no verification at this stage that the type fits the opcode
1467 properly. E.g.,
1468
1469 .i32.i32.s16
1470 .s32.f32
1471 .u16
1472
1473 Can all be legally parsed by this function.
1474
1475 Fills in neon_type struct pointer with parsed information, and updates STR
1476 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1477 type, FAIL if not. */
1478
1479 static int
1480 parse_neon_type (struct neon_type *type, char **str)
1481 {
1482 char *ptr = *str;
1483
1484 if (type)
1485 type->elems = 0;
1486
1487 while (type->elems < NEON_MAX_TYPE_ELS)
1488 {
1489 enum neon_el_type thistype = NT_untyped;
1490 unsigned thissize = -1u;
1491
1492 if (*ptr != '.')
1493 break;
1494
1495 ptr++;
1496
1497 /* Just a size without an explicit type. */
1498 if (ISDIGIT (*ptr))
1499 goto parsesize;
1500
1501 switch (TOLOWER (*ptr))
1502 {
1503 case 'i': thistype = NT_integer; break;
1504 case 'f': thistype = NT_float; break;
1505 case 'p': thistype = NT_poly; break;
1506 case 's': thistype = NT_signed; break;
1507 case 'u': thistype = NT_unsigned; break;
1508 case 'd':
1509 thistype = NT_float;
1510 thissize = 64;
1511 ptr++;
1512 goto done;
1513 case 'b':
1514 thistype = NT_bfloat;
1515 switch (TOLOWER (*(++ptr)))
1516 {
1517 case 'f':
1518 ptr += 1;
1519 thissize = strtoul (ptr, &ptr, 10);
1520 if (thissize != 16)
1521 {
1522 as_bad (_("bad size %d in type specifier"), thissize);
1523 return FAIL;
1524 }
1525 goto done;
1526 case '0': case '1': case '2': case '3': case '4':
1527 case '5': case '6': case '7': case '8': case '9':
1528 case ' ': case '.':
1529 as_bad (_("unexpected type character `b' -- did you mean `bf'?"));
1530 return FAIL;
1531 default:
1532 break;
1533 }
1534 break;
1535 default:
1536 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1537 return FAIL;
1538 }
1539
1540 ptr++;
1541
1542 /* .f is an abbreviation for .f32. */
1543 if (thistype == NT_float && !ISDIGIT (*ptr))
1544 thissize = 32;
1545 else
1546 {
1547 parsesize:
1548 thissize = strtoul (ptr, &ptr, 10);
1549
1550 if (thissize != 8 && thissize != 16 && thissize != 32
1551 && thissize != 64)
1552 {
1553 as_bad (_("bad size %d in type specifier"), thissize);
1554 return FAIL;
1555 }
1556 }
1557
1558 done:
1559 if (type)
1560 {
1561 type->el[type->elems].type = thistype;
1562 type->el[type->elems].size = thissize;
1563 type->elems++;
1564 }
1565 }
1566
1567 /* Empty/missing type is not a successful parse. */
1568 if (type->elems == 0)
1569 return FAIL;
1570
1571 *str = ptr;
1572
1573 return SUCCESS;
1574 }
1575
1576 /* Errors may be set multiple times during parsing or bit encoding
1577 (particularly in the Neon bits), but usually the earliest error which is set
1578 will be the most meaningful. Avoid overwriting it with later (cascading)
1579 errors by calling this function. */
1580
1581 static void
1582 first_error (const char *err)
1583 {
1584 if (!inst.error)
1585 inst.error = err;
1586 }
1587
1588 /* Parse a single type, e.g. ".s32", leading period included. */
1589 static int
1590 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1591 {
1592 char *str = *ccp;
1593 struct neon_type optype;
1594
1595 if (*str == '.')
1596 {
1597 if (parse_neon_type (&optype, &str) == SUCCESS)
1598 {
1599 if (optype.elems == 1)
1600 *vectype = optype.el[0];
1601 else
1602 {
1603 first_error (_("only one type should be specified for operand"));
1604 return FAIL;
1605 }
1606 }
1607 else
1608 {
1609 first_error (_("vector type expected"));
1610 return FAIL;
1611 }
1612 }
1613 else
1614 return FAIL;
1615
1616 *ccp = str;
1617
1618 return SUCCESS;
1619 }
1620
1621 /* Special meanings for indices (which have a range of 0-7), which will fit into
1622 a 4-bit integer. */
1623
1624 #define NEON_ALL_LANES 15
1625 #define NEON_INTERLEAVE_LANES 14
1626
1627 /* Record a use of the given feature. */
1628 static void
1629 record_feature_use (const arm_feature_set *feature)
1630 {
1631 if (thumb_mode)
1632 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
1633 else
1634 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
1635 }
1636
1637 /* If the given feature available in the selected CPU, mark it as used.
1638 Returns TRUE iff feature is available. */
1639 static bool
1640 mark_feature_used (const arm_feature_set *feature)
1641 {
1642
1643 /* Do not support the use of MVE only instructions when in auto-detection or
1644 -march=all. */
1645 if (((feature == &mve_ext) || (feature == &mve_fp_ext))
1646 && ARM_CPU_IS_ANY (cpu_variant))
1647 {
1648 first_error (BAD_MVE_AUTO);
1649 return false;
1650 }
1651 /* Ensure the option is valid on the current architecture. */
1652 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
1653 return false;
1654
1655 /* Add the appropriate architecture feature for the barrier option used.
1656 */
1657 record_feature_use (feature);
1658
1659 return true;
1660 }
1661
/* Parse either a register or a scalar, with an optional type.  Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  Returns FAIL if no acceptable
   register is found; where helpful, a message is recorded via
   first_error.  */

static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with an empty type/index description.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register.  Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type == REG_TYPE_MQ)
    {
      /* MVE Q registers: require MVE, and restrict to q0..q7 unless the
	 D32 extension can be marked as used.  */
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	return FAIL;

      if (!reg || reg->type != REG_TYPE_NQ)
	return FAIL;

      if (reg->number > 14 && !mark_feature_used (&fpu_vfp_ext_d32))
	{
	  first_error (_("expected MVE register [q0..q7]"));
	  return FAIL;
	}
      type = REG_TYPE_NQ;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (type == REG_TYPE_NQ))
    return FAIL;


  if (type != reg->type)
    return FAIL;

  /* Pick up any type/index information attached to the register name
     itself (e.g. via .dn/.qn aliases).  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix augments the alias information, but
     redefining an already-defined type is an error.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional scalar index: "[n]" selects a lane, "[]" means all lanes.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2))
	  && !(type == REG_TYPE_NQ
	       && ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)))
	{
	  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	    first_error (_("only D and Q registers may be indexed"));
	  else
	    first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1799
1800 /* Like arm_reg_parse, but also allow the following extra features:
1801 - If RTYPE is non-zero, return the (possibly restricted) type of the
1802 register (e.g. Neon double or quad reg when either has been requested).
1803 - If this is a Neon vector type with additional type information, fill
1804 in the struct pointed to by VECTYPE (if non-NULL).
1805 This function will fault on encountering a scalar. */
1806
1807 static int
1808 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1809 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1810 {
1811 struct neon_typed_alias atype;
1812 char *str = *ccp;
1813 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1814
1815 if (reg == FAIL)
1816 return FAIL;
1817
1818 /* Do not allow regname(... to parse as a register. */
1819 if (*str == '(')
1820 return FAIL;
1821
1822 /* Do not allow a scalar (reg+index) to parse as a register. */
1823 if ((atype.defined & NTA_HASINDEX) != 0)
1824 {
1825 first_error (_("register operand expected, but got scalar"));
1826 return FAIL;
1827 }
1828
1829 if (vectype)
1830 *vectype = atype.eltype;
1831
1832 *ccp = str;
1833
1834 return reg;
1835 }
1836
1837 #define NEON_SCALAR_REG(X) ((X) >> 4)
1838 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1839
1840 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1841 have enough information to be able to do a good job bounds-checking. So, we
1842 just do easy checks here, and do further checks later. */
1843
1844 static int
1845 parse_scalar (char **ccp, int elsize, struct neon_type_el *type, enum
1846 arm_reg_type reg_type)
1847 {
1848 int reg;
1849 char *str = *ccp;
1850 struct neon_typed_alias atype;
1851 unsigned reg_size;
1852
1853 reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
1854
1855 switch (reg_type)
1856 {
1857 case REG_TYPE_VFS:
1858 reg_size = 32;
1859 break;
1860 case REG_TYPE_VFD:
1861 reg_size = 64;
1862 break;
1863 case REG_TYPE_MQ:
1864 reg_size = 128;
1865 break;
1866 default:
1867 gas_assert (0);
1868 return FAIL;
1869 }
1870
1871 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1872 return FAIL;
1873
1874 if (reg_type != REG_TYPE_MQ && atype.index == NEON_ALL_LANES)
1875 {
1876 first_error (_("scalar must have an index"));
1877 return FAIL;
1878 }
1879 else if (atype.index >= reg_size / elsize)
1880 {
1881 first_error (_("scalar index out of range"));
1882 return FAIL;
1883 }
1884
1885 if (type)
1886 *type = atype.eltype;
1887
1888 *ccp = str;
1889
1890 return reg * 16 + atype.index;
1891 }
1892
1893 /* Types of registers in a list. */
1894
1895 enum reg_list_els
1896 {
1897 REGLIST_RN,
1898 REGLIST_PSEUDO,
1899 REGLIST_CLRM,
1900 REGLIST_VFP_S,
1901 REGLIST_VFP_S_VPR,
1902 REGLIST_VFP_D,
1903 REGLIST_VFP_D_VPR,
1904 REGLIST_NEON_D
1905 };
1906
1907 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1908
1909 static long
1910 parse_reg_list (char ** strp, enum reg_list_els etype)
1911 {
1912 char *str = *strp;
1913 long range = 0;
1914 int another_range;
1915
1916 gas_assert (etype == REGLIST_RN || etype == REGLIST_CLRM
1917 || etype == REGLIST_PSEUDO);
1918
1919 /* We come back here if we get ranges concatenated by '+' or '|'. */
1920 do
1921 {
1922 skip_whitespace (str);
1923
1924 another_range = 0;
1925
1926 if (*str == '{')
1927 {
1928 int in_range = 0;
1929 int cur_reg = -1;
1930
1931 str++;
1932 do
1933 {
1934 int reg;
1935 const char apsr_str[] = "apsr";
1936 int apsr_str_len = strlen (apsr_str);
1937 enum arm_reg_type rt;
1938
1939 if (etype == REGLIST_RN || etype == REGLIST_CLRM)
1940 rt = REG_TYPE_RN;
1941 else
1942 rt = REG_TYPE_PSEUDO;
1943
1944 reg = arm_reg_parse (&str, rt);
1945 if (etype == REGLIST_CLRM)
1946 {
1947 if (reg == REG_SP || reg == REG_PC)
1948 reg = FAIL;
1949 else if (reg == FAIL
1950 && !strncasecmp (str, apsr_str, apsr_str_len)
1951 && !ISALPHA (*(str + apsr_str_len)))
1952 {
1953 reg = 15;
1954 str += apsr_str_len;
1955 }
1956
1957 if (reg == FAIL)
1958 {
1959 first_error (_("r0-r12, lr or APSR expected"));
1960 return FAIL;
1961 }
1962 }
1963 else if (etype == REGLIST_PSEUDO)
1964 {
1965 if (reg == FAIL)
1966 {
1967 first_error (_(reg_expected_msgs[REG_TYPE_PSEUDO]));
1968 return FAIL;
1969 }
1970 }
1971 else /* etype == REGLIST_RN. */
1972 {
1973 if (reg == FAIL)
1974 {
1975 first_error (_(reg_expected_msgs[REGLIST_RN]));
1976 return FAIL;
1977 }
1978 }
1979
1980 if (in_range)
1981 {
1982 int i;
1983
1984 if (reg <= cur_reg)
1985 {
1986 first_error (_("bad range in register list"));
1987 return FAIL;
1988 }
1989
1990 for (i = cur_reg + 1; i < reg; i++)
1991 {
1992 if (range & (1 << i))
1993 as_tsktsk
1994 (_("Warning: duplicated register (r%d) in register list"),
1995 i);
1996 else
1997 range |= 1 << i;
1998 }
1999 in_range = 0;
2000 }
2001
2002 if (range & (1 << reg))
2003 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
2004 reg);
2005 else if (reg <= cur_reg)
2006 as_tsktsk (_("Warning: register range not in ascending order"));
2007
2008 range |= 1 << reg;
2009 cur_reg = reg;
2010 }
2011 while (skip_past_comma (&str) != FAIL
2012 || (in_range = 1, *str++ == '-'));
2013 str--;
2014
2015 if (skip_past_char (&str, '}') == FAIL)
2016 {
2017 first_error (_("missing `}'"));
2018 return FAIL;
2019 }
2020 }
2021 else if (etype == REGLIST_RN)
2022 {
2023 expressionS exp;
2024
2025 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
2026 return FAIL;
2027
2028 if (exp.X_op == O_constant)
2029 {
2030 if (exp.X_add_number
2031 != (exp.X_add_number & 0x0000ffff))
2032 {
2033 inst.error = _("invalid register mask");
2034 return FAIL;
2035 }
2036
2037 if ((range & exp.X_add_number) != 0)
2038 {
2039 int regno = range & exp.X_add_number;
2040
2041 regno &= -regno;
2042 regno = (1 << regno) - 1;
2043 as_tsktsk
2044 (_("Warning: duplicated register (r%d) in register list"),
2045 regno);
2046 }
2047
2048 range |= exp.X_add_number;
2049 }
2050 else
2051 {
2052 if (inst.relocs[0].type != 0)
2053 {
2054 inst.error = _("expression too complex");
2055 return FAIL;
2056 }
2057
2058 memcpy (&inst.relocs[0].exp, &exp, sizeof (expressionS));
2059 inst.relocs[0].type = BFD_RELOC_ARM_MULTI;
2060 inst.relocs[0].pc_rel = 0;
2061 }
2062 }
2063
2064 if (*str == '|' || *str == '+')
2065 {
2066 str++;
2067 another_range = 1;
2068 }
2069 }
2070 while (another_range);
2071
2072 *strp = str;
2073 return range;
2074 }
2075
/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
	 FIXME: This is not implemented, as it would require backtracking in
	 some cases, e.g.:
	   vtbl.8 d3,d4,d5
	 This could be done (the meaning isn't really ambiguous), but doesn't
	 fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.

   PARTIAL_MATCH is set to true as soon as at least one register (or, for
   the _VPR variants, VPR) has been parsed, even if the list as a whole is
   subsequently rejected.  */

static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype,
		    bool *partial_match)
{
  char *str = *ccp;
  int base_reg;			/* Lowest register seen so far.  */
  int new_base;			/* Register just parsed.  */
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;		/* Number of registers in the bank.  */
  int count = 0;		/* Registers accumulated in the list.  */
  int warned = 0;		/* Ordering warning already given.  */
  unsigned long mask = 0;	/* Bitmask of registers seen.  */
  int i;
  bool vpr_seen = false;
  /* The _VPR list variants require a trailing VPR entry.  */
  bool expect_vpr =
    (etype == REGLIST_VFP_S_VPR) || (etype == REGLIST_VFP_D_VPR);

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select the register bank and (for S registers) its size from the
     list kind.  D-register bank size is decided below from the CPU.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
    case REGLIST_VFP_S_VPR:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
    case REGLIST_VFP_D_VPR:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;

    default:
      gas_assert (0);
    }

  if (etype != REGLIST_VFP_S && etype != REGLIST_VFP_S_VPR)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* Start above the bank so the first register always lowers it.  */
  base_reg = max_regs;
  *partial_match = false;

  do
    {
      /* setmask/addregs are widened to 3/2 when a Q register stands for
	 a D-register pair.  */
      unsigned int setmask = 1, addregs = 1;
      const char vpr_str[] = "vpr";
      size_t vpr_str_len = strlen (vpr_str);

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (expect_vpr)
	{
	  /* Accept a single "vpr" token, which must be the last entry.  */
	  if (new_base == FAIL
	      && !strncasecmp (str, vpr_str, vpr_str_len)
	      && !ISALPHA (*(str + vpr_str_len))
	      && !vpr_seen)
	    {
	      vpr_seen = true;
	      str += vpr_str_len;
	      if (count == 0)
		base_reg = 0; /* Canonicalize VPR only on d0 with 0 regs.  */
	    }
	  else if (vpr_seen)
	    {
	      first_error (_("VPR expected last"));
	      return FAIL;
	    }
	  else if (new_base == FAIL)
	    {
	      if (regtype == REG_TYPE_VFS)
		first_error (_("VFP single precision register or VPR "
			       "expected"));
	      else /* regtype == REG_TYPE_VFD.  */
		first_error (_("VFP/Neon double precision register or VPR "
			       "expected"));
	      return FAIL;
	    }
	}
      else if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      *partial_match = true;
      /* VPR itself contributes no registers to the mask/count.  */
      if (vpr_seen)
	continue;

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      /* A register below one already seen: warn once, keep parsing.  */
      if ((mask >> new_base) != 0 && ! warned && !vpr_seen)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* For Q registers, include the second D register of the pair.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Add each register in the range to the mask, rejecting
	     duplicates.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* Step over the list terminator.  NOTE(review): presumably this is the
     closing '}'; there is no explicit check here -- malformed lists are
     expected to have been rejected above.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if ((!vpr_seen && count == 0) || count > max_regs)
    abort ();

  *pbase = base_reg;

  if (expect_vpr && !vpr_seen)
    {
      first_error (_("VPR expected last"));
      return FAIL;
    }

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
2304
2305 /* True if two alias types are the same. */
2306
2307 static bool
2308 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2309 {
2310 if (!a && !b)
2311 return true;
2312
2313 if (!a || !b)
2314 return false;
2315
2316 if (a->defined != b->defined)
2317 return false;
2318
2319 if ((a->defined & NTA_HASTYPE) != 0
2320 && (a->eltype.type != b->eltype.type
2321 || a->eltype.size != b->eltype.size))
2322 return false;
2323
2324 if ((a->defined & NTA_HASINDEX) != 0
2325 && (a->index != b->index))
2326 return false;
2327
2328 return true;
2329 }
2330
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

/* Accessors for the encoding produced by parse_neon_el_struct_list.  */
#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   int mve,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;		/* First register in the list.  */
  int reg_incr = -1;		/* Stride between registers (-1 = unknown).  */
  int count = 0;		/* D registers accumulated so far.  */
  int lane = -1;		/* Lane index, or NEON_*_LANES constant.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  /* MVE only permits unit stride.  */
  const char *const incr_error = mve ? _("register stride must be 1") :
    _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      if (mve)
	rtype = REG_TYPE_MQ;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First entry: remember the base register and its type; every
	     later entry must agree with it.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second entry fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Later entries must follow the established stride.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax.  Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register covered by the range (Q registers
	     contribute two each).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* Every indexed entry must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  /* Mixing indexed and non-indexed entries is an error.  */
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x].  We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || (!mve && count > 4)
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  /* A singleton list has unit stride by definition.  */
  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as documented above.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2503
2504 /* Parse an explicit relocation suffix on an expression. This is
2505 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2506 arm_reloc_hsh contains no entries, so this function can only
2507 succeed if there is no () after the word. Returns -1 on error,
2508 BFD_RELOC_UNUSED if there wasn't any suffix. */
2509
2510 static int
2511 parse_reloc (char **str)
2512 {
2513 struct reloc_entry *r;
2514 char *p, *q;
2515
2516 if (**str != '(')
2517 return BFD_RELOC_UNUSED;
2518
2519 p = *str + 1;
2520 q = p;
2521
2522 while (*q && *q != ')' && *q != ',')
2523 q++;
2524 if (*q != ')')
2525 return -1;
2526
2527 if ((r = (struct reloc_entry *)
2528 str_hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2529 return -1;
2530
2531 *str = q + 1;
2532 return r->reloc;
2533 }
2534
2535 /* Directives: register aliases. */
2536
2537 static struct reg_entry *
2538 insert_reg_alias (char *str, unsigned number, int type)
2539 {
2540 struct reg_entry *new_reg;
2541 const char *name;
2542
2543 if ((new_reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, str)) != 0)
2544 {
2545 if (new_reg->builtin)
2546 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2547
2548 /* Only warn about a redefinition if it's not defined as the
2549 same register. */
2550 else if (new_reg->number != number || new_reg->type != type)
2551 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2552
2553 return NULL;
2554 }
2555
2556 name = xstrdup (str);
2557 new_reg = XNEW (struct reg_entry);
2558
2559 new_reg->name = name;
2560 new_reg->number = number;
2561 new_reg->type = type;
2562 new_reg->builtin = false;
2563 new_reg->neon = NULL;
2564
2565 str_hash_insert (arm_reg_hsh, name, new_reg, 0);
2566
2567 return new_reg;
2568 }
2569
2570 static void
2571 insert_neon_reg_alias (char *str, int number, int type,
2572 struct neon_typed_alias *atype)
2573 {
2574 struct reg_entry *reg = insert_reg_alias (str, number, type);
2575
2576 if (!reg)
2577 {
2578 first_error (_("attempt to redefine typed alias"));
2579 return;
2580 }
2581
2582 if (atype)
2583 {
2584 reg->neon = XNEW (struct neon_typed_alias);
2585 *reg->neon = *atype;
2586 }
2587 }
2588
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */

static bool
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  /* Step over " .req " (six characters) to the existing register name.  */
  oldname += 6;
  if (*oldname == '\0')
    return false;

  old = (struct reg_entry *) str_hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      /* NB: from here on P is reused as a scratch pointer into NBUF.  */
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the case-folded variants when they differ from the
	 name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  /* insert_reg_alias copies the name, so the local buffer can go.  */
  free (nbuf);
  return true;
}
2668
/* Create a Neon typed/indexed register alias using directives, e.g.:
    X .dn d5.s32[1]
    Y .qn 6.s16
    Z .dn d7
    T .dn Z[0]
   These typed registers can be used instead of the types specified after the
   Neon mnemonic, so long as all operands given have types.  Types can also be
   specified directly, e.g.:
    vadd d0.s32, d1.s32, d2.s32  */

static bool
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;	/* Scratch entry for numeric bases.  */
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* .dn defines a D-register alias, .qn a Q-register alias.  */
  if (startswith (p, " .dn "))
    basetype = REG_TYPE_VFD;
  else if (startswith (p, " .qn "))
    basetype = REG_TYPE_NQ;
  else
    return false;

  /* Step over the five-character " .dn "/" .qn " directive.  */
  p += 5;

  if (*p == '\0')
    return false;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return false;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return false;
	}
      basereg = &mybasereg;
      /* Q registers are numbered as pairs of D registers.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Aliasing an already-typed alias inherits its type info.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return false;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return false;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return false;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return false;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return false;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return true;
}
2817
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  A .req seen at
   the start of a line therefore has no alias name and is diagnosed
   as a syntax error.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
2826
/* As with .req, a valid .dn appears after its alias name (see
   create_neon_reg_alias); reaching this handler means the directive
   appeared at the start of a line and is therefore malformed.  */

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2832
/* As with .req, a valid .qn appears after its alias name (see
   create_neon_reg_alias); reaching this handler means the directive
   appeared at the start of a line and is therefore malformed.  */

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2838
2839 /* The .unreq directive deletes an alias which was previously defined
2840 by .req. For example:
2841
2842 my_alias .req r11
2843 .unreq my_alias */
2844
2845 static void
2846 s_unreq (int a ATTRIBUTE_UNUSED)
2847 {
2848 char * name;
2849 char saved_char;
2850
2851 name = input_line_pointer;
2852
2853 while (*input_line_pointer != 0
2854 && *input_line_pointer != ' '
2855 && *input_line_pointer != '\n')
2856 ++input_line_pointer;
2857
2858 saved_char = *input_line_pointer;
2859 *input_line_pointer = 0;
2860
2861 if (!*name)
2862 as_bad (_("invalid syntax for .unreq directive"));
2863 else
2864 {
2865 struct reg_entry *reg
2866 = (struct reg_entry *) str_hash_find (arm_reg_hsh, name);
2867
2868 if (!reg)
2869 as_bad (_("unknown register alias '%s'"), name);
2870 else if (reg->builtin)
2871 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2872 name);
2873 else
2874 {
2875 char * p;
2876 char * nbuf;
2877
2878 str_hash_delete (arm_reg_hsh, name);
2879 free ((char *) reg->name);
2880 free (reg->neon);
2881 free (reg);
2882
2883 /* Also locate the all upper case and all lower case versions.
2884 Do not complain if we cannot find one or the other as it
2885 was probably deleted above. */
2886
2887 nbuf = strdup (name);
2888 for (p = nbuf; *p; p++)
2889 *p = TOUPPER (*p);
2890 reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, nbuf);
2891 if (reg)
2892 {
2893 str_hash_delete (arm_reg_hsh, nbuf);
2894 free ((char *) reg->name);
2895 free (reg->neon);
2896 free (reg);
2897 }
2898
2899 for (p = nbuf; *p; p++)
2900 *p = TOLOWER (*p);
2901 reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, nbuf);
2902 if (reg)
2903 {
2904 str_hash_delete (arm_reg_hsh, nbuf);
2905 free ((char *) reg->name);
2906 free (reg->neon);
2907 free (reg);
2908 }
2909
2910 free (nbuf);
2911 }
2912 }
2913
2914 *input_line_pointer = saved_char;
2915 demand_empty_rest_of_line ();
2916 }
2917
2918 /* Directives: Instruction set selection. */
2919
2920 #ifdef OBJ_ELF
2921 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2922 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2923 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2924 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2925
/* Create a new mapping symbol for the transition to STATE, at offset
   VALUE within FRAG.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Choose the canonical mapping-symbol name for STATE:
     $d = data, $a = ARM code, $t = Thumb code.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Annotate $a/$t symbols for the interworking support; $d needs no
     extra marking.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Two symbols at the same offset: keep only the newer one.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
3001
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.  Emits a $d
   symbol at VALUE within FRAG and a STATE symbol BYTES further on.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      /* At offset zero it must also be the frag's first mapping symbol,
	 so clear that record as well.  */
      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding as data, then switch back to STATE after BYTES.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
3028
3029 static void mapping_state_2 (enum mstate state, int max_chars);
3030
3031 /* Set the mapping state to STATE. Only call this when about to
3032 emit some STATE bytes to the file. */
3033
3034 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
3035 void
3036 mapping_state (enum mstate state)
3037 {
3038 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
3039
3040 if (mapstate == state)
3041 /* The mapping symbol has already been emitted.
3042 There is nothing else to do. */
3043 return;
3044
3045 if (state == MAP_ARM || state == MAP_THUMB)
3046 /* PR gas/12931
3047 All ARM instructions require 4-byte alignment.
3048 (Almost) all Thumb instructions require 2-byte alignment.
3049
3050 When emitting instructions into any section, mark the section
3051 appropriately.
3052
3053 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
3054 but themselves require 2-byte alignment; this applies to some
3055 PC- relative forms. However, these cases will involve implicit
3056 literal pool generation or an explicit .align >=2, both of
3057 which will cause the section to me marked with sufficient
3058 alignment. Thus, we don't handle those cases here. */
3059 record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
3060
3061 if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
3062 /* This case will be evaluated later. */
3063 return;
3064
3065 mapping_state_2 (state, 0);
3066 }
3067
/* Same as mapping_state, but MAX_CHARS bytes have already been
   allocated.  Put the mapping symbol that far back.  */

static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only normal (loadable) sections get mapping symbols.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      /* If anything was emitted before this first code mapping symbol,
	 mark the start of the section as data.  */
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* Place the symbol MAX_CHARS back from the current position, i.e. at
     the start of the already-allocated bytes.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
3097 #undef TRANSITION
3098 #else
3099 #define mapping_state(x) ((void)0)
3100 #define mapping_state_2(x, y) ((void)0)
3101 #endif
3102
3103 /* Find the real, Thumb encoded start of a Thumb function. */
3104
3105 #ifdef OBJ_COFF
3106 static symbolS *
3107 find_real_start (symbolS * symbolP)
3108 {
3109 char * real_start;
3110 const char * name = S_GET_NAME (symbolP);
3111 symbolS * new_target;
3112
3113 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
3114 #define STUB_NAME ".real_start_of"
3115
3116 if (name == NULL)
3117 abort ();
3118
3119 /* The compiler may generate BL instructions to local labels because
3120 it needs to perform a branch to a far away location. These labels
3121 do not have a corresponding ".real_start_of" label. We check
3122 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
3123 the ".real_start_of" convention for nonlocal branches. */
3124 if (S_IS_LOCAL (symbolP) || name[0] == '.')
3125 return symbolP;
3126
3127 real_start = concat (STUB_NAME, name, NULL);
3128 new_target = symbol_find (real_start);
3129 free (real_start);
3130
3131 if (new_target == NULL)
3132 {
3133 as_warn (_("Failed to find real start of function: %s\n"), name);
3134 new_target = symbolP;
3135 }
3136
3137 return new_target;
3138 }
3139 #endif
3140
3141 static void
3142 opcode_select (int width)
3143 {
3144 switch (width)
3145 {
3146 case 16:
3147 if (! thumb_mode)
3148 {
3149 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
3150 as_bad (_("selected processor does not support THUMB opcodes"));
3151
3152 thumb_mode = 1;
3153 /* No need to force the alignment, since we will have been
3154 coming from ARM mode, which is word-aligned. */
3155 record_alignment (now_seg, 1);
3156 }
3157 break;
3158
3159 case 32:
3160 if (thumb_mode)
3161 {
3162 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
3163 as_bad (_("selected processor does not support ARM opcodes"));
3164
3165 thumb_mode = 0;
3166
3167 if (!need_pass_2)
3168 frag_align (2, 0, 0);
3169
3170 record_alignment (now_seg, 1);
3171 }
3172 break;
3173
3174 default:
3175 as_bad (_("invalid instruction size selected (%d)"), width);
3176 }
3177 }
3178
/* Directive handler selecting 32-bit (ARM) opcodes.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
3185
/* Directive handler selecting 16-bit (Thumb) opcodes.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
3192
3193 static void
3194 s_code (int unused ATTRIBUTE_UNUSED)
3195 {
3196 int temp;
3197
3198 temp = get_absolute_expression ();
3199 switch (temp)
3200 {
3201 case 16:
3202 case 32:
3203 opcode_select (temp);
3204 break;
3205
3206 default:
3207 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
3208 }
3209 }
3210
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* NOTE(review): the value 2 (rather than 1, as set by
	 opcode_select) presumably marks an unchecked, forced switch to
	 Thumb -- confirm against the consumers of thumb_mode.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
3227
/* Implement ".thumb_func": switch to Thumb mode and arrange for the
   next label to be marked as the start of a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = true;
}
3237
3238 /* Perform a .set directive, but also mark the alias as
3239 being a thumb function. */
3240
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily NUL-terminate NAME so it prints cleanly, then
	 restore the delimiter before discarding the line.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  /* Look the symbol up; if neither the symbol table nor the backend
     knows it, create a fresh undefined symbol.  */
  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, dummy_frag, 0);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, &zero_address_frag, 0);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Restore the delimiter overwritten above so the value expression
     can be parsed.  */
  * end_name = delim;

  /* EQUIV non-zero means ".thumb_set" behaves like ".equiv" (error on
     redefinition) rather than like ".set".  */
  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3326
3327 /* Directives: Mode selection. */
3328
3329 /* .syntax [unified|divided] - choose the new unified syntax
3330 (same for Arm and Thumb encoding, modulo slight differences in what
3331 can be represented) or the old divergent syntax for each mode. */
3332 static void
3333 s_syntax (int unused ATTRIBUTE_UNUSED)
3334 {
3335 char *name, delim;
3336
3337 delim = get_symbol_name (& name);
3338
3339 if (!strcasecmp (name, "unified"))
3340 unified_syntax = true;
3341 else if (!strcasecmp (name, "divided"))
3342 unified_syntax = false;
3343 else
3344 {
3345 as_bad (_("unrecognized syntax mode \"%s\""), name);
3346 return;
3347 }
3348 (void) restore_line_pointer (delim);
3349 demand_empty_rest_of_line ();
3350 }
3351
3352 /* Directives: sectioning and alignment. */
3353
/* Implement the ".bss" directive: switch output to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3366
3367 static void
3368 s_even (int ignore ATTRIBUTE_UNUSED)
3369 {
3370 /* Never make frag if expect extra pass. */
3371 if (!need_pass_2)
3372 frag_align (1, 0, 0);
3373
3374 record_alignment (now_seg, 1);
3375
3376 demand_empty_rest_of_line ();
3377 }
3378
3379 /* Directives: CodeComposer Studio. */
3380
3381 /* .ref (for CodeComposer Studio syntax only). */
3382 static void
3383 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3384 {
3385 if (codecomposer_syntax)
3386 ignore_rest_of_line ();
3387 else
3388 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3389 }
3390
3391 /* If name is not NULL, then it is used for marking the beginning of a
3392 function, whereas if it is NULL then it means the function end. */
3393 static void
3394 asmfunc_debug (const char * name)
3395 {
3396 static const char * last_name = NULL;
3397
3398 if (name != NULL)
3399 {
3400 gas_assert (last_name == NULL);
3401 last_name = name;
3402
3403 if (debug_type == DEBUG_STABS)
3404 stabs_generate_asm_func (name, name);
3405 }
3406 else
3407 {
3408 gas_assert (last_name != NULL);
3409
3410 if (debug_type == DEBUG_STABS)
3411 stabs_generate_asm_endfunc (last_name, last_name);
3412
3413 last_name = NULL;
3414 }
3415 }
3416
3417 static void
3418 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3419 {
3420 if (codecomposer_syntax)
3421 {
3422 switch (asmfunc_state)
3423 {
3424 case OUTSIDE_ASMFUNC:
3425 asmfunc_state = WAITING_ASMFUNC_NAME;
3426 break;
3427
3428 case WAITING_ASMFUNC_NAME:
3429 as_bad (_(".asmfunc repeated."));
3430 break;
3431
3432 case WAITING_ENDASMFUNC:
3433 as_bad (_(".asmfunc without function."));
3434 break;
3435 }
3436 demand_empty_rest_of_line ();
3437 }
3438 else
3439 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3440 }
3441
3442 static void
3443 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3444 {
3445 if (codecomposer_syntax)
3446 {
3447 switch (asmfunc_state)
3448 {
3449 case OUTSIDE_ASMFUNC:
3450 as_bad (_(".endasmfunc without a .asmfunc."));
3451 break;
3452
3453 case WAITING_ASMFUNC_NAME:
3454 as_bad (_(".endasmfunc without function."));
3455 break;
3456
3457 case WAITING_ENDASMFUNC:
3458 asmfunc_state = OUTSIDE_ASMFUNC;
3459 asmfunc_debug (NULL);
3460 break;
3461 }
3462 demand_empty_rest_of_line ();
3463 }
3464 else
3465 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3466 }
3467
3468 static void
3469 s_ccs_def (int name)
3470 {
3471 if (codecomposer_syntax)
3472 s_globl (name);
3473 else
3474 as_bad (_(".def pseudo-op only available with -mccs flag."));
3475 }
3476
3477 /* Directives: Literal pools. */
3478
3479 static literal_pool *
3480 find_literal_pool (void)
3481 {
3482 literal_pool * pool;
3483
3484 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3485 {
3486 if (pool->section == now_seg
3487 && pool->sub_section == now_subseg)
3488 break;
3489 }
3490
3491 return pool;
3492 }
3493
3494 static literal_pool *
3495 find_or_make_literal_pool (void)
3496 {
3497 /* Next literal pool ID number. */
3498 static unsigned int latest_pool_num = 1;
3499 literal_pool * pool;
3500
3501 pool = find_literal_pool ();
3502
3503 if (pool == NULL)
3504 {
3505 /* Create a new pool. */
3506 pool = XNEW (literal_pool);
3507 if (! pool)
3508 return NULL;
3509
3510 pool->next_free_entry = 0;
3511 pool->section = now_seg;
3512 pool->sub_section = now_subseg;
3513 pool->next = list_of_pools;
3514 pool->symbol = NULL;
3515 pool->alignment = 2;
3516
3517 /* Add it to the list. */
3518 list_of_pools = pool;
3519 }
3520
3521 /* New pools, and emptied pools, will have a NULL symbol. */
3522 if (pool->symbol == NULL)
3523 {
3524 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3525 &zero_address_frag, 0);
3526 pool->id = latest_pool_num ++;
3527 }
3528
3529 /* Done. */
3530 return pool;
3531 }
3532
3533 /* Add the literal in the global 'inst'
3534 structure to the relevant literal pool. */
3535
static int
add_to_lit_pool (unsigned int nbytes)
{
/* X_md for a pool entry encodes (flags << 8) | size-in-bytes.  */
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bool padding_slot_p = false;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  if (nbytes == 8)
    {
      /* Split a 64-bit literal into its two 32-bit halves, in memory
	 order for the target endianness.  */
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	      : inst.relocs[0].exp.X_unsigned ? 0
	      : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* 4-byte literal: reuse an entry matching either the same
	     constant or the same symbolic expression.  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.relocs[0].exp.X_unsigned))
	    break;

	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.relocs[0].exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.relocs[0].exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* 8-byte literal: reuse a pair of consecutive 4-byte entries,
	 but only at an 8-byte-aligned offset in the pool.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.relocs[0].exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.relocs[0].exp.X_unsigned))
	break;

      /* A padding slot inserted for 8-byte alignment can be recycled
	 for a new 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.relocs[0].exp.X_op == O_constant
		|| inst.relocs[0].exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Misaligned: insert a 4-byte padding slot first.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.relocs[0].exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 32-bit halves as consecutive entries.  */
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Recycle the padding slot found above.  */
      pool->literals[entry] = inst.relocs[0].exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's operand as a reference to the pool
     label plus this entry's byte offset.  */
  inst.relocs[0].exp.X_op = O_symbol;
  inst.relocs[0].exp.X_add_number = pool_size;
  inst.relocs[0].exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3697
3698 bool
3699 tc_start_label_without_colon (void)
3700 {
3701 bool ret = true;
3702
3703 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3704 {
3705 const char *label = input_line_pointer;
3706
3707 while (!is_end_of_line[(int) label[-1]])
3708 --label;
3709
3710 if (*label == '.')
3711 {
3712 as_bad (_("Invalid label '%s'"), label);
3713 ret = false;
3714 }
3715
3716 asmfunc_debug (label);
3717
3718 asmfunc_state = WAITING_ENDASMFUNC;
3719 }
3720
3721 return ret;
3722 }
3723
3724 /* Can't use symbol_new here, so have to create a symbol and then at
3725 a later date assign it a value. That's what these functions do. */
3726
static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.  */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy NAME onto the notes obstack so the caller's buffer may be
     modified or freed afterwards.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  /* Let the object format and (optionally) the target hook see the
     newly created symbol.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3774
/* Implement the ".ltorg" directive: dump the current section's literal
   pool at this point in the output and mark the pool as empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Literal data follows: switch to the data mapping state and emit a
     mapping symbol.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* NOTE(review): the \002 byte presumably keeps the pool label out of
     the namespace a user could type - confirm.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Give the pool's (previously undefined) label its real location.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3826
3827 #ifdef OBJ_ELF
3828 /* Forward declarations for functions below, in the MD interface
3829 section. */
3830 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3831 static valueT create_unwind_entry (int);
3832 static void start_unwind_section (const segT, int);
3833 static void add_unwind_opcode (valueT, int);
3834 static void flush_pending_unwind (void);
3835
3836 /* Directives: Data. */
3837
/* Like cons (.word etc.) but, for symbolic expressions, also accept a
   relocation suffix parsed by parse_reloc after the symbol.  NBYTES is
   the size of each value emitted.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix present: emit the expression normally.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Save the input, splice the reloc suffix out of the
		     line, reparse the full expression, then restore
		     the original text.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the SIZE-byte fixup at the most significant
		     end... NOTE(review): offset handling presumably
		     assumes little-endian placement - confirm.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3932
3933 /* Emit an expression containing a 32-bit thumb instruction.
3934 Implementation based on put_thumb32_insn. */
3935
3936 static void
3937 emit_thumb32_expr (expressionS * exp)
3938 {
3939 expressionS exp_high = *exp;
3940
3941 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3942 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3943 exp->X_add_number &= 0xffff;
3944 emit_expr (exp, (unsigned int) THUMB_SIZE);
3945 }
3946
3947 /* Guess the instruction size based on the opcode. */
3948
/* Guess the size in bytes of a Thumb instruction from its OPCODE:
   2 for a 16-bit encoding, 4 for a 32-bit encoding, 0 if the value
   falls in neither range and the size cannot be determined.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int uop = (unsigned int) opcode;

  if (uop < 0xe800u)
    return 2;
  if (uop >= 0xe8000000u)
    return 4;
  return 0;
}
3959
/* Emit one ".inst" operand held in EXP.  NBYTES is the explicit width
   (2 or 4) from a .inst.n/.inst.w suffix, or 0 meaning the width must
   be guessed from the opcode via thumb_insn_size.  Returns true if an
   instruction was emitted.  */

static bool
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT/predication state machine in step with the
		 instruction being emitted.  */
	      if (now_pred.state == AUTOMATIC_PRED_BLOCK)
		set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN, 0);
	      else
		set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* 32-bit Thumb instructions are emitted as two halfwords,
		 high halfword first; NOTE(review): the big-endian path
		 presumably gets the right layout from emit_expr alone -
		 confirm against put_thumb32_insn.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
4004
4005 /* Like s_arm_elf_cons but do not use md_cons_align and
4006 set the mapping state to MAP_ARM/MAP_THUMB. */
4007
4008 static void
4009 s_arm_elf_inst (int nbytes)
4010 {
4011 if (is_it_end_of_statement ())
4012 {
4013 demand_empty_rest_of_line ();
4014 return;
4015 }
4016
4017 /* Calling mapping_state () here will not change ARM/THUMB,
4018 but will ensure not to be in DATA state. */
4019
4020 if (thumb_mode)
4021 mapping_state (MAP_THUMB);
4022 else
4023 {
4024 if (nbytes != 0)
4025 {
4026 as_bad (_("width suffixes are invalid in ARM mode"));
4027 ignore_rest_of_line ();
4028 return;
4029 }
4030
4031 nbytes = 4;
4032
4033 mapping_state (MAP_ARM);
4034 }
4035
4036 do
4037 {
4038 expressionS exp;
4039
4040 expression (& exp);
4041
4042 if (! emit_insn (& exp, nbytes))
4043 {
4044 ignore_rest_of_line ();
4045 return;
4046 }
4047 }
4048 while (*input_line_pointer++ == ',');
4049
4050 /* Put terminator back into stream. */
4051 input_line_pointer --;
4052 demand_empty_rest_of_line ();
4053 }
4054
4055 /* Parse a .rel31 directive. */
4056
/* Parse a ".rel31" directive: a literal '0' or '1' giving the top bit,
   a comma, then an expression emitted as a 4-byte datum with a
   self-relative 31-bit (BFD_RELOC_ARM_PREL31) fixup.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  /* First operand selects the value of bit 31.  */
  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  /* Pre-fill the word with the high bit; the PREL31 fixup fills in
     the low 31 bits.  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
4094
4095 /* Directives: AEABI stack-unwind tables. */
4096
4097 /* Parse an unwind_fnstart directive. Simply records the current location. */
4098
4099 static void
4100 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
4101 {
4102 demand_empty_rest_of_line ();
4103 if (unwind.proc_start)
4104 {
4105 as_bad (_("duplicate .fnstart directive"));
4106 return;
4107 }
4108
4109 /* Mark the start of the function. */
4110 unwind.proc_start = expr_build_dot ();
4111
4112 /* Reset the rest of the unwind info. */
4113 unwind.opcode_count = 0;
4114 unwind.table_entry = NULL;
4115 unwind.personality_routine = NULL;
4116 unwind.personality_index = -1;
4117 unwind.frame_size = 0;
4118 unwind.fp_offset = 0;
4119 unwind.fp_reg = REG_SP;
4120 unwind.fp_used = 0;
4121 unwind.sp_restored = 0;
4122 }
4123
4124
4125 /* Parse a handlerdata directive. Creates the exception handling table entry
4126 for the function. */
4127
4128 static void
4129 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
4130 {
4131 demand_empty_rest_of_line ();
4132 if (!unwind.proc_start)
4133 as_bad (MISSING_FNSTART);
4134
4135 if (unwind.table_entry)
4136 as_bad (_("duplicate .handlerdata directive"));
4137
4138 create_unwind_entry (1);
4139 }
4140
4141 /* Parse an unwind_fnend directive. Generates the index table entry. */
4142
/* Parse an unwind_fnend directive.  Generates the exception table
   entry (if needed) and the two-word index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* Zero-size BFD_RELOC_NONE fix: emits no bytes, only a reference
	 to the personality routine symbol.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
4210
4211
4212 /* Parse an unwind_cantunwind directive. */
4213
4214 static void
4215 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
4216 {
4217 demand_empty_rest_of_line ();
4218 if (!unwind.proc_start)
4219 as_bad (MISSING_FNSTART);
4220
4221 if (unwind.personality_routine || unwind.personality_index != -1)
4222 as_bad (_("personality routine specified for cantunwind frame"));
4223
4224 unwind.personality_index = -2;
4225 }
4226
4227
4228 /* Parse a personalityindex directive. */
4229
4230 static void
4231 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
4232 {
4233 expressionS exp;
4234
4235 if (!unwind.proc_start)
4236 as_bad (MISSING_FNSTART);
4237
4238 if (unwind.personality_routine || unwind.personality_index != -1)
4239 as_bad (_("duplicate .personalityindex directive"));
4240
4241 expression (&exp);
4242
4243 if (exp.X_op != O_constant
4244 || exp.X_add_number < 0 || exp.X_add_number > 15)
4245 {
4246 as_bad (_("bad personality routine number"));
4247 ignore_rest_of_line ();
4248 return;
4249 }
4250
4251 unwind.personality_index = exp.X_add_number;
4252
4253 demand_empty_rest_of_line ();
4254 }
4255
4256
4257 /* Parse a personality directive. */
4258
/* Parse a personality directive: record a named personality routine
   for the current frame.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    /* Skip over the closing quote of a quoted symbol name.  */
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  /* Restore the delimiter character overwritten by get_symbol_name.  */
  *p = c;
  demand_empty_rest_of_line ();
}
4278
4279 /* Parse a directive saving pseudo registers. */
4280
4281 static void
4282 s_arm_unwind_save_pseudo (void)
4283 {
4284 valueT op;
4285 long range;
4286
4287 range = parse_reg_list (&input_line_pointer, REGLIST_PSEUDO);
4288 if (range == FAIL)
4289 {
4290 as_bad (_("expected pseudo register list"));
4291 ignore_rest_of_line ();
4292 return;
4293 }
4294
4295 demand_empty_rest_of_line ();
4296
4297 if (range & (1 << 9))
4298 {
4299 /* Opcode for restoring RA_AUTH_CODE. */
4300 op = 0xb4;
4301 add_unwind_opcode (op, 1);
4302 }
4303 }
4304
4305
4306 /* Parse a directive saving core registers. */
4307
/* Parse a directive saving core registers: translate the register
   list into pop-style unwind opcodes and account for the frame size.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer, REGLIST_RN);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed: 4 per saved register.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4381
4382
4383 /* Parse a directive saving FPA registers. */
4384
4385 static void
4386 s_arm_unwind_save_fpa (int reg)
4387 {
4388 expressionS exp;
4389 int num_regs;
4390 valueT op;
4391
4392 /* Get Number of registers to transfer. */
4393 if (skip_past_comma (&input_line_pointer) != FAIL)
4394 expression (&exp);
4395 else
4396 exp.X_op = O_illegal;
4397
4398 if (exp.X_op != O_constant)
4399 {
4400 as_bad (_("expected , <constant>"));
4401 ignore_rest_of_line ();
4402 return;
4403 }
4404
4405 num_regs = exp.X_add_number;
4406
4407 if (num_regs < 1 || num_regs > 4)
4408 {
4409 as_bad (_("number of registers must be in the range [1:4]"));
4410 ignore_rest_of_line ();
4411 return;
4412 }
4413
4414 demand_empty_rest_of_line ();
4415
4416 if (reg == 4)
4417 {
4418 /* Short form. */
4419 op = 0xb4 | (num_regs - 1);
4420 add_unwind_opcode (op, 1);
4421 }
4422 else
4423 {
4424 /* Long form. */
4425 op = 0xc800 | (reg << 4) | (num_regs - 1);
4426 add_unwind_opcode (op, 2);
4427 }
4428 unwind.frame_size += num_regs * 12;
4429 }
4430
4431
4432 /* Parse a directive saving VFP registers for ARMv6 and above. */
4433
/* Parse a directive saving VFP registers for ARMv6 and above: split the
   register list into a VFPv3 part (d16-d31) and a d0-d15 part, each
   with its own unwind opcode.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;
  bool partial_match;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D,
			      &partial_match);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* Opcode register numbers are relative to d16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes.  */
  unwind.frame_size += count * 8;
}
4482
4483
4484 /* Parse a directive saving VFP registers for pre-ARMv6. */
4485
4486 static void
4487 s_arm_unwind_save_vfp (void)
4488 {
4489 int count;
4490 unsigned int reg;
4491 valueT op;
4492 bool partial_match;
4493
4494 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D,
4495 &partial_match);
4496 if (count == FAIL)
4497 {
4498 as_bad (_("expected register list"));
4499 ignore_rest_of_line ();
4500 return;
4501 }
4502
4503 demand_empty_rest_of_line ();
4504
4505 if (reg == 8)
4506 {
4507 /* Short form. */
4508 op = 0xb8 | (count - 1);
4509 add_unwind_opcode (op, 1);
4510 }
4511 else
4512 {
4513 /* Long form. */
4514 op = 0xb300 | (reg << 4) | (count - 1);
4515 add_unwind_opcode (op, 2);
4516 }
4517 unwind.frame_size += count * 8 + 4;
4518 }
4519
4520
/* Parse a directive saving iWMMXt data registers.  Accepts a register
   list of wrN registers and ranges, e.g. {wr10-wr12, wr15}, possibly
   merging the result with previously emitted wr-save opcodes.  */

static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  /* Build MASK, one bit per wr register, from the comma-separated list
     of registers and inclusive ranges.  */
  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  /* Each wr register occupies 8 bytes on the stack.  */
  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.  We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.  */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      /* Previous opcode was a short-form wr10..wr(10+i) save;
		 merge when the new list is exactly wr9 upward.  */
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* Previous opcode was a two-byte long-form save; recover
		 its start register and count from the low byte.  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceding block.  */
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form.  */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
error:
  ignore_rest_of_line ();
}
4654
4655 static void
4656 s_arm_unwind_save_mmxwcg (void)
4657 {
4658 int reg;
4659 int hi_reg;
4660 unsigned mask = 0;
4661 valueT op;
4662
4663 if (*input_line_pointer == '{')
4664 input_line_pointer++;
4665
4666 skip_whitespace (input_line_pointer);
4667
4668 do
4669 {
4670 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4671
4672 if (reg == FAIL)
4673 {
4674 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4675 goto error;
4676 }
4677
4678 reg -= 8;
4679 if (mask >> reg)
4680 as_tsktsk (_("register list not in ascending order"));
4681 mask |= 1 << reg;
4682
4683 if (*input_line_pointer == '-')
4684 {
4685 input_line_pointer++;
4686 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4687 if (hi_reg == FAIL)
4688 {
4689 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4690 goto error;
4691 }
4692 else if (reg >= hi_reg)
4693 {
4694 as_bad (_("bad register range"));
4695 goto error;
4696 }
4697 for (; reg < hi_reg; reg++)
4698 mask |= 1 << reg;
4699 }
4700 }
4701 while (skip_past_comma (&input_line_pointer) != FAIL);
4702
4703 skip_past_char (&input_line_pointer, '}');
4704
4705 demand_empty_rest_of_line ();
4706
4707 /* Generate any deferred opcodes because we're going to be looking at
4708 the list. */
4709 flush_pending_unwind ();
4710
4711 for (reg = 0; reg < 16; reg++)
4712 {
4713 if (mask & (1 << reg))
4714 unwind.frame_size += 4;
4715 }
4716 op = 0xc700 | mask;
4717 add_unwind_opcode (op, 2);
4718 return;
4719 error:
4720 ignore_rest_of_line ();
4721 }
4722
4723
4724 /* Parse an unwind_save directive.
4725 If the argument is non-zero, this is a .vsave directive. */
4726
4727 static void
4728 s_arm_unwind_save (int arch_v6)
4729 {
4730 char *peek;
4731 struct reg_entry *reg;
4732 bool had_brace = false;
4733
4734 if (!unwind.proc_start)
4735 as_bad (MISSING_FNSTART);
4736
4737 /* Figure out what sort of save we have. */
4738 peek = input_line_pointer;
4739
4740 if (*peek == '{')
4741 {
4742 had_brace = true;
4743 peek++;
4744 }
4745
4746 reg = arm_reg_parse_multi (&peek);
4747
4748 if (!reg)
4749 {
4750 as_bad (_("register expected"));
4751 ignore_rest_of_line ();
4752 return;
4753 }
4754
4755 switch (reg->type)
4756 {
4757 case REG_TYPE_FN:
4758 if (had_brace)
4759 {
4760 as_bad (_("FPA .unwind_save does not take a register list"));
4761 ignore_rest_of_line ();
4762 return;
4763 }
4764 input_line_pointer = peek;
4765 s_arm_unwind_save_fpa (reg->number);
4766 return;
4767
4768 case REG_TYPE_RN:
4769 s_arm_unwind_save_core ();
4770 return;
4771
4772 case REG_TYPE_PSEUDO:
4773 s_arm_unwind_save_pseudo ();
4774 return;
4775
4776 case REG_TYPE_VFD:
4777 if (arch_v6)
4778 s_arm_unwind_save_vfp_armv6 ();
4779 else
4780 s_arm_unwind_save_vfp ();
4781 return;
4782
4783 case REG_TYPE_MMXWR:
4784 s_arm_unwind_save_mmxwr ();
4785 return;
4786
4787 case REG_TYPE_MMXWCG:
4788 s_arm_unwind_save_mmxwcg ();
4789 return;
4790
4791 default:
4792 as_bad (_(".unwind_save does not support this kind of register"));
4793 ignore_rest_of_line ();
4794 }
4795 }
4796
4797
4798 /* Parse an unwind_movsp directive. */
4799
4800 static void
4801 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4802 {
4803 int reg;
4804 valueT op;
4805 int offset;
4806
4807 if (!unwind.proc_start)
4808 as_bad (MISSING_FNSTART);
4809
4810 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4811 if (reg == FAIL)
4812 {
4813 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4814 ignore_rest_of_line ();
4815 return;
4816 }
4817
4818 /* Optional constant. */
4819 if (skip_past_comma (&input_line_pointer) != FAIL)
4820 {
4821 if (immediate_for_directive (&offset) == FAIL)
4822 return;
4823 }
4824 else
4825 offset = 0;
4826
4827 demand_empty_rest_of_line ();
4828
4829 if (reg == REG_SP || reg == REG_PC)
4830 {
4831 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4832 return;
4833 }
4834
4835 if (unwind.fp_reg != REG_SP)
4836 as_bad (_("unexpected .unwind_movsp directive"));
4837
4838 /* Generate opcode to restore the value. */
4839 op = 0x90 | reg;
4840 add_unwind_opcode (op, 1);
4841
4842 /* Record the information for later. */
4843 unwind.fp_reg = reg;
4844 unwind.fp_offset = unwind.frame_size - offset;
4845 unwind.sp_restored = 1;
4846 }
4847
4848 /* Parse an unwind_pad directive. */
4849
4850 static void
4851 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4852 {
4853 int offset;
4854
4855 if (!unwind.proc_start)
4856 as_bad (MISSING_FNSTART);
4857
4858 if (immediate_for_directive (&offset) == FAIL)
4859 return;
4860
4861 if (offset & 3)
4862 {
4863 as_bad (_("stack increment must be multiple of 4"));
4864 ignore_rest_of_line ();
4865 return;
4866 }
4867
4868 /* Don't generate any opcodes, just record the details for later. */
4869 unwind.frame_size += offset;
4870 unwind.pending_offset += offset;
4871
4872 demand_empty_rest_of_line ();
4873 }
4874
4875 /* Parse an unwind_setfp directive. */
4876
4877 static void
4878 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4879 {
4880 int sp_reg;
4881 int fp_reg;
4882 int offset;
4883
4884 if (!unwind.proc_start)
4885 as_bad (MISSING_FNSTART);
4886
4887 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4888 if (skip_past_comma (&input_line_pointer) == FAIL)
4889 sp_reg = FAIL;
4890 else
4891 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4892
4893 if (fp_reg == FAIL || sp_reg == FAIL)
4894 {
4895 as_bad (_("expected <reg>, <reg>"));
4896 ignore_rest_of_line ();
4897 return;
4898 }
4899
4900 /* Optional constant. */
4901 if (skip_past_comma (&input_line_pointer) != FAIL)
4902 {
4903 if (immediate_for_directive (&offset) == FAIL)
4904 return;
4905 }
4906 else
4907 offset = 0;
4908
4909 demand_empty_rest_of_line ();
4910
4911 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4912 {
4913 as_bad (_("register must be either sp or set by a previous"
4914 "unwind_movsp directive"));
4915 return;
4916 }
4917
4918 /* Don't generate any opcodes, just record the information for later. */
4919 unwind.fp_reg = fp_reg;
4920 unwind.fp_used = 1;
4921 if (sp_reg == REG_SP)
4922 unwind.fp_offset = unwind.frame_size - offset;
4923 else
4924 unwind.fp_offset -= offset;
4925 }
4926
4927 /* Parse an unwind_raw directive. */
4928
4929 static void
4930 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4931 {
4932 expressionS exp;
4933 /* This is an arbitrary limit. */
4934 unsigned char op[16];
4935 int count;
4936
4937 if (!unwind.proc_start)
4938 as_bad (MISSING_FNSTART);
4939
4940 expression (&exp);
4941 if (exp.X_op == O_constant
4942 && skip_past_comma (&input_line_pointer) != FAIL)
4943 {
4944 unwind.frame_size += exp.X_add_number;
4945 expression (&exp);
4946 }
4947 else
4948 exp.X_op = O_illegal;
4949
4950 if (exp.X_op != O_constant)
4951 {
4952 as_bad (_("expected <offset>, <opcode>"));
4953 ignore_rest_of_line ();
4954 return;
4955 }
4956
4957 count = 0;
4958
4959 /* Parse the opcode. */
4960 for (;;)
4961 {
4962 if (count >= 16)
4963 {
4964 as_bad (_("unwind opcode too long"));
4965 ignore_rest_of_line ();
4966 }
4967 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4968 {
4969 as_bad (_("invalid unwind opcode"));
4970 ignore_rest_of_line ();
4971 return;
4972 }
4973 op[count++] = exp.X_add_number;
4974
4975 /* Parse the next byte. */
4976 if (skip_past_comma (&input_line_pointer) == FAIL)
4977 break;
4978
4979 expression (&exp);
4980 }
4981
4982 /* Add the opcode bytes in reverse order. */
4983 while (count--)
4984 add_unwind_opcode (op[count], 1);
4985
4986 demand_empty_rest_of_line ();
4987 }
4988
4989
4990 /* Parse a .eabi_attribute directive. */
4991
4992 static void
4993 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4994 {
4995 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4996
4997 if (tag >= 0 && tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4998 attributes_set_explicitly[tag] = 1;
4999 }
5000
/* Emit a tls fix for the symbol: attach a TLS descriptor-sequence
   relocation at the current output position, choosing the Thumb or ARM
   variant according to the current mode.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* The fixup is placed at the current frag position; no bytes are
     emitted by this directive itself.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
5024 #endif /* OBJ_ELF */
5025
/* Forward declarations for the target-selection pseudo-ops defined
   later in this file.  */
static void s_arm_arch (int);
static void s_arm_object_arch (int);
static void s_arm_cpu (int);
static void s_arm_fpu (int);
static void s_arm_arch_extension (int);
5031
5032 #ifdef TE_PE
5033
5034 static void
5035 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
5036 {
5037 expressionS exp;
5038
5039 do
5040 {
5041 expression (&exp);
5042 if (exp.X_op == O_symbol)
5043 exp.X_op = O_secrel;
5044
5045 emit_expr (&exp, 4);
5046 }
5047 while (*input_line_pointer++ == ',');
5048
5049 input_line_pointer--;
5050 demand_empty_rest_of_line ();
5051 }
5052 #endif /* TE_PE */
5053
5054 int
5055 arm_is_largest_exponent_ok (int precision)
5056 {
5057 /* precision == 1 ensures that this will only return
5058 true for 16 bit floats. */
5059 return (precision == 1) && (fp16_format == ARM_FP16_FORMAT_ALTERNATIVE);
5060 }
5061
5062 static void
5063 set_fp16_format (int dummy ATTRIBUTE_UNUSED)
5064 {
5065 char saved_char;
5066 char* name;
5067 enum fp_16bit_format new_format;
5068
5069 new_format = ARM_FP16_FORMAT_DEFAULT;
5070
5071 name = input_line_pointer;
5072 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
5073 input_line_pointer++;
5074
5075 saved_char = *input_line_pointer;
5076 *input_line_pointer = 0;
5077
5078 if (strcasecmp (name, "ieee") == 0)
5079 new_format = ARM_FP16_FORMAT_IEEE;
5080 else if (strcasecmp (name, "alternative") == 0)
5081 new_format = ARM_FP16_FORMAT_ALTERNATIVE;
5082 else
5083 {
5084 as_bad (_("unrecognised float16 format \"%s\""), name);
5085 goto cleanup;
5086 }
5087
5088 /* Only set fp16_format if it is still the default (aka not already
5089 been set yet). */
5090 if (fp16_format == ARM_FP16_FORMAT_DEFAULT)
5091 fp16_format = new_format;
5092 else
5093 {
5094 if (new_format != fp16_format)
5095 as_warn (_("float16 format cannot be set more than once, ignoring."));
5096 }
5097
5098 cleanup:
5099 *input_line_pointer = saved_char;
5100 ignore_rest_of_line ();
5101 }
5102
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req", s_req, 0 },
  /* Following two are likewise never called.  */
  { "dn", s_dn, 0 },
  { "qn", s_qn, 0 },
  { "unreq", s_unreq, 0 },
  { "bss", s_bss, 0 },
  { "align", s_align_ptwo, 2 },
  { "arm", s_arm, 0 },
  { "thumb", s_thumb, 0 },
  { "code", s_code, 0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func, 0 },
  { "thumb_set", s_thumb_set, 0 },
  { "even", s_even, 0 },
  { "ltorg", s_ltorg, 0 },
  { "pool", s_ltorg, 0 },
  { "syntax", s_syntax, 0 },
  /* Architecture / CPU / FPU selection directives.  */
  { "cpu", s_arm_cpu, 0 },
  { "arch", s_arm_arch, 0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu", s_arm_fpu, 0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word", s_arm_elf_cons, 4 },
  { "long", s_arm_elf_cons, 4 },
  { "inst.n", s_arm_elf_inst, 2 },
  { "inst.w", s_arm_elf_inst, 4 },
  { "inst", s_arm_elf_inst, 0 },
  { "rel31", s_arm_rel31, 0 },
  /* ARM EHABI stack-unwinding directives.  */
  { "fnstart", s_arm_unwind_fnstart, 0 },
  { "fnend", s_arm_unwind_fnend, 0 },
  { "cantunwind", s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  { "save", s_arm_unwind_save, 0 },
  { "vsave", s_arm_unwind_save, 1 },
  { "movsp", s_arm_unwind_movsp, 0 },
  { "pad", s_arm_unwind_pad, 0 },
  { "setfp", s_arm_unwind_setfp, 0 },
  { "unwind_raw", s_arm_unwind_raw, 0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },
  { "tlsdescseq", s_arm_tls_descseq, 0 },
#else
  { "word", cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc", dwarf2_directive_loc, 0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend", float_cons, 'x' },
  { "ldouble", float_cons, 'x' },
  { "packed", float_cons, 'p' },
  { "bfloat16", float_cons, 'b' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref", s_ccs_ref, 0},
  {"def", s_ccs_def, 0},
  {"asmfunc", s_ccs_asmfunc, 0},
  {"endasmfunc", s_ccs_endasmfunc, 0},

  {"float16", float_cons, 'h' },
  {"float16_format", set_fp16_format, 0 },

  { 0, 0, 0 }
};
5186
5187 /* Parser functions used exclusively in instruction operands. */
5188
5189 /* Generic immediate-value read function for use in insn parsing.
5190 STR points to the beginning of the immediate (the leading #);
5191 VAL receives the value; if the value is outside [MIN, MAX]
5192 issue an error. PREFIX_OPT is true if the immediate prefix is
5193 optional. */
5194
5195 static int
5196 parse_immediate (char **str, int *val, int min, int max,
5197 bool prefix_opt)
5198 {
5199 expressionS exp;
5200
5201 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
5202 if (exp.X_op != O_constant)
5203 {
5204 inst.error = _("constant expression required");
5205 return FAIL;
5206 }
5207
5208 if (exp.X_add_number < min || exp.X_add_number > max)
5209 {
5210 inst.error = _("immediate value out of range");
5211 return FAIL;
5212 }
5213
5214 *val = exp.X_add_number;
5215 return SUCCESS;
5216 }
5217
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits in .imm and, for 64-bit values, the high 32 bits in .reg with
   .regisimm set.  IN_EXP, if non-NULL, receives the parsed expression;
   ALLOW_SYMBOL_P lets a symbolic operand succeed.  Returns SUCCESS or
   FAIL; on success *STR is advanced past the immediate.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bool allow_symbol_p)
{
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits from littlenums into .imm...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= ((unsigned) generic_bignum[idx]
				 << (LITTLENUM_NUMBER_OF_BITS * j));
      /* ...and the next 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= ((unsigned) generic_bignum[idx]
				 << (LITTLENUM_NUMBER_OF_BITS * j));
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
5290
/* Returns the pseudo-register number of an FPA immediate constant
   (the index into fp_values plus 8), or FAIL if there isn't a valid
   constant here.  On success *STR is advanced past the constant.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Not a whole token after all; restore and keep trying.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare against each of the known FPA constants.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* Restore input_line_pointer, which was borrowed for expression ().  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
5383
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  unsigned expected_top;

  /* The low 19 bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 25-30 must be 0b011111 when b (bit 29) is set, and 0b100000
     otherwise -- i.e. the B bits replicate the complement of bit 30.  */
  expected_top = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected_top;
}
5393
5394
/* Detect the presence of a floating point or integer zero constant,
   i.e. #0.0 or #0.  *IN points at the candidate text; returns true if
   a zero constant was recognised (the helpers advance *IN past any
   text they consume).  */

static bool
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    {
      /* In unified syntax, all prefixes are optional.  */
      if (!unified_syntax)
	return false;
    }
  else
    ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (startswith (*in, "0x"))
    {
      int val;
      /* The [0, 0] range means only the value zero is accepted.  */
      if (parse_immediate (in, &val, 0, 0, true) == FAIL)
	return false;
      return true;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
			     &generic_floating_point_number);

  /* NOTE(review): low > leader appears to indicate an all-zero
     significand (see atof_generic); together with a '+' sign this
     accepts +0.0 only.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
	  > generic_floating_point_number.leader))
    return true;

  return false;
}
5432
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.  On success, stores the 32-bit single
   precision bit pattern in *IMMED, advances *CCP, and returns SUCCESS;
   otherwise returns FAIL with *CCP unchanged.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (startswith (fpnum, "0x"))
    return FAIL;
  else
    {
      /* Scan the token for a character that marks it as floating point.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept quarter-precision encodable values, plus +/-0.0.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5496
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX, SHIFT_UXTW
};

/* Maps a textual shift name onto its kind; entries are looked up via
   the arm_shift_hsh hash table in parse_shift.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
  SHIFT_UXTW_IMMEDIATE		/* Shift must be UXTW immediate.  */
};
5519
/* Parse a <shift> specifier on an ARM data processing instruction.
   This has three forms:

     (LSL|LSR|ASL|ASR|ROR) Rs
     (LSL|LSR|ASL|ASR|ROR) #imm
     RRX

   Note that ASL is assimilated to LSL in the instruction encoding, and
   RRX to ROR #0 (which cannot be written as such).

   STR points at the shift text, I indexes inst.operands, and MODE
   restricts which shift kinds are accepted.  On success, fills in the
   operand's shift fields, advances *STR and returns SUCCESS; on failure
   returns FAIL with inst.error set.  */

static int
parse_shift (char **str, int i, enum parse_shift_mode mode)
{
  const struct asm_shift_name *shift_name;
  enum shift_kind shift;
  char *s = *str;
  char *p = s;
  int reg;

  /* Collect the alphabetic shift-name token.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift_name
    = (const struct asm_shift_name *) str_hash_find_n (arm_shift_hsh, *str,
						       p - *str);

  if (shift_name == NULL)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift = shift_name->kind;

  /* Enforce the restriction requested by MODE.  */
  switch (mode)
    {
    case NO_SHIFT_RESTRICT:
    case SHIFT_IMMEDIATE:
      if (shift == SHIFT_UXTW)
	{
	  inst.error = _("'UXTW' not allowed here");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_OR_ASR_IMMEDIATE:
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
	{
	  inst.error = _("'LSL' or 'ASR' required");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_IMMEDIATE:
      if (shift != SHIFT_LSL)
	{
	  inst.error = _("'LSL' required");
	  return FAIL;
	}
      break;

    case SHIFT_ASR_IMMEDIATE:
      if (shift != SHIFT_ASR)
	{
	  inst.error = _("'ASR' required");
	  return FAIL;
	}
      break;
    case SHIFT_UXTW_IMMEDIATE:
      if (shift != SHIFT_UXTW)
	{
	  inst.error = _("'UXTW' required");
	  return FAIL;
	}
      break;

    default: abort ();
    }

  if (shift != SHIFT_RRX)
    {
      /* Whitespace can appear here if the next thing is a bare digit.  */
      skip_whitespace (p);

      /* The shift amount is either a register (only when unrestricted)
	 or an immediate expression, recorded in inst.relocs[0].exp.  */
      if (mode == NO_SHIFT_RESTRICT
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;
	}
      else if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
	return FAIL;
    }
  inst.operands[i].shift_kind = shift;
  inst.operands[i].shifted = 1;
  *str = p;
  return SUCCESS;
}
5624
/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.  Returns SUCCESS or FAIL (with
   inst.error set).  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      /* Register form: <Rm> with an optional shift.  */
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.relocs[0].exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.relocs[0].exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.relocs[0].exp.X_add_number < 0
	  || inst.relocs[0].exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  */
      inst.operands[i].imm = inst.relocs[0].exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain immediate: leave encoding to md_apply_fix via this reloc.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 0;
  return SUCCESS;
}
5695
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry
{
  const char *name;	/* Relocation name as written in the source,
			   without the trailing colon.  */
  int alu_code;		/* BFD reloc code for ADD/SUB; 0 = not allowed.  */
  int ldr_code;		/* BFD reloc code for LDR; 0 = not allowed.  */
  int ldrs_code;	/* BFD reloc code for LDRS; 0 = not allowed.  */
  int ldc_code;		/* BFD reloc code for LDC; 0 = not allowed.  */
};
5711
typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,	/* Select the ldr_code column of group_reloc_table.  */
  GROUP_LDRS,	/* Select the ldrs_code column.  */
  GROUP_LDC,	/* Select the ldc_code column.  */
  GROUP_MVE	/* MVE addressing syntax; group relocations are not
		   used for this variety (see parse_address_main).  */
} group_reloc_type;
5721
/* Table of the group relocations understood in assembler source.  A
   code of 0 means that relocation class is not permitted for the entry;
   parse_address_main reports "this group relocation is not allowed on
   this instruction" when such a code is selected.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5796
5797 /* Given the address of a pointer pointing to the textual name of a group
5798 relocation as may appear in assembler source, attempt to find its details
5799 in group_reloc_table. The pointer will be updated to the character after
5800 the trailing colon. On failure, FAIL will be returned; SUCCESS
5801 otherwise. On success, *entry will be updated to point at the relevant
5802 group_reloc_table entry. */
5803
5804 static int
5805 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5806 {
5807 unsigned int i;
5808 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5809 {
5810 int length = strlen (group_reloc_table[i].name);
5811
5812 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5813 && (*str)[length] == ':')
5814 {
5815 *out = &group_reloc_table[i];
5816 *str += (length + 1);
5817 return SUCCESS;
5818 }
5819 }
5820
5821 return FAIL;
5822 }
5823
5824 /* Parse a <shifter_operand> for an ARM data processing instruction
5825 (as for parse_shifter_operand) where group relocations are allowed:
5826
5827 #<immediate>
5828 #<immediate>, <rotate>
5829 #:<group_reloc>:<expression>
5830 <Rm>
5831 <Rm>, <shift>
5832
5833 where <group_reloc> is one of the strings defined in group_reloc_table.
5834 The hashes are optional.
5835
5836 Everything else is as for parse_shifter_operand. */
5837
5838 static parse_operand_result
5839 parse_shifter_operand_group_reloc (char **str, int i)
5840 {
5841 /* Determine if we have the sequence of characters #: or just :
5842 coming next. If we do, then we check for a group relocation.
5843 If we don't, punt the whole lot to parse_shifter_operand. */
5844
5845 if (((*str)[0] == '#' && (*str)[1] == ':')
5846 || (*str)[0] == ':')
5847 {
5848 struct group_reloc_table_entry *entry;
5849
5850 if ((*str)[0] == '#')
5851 (*str) += 2;
5852 else
5853 (*str)++;
5854
5855 /* Try to parse a group relocation. Anything else is an error. */
5856 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5857 {
5858 inst.error = _("unknown group relocation");
5859 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5860 }
5861
5862 /* We now have the group relocation table entry corresponding to
5863 the name in the assembler source. Next, we parse the expression. */
5864 if (my_get_expression (&inst.relocs[0].exp, str, GE_NO_PREFIX))
5865 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5866
5867 /* Record the relocation type (always the ALU variant here). */
5868 inst.relocs[0].type = (bfd_reloc_code_real_type) entry->alu_code;
5869 gas_assert (inst.relocs[0].type != 0);
5870
5871 return PARSE_OPERAND_SUCCESS;
5872 }
5873 else
5874 return parse_shifter_operand (str, i) == SUCCESS
5875 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5876
5877 /* Never reached. */
5878 }
5879
5880 /* Parse a Neon alignment expression. Information is written to
5881 inst.operands[i]. We assume the initial ':' has been skipped.
5882
5883 align .imm = align << 8, .immisalign=1, .preind=0 */
5884 static parse_operand_result
5885 parse_neon_alignment (char **str, int i)
5886 {
5887 char *p = *str;
5888 expressionS exp;
5889
5890 my_get_expression (&exp, &p, GE_NO_PREFIX);
5891
5892 if (exp.X_op != O_constant)
5893 {
5894 inst.error = _("alignment must be constant");
5895 return PARSE_OPERAND_FAIL;
5896 }
5897
5898 inst.operands[i].imm = exp.X_add_number << 8;
5899 inst.operands[i].immisalign = 1;
5900 /* Alignments are not pre-indexes. */
5901 inst.operands[i].preind = 0;
5902
5903 *str = p;
5904 return PARSE_OPERAND_SUCCESS;
5905 }
5906
/* Parse all forms of an ARM address expression.  Information is written
   to inst.operands[i] and/or inst.relocs[0].

   Preindexed addressing (.preind=1):

   [Rn, #offset]       .reg=Rn .relocs[0].exp=offset
   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .relocs[0].exp=shift_imm

   These three may have a trailing ! which causes .writeback to be set also.

   Postindexed addressing (.postind=1, .writeback=1):

   [Rn], #offset       .reg=Rn .relocs[0].exp=offset
   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .relocs[0].exp=shift_imm

   Unindexed addressing (.preind=0, .postind=0):

   [Rn], {option}      .reg=Rn .imm=option .immisreg=0

   Other:

   [Rn]{!}	       shorthand for [Rn,#0]{!}
   =immediate	       .isreg=0 .relocs[0].exp=immediate
   label	       .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label

  It is the caller's responsibility to check for addressing modes not
  supported by the instruction, and to set inst.relocs[0].type.  */

static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No opening '[': either "=immediate" or a bare label/address.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (group_type == GROUP_MVE
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* [r0-r15] expected as argument but receiving r0-r15 without
	     [] brackets.  */
	  inst.error = BAD_SYNTAX;
	  return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.relocs[0].pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
				    /*allow_symbol_p=*/true))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* Parse the base register (an MVE Q register or an ARM core
     register, depending on GROUP_TYPE).  */
  if (group_type == GROUP_MVE)
    {
      enum arm_reg_type rtype = REG_TYPE_MQ;
      struct neon_type_el et;
      if ((reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	{
	  inst.operands[i].isquad = 1;
	}
      else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
	{
	  inst.error = BAD_ADDR_MODE;
	  return PARSE_OPERAND_FAIL;
	}
    }
  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      /* NOTE(review): group_type cannot be GROUP_MVE here — that case
	 was fully handled by the branch above — so the BAD_ADDR_MODE
	 arm of this conditional is unreachable.  */
      if (group_type == GROUP_MVE)
	inst.error = BAD_ADDR_MODE;
      else
	inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* Pre-indexed forms: "[Rn, ...".  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      /* Optional sign on the index.  */
      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      enum arm_reg_type rtype = REG_TYPE_MQ;
      struct neon_type_el et;
      if (group_type == GROUP_MVE
	  && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	{
	  /* MVE vector index register, optionally with a UXTW shift;
	     the shift amount is packed into .imm at bit 5.  */
	  inst.operands[i].immisreg = 2;
	  inst.operands[i].imm = reg;

	  if (skip_past_comma (&p) == SUCCESS)
	    {
	      if (parse_shift (&p, i, SHIFT_UXTW_IMMEDIATE) == SUCCESS)
		{
		  inst.operands[i].imm |= inst.relocs[0].exp.X_add_number << 5;
		  inst.relocs[0].exp.X_add_number = 0;
		}
	      else
		return PARSE_OPERAND_FAIL;
	    }
	}
      else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* Core register index, optionally shifted.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here.  This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Immediate offset: undo the '-' consumed above so the
	     expression parser sees the sign itself.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code in group_reloc_table means this class of
		 instruction may not use that relocation.  */
	      if (inst.relocs[0].type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;

	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here.  This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* Trailing '!' selects writeback on the pre-indexed forms.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  /* "[Rn], ..." — post-indexed or unindexed forms.  */
  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, true) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  enum arm_reg_type rtype = REG_TYPE_MQ;
	  struct neon_type_el et;
	  if (group_type == GROUP_MVE
	      && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	    {
	      inst.operands[i].immisreg = 2;
	      inst.operands[i].imm = reg;
	    }
	  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;

	      /* Undo the consumed '-' so the expression parser sees the
		 sign itself.  */
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
6247
6248 static int
6249 parse_address (char **str, int i)
6250 {
6251 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
6252 ? SUCCESS : FAIL;
6253 }
6254
6255 static parse_operand_result
6256 parse_address_group_reloc (char **str, int i, group_reloc_type type)
6257 {
6258 return parse_address_main (str, i, 1, type);
6259 }
6260
6261 /* Parse an operand for a MOVW or MOVT instruction. */
6262 static int
6263 parse_half (char **str)
6264 {
6265 char * p;
6266
6267 p = *str;
6268 skip_past_char (&p, '#');
6269 if (strncasecmp (p, ":lower16:", 9) == 0)
6270 inst.relocs[0].type = BFD_RELOC_ARM_MOVW;
6271 else if (strncasecmp (p, ":upper16:", 9) == 0)
6272 inst.relocs[0].type = BFD_RELOC_ARM_MOVT;
6273
6274 if (inst.relocs[0].type != BFD_RELOC_UNUSED)
6275 {
6276 p += 9;
6277 skip_whitespace (p);
6278 }
6279
6280 if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
6281 return FAIL;
6282
6283 if (inst.relocs[0].type == BFD_RELOC_UNUSED)
6284 {
6285 if (inst.relocs[0].exp.X_op != O_constant)
6286 {
6287 inst.error = _("constant expression expected");
6288 return FAIL;
6289 }
6290 if (inst.relocs[0].exp.X_add_number < 0
6291 || inst.relocs[0].exp.X_add_number > 0xffff)
6292 {
6293 inst.error = _("immediate value out of range");
6294 return FAIL;
6295 }
6296 }
6297 *str = p;
6298 return SUCCESS;
6299 }
6300
6301 /* Miscellaneous. */
6302
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is true when the operand is the destination of an MSR (i.e. being
   written), which affects the default mask chosen.  */
static int
parse_psr (char **str, bool lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bool is_apsr = false;
  bool m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = false;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = true;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile system registers: take the whole identifier and look
	 it up in the v7m PSR hash table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *psr names the register name proper ends at the final
	 'r'/'R'; anything after it (e.g. an _nzcvq suffix) is handled
	 via check_suffix below.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) str_hash_find_n (arm_v7m_psr_hsh, start,
						      p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = true;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  Each
	     letter sets its own bit; if a letter is repeated, the 0x20
	     sentinel is set instead and rejected further down.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v, q must be given together.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated letters (sentinel bits) and partial
	     nzcvq sets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* Ordinary CPSR/SPSR field suffix (e.g. _cxsf).  */
	  psr = (const struct asm_psr *) str_hash_find_n (arm_psr_hsh, start,
							  p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;  /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6499
6500 static int
6501 parse_sys_vldr_vstr (char **str)
6502 {
6503 unsigned i;
6504 int val = FAIL;
6505 struct {
6506 const char *name;
6507 int regl;
6508 int regh;
6509 } sysregs[] = {
6510 {"FPSCR", 0x1, 0x0},
6511 {"FPSCR_nzcvqc", 0x2, 0x0},
6512 {"VPR", 0x4, 0x1},
6513 {"P0", 0x5, 0x1},
6514 {"FPCXTNS", 0x6, 0x1},
6515 {"FPCXTS", 0x7, 0x1}
6516 };
6517 char *op_end = strchr (*str, ',');
6518 size_t op_strlen = op_end - *str;
6519
6520 for (i = 0; i < sizeof (sysregs) / sizeof (sysregs[0]); i++)
6521 {
6522 if (!strncmp (*str, sysregs[i].name, op_strlen))
6523 {
6524 val = sysregs[i].regl | (sysregs[i].regh << 3);
6525 *str = op_end;
6526 break;
6527 }
6528 }
6529
6530 return val;
6531 }
6532
6533 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6534 value suitable for splatting into the AIF field of the instruction. */
6535
6536 static int
6537 parse_cps_flags (char **str)
6538 {
6539 int val = 0;
6540 int saw_a_flag = 0;
6541 char *s = *str;
6542
6543 for (;;)
6544 switch (*s++)
6545 {
6546 case '\0': case ',':
6547 goto done;
6548
6549 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6550 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6551 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6552
6553 default:
6554 inst.error = _("unrecognized CPS flag");
6555 return FAIL;
6556 }
6557
6558 done:
6559 if (saw_a_flag == 0)
6560 {
6561 inst.error = _("missing CPS flags");
6562 return FAIL;
6563 }
6564
6565 *str = s - 1;
6566 return val;
6567 }
6568
6569 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6570 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6571
6572 static int
6573 parse_endian_specifier (char **str)
6574 {
6575 int little_endian;
6576 char *s = *str;
6577
6578 if (strncasecmp (s, "BE", 2))
6579 little_endian = 0;
6580 else if (strncasecmp (s, "LE", 2))
6581 little_endian = 1;
6582 else
6583 {
6584 inst.error = _("valid endian specifiers are be or le");
6585 return FAIL;
6586 }
6587
6588 if (ISALNUM (s[2]) || s[2] == '_')
6589 {
6590 inst.error = _("valid endian specifiers are be or le");
6591 return FAIL;
6592 }
6593
6594 *str = s + 2;
6595 return little_endian;
6596 }
6597
6598 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6599 value suitable for poking into the rotate field of an sxt or sxta
6600 instruction, or FAIL on error. */
6601
6602 static int
6603 parse_ror (char **str)
6604 {
6605 int rot;
6606 char *s = *str;
6607
6608 if (strncasecmp (s, "ROR", 3) == 0)
6609 s += 3;
6610 else
6611 {
6612 inst.error = _("missing rotation field after comma");
6613 return FAIL;
6614 }
6615
6616 if (parse_immediate (&s, &rot, 0, 24, false) == FAIL)
6617 return FAIL;
6618
6619 switch (rot)
6620 {
6621 case 0: *str = s; return 0x0;
6622 case 8: *str = s; return 0x1;
6623 case 16: *str = s; return 0x2;
6624 case 24: *str = s; return 0x3;
6625
6626 default:
6627 inst.error = _("rotation can only be 0, 8, 16, or 24");
6628 return FAIL;
6629 }
6630 }
6631
6632 /* Parse a conditional code (from conds[] below). The value returned is in the
6633 range 0 .. 14, or FAIL. */
6634 static int
6635 parse_cond (char **str)
6636 {
6637 char *q;
6638 const struct asm_cond *c;
6639 int n;
6640 /* Condition codes are always 2 characters, so matching up to
6641 3 characters is sufficient. */
6642 char cond[3];
6643
6644 q = *str;
6645 n = 0;
6646 while (ISALPHA (*q) && n < 3)
6647 {
6648 cond[n] = TOLOWER (*q);
6649 q++;
6650 n++;
6651 }
6652
6653 c = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, cond, n);
6654 if (!c)
6655 {
6656 inst.error = _("condition required");
6657 return FAIL;
6658 }
6659
6660 *str = q;
6661 return c->value;
6662 }
6663
6664 /* Parse an option for a barrier instruction. Returns the encoding for the
6665 option, or FAIL. */
6666 static int
6667 parse_barrier (char **str)
6668 {
6669 char *p, *q;
6670 const struct asm_barrier_opt *o;
6671
6672 p = q = *str;
6673 while (ISALPHA (*q))
6674 q++;
6675
6676 o = (const struct asm_barrier_opt *) str_hash_find_n (arm_barrier_opt_hsh, p,
6677 q - p);
6678 if (!o)
6679 return FAIL;
6680
6681 if (!mark_feature_used (&o->arch))
6682 return FAIL;
6683
6684 *str = q;
6685 return o->value;
6686 }
6687
6688 /* Parse the operands of a table branch instruction. Similar to a memory
6689 operand. */
6690 static int
6691 parse_tb (char **str)
6692 {
6693 char * p = *str;
6694 int reg;
6695
6696 if (skip_past_char (&p, '[') == FAIL)
6697 {
6698 inst.error = _("'[' expected");
6699 return FAIL;
6700 }
6701
6702 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6703 {
6704 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6705 return FAIL;
6706 }
6707 inst.operands[0].reg = reg;
6708
6709 if (skip_past_comma (&p) == FAIL)
6710 {
6711 inst.error = _("',' expected");
6712 return FAIL;
6713 }
6714
6715 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6716 {
6717 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6718 return FAIL;
6719 }
6720 inst.operands[0].imm = reg;
6721
6722 if (skip_past_comma (&p) == SUCCESS)
6723 {
6724 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6725 return FAIL;
6726 if (inst.relocs[0].exp.X_add_number != 1)
6727 {
6728 inst.error = _("invalid shift");
6729 return FAIL;
6730 }
6731 inst.operands[0].shifted = 1;
6732 }
6733
6734 if (skip_past_char (&p, ']') == FAIL)
6735 {
6736 inst.error = _("']' expected");
6737 return FAIL;
6738 }
6739 *str = p;
6740 return SUCCESS;
6741 }
6742
6743 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6744 information on the types the operands can take and how they are encoded.
6745 Up to four operands may be read; this function handles setting the
6746 ".present" field for each read operand itself.
6747 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6748 else returns FAIL. */
6749
6750 static int
6751 parse_neon_mov (char **str, int *which_operand)
6752 {
6753 int i = *which_operand, val;
6754 enum arm_reg_type rtype;
6755 char *ptr = *str;
6756 struct neon_type_el optype;
6757
6758 if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
6759 {
6760 /* Cases 17 or 19. */
6761 inst.operands[i].reg = val;
6762 inst.operands[i].isvec = 1;
6763 inst.operands[i].isscalar = 2;
6764 inst.operands[i].vectype = optype;
6765 inst.operands[i++].present = 1;
6766
6767 if (skip_past_comma (&ptr) == FAIL)
6768 goto wanted_comma;
6769
6770 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6771 {
6772 /* Case 17: VMOV<c>.<dt> <Qd[idx]>, <Rt> */
6773 inst.operands[i].reg = val;
6774 inst.operands[i].isreg = 1;
6775 inst.operands[i].present = 1;
6776 }
6777 else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
6778 {
6779 /* Case 19: VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2> */
6780 inst.operands[i].reg = val;
6781 inst.operands[i].isvec = 1;
6782 inst.operands[i].isscalar = 2;
6783 inst.operands[i].vectype = optype;
6784 inst.operands[i++].present = 1;
6785
6786 if (skip_past_comma (&ptr) == FAIL)
6787 goto wanted_comma;
6788
6789 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6790 goto wanted_arm;
6791
6792 inst.operands[i].reg = val;
6793 inst.operands[i].isreg = 1;
6794 inst.operands[i++].present = 1;
6795
6796 if (skip_past_comma (&ptr) == FAIL)
6797 goto wanted_comma;
6798
6799 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6800 goto wanted_arm;
6801
6802 inst.operands[i].reg = val;
6803 inst.operands[i].isreg = 1;
6804 inst.operands[i].present = 1;
6805 }
6806 else
6807 {
6808 first_error (_("expected ARM or MVE vector register"));
6809 return FAIL;
6810 }
6811 }
6812 else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_VFD)) != FAIL)
6813 {
6814 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6815 inst.operands[i].reg = val;
6816 inst.operands[i].isscalar = 1;
6817 inst.operands[i].vectype = optype;
6818 inst.operands[i++].present = 1;
6819
6820 if (skip_past_comma (&ptr) == FAIL)
6821 goto wanted_comma;
6822
6823 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6824 goto wanted_arm;
6825
6826 inst.operands[i].reg = val;
6827 inst.operands[i].isreg = 1;
6828 inst.operands[i].present = 1;
6829 }
6830 else if (((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
6831 != FAIL)
6832 || ((val = arm_typed_reg_parse (&ptr, REG_TYPE_MQ, &rtype, &optype))
6833 != FAIL))
6834 {
6835 /* Cases 0, 1, 2, 3, 5 (D only). */
6836 if (skip_past_comma (&ptr) == FAIL)
6837 goto wanted_comma;
6838
6839 inst.operands[i].reg = val;
6840 inst.operands[i].isreg = 1;
6841 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6842 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6843 inst.operands[i].isvec = 1;
6844 inst.operands[i].vectype = optype;
6845 inst.operands[i++].present = 1;
6846
6847 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6848 {
6849 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6850 Case 13: VMOV <Sd>, <Rm> */
6851 inst.operands[i].reg = val;
6852 inst.operands[i].isreg = 1;
6853 inst.operands[i].present = 1;
6854
6855 if (rtype == REG_TYPE_NQ)
6856 {
6857 first_error (_("can't use Neon quad register here"));
6858 return FAIL;
6859 }
6860 else if (rtype != REG_TYPE_VFS)
6861 {
6862 i++;
6863 if (skip_past_comma (&ptr) == FAIL)
6864 goto wanted_comma;
6865 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6866 goto wanted_arm;
6867 inst.operands[i].reg = val;
6868 inst.operands[i].isreg = 1;
6869 inst.operands[i].present = 1;
6870 }
6871 }
6872 else if (((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
6873 &optype)) != FAIL)
6874 || ((val = arm_typed_reg_parse (&ptr, REG_TYPE_MQ, &rtype,
6875 &optype)) != FAIL))
6876 {
6877 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6878 Case 1: VMOV<c><q> <Dd>, <Dm>
6879 Case 8: VMOV.F32 <Sd>, <Sm>
6880 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6881
6882 inst.operands[i].reg = val;
6883 inst.operands[i].isreg = 1;
6884 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6885 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6886 inst.operands[i].isvec = 1;
6887 inst.operands[i].vectype = optype;
6888 inst.operands[i].present = 1;
6889
6890 if (skip_past_comma (&ptr) == SUCCESS)
6891 {
6892 /* Case 15. */
6893 i++;
6894
6895 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6896 goto wanted_arm;
6897
6898 inst.operands[i].reg = val;
6899 inst.operands[i].isreg = 1;
6900 inst.operands[i++].present = 1;
6901
6902 if (skip_past_comma (&ptr) == FAIL)
6903 goto wanted_comma;
6904
6905 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6906 goto wanted_arm;
6907
6908 inst.operands[i].reg = val;
6909 inst.operands[i].isreg = 1;
6910 inst.operands[i].present = 1;
6911 }
6912 }
6913 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
6914 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6915 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6916 Case 10: VMOV.F32 <Sd>, #<imm>
6917 Case 11: VMOV.F64 <Dd>, #<imm> */
6918 inst.operands[i].immisfloat = 1;
6919 else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/false)
6920 == SUCCESS)
6921 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6922 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6923 ;
6924 else
6925 {
6926 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6927 return FAIL;
6928 }
6929 }
6930 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6931 {
6932 /* Cases 6, 7, 16, 18. */
6933 inst.operands[i].reg = val;
6934 inst.operands[i].isreg = 1;
6935 inst.operands[i++].present = 1;
6936
6937 if (skip_past_comma (&ptr) == FAIL)
6938 goto wanted_comma;
6939
6940 if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
6941 {
6942 /* Case 18: VMOV<c>.<dt> <Rt>, <Qn[idx]> */
6943 inst.operands[i].reg = val;
6944 inst.operands[i].isscalar = 2;
6945 inst.operands[i].present = 1;
6946 inst.operands[i].vectype = optype;
6947 }
6948 else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_VFD)) != FAIL)
6949 {
6950 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6951 inst.operands[i].reg = val;
6952 inst.operands[i].isscalar = 1;
6953 inst.operands[i].present = 1;
6954 inst.operands[i].vectype = optype;
6955 }
6956 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6957 {
6958 inst.operands[i].reg = val;
6959 inst.operands[i].isreg = 1;
6960 inst.operands[i++].present = 1;
6961
6962 if (skip_past_comma (&ptr) == FAIL)
6963 goto wanted_comma;
6964
6965 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
6966 != FAIL)
6967 {
6968 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6969
6970 inst.operands[i].reg = val;
6971 inst.operands[i].isreg = 1;
6972 inst.operands[i].isvec = 1;
6973 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6974 inst.operands[i].vectype = optype;
6975 inst.operands[i].present = 1;
6976
6977 if (rtype == REG_TYPE_VFS)
6978 {
6979 /* Case 14. */
6980 i++;
6981 if (skip_past_comma (&ptr) == FAIL)
6982 goto wanted_comma;
6983 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
6984 &optype)) == FAIL)
6985 {
6986 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
6987 return FAIL;
6988 }
6989 inst.operands[i].reg = val;
6990 inst.operands[i].isreg = 1;
6991 inst.operands[i].isvec = 1;
6992 inst.operands[i].issingle = 1;
6993 inst.operands[i].vectype = optype;
6994 inst.operands[i].present = 1;
6995 }
6996 }
6997 else
6998 {
6999 if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ))
7000 != FAIL)
7001 {
7002 /* Case 16: VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]> */
7003 inst.operands[i].reg = val;
7004 inst.operands[i].isvec = 1;
7005 inst.operands[i].isscalar = 2;
7006 inst.operands[i].vectype = optype;
7007 inst.operands[i++].present = 1;
7008
7009 if (skip_past_comma (&ptr) == FAIL)
7010 goto wanted_comma;
7011
7012 if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ))
7013 == FAIL)
7014 {
7015 first_error (_(reg_expected_msgs[REG_TYPE_MQ]));
7016 return FAIL;
7017 }
7018 inst.operands[i].reg = val;
7019 inst.operands[i].isvec = 1;
7020 inst.operands[i].isscalar = 2;
7021 inst.operands[i].vectype = optype;
7022 inst.operands[i].present = 1;
7023 }
7024 else
7025 {
7026 first_error (_("VFP single, double or MVE vector register"
7027 " expected"));
7028 return FAIL;
7029 }
7030 }
7031 }
7032 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
7033 != FAIL)
7034 {
7035 /* Case 13. */
7036 inst.operands[i].reg = val;
7037 inst.operands[i].isreg = 1;
7038 inst.operands[i].isvec = 1;
7039 inst.operands[i].issingle = 1;
7040 inst.operands[i].vectype = optype;
7041 inst.operands[i].present = 1;
7042 }
7043 }
7044 else
7045 {
7046 first_error (_("parse error"));
7047 return FAIL;
7048 }
7049
7050 /* Successfully parsed the operands. Update args. */
7051 *which_operand = i;
7052 *str = ptr;
7053 return SUCCESS;
7054
7055 wanted_comma:
7056 first_error (_("expected comma"));
7057 return FAIL;
7058
7059 wanted_arm:
7060 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
7061 return FAIL;
7062 }
7063
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).

   It packs two operand_parse_code values into one unsigned int:
   the ARM code in the low 16 bits and the Thumb code in the high
   16 bits.  parse_operands detects a packed value by checking for
   op_parse_code >= 1<<16 and then selects the half that matches
   the current instruction set.  This only works because every
   operand_parse_code fits in 16 bits — see the comment on
   enum operand_parse_code.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
7068
/* Matcher codes for parse_operands.

   Ordering invariants (do not reorder casually):

   1. Every code must fit in 16 bits, because MIX_ARM_THUMB_OPERANDS
      packs an ARM code and a Thumb code into a single unsigned int.

   2. All optional operand codes (the OP_o* family) must be declared
      after every mandatory code: parse_operands decides whether an
      operand may be backtracked over by testing
      op_parse_code >= OP_FIRST_OPTIONAL, and OP_FIRST_OPTIONAL is
      aliased to OP_oI7b, the first optional code below.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNDMQ,     /* Neon double precision (0..31) or MVE vector register.  */
  OP_RNDMQR,    /* Neon double precision (0..31), MVE vector or ARM register.
		 */
  OP_RNSDMQR,   /* Neon single or double precision, MVE vector or ARM register.
		 */
  OP_RNQ,	/* Neon quad precision register */
  OP_RNQMQ,	/* Neon quad or MVE vector register.  */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RVSD_COND,	/* VFP single, double precision register or condition code.  */
  OP_RVSDMQ,	/* VFP single, double precision or MVE vector register.  */
  OP_RNSD,      /* Neon single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNDQMQ,     /* Neon double, quad or MVE vector register.  */
  OP_RNDQMQR,   /* Neon double, quad, MVE vector or ARM register.  */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_RNSDMQ,	/* Neon single, double or MVE vector register */
  OP_RNSDQMQ,	/* Neon single, double or quad register or MVE vector register
		 */
  OP_RNSDQMQR,	/* Neon single, double or quad register, MVE vector register or
		   GPR (no SP/SP) */
  OP_RMQ,	/* MVE vector register.  */
  OP_RMQRZ,	/* MVE vector or ARM register including ZR.  */
  OP_RMQRR,     /* MVE vector or ARM register.  */

  /* New operands for Armv8.1-M Mainline.  */
  OP_LR,	/* ARM LR register */
  OP_SP,	/* ARM SP register */
  OP_R12,
  OP_RRe,	/* ARM register, only even numbered.  */
  OP_RRo,	/* ARM register, only odd numbered, not r13 or r15.  */
  OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */
  OP_RR_ZR,	/* ARM register or ZR but no PC */

  OP_REGLST,	/* ARM register list */
  OP_CLRMLST,	/* CLRM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */
  OP_VRSDVLST,  /* VFP single or double-precision register list and VPR */
  OP_MSTRLST2,  /* MVE vector list with two elements.  */
  OP_MSTRLST4,  /* MVE vector list with four elements.  */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RSVDMQ_FI0, /* VFP S, D, MVE vector register or floating point immediate
		    zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNSDQ_RNSC_MQ, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
		     */
  OP_RNSDQ_RNSC_MQ_RR, /* Vector S, D or Q reg, or MVE vector reg , or Neon
			  scalar, or ARM register.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC_RR, /* Neon D or Q reg, Neon scalar, or ARM register.  */
  OP_RNDQMQ_RNSC_RR, /* Neon D or Q reg, Neon scalar, MVE vector or ARM
			register.  */
  OP_RNDQMQ_RNSC, /* Neon D, Q or MVE vector reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  /* Neon D, Q or MVE vector register, or big immediate for logic and VMVN.  */
  OP_RNDQMQ_Ibig,
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RNDQMQ_I63b_RR, /* Neon D or Q reg, immediate for shift, MVE vector or
			ARM register.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
  OP_VLDR,	/* VLDR operand.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I48_I64,	/*		   48 or 64 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I127,	/*		   0 .. 127 */
  OP_I255,	/*		   0 .. 255 */
  OP_I511,	/*		   0 .. 511 */
  OP_I4095,	/*		   0 .. 4095 */
  OP_I8191,	/*		   0 .. 8191 */
  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRMVE,	/* Memory address expression for MVE's VSTR/VLDR.  */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_EXPs,	/* same, with optional non-first operand relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff.  */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  Everything from here to the end of the enum
     (except the MIX_ARM_THUMB_OPERANDS aliases) must stay after
     OP_FIRST_OPTIONAL — parse_operands records a backtrack point for
     any code >= OP_FIRST_OPTIONAL.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oI32z,      /*                             0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oLR,	 /* ARM LR register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQMQ,     /* Optional Neon double, quad or MVE vector register.  */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oRNSDQMQ,	 /* Optional single, double or quad register or MVE vector
		    register.  */
  OP_oRNSDMQ,	 /* Optional single, double register or MVE vector
		    register.  */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  OP_oRMQRZ,	/* optional MVE vector or ARM register including ZR.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  These are packed
     pairs, not plain codes; parse_operands unpacks them per
     instruction set (see MIX_ARM_THUMB_OPERANDS).  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* Alias for the first optional code above; parse_operands compares
     op_parse_code >= OP_FIRST_OPTIONAL to decide backtracking.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
7264
7265 /* Generic instruction operand parser. This does no encoding and no
7266 semantic validation; it merely squirrels values away in the inst
7267 structure. Returns SUCCESS or FAIL depending on whether the
7268 specified grammar matched. */
7269 static int
7270 parse_operands (char *str, const unsigned int *pattern, bool thumb)
7271 {
7272 unsigned const int *upat = pattern;
7273 char *backtrack_pos = 0;
7274 const char *backtrack_error = 0;
7275 int i, val = 0, backtrack_index = 0;
7276 enum arm_reg_type rtype;
7277 parse_operand_result result;
7278 unsigned int op_parse_code;
7279 bool partial_match;
7280
7281 #define po_char_or_fail(chr) \
7282 do \
7283 { \
7284 if (skip_past_char (&str, chr) == FAIL) \
7285 goto bad_args; \
7286 } \
7287 while (0)
7288
7289 #define po_reg_or_fail(regtype) \
7290 do \
7291 { \
7292 val = arm_typed_reg_parse (& str, regtype, & rtype, \
7293 & inst.operands[i].vectype); \
7294 if (val == FAIL) \
7295 { \
7296 first_error (_(reg_expected_msgs[regtype])); \
7297 goto failure; \
7298 } \
7299 inst.operands[i].reg = val; \
7300 inst.operands[i].isreg = 1; \
7301 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
7302 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
7303 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
7304 || rtype == REG_TYPE_VFD \
7305 || rtype == REG_TYPE_NQ); \
7306 inst.operands[i].iszr = (rtype == REG_TYPE_ZR); \
7307 } \
7308 while (0)
7309
7310 #define po_reg_or_goto(regtype, label) \
7311 do \
7312 { \
7313 val = arm_typed_reg_parse (& str, regtype, & rtype, \
7314 & inst.operands[i].vectype); \
7315 if (val == FAIL) \
7316 goto label; \
7317 \
7318 inst.operands[i].reg = val; \
7319 inst.operands[i].isreg = 1; \
7320 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
7321 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
7322 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
7323 || rtype == REG_TYPE_VFD \
7324 || rtype == REG_TYPE_NQ); \
7325 inst.operands[i].iszr = (rtype == REG_TYPE_ZR); \
7326 } \
7327 while (0)
7328
7329 #define po_imm_or_fail(min, max, popt) \
7330 do \
7331 { \
7332 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
7333 goto failure; \
7334 inst.operands[i].imm = val; \
7335 } \
7336 while (0)
7337
7338 #define po_imm1_or_imm2_or_fail(imm1, imm2, popt) \
7339 do \
7340 { \
7341 expressionS exp; \
7342 my_get_expression (&exp, &str, popt); \
7343 if (exp.X_op != O_constant) \
7344 { \
7345 inst.error = _("constant expression required"); \
7346 goto failure; \
7347 } \
7348 if (exp.X_add_number != imm1 && exp.X_add_number != imm2) \
7349 { \
7350 inst.error = _("immediate value 48 or 64 expected"); \
7351 goto failure; \
7352 } \
7353 inst.operands[i].imm = exp.X_add_number; \
7354 } \
7355 while (0)
7356
7357 #define po_scalar_or_goto(elsz, label, reg_type) \
7358 do \
7359 { \
7360 val = parse_scalar (& str, elsz, & inst.operands[i].vectype, \
7361 reg_type); \
7362 if (val == FAIL) \
7363 goto label; \
7364 inst.operands[i].reg = val; \
7365 inst.operands[i].isscalar = 1; \
7366 } \
7367 while (0)
7368
7369 #define po_misc_or_fail(expr) \
7370 do \
7371 { \
7372 if (expr) \
7373 goto failure; \
7374 } \
7375 while (0)
7376
7377 #define po_misc_or_fail_no_backtrack(expr) \
7378 do \
7379 { \
7380 result = expr; \
7381 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
7382 backtrack_pos = 0; \
7383 if (result != PARSE_OPERAND_SUCCESS) \
7384 goto failure; \
7385 } \
7386 while (0)
7387
7388 #define po_barrier_or_imm(str) \
7389 do \
7390 { \
7391 val = parse_barrier (&str); \
7392 if (val == FAIL && ! ISALPHA (*str)) \
7393 goto immediate; \
7394 if (val == FAIL \
7395 /* ISB can only take SY as an option. */ \
7396 || ((inst.instruction & 0xf0) == 0x60 \
7397 && val != 0xf)) \
7398 { \
7399 inst.error = _("invalid barrier type"); \
7400 backtrack_pos = 0; \
7401 goto failure; \
7402 } \
7403 } \
7404 while (0)
7405
7406 skip_whitespace (str);
7407
7408 for (i = 0; upat[i] != OP_stop; i++)
7409 {
7410 op_parse_code = upat[i];
7411 if (op_parse_code >= 1<<16)
7412 op_parse_code = thumb ? (op_parse_code >> 16)
7413 : (op_parse_code & ((1<<16)-1));
7414
7415 if (op_parse_code >= OP_FIRST_OPTIONAL)
7416 {
7417 /* Remember where we are in case we need to backtrack. */
7418 backtrack_pos = str;
7419 backtrack_error = inst.error;
7420 backtrack_index = i;
7421 }
7422
7423 if (i > 0 && (i > 1 || inst.operands[0].present))
7424 po_char_or_fail (',');
7425
7426 switch (op_parse_code)
7427 {
7428 /* Registers */
7429 case OP_oRRnpc:
7430 case OP_oRRnpcsp:
7431 case OP_RRnpc:
7432 case OP_RRnpcsp:
7433 case OP_oRR:
7434 case OP_RRe:
7435 case OP_RRo:
7436 case OP_LR:
7437 case OP_oLR:
7438 case OP_SP:
7439 case OP_R12:
7440 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
7441 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
7442 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
7443 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
7444 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
7445 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
7446 case OP_oRND:
7447 case OP_RNSDMQR:
7448 po_reg_or_goto (REG_TYPE_VFS, try_rndmqr);
7449 break;
7450 try_rndmqr:
7451 case OP_RNDMQR:
7452 po_reg_or_goto (REG_TYPE_RN, try_rndmq);
7453 break;
7454 try_rndmq:
7455 case OP_RNDMQ:
7456 po_reg_or_goto (REG_TYPE_MQ, try_rnd);
7457 break;
7458 try_rnd:
7459 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
7460 case OP_RVC:
7461 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
7462 break;
7463 /* Also accept generic coprocessor regs for unknown registers. */
7464 coproc_reg:
7465 po_reg_or_goto (REG_TYPE_CN, vpr_po);
7466 break;
7467 /* Also accept P0 or p0 for VPR.P0. Since P0 is already an
7468 existing register with a value of 0, this seems like the
7469 best way to parse P0. */
7470 vpr_po:
7471 if (strncasecmp (str, "P0", 2) == 0)
7472 {
7473 str += 2;
7474 inst.operands[i].isreg = 1;
7475 inst.operands[i].reg = 13;
7476 }
7477 else
7478 goto failure;
7479 break;
7480 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
7481 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
7482 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
7483 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
7484 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
7485 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
7486 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
7487 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
7488 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
7489 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
7490 case OP_oRNQ:
7491 case OP_RNQMQ:
7492 po_reg_or_goto (REG_TYPE_MQ, try_nq);
7493 break;
7494 try_nq:
7495 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
7496 case OP_RNSD: po_reg_or_fail (REG_TYPE_NSD); break;
7497 case OP_RNDQMQR:
7498 po_reg_or_goto (REG_TYPE_RN, try_rndqmq);
7499 break;
7500 try_rndqmq:
7501 case OP_oRNDQMQ:
7502 case OP_RNDQMQ:
7503 po_reg_or_goto (REG_TYPE_MQ, try_rndq);
7504 break;
7505 try_rndq:
7506 case OP_oRNDQ:
7507 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
7508 case OP_RVSDMQ:
7509 po_reg_or_goto (REG_TYPE_MQ, try_rvsd);
7510 break;
7511 try_rvsd:
7512 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
7513 case OP_RVSD_COND:
7514 po_reg_or_goto (REG_TYPE_VFSD, try_cond);
7515 break;
7516 case OP_oRNSDMQ:
7517 case OP_RNSDMQ:
7518 po_reg_or_goto (REG_TYPE_NSD, try_mq2);
7519 break;
7520 try_mq2:
7521 po_reg_or_fail (REG_TYPE_MQ);
7522 break;
7523 case OP_oRNSDQ:
7524 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
7525 case OP_RNSDQMQR:
7526 po_reg_or_goto (REG_TYPE_RN, try_mq);
7527 break;
7528 try_mq:
7529 case OP_oRNSDQMQ:
7530 case OP_RNSDQMQ:
7531 po_reg_or_goto (REG_TYPE_MQ, try_nsdq2);
7532 break;
7533 try_nsdq2:
7534 po_reg_or_fail (REG_TYPE_NSDQ);
7535 inst.error = 0;
7536 break;
7537 case OP_RMQRR:
7538 po_reg_or_goto (REG_TYPE_RN, try_rmq);
7539 break;
7540 try_rmq:
7541 case OP_RMQ:
7542 po_reg_or_fail (REG_TYPE_MQ);
7543 break;
7544 /* Neon scalar. Using an element size of 8 means that some invalid
7545 scalars are accepted here, so deal with those in later code. */
7546 case OP_RNSC: po_scalar_or_goto (8, failure, REG_TYPE_VFD); break;
7547
7548 case OP_RNDQ_I0:
7549 {
7550 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
7551 break;
7552 try_imm0:
7553 po_imm_or_fail (0, 0, true);
7554 }
7555 break;
7556
7557 case OP_RVSD_I0:
7558 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
7559 break;
7560
7561 case OP_RSVDMQ_FI0:
7562 po_reg_or_goto (REG_TYPE_MQ, try_rsvd_fi0);
7563 break;
7564 try_rsvd_fi0:
7565 case OP_RSVD_FI0:
7566 {
7567 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
7568 break;
7569 try_ifimm0:
7570 if (parse_ifimm_zero (&str))
7571 inst.operands[i].imm = 0;
7572 else
7573 {
7574 inst.error
7575 = _("only floating point zero is allowed as immediate value");
7576 goto failure;
7577 }
7578 }
7579 break;
7580
7581 case OP_RR_RNSC:
7582 {
7583 po_scalar_or_goto (8, try_rr, REG_TYPE_VFD);
7584 break;
7585 try_rr:
7586 po_reg_or_fail (REG_TYPE_RN);
7587 }
7588 break;
7589
7590 case OP_RNSDQ_RNSC_MQ_RR:
7591 po_reg_or_goto (REG_TYPE_RN, try_rnsdq_rnsc_mq);
7592 break;
7593 try_rnsdq_rnsc_mq:
7594 case OP_RNSDQ_RNSC_MQ:
7595 po_reg_or_goto (REG_TYPE_MQ, try_rnsdq_rnsc);
7596 break;
7597 try_rnsdq_rnsc:
7598 case OP_RNSDQ_RNSC:
7599 {
7600 po_scalar_or_goto (8, try_nsdq, REG_TYPE_VFD);
7601 inst.error = 0;
7602 break;
7603 try_nsdq:
7604 po_reg_or_fail (REG_TYPE_NSDQ);
7605 inst.error = 0;
7606 }
7607 break;
7608
7609 case OP_RNSD_RNSC:
7610 {
7611 po_scalar_or_goto (8, try_s_scalar, REG_TYPE_VFD);
7612 break;
7613 try_s_scalar:
7614 po_scalar_or_goto (4, try_nsd, REG_TYPE_VFS);
7615 break;
7616 try_nsd:
7617 po_reg_or_fail (REG_TYPE_NSD);
7618 }
7619 break;
7620
7621 case OP_RNDQMQ_RNSC_RR:
7622 po_reg_or_goto (REG_TYPE_MQ, try_rndq_rnsc_rr);
7623 break;
7624 try_rndq_rnsc_rr:
7625 case OP_RNDQ_RNSC_RR:
7626 po_reg_or_goto (REG_TYPE_RN, try_rndq_rnsc);
7627 break;
7628 case OP_RNDQMQ_RNSC:
7629 po_reg_or_goto (REG_TYPE_MQ, try_rndq_rnsc);
7630 break;
7631 try_rndq_rnsc:
7632 case OP_RNDQ_RNSC:
7633 {
7634 po_scalar_or_goto (8, try_ndq, REG_TYPE_VFD);
7635 break;
7636 try_ndq:
7637 po_reg_or_fail (REG_TYPE_NDQ);
7638 }
7639 break;
7640
7641 case OP_RND_RNSC:
7642 {
7643 po_scalar_or_goto (8, try_vfd, REG_TYPE_VFD);
7644 break;
7645 try_vfd:
7646 po_reg_or_fail (REG_TYPE_VFD);
7647 }
7648 break;
7649
7650 case OP_VMOV:
7651 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
7652 not careful then bad things might happen. */
7653 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
7654 break;
7655
7656 case OP_RNDQMQ_Ibig:
7657 po_reg_or_goto (REG_TYPE_MQ, try_rndq_ibig);
7658 break;
7659 try_rndq_ibig:
7660 case OP_RNDQ_Ibig:
7661 {
7662 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
7663 break;
7664 try_immbig:
7665 /* There's a possibility of getting a 64-bit immediate here, so
7666 we need special handling. */
7667 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/false)
7668 == FAIL)
7669 {
7670 inst.error = _("immediate value is out of range");
7671 goto failure;
7672 }
7673 }
7674 break;
7675
7676 case OP_RNDQMQ_I63b_RR:
7677 po_reg_or_goto (REG_TYPE_MQ, try_rndq_i63b_rr);
7678 break;
7679 try_rndq_i63b_rr:
7680 po_reg_or_goto (REG_TYPE_RN, try_rndq_i63b);
7681 break;
7682 try_rndq_i63b:
7683 case OP_RNDQ_I63b:
7684 {
7685 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
7686 break;
7687 try_shimm:
7688 po_imm_or_fail (0, 63, true);
7689 }
7690 break;
7691
7692 case OP_RRnpcb:
7693 po_char_or_fail ('[');
7694 po_reg_or_fail (REG_TYPE_RN);
7695 po_char_or_fail (']');
7696 break;
7697
7698 case OP_RRnpctw:
7699 case OP_RRw:
7700 case OP_oRRw:
7701 po_reg_or_fail (REG_TYPE_RN);
7702 if (skip_past_char (&str, '!') == SUCCESS)
7703 inst.operands[i].writeback = 1;
7704 break;
7705
7706 /* Immediates */
7707 case OP_I7: po_imm_or_fail ( 0, 7, false); break;
7708 case OP_I15: po_imm_or_fail ( 0, 15, false); break;
7709 case OP_I16: po_imm_or_fail ( 1, 16, false); break;
7710 case OP_I16z: po_imm_or_fail ( 0, 16, false); break;
7711 case OP_I31: po_imm_or_fail ( 0, 31, false); break;
7712 case OP_I32: po_imm_or_fail ( 1, 32, false); break;
7713 case OP_I32z: po_imm_or_fail ( 0, 32, false); break;
7714 case OP_I48_I64: po_imm1_or_imm2_or_fail (48, 64, false); break;
7715 case OP_I63s: po_imm_or_fail (-64, 63, false); break;
7716 case OP_I63: po_imm_or_fail ( 0, 63, false); break;
7717 case OP_I64: po_imm_or_fail ( 1, 64, false); break;
7718 case OP_I64z: po_imm_or_fail ( 0, 64, false); break;
7719 case OP_I127: po_imm_or_fail ( 0, 127, false); break;
7720 case OP_I255: po_imm_or_fail ( 0, 255, false); break;
7721 case OP_I511: po_imm_or_fail ( 0, 511, false); break;
7722 case OP_I4095: po_imm_or_fail ( 0, 4095, false); break;
7723 case OP_I8191: po_imm_or_fail ( 0, 8191, false); break;
7724 case OP_I4b: po_imm_or_fail ( 1, 4, true); break;
7725 case OP_oI7b:
7726 case OP_I7b: po_imm_or_fail ( 0, 7, true); break;
7727 case OP_I15b: po_imm_or_fail ( 0, 15, true); break;
7728 case OP_oI31b:
7729 case OP_I31b: po_imm_or_fail ( 0, 31, true); break;
7730 case OP_oI32b: po_imm_or_fail ( 1, 32, true); break;
7731 case OP_oI32z: po_imm_or_fail ( 0, 32, true); break;
7732 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, true); break;
7733
7734 /* Immediate variants */
7735 case OP_oI255c:
7736 po_char_or_fail ('{');
7737 po_imm_or_fail (0, 255, true);
7738 po_char_or_fail ('}');
7739 break;
7740
7741 case OP_I31w:
7742 /* The expression parser chokes on a trailing !, so we have
7743 to find it first and zap it. */
7744 {
7745 char *s = str;
7746 while (*s && *s != ',')
7747 s++;
7748 if (s[-1] == '!')
7749 {
7750 s[-1] = '\0';
7751 inst.operands[i].writeback = 1;
7752 }
7753 po_imm_or_fail (0, 31, true);
7754 if (str == s - 1)
7755 str = s;
7756 }
7757 break;
7758
7759 /* Expressions */
7760 case OP_EXPi: EXPi:
7761 po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7762 GE_OPT_PREFIX));
7763 break;
7764
7765 case OP_EXP:
7766 po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7767 GE_NO_PREFIX));
7768 break;
7769
7770 case OP_EXPr: EXPr:
7771 po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7772 GE_NO_PREFIX));
7773 if (inst.relocs[0].exp.X_op == O_symbol)
7774 {
7775 val = parse_reloc (&str);
7776 if (val == -1)
7777 {
7778 inst.error = _("unrecognized relocation suffix");
7779 goto failure;
7780 }
7781 else if (val != BFD_RELOC_UNUSED)
7782 {
7783 inst.operands[i].imm = val;
7784 inst.operands[i].hasreloc = 1;
7785 }
7786 }
7787 break;
7788
7789 case OP_EXPs:
7790 po_misc_or_fail (my_get_expression (&inst.relocs[i].exp, &str,
7791 GE_NO_PREFIX));
7792 if (inst.relocs[i].exp.X_op == O_symbol)
7793 {
7794 inst.operands[i].hasreloc = 1;
7795 }
7796 else if (inst.relocs[i].exp.X_op == O_constant)
7797 {
7798 inst.operands[i].imm = inst.relocs[i].exp.X_add_number;
7799 inst.operands[i].hasreloc = 0;
7800 }
7801 break;
7802
7803 /* Operand for MOVW or MOVT. */
7804 case OP_HALF:
7805 po_misc_or_fail (parse_half (&str));
7806 break;
7807
7808 /* Register or expression. */
7809 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
7810 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
7811
7812 /* Register or immediate. */
7813 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
7814 I0: po_imm_or_fail (0, 0, false); break;
7815
7816 case OP_RRnpcsp_I32: po_reg_or_goto (REG_TYPE_RN, I32); break;
7817 I32: po_imm_or_fail (1, 32, false); break;
7818
7819 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
7820 IF:
7821 if (!is_immediate_prefix (*str))
7822 goto bad_args;
7823 str++;
7824 val = parse_fpa_immediate (&str);
7825 if (val == FAIL)
7826 goto failure;
7827 /* FPA immediates are encoded as registers 8-15.
7828 parse_fpa_immediate has already applied the offset. */
7829 inst.operands[i].reg = val;
7830 inst.operands[i].isreg = 1;
7831 break;
7832
7833 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
7834 I32z: po_imm_or_fail (0, 32, false); break;
7835
7836 /* Two kinds of register. */
7837 case OP_RIWR_RIWC:
7838 {
7839 struct reg_entry *rege = arm_reg_parse_multi (&str);
7840 if (!rege
7841 || (rege->type != REG_TYPE_MMXWR
7842 && rege->type != REG_TYPE_MMXWC
7843 && rege->type != REG_TYPE_MMXWCG))
7844 {
7845 inst.error = _("iWMMXt data or control register expected");
7846 goto failure;
7847 }
7848 inst.operands[i].reg = rege->number;
7849 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7850 }
7851 break;
7852
7853 case OP_RIWC_RIWG:
7854 {
7855 struct reg_entry *rege = arm_reg_parse_multi (&str);
7856 if (!rege
7857 || (rege->type != REG_TYPE_MMXWC
7858 && rege->type != REG_TYPE_MMXWCG))
7859 {
7860 inst.error = _("iWMMXt control register expected");
7861 goto failure;
7862 }
7863 inst.operands[i].reg = rege->number;
7864 inst.operands[i].isreg = 1;
7865 }
7866 break;
7867
7868 /* Misc */
7869 case OP_CPSF: val = parse_cps_flags (&str); break;
7870 case OP_ENDI: val = parse_endian_specifier (&str); break;
7871 case OP_oROR: val = parse_ror (&str); break;
7872 try_cond:
7873 case OP_COND: val = parse_cond (&str); break;
7874 case OP_oBARRIER_I15:
7875 po_barrier_or_imm (str); break;
7876 immediate:
7877 if (parse_immediate (&str, &val, 0, 15, true) == FAIL)
7878 goto failure;
7879 break;
7880
7881 case OP_wPSR:
7882 case OP_rPSR:
7883 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7884 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7885 {
7886 inst.error = _("Banked registers are not available with this "
7887 "architecture.");
7888 goto failure;
7889 }
7890 break;
7891 try_psr:
7892 val = parse_psr (&str, op_parse_code == OP_wPSR);
7893 break;
7894
7895 case OP_VLDR:
7896 po_reg_or_goto (REG_TYPE_VFSD, try_sysreg);
7897 break;
7898 try_sysreg:
7899 val = parse_sys_vldr_vstr (&str);
7900 break;
7901
7902 case OP_APSR_RR:
7903 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7904 break;
7905 try_apsr:
7906 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7907 instruction). */
7908 if (strncasecmp (str, "APSR_", 5) == 0)
7909 {
7910 unsigned found = 0;
7911 str += 5;
7912 while (found < 15)
7913 switch (*str++)
7914 {
7915 case 'c': found = (found & 1) ? 16 : found | 1; break;
7916 case 'n': found = (found & 2) ? 16 : found | 2; break;
7917 case 'z': found = (found & 4) ? 16 : found | 4; break;
7918 case 'v': found = (found & 8) ? 16 : found | 8; break;
7919 default: found = 16;
7920 }
7921 if (found != 15)
7922 goto failure;
7923 inst.operands[i].isvec = 1;
7924 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7925 inst.operands[i].reg = REG_PC;
7926 }
7927 else
7928 goto failure;
7929 break;
7930
7931 case OP_TB:
7932 po_misc_or_fail (parse_tb (&str));
7933 break;
7934
7935 /* Register lists. */
7936 case OP_REGLST:
7937 val = parse_reg_list (&str, REGLIST_RN);
7938 if (*str == '^')
7939 {
7940 inst.operands[i].writeback = 1;
7941 str++;
7942 }
7943 break;
7944
7945 case OP_CLRMLST:
7946 val = parse_reg_list (&str, REGLIST_CLRM);
7947 break;
7948
7949 case OP_VRSLST:
7950 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S,
7951 &partial_match);
7952 break;
7953
7954 case OP_VRDLST:
7955 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D,
7956 &partial_match);
7957 break;
7958
7959 case OP_VRSDLST:
7960 /* Allow Q registers too. */
7961 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7962 REGLIST_NEON_D, &partial_match);
7963 if (val == FAIL)
7964 {
7965 inst.error = NULL;
7966 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7967 REGLIST_VFP_S, &partial_match);
7968 inst.operands[i].issingle = 1;
7969 }
7970 break;
7971
7972 case OP_VRSDVLST:
7973 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7974 REGLIST_VFP_D_VPR, &partial_match);
7975 if (val == FAIL && !partial_match)
7976 {
7977 inst.error = NULL;
7978 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7979 REGLIST_VFP_S_VPR, &partial_match);
7980 inst.operands[i].issingle = 1;
7981 }
7982 break;
7983
7984 case OP_NRDLST:
7985 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7986 REGLIST_NEON_D, &partial_match);
7987 break;
7988
7989 case OP_MSTRLST4:
7990 case OP_MSTRLST2:
7991 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7992 1, &inst.operands[i].vectype);
7993 if (val != (((op_parse_code == OP_MSTRLST2) ? 3 : 7) << 5 | 0xe))
7994 goto failure;
7995 break;
7996 case OP_NSTRLST:
7997 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7998 0, &inst.operands[i].vectype);
7999 break;
8000
8001 /* Addressing modes */
8002 case OP_ADDRMVE:
8003 po_misc_or_fail (parse_address_group_reloc (&str, i, GROUP_MVE));
8004 break;
8005
8006 case OP_ADDR:
8007 po_misc_or_fail (parse_address (&str, i));
8008 break;
8009
8010 case OP_ADDRGLDR:
8011 po_misc_or_fail_no_backtrack (
8012 parse_address_group_reloc (&str, i, GROUP_LDR));
8013 break;
8014
8015 case OP_ADDRGLDRS:
8016 po_misc_or_fail_no_backtrack (
8017 parse_address_group_reloc (&str, i, GROUP_LDRS));
8018 break;
8019
8020 case OP_ADDRGLDC:
8021 po_misc_or_fail_no_backtrack (
8022 parse_address_group_reloc (&str, i, GROUP_LDC));
8023 break;
8024
8025 case OP_SH:
8026 po_misc_or_fail (parse_shifter_operand (&str, i));
8027 break;
8028
8029 case OP_SHG:
8030 po_misc_or_fail_no_backtrack (
8031 parse_shifter_operand_group_reloc (&str, i));
8032 break;
8033
8034 case OP_oSHll:
8035 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
8036 break;
8037
8038 case OP_oSHar:
8039 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
8040 break;
8041
8042 case OP_oSHllar:
8043 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
8044 break;
8045
8046 case OP_RMQRZ:
8047 case OP_oRMQRZ:
8048 po_reg_or_goto (REG_TYPE_MQ, try_rr_zr);
8049 break;
8050
8051 case OP_RR_ZR:
8052 try_rr_zr:
8053 po_reg_or_goto (REG_TYPE_RN, ZR);
8054 break;
8055 ZR:
8056 po_reg_or_fail (REG_TYPE_ZR);
8057 break;
8058
8059 default:
8060 as_fatal (_("unhandled operand code %d"), op_parse_code);
8061 }
8062
8063 /* Various value-based sanity checks and shared operations. We
8064 do not signal immediate failures for the register constraints;
8065 this allows a syntax error to take precedence. */
8066 switch (op_parse_code)
8067 {
8068 case OP_oRRnpc:
8069 case OP_RRnpc:
8070 case OP_RRnpcb:
8071 case OP_RRw:
8072 case OP_oRRw:
8073 case OP_RRnpc_I0:
8074 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
8075 inst.error = BAD_PC;
8076 break;
8077
8078 case OP_oRRnpcsp:
8079 case OP_RRnpcsp:
8080 case OP_RRnpcsp_I32:
8081 if (inst.operands[i].isreg)
8082 {
8083 if (inst.operands[i].reg == REG_PC)
8084 inst.error = BAD_PC;
8085 else if (inst.operands[i].reg == REG_SP
8086 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
8087 relaxed since ARMv8-A. */
8088 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
8089 {
8090 gas_assert (thumb);
8091 inst.error = BAD_SP;
8092 }
8093 }
8094 break;
8095
8096 case OP_RRnpctw:
8097 if (inst.operands[i].isreg
8098 && inst.operands[i].reg == REG_PC
8099 && (inst.operands[i].writeback || thumb))
8100 inst.error = BAD_PC;
8101 break;
8102
8103 case OP_RVSD_COND:
8104 case OP_VLDR:
8105 if (inst.operands[i].isreg)
8106 break;
8107 /* fall through. */
8108
8109 case OP_CPSF:
8110 case OP_ENDI:
8111 case OP_oROR:
8112 case OP_wPSR:
8113 case OP_rPSR:
8114 case OP_COND:
8115 case OP_oBARRIER_I15:
8116 case OP_REGLST:
8117 case OP_CLRMLST:
8118 case OP_VRSLST:
8119 case OP_VRDLST:
8120 case OP_VRSDLST:
8121 case OP_VRSDVLST:
8122 case OP_NRDLST:
8123 case OP_NSTRLST:
8124 case OP_MSTRLST2:
8125 case OP_MSTRLST4:
8126 if (val == FAIL)
8127 goto failure;
8128 inst.operands[i].imm = val;
8129 break;
8130
8131 case OP_LR:
8132 case OP_oLR:
8133 if (inst.operands[i].reg != REG_LR)
8134 inst.error = _("operand must be LR register");
8135 break;
8136
8137 case OP_SP:
8138 if (inst.operands[i].reg != REG_SP)
8139 inst.error = _("operand must be SP register");
8140 break;
8141
8142 case OP_R12:
8143 if (inst.operands[i].reg != REG_R12)
8144 inst.error = _("operand must be r12");
8145 break;
8146
8147 case OP_RMQRZ:
8148 case OP_oRMQRZ:
8149 case OP_RR_ZR:
8150 if (!inst.operands[i].iszr && inst.operands[i].reg == REG_PC)
8151 inst.error = BAD_PC;
8152 break;
8153
8154 case OP_RRe:
8155 if (inst.operands[i].isreg
8156 && (inst.operands[i].reg & 0x00000001) != 0)
8157 inst.error = BAD_ODD;
8158 break;
8159
8160 case OP_RRo:
8161 if (inst.operands[i].isreg)
8162 {
8163 if ((inst.operands[i].reg & 0x00000001) != 1)
8164 inst.error = BAD_EVEN;
8165 else if (inst.operands[i].reg == REG_SP)
8166 as_tsktsk (MVE_BAD_SP);
8167 else if (inst.operands[i].reg == REG_PC)
8168 inst.error = BAD_PC;
8169 }
8170 break;
8171
8172 default:
8173 break;
8174 }
8175
8176 /* If we get here, this operand was successfully parsed. */
8177 inst.operands[i].present = 1;
8178 continue;
8179
8180 bad_args:
8181 inst.error = BAD_ARGS;
8182
8183 failure:
8184 if (!backtrack_pos)
8185 {
8186 /* The parse routine should already have set inst.error, but set a
8187 default here just in case. */
8188 if (!inst.error)
8189 inst.error = BAD_SYNTAX;
8190 return FAIL;
8191 }
8192
8193 /* Do not backtrack over a trailing optional argument that
8194 absorbed some text. We will only fail again, with the
8195 'garbage following instruction' error message, which is
8196 probably less helpful than the current one. */
8197 if (backtrack_index == i && backtrack_pos != str
8198 && upat[i+1] == OP_stop)
8199 {
8200 if (!inst.error)
8201 inst.error = BAD_SYNTAX;
8202 return FAIL;
8203 }
8204
8205 /* Try again, skipping the optional argument at backtrack_pos. */
8206 str = backtrack_pos;
8207 inst.error = backtrack_error;
8208 inst.operands[backtrack_index].present = 0;
8209 i = backtrack_index;
8210 backtrack_pos = 0;
8211 }
8212
8213 /* Check that we have parsed all the arguments. */
8214 if (*str != '\0' && !inst.error)
8215 inst.error = _("garbage following instruction");
8216
8217 return inst.error ? FAIL : SUCCESS;
8218 }
8219
8220 #undef po_char_or_fail
8221 #undef po_reg_or_fail
8222 #undef po_reg_or_goto
8223 #undef po_imm_or_fail
8224 #undef po_scalar_or_fail
8225 #undef po_barrier_or_imm
8226
/* Shorthand macro for instruction encoding functions issuing errors.
   Evaluate EXPR; if it is true, record ERR in inst.error and return
   from the *calling* function.  Because of the bare `return', this
   macro may only be used inside void encoder functions.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
8238
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.

   On failure this sets inst.error and returns from the calling
   function, so it is only usable inside void encoder functions.  */
#define reject_bad_reg(reg)					\
  do								\
   if (reg == REG_PC)						\
     {								\
       inst.error = BAD_PC;					\
       return;							\
     }								\
   else if (reg == REG_SP					\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))	\
     {								\
       inst.error = BAD_SP;					\
       return;							\
     }								\
  while (0)
8259
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  The warning is gated on the global warn_on_deprecated
   flag and issued via as_tsktsk, so it never stops assembly.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
      as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
8267
8268 /* Functions for operand encoding. ARM, then Thumb. */
8269
/* Rotate the 32-bit value V left by N bits, with N reduced modulo 32.
   NOTE(review): V and N are each evaluated more than once -- do not
   pass expressions with side effects.  */
#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
8271
8272 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
8273
8274 The only binary encoding difference is the Coprocessor number. Coprocessor
8275 9 is used for half-precision calculations or conversions. The format of the
8276 instruction is the same as the equivalent Coprocessor 10 instruction that
8277 exists for Single-Precision operation. */
8278
8279 static void
8280 do_scalar_fp16_v82_encode (void)
8281 {
8282 if (inst.cond < COND_ALWAYS)
8283 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
8284 " the behaviour is UNPREDICTABLE"));
8285 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
8286 _(BAD_FP16));
8287
8288 inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
8289 mark_feature_used (&arm_ext_fp16);
8290 }
8291
8292 /* If VAL can be encoded in the immediate field of an ARM instruction,
8293 return the encoded form. Otherwise, return FAIL. */
8294
8295 static unsigned int
8296 encode_arm_immediate (unsigned int val)
8297 {
8298 unsigned int a, i;
8299
8300 if (val <= 0xff)
8301 return val;
8302
8303 for (i = 2; i < 32; i += 2)
8304 if ((a = rotate_left (val, i)) <= 0xff)
8305 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
8306
8307 return FAIL;
8308 }
8309
8310 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
8311 return the encoded form. Otherwise, return FAIL. */
8312 static unsigned int
8313 encode_thumb32_immediate (unsigned int val)
8314 {
8315 unsigned int a, i;
8316
8317 if (val <= 0xff)
8318 return val;
8319
8320 for (i = 1; i <= 24; i++)
8321 {
8322 a = val >> i;
8323 if ((val & ~(0xffU << i)) == 0)
8324 return ((val >> i) & 0x7f) | ((32 - i) << 7);
8325 }
8326
8327 a = val & 0xff;
8328 if (val == ((a << 16) | a))
8329 return 0x100 | a;
8330 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
8331 return 0x300 | a;
8332
8333 a = val & 0xff00;
8334 if (val == ((a << 16) | a))
8335 return 0x200 | (a >> 8);
8336
8337 return FAIL;
8338 }
8339 /* Encode a VFP SP or DP register number into inst.instruction. */
8340
8341 static void
8342 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
8343 {
8344 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
8345 && reg > 15)
8346 {
8347 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
8348 {
8349 if (thumb_mode)
8350 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
8351 fpu_vfp_ext_d32);
8352 else
8353 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
8354 fpu_vfp_ext_d32);
8355 }
8356 else
8357 {
8358 first_error (_("D register out of range for selected VFP version"));
8359 return;
8360 }
8361 }
8362
8363 switch (pos)
8364 {
8365 case VFP_REG_Sd:
8366 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
8367 break;
8368
8369 case VFP_REG_Sn:
8370 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
8371 break;
8372
8373 case VFP_REG_Sm:
8374 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
8375 break;
8376
8377 case VFP_REG_Dd:
8378 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
8379 break;
8380
8381 case VFP_REG_Dn:
8382 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
8383 break;
8384
8385 case VFP_REG_Dm:
8386 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
8387 break;
8388
8389 default:
8390 abort ();
8391 }
8392 }
8393
8394 /* Encode a <shift> in an ARM-format instruction. The immediate,
8395 if any, is handled by md_apply_fix. */
8396 static void
8397 encode_arm_shift (int i)
8398 {
8399 /* register-shifted register. */
8400 if (inst.operands[i].immisreg)
8401 {
8402 int op_index;
8403 for (op_index = 0; op_index <= i; ++op_index)
8404 {
8405 /* Check the operand only when it's presented. In pre-UAL syntax,
8406 if the destination register is the same as the first operand, two
8407 register form of the instruction can be used. */
8408 if (inst.operands[op_index].present && inst.operands[op_index].isreg
8409 && inst.operands[op_index].reg == REG_PC)
8410 as_warn (UNPRED_REG ("r15"));
8411 }
8412
8413 if (inst.operands[i].imm == REG_PC)
8414 as_warn (UNPRED_REG ("r15"));
8415 }
8416
8417 if (inst.operands[i].shift_kind == SHIFT_RRX)
8418 inst.instruction |= SHIFT_ROR << 5;
8419 else
8420 {
8421 inst.instruction |= inst.operands[i].shift_kind << 5;
8422 if (inst.operands[i].immisreg)
8423 {
8424 inst.instruction |= SHIFT_BY_REG;
8425 inst.instruction |= inst.operands[i].imm << 8;
8426 }
8427 else
8428 inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
8429 }
8430 }
8431
8432 static void
8433 encode_arm_shifter_operand (int i)
8434 {
8435 if (inst.operands[i].isreg)
8436 {
8437 inst.instruction |= inst.operands[i].reg;
8438 encode_arm_shift (i);
8439 }
8440 else
8441 {
8442 inst.instruction |= INST_IMMEDIATE;
8443 if (inst.relocs[0].type != BFD_RELOC_ARM_IMMEDIATE)
8444 inst.instruction |= inst.operands[i].imm;
8445 }
8446 }
8447
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register of operand I and the P (pre-index) and
   W (write-back) bits.  IS_T selects the user-mode (T-suffixed)
   load/store forms, which only permit post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bool is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register goes in the Rn field (bits 19:16).  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      /* parse_address only sets postind together with writeback.  */
      gas_assert (inst.operands[i].writeback);
      /* For T instructions the W bit selects the user-mode access.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when write-back (or post-indexing) is in effect and the Rn
     field (bits 19:16) equals the Rd/Rt field (bits 15:12).  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
8490
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bool is_t)
{
  const bool is_pc = (inst.operands[i].reg == REG_PC);

  /* Base register plus P/W bits; may set inst.error.  */
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, possibly scaled by an immediate shift.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  /* RRX is encoded as ROR with amount zero.  */
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.relocs[0] */
    {
      if (is_pc && !inst.relocs[0].pc_rel)
	{
	  const bool is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
8550
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bool is_t)
{
  /* Mode 3 has no scaled-register offset form.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  /* Base register plus P/W bits; may set inst.error.  */
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.relocs[0] */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.relocs[0].pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
8594
8595 /* Write immediate bits [7:0] to the following locations:
8596
8597 |28/24|23 19|18 16|15 4|3 0|
8598 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
8599
8600 This function is used by VMOV/VMVN/VORR/VBIC. */
8601
8602 static void
8603 neon_write_immbits (unsigned immbits)
8604 {
8605 inst.instruction |= immbits & 0xf;
8606 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
8607 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
8608 }
8609
/* Invert the low-order SIZE bits of the 64-bit quantity XHI:XLO.
   Either pointer may be NULL, in which case that half is read as zero
   and not written back.  SIZE must be 8, 16, 32 or 64.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      hi = ~hi & 0xffffffff;
      /* fall through -- the low word is inverted exactly as for 32.  */

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
8646
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D -- i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  unsigned shift;

  for (shift = 0; shift < 32; shift += 8)
    {
      unsigned byte = (imm >> shift) & 0xff;

      if (byte != 0 && byte != 0xff)
	return 0;
    }

  return 1;
}
8658
/* For an immediate of the form checked by neon_bits_same_in_bytes,
   return 0bABCD: bit 0 of each byte, least-significant byte first.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  unsigned byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (8 * byte)) & 1) << byte;

  return result;
}
8667
/* Compress quarter-float representation to 0b...000 abcdefgh: the
   sign bit (bit 31) lands in bit 7 and bits [25:19] in bits [6:0].  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low_bits = (imm >> 19) & 0x7f;
  unsigned sign_bit = (imm >> 24) & 0x80;

  return low_bits | sign_bit;
}
8675
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float: cmode 0xF, MOV only (not MVN), 32-bit
     elements only.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* cmode 0xE with op forced to 1: every byte of the 64-bit value
	 is 0x00 or 0xff, squashed to one control bit per byte.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Other 64-bit immediates are only encodable when both halves
	 are identical; then fall through to the 32-bit cases.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* A single byte in any of the four byte positions:
	 cmodes 0x0, 0x2, 0x4, 0x6.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* Byte shifted left with the vacated low bits all set:
	 cmodes 0xC and 0xD.  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry as a 16-bit element if the value is a repeated
	 half-word.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* A byte in either half-word position: cmodes 0x8 and 0xA.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry as an 8-bit element if the value is a repeated byte.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
8785
8786 #if defined BFD_HOST_64_BIT
8787 /* Returns TRUE if double precision value V may be cast
8788 to single precision without loss of accuracy. */
8789
8790 static bool
8791 is_double_a_single (bfd_uint64_t v)
8792 {
8793 int exp = (v >> 52) & 0x7FF;
8794 bfd_uint64_t mantissa = v & 0xFFFFFFFFFFFFFULL;
8795
8796 return ((exp == 0 || exp == 0x7FF
8797 || (exp >= 1023 - 126 && exp <= 1023 + 127))
8798 && (mantissa & 0x1FFFFFFFL) == 0);
8799 }
8800
/* Returns a double precision value casted to single precision
   (ignoring the least significant bits in exponent and mantissa).  */

static int
double_to_single (bfd_uint64_t v)
{
  unsigned int sign = (v >> 63) & 1;
  int exp = (v >> 52) & 0x7FF;
  bfd_uint64_t mantissa = v & 0xFFFFFFFFFFFFFULL;

  /* All-ones exponent (infinity/NaN) maps to all-ones.  */
  if (exp == 0x7FF)
    exp = 0xFF;
  else
    {
      /* Rebias from double (1023) to single (127).  */
      exp = exp - 1023 + 127;
      if (exp >= 0xFF)
	{
	  /* Infinity.  */
	  /* NOTE(review): exp = 0x7F does not encode an IEEE-754
	     single-precision infinity (that would be 0xFF).  This
	     branch looks unreachable when callers guard with
	     is_double_a_single (which bounds exp at 1023 + 127) --
	     confirm before relying on it standalone.  */
	  exp = 0x7F;
	  mantissa = 0;
	}
      else if (exp < 0)
	{
	  /* No denormalized numbers.  */
	  exp = 0;
	  mantissa = 0;
	}
    }
  /* Drop the 29 mantissa bits that do not fit in a single.  */
  mantissa >>= 29;
  return (sign << 31) | (exp << 23) | mantissa;
}
8832 #endif /* BFD_HOST_64_BIT */
8833
/* Kinds of constant handled by move_or_literal_pool: a Thumb integer
   constant, an ARM integer constant, or a vector (VFP/Neon)
   constant.  */
enum lit_type
{
  CONST_THUMB,
  CONST_ARM,
  CONST_VEC
};
8840
8841 static void do_vfp_nsyn_opcode (const char *);
8842
8843 /* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8844 Determine whether it can be performed with a move instruction; if
8845 it can, convert inst.instruction to that move instruction and
8846 return true; if it can't, convert inst.instruction to a literal-pool
8847 load and return FALSE. If this is not a valid thing to do in the
8848 current context, set inst.error and return TRUE.
8849
8850 inst.operands[i] describes the destination register. */
8851
static bool
move_or_literal_pool (int i, enum lit_type t, bool mode_3)
{
  unsigned long tbit;
  bool thumb_p = (t == CONST_THUMB);
  bool arm_p   = (t == CONST_ARM);

  /* Pick the load bit for the current encoding; only load
     pseudo-instructions (e.g. "ldr Rx, =expr") may take this path.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return true;
    }

  if (inst.relocs[0].exp.X_op != O_constant
      && inst.relocs[0].exp.X_op != O_symbol
      && inst.relocs[0].exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return true;
    }

  if (inst.relocs[0].exp.X_op == O_constant
      || inst.relocs[0].exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_uint64_t v;
#else
      valueT v;
#endif
      if (inst.relocs[0].exp.X_op == O_big)
	{
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.relocs[0].exp.X_add_number == -1)
	    {
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

	  /* Assemble the low littlenums into a host integer,
	     least-significant littlenum first.  */
#if defined BFD_HOST_64_BIT
	  v = l[3] & LITTLENUM_MASK;
	  v <<= LITTLENUM_NUMBER_OF_BITS;
	  v |= l[2] & LITTLENUM_MASK;
	  v <<= LITTLENUM_NUMBER_OF_BITS;
	  v |= l[1] & LITTLENUM_MASK;
	  v <<= LITTLENUM_NUMBER_OF_BITS;
	  v |= l[0] & LITTLENUM_MASK;
#else
	  v = l[1] & LITTLENUM_MASK;
	  v <<= LITTLENUM_NUMBER_OF_BITS;
	  v |= l[0] & LITTLENUM_MASK;
#endif
	}
      else
	v = inst.relocs[0].exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* LDR should not use lead in a flag-setting instruction being
		 chosen so we do not check whether movs can be used.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		   || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bool isNegated = false;

		  /* Try the constant directly, then its complement.  */
		  newimm = encode_thumb32_immediate (v);
		  if (newimm == (unsigned int) FAIL)
		    {
		      newimm = encode_thumb32_immediate (~v);
		      isNegated = true;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      /* Scatter the T32 modified immediate: i, imm3,
			 imm8 fields.  */
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return true;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      /* Scatter imm4, i, imm3, imm8 fields.  */
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      /* In case this replacement is being done on Armv8-M
			 Baseline we need to make sure to disable the
			 instruction size check, as otherwise GAS will reject
			 the use of this T32 instruction.  */
		      inst.size_req = 0;
		      return true;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return true;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return true;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      /* High word: an explicit register-as-immediate if given,
		 else zero/sign-extend the low word per X_unsigned.  */
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.relocs[0].exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, false, &immbits,
						   &op, 64, NT_invtype);

	      /* If the constant itself is not encodable, retry with the
		 inverted constant and the complementary operation.  */
	      if (cmode == FAIL)
		{
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, false, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return true;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return true;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return true;
		}
	    }
#endif
	}
    }

  /* No move was possible: fall back to a PC-relative literal-pool
     load, rewriting operand 1 into [pc, #offset] form.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return true;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = (thumb_p
			 ? BFD_RELOC_ARM_THUMB_OFFSET
			 : (mode_3
			    ? BFD_RELOC_ARM_HWLITERAL
			    : BFD_RELOC_ARM_LITERAL));
  return false;
}
9089
9090 /* inst.operands[i] was set up by parse_address. Encode it into an
9091 ARM-format instruction. Reject all forms which cannot be encoded
9092 into a coprocessor load/store instruction. If wb_ok is false,
9093 reject use of writeback; if unind_ok is false, reject use of
9094 unindexed addressing. If reloc_override is not 0, use it instead
9095 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
9096 (in which case it is preserved). */
9097
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  /* A non-register operand here means a literal-pool load (e.g.
     "vldr Rx, =const"); try to convert it to a vmov or pool entry.  */
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/false))
	return SUCCESS;
    }

  /* Base register goes in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed mode: 8-bit option field, U bit must be set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Group relocations (ALU_PC_G0..LDC_SB_G2) and LDR_PC_G0 set by the
     parser are preserved; otherwise pick the generic coprocessor
     offset relocation for the current instruction set.  */
  if (reloc_override)
    inst.relocs[0].type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.relocs[0].type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.relocs[0].type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.relocs[0].type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.relocs[0].type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.relocs[0].type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
9166
9167 /* Functions for instruction encoding, sorted by sub-architecture.
9168 First some generics; their names are taken from the conventional
9169 bit positions for register arguments in ARM format instructions. */
9170
static void
do_noargs (void)
{
  /* Nothing to encode: the opcode from the insns[] table is complete.  */
}
9175
9176 static void
9177 do_rd (void)
9178 {
9179 inst.instruction |= inst.operands[0].reg << 12;
9180 }
9181
9182 static void
9183 do_rn (void)
9184 {
9185 inst.instruction |= inst.operands[0].reg << 16;
9186 }
9187
9188 static void
9189 do_rd_rm (void)
9190 {
9191 inst.instruction |= inst.operands[0].reg << 12;
9192 inst.instruction |= inst.operands[1].reg;
9193 }
9194
9195 static void
9196 do_rm_rn (void)
9197 {
9198 inst.instruction |= inst.operands[0].reg;
9199 inst.instruction |= inst.operands[1].reg << 16;
9200 }
9201
9202 static void
9203 do_rd_rn (void)
9204 {
9205 inst.instruction |= inst.operands[0].reg << 12;
9206 inst.instruction |= inst.operands[1].reg << 16;
9207 }
9208
9209 static void
9210 do_rn_rd (void)
9211 {
9212 inst.instruction |= inst.operands[0].reg << 16;
9213 inst.instruction |= inst.operands[1].reg << 12;
9214 }
9215
9216 static void
9217 do_tt (void)
9218 {
9219 inst.instruction |= inst.operands[0].reg << 8;
9220 inst.instruction |= inst.operands[1].reg << 16;
9221 }
9222
9223 static bool
9224 check_obsolete (const arm_feature_set *feature, const char *msg)
9225 {
9226 if (ARM_CPU_IS_ANY (cpu_variant))
9227 {
9228 as_tsktsk ("%s", msg);
9229 return true;
9230 }
9231 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
9232 {
9233 as_bad ("%s", msg);
9234 return true;
9235 }
9236
9237 return false;
9238 }
9239
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  /* Rd in bits 12-15, Rm in bits 0-3, Rn in bits 16-19.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
9263
9264 static void
9265 do_rd_rn_rm (void)
9266 {
9267 inst.instruction |= inst.operands[0].reg << 12;
9268 inst.instruction |= inst.operands[1].reg << 16;
9269 inst.instruction |= inst.operands[2].reg;
9270 }
9271
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* The base operand must carry no offset expression (a plain [Rn]).  */
  constraint (((inst.relocs[0].exp.X_op != O_constant
		&& inst.relocs[0].exp.X_op != O_illegal)
	       || inst.relocs[0].exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  /* Rm in bits 0-3, Rd in bits 12-15, Rn in bits 16-19.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9284
9285 static void
9286 do_imm0 (void)
9287 {
9288 inst.instruction |= inst.operands[0].imm;
9289 }
9290
static void
do_rd_cpaddr (void)
{
  /* Rd in bits 12-15, operand 1 is a coprocessor address; both
     writeback and unindexed addressing are permitted.  */
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, true, true, 0);
}
9297
9298 /* ARM instructions, in alphabetical order by function name (except
9299 that wrapper functions appear immediately after the function they
9300 wrap). */
9301
9302 /* This is a pseudo-op of the form "adr rd, label" to be converted
9303 into a relative address of the form "add rd, pc, #label-.-8". */
9304
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* Compensate for the PC reading 8 bytes ahead in ARM state.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* For interworking, taking the address of a defined Thumb function
     sets the low bit so a later BX enters Thumb state.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
9323
9324 /* This is a pseudo-op of the form "adrl rd, label" to be converted
9325 into a relative address of the form:
9326 add rd, pc, #low(label-.-8)"
9327 add rd, rd, #high(label-.-8)" */
9328
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* ADRL expands to two instructions.  */
  inst.size = INSN_SIZE * 2;
  /* Compensate for the PC reading 8 bytes ahead in ARM state.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* For interworking, taking the address of a defined Thumb function
     sets the low bit so a later BX enters Thumb state.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
9348
static void
do_arit (void)
{
  /* Data-processing: <op>{s} Rd, [Rn,] <shifter_operand>.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  /* Two-operand form: Rn defaults to Rd.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
9361
9362 static void
9363 do_barrier (void)
9364 {
9365 if (inst.operands[0].present)
9366 inst.instruction |= inst.operands[0].imm;
9367 else
9368 inst.instruction |= 0xf;
9369 }
9370
static void
do_bfc (void)
{
  /* BFC Rd, #lsb, #width.  */
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
9382
static void
do_bfi (void)
{
  /* BFI Rd, Rm, #lsb, #width.  */
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
9402
static void
do_bfx (void)
{
  /* SBFX/UBFX Rd, Rn, #lsb, #width; encoding holds LSB and width-1.  */
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
}
9413
9414 /* ARM V5 breakpoint instruction (argument parse)
9415 BKPT <16 bit unsigned immediate>
9416 Instruction is not conditional.
9417 The bit pattern given in insns[] has the COND_ALWAYS condition,
9418 and it is an error if the caller tried to override that. */
9419
9420 static void
9421 do_bkpt (void)
9422 {
9423 /* Top 12 of 16 bits to bits 19:8. */
9424 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
9425
9426 /* Bottom 4 of 16 bits to bits 3:0. */
9427 inst.instruction |= inst.operands[0].imm & 0xf;
9428 }
9429
static void
encode_branch (int default_reloc)
{
  /* Pick the relocation for a branch target.  An explicit "(plt)" or
     "(tlscall)" suffix overrides DEFAULT_RELOC.  */
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.relocs[0].type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.relocs[0].type = (bfd_reloc_code_real_type) default_reloc;
  inst.relocs[0].pc_rel = 1;
}
9446
static void
do_branch (void)
{
  /* B{cond}: EABI v4+ objects use the JUMP reloc so the linker can
     fix up interworking; older objects use the plain branch reloc.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
9457
static void
do_bl (void)
{
  /* BL{cond}: for EABI v4+ a plain BL is a CALL reloc (eligible for
     BL<->BLX fix-up); a conditional BL cannot become BLX, so it takes
     the JUMP reloc instead.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
9473
9474 /* ARM V5 branch-link-exchange instruction (argument parse)
9475 BLX <target_addr> ie BLX(1)
9476 BLX{<condition>} <Rm> ie BLX(2)
9477 Unfortunately, there are two different opcodes for this mnemonic.
9478 So, the insns[].value is not used, and the code here zaps values
9479 into inst.instruction.
9480 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
9481
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* BLX(1) immediate form: unconditional encoding 0xfa000000.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
9505
static void
do_bx (void)
{
  bool want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
    want_reloc = true;

  /* Pre-v4 EABI objects never get the marker relocation.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = false;

  if (want_reloc)
    inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
}
9530
9531
9532 /* ARM v5TEJ. Jump to Jazelle code. */
9533
9534 static void
9535 do_bxj (void)
9536 {
9537 if (inst.operands[0].reg == REG_PC)
9538 as_tsktsk (_("use of r15 in bxj is not really useful"));
9539
9540 inst.instruction |= inst.operands[0].reg;
9541 }
9542
9543 /* Co-processor data operation:
9544 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
9545 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
9546 static void
9547 do_cdp (void)
9548 {
9549 inst.instruction |= inst.operands[0].reg << 8;
9550 inst.instruction |= inst.operands[1].imm << 20;
9551 inst.instruction |= inst.operands[2].reg << 12;
9552 inst.instruction |= inst.operands[3].reg << 16;
9553 inst.instruction |= inst.operands[4].reg;
9554 inst.instruction |= inst.operands[5].imm << 5;
9555 }
9556
static void
do_cmp (void)
{
  /* Comparison: Rn in bits 16-19 plus a shifter operand; no Rd.  */
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}
9563
9564 /* Transfer between coprocessor and ARM registers.
9565 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
9566 MRC2
9567 MCR{cond}
9568 MCR2
9569
9570 No special properties. */
9571
/* Describes one coprocessor register access whose use is deprecated
   or obsolete on some architectures.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opc1 field of the MRC/MCR encoding.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opc2 field.  */
  arm_feature_set deprecated;	/* Architectures where access is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where access is obsolete.  */
  const char *dep_msg;		/* Diagnostic for deprecated access.  */
  const char *obs_msg;		/* Diagnostic for obsolete access.  */
};
9584
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  All entries here are
   merely deprecated (from ARMv8 on); none is obsoleted, hence the
   ARM_ARCH_NONE obsoleted field and NULL obs_msg.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
9612
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn about accesses to coprocessor registers that are deprecated
     on the selected architecture.  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  /* Fields: coproc (8-11), opc1 (21-23), Rd (12-15), CRn (16-19),
     CRm (0-3), opc2 (5-7).  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
9662
9663 /* Transfer between coprocessor register and pair of ARM registers.
9664 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
9665 MCRR2
9666 MRRC{cond}
9667 MRRC2
9668
9669 Two XScale instructions are special cases of these:
9670
9671 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
9672 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
9673
9674 Result unpredictable if Rd or Rn is R15. */
9675
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
       /* If Rd == Rn, error that the operation is
	  unpredictable (example MRRC p3,#1,r1,r1,c4).  */
       constraint (Rd == Rn, BAD_OVERLAP);
    }

  /* Fields: coproc (8-11), opcode (4-7), Rd (12-15), Rn (16-19),
     CRm (0-3).  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
9709
9710 static void
9711 do_cpsi (void)
9712 {
9713 inst.instruction |= inst.operands[0].imm << 6;
9714 if (inst.operands[1].present)
9715 {
9716 inst.instruction |= CPSI_MMOD;
9717 inst.instruction |= inst.operands[1].imm;
9718 }
9719 }
9720
9721 static void
9722 do_dbg (void)
9723 {
9724 inst.instruction |= inst.operands[0].imm;
9725 }
9726
9727 static void
9728 do_div (void)
9729 {
9730 unsigned Rd, Rn, Rm;
9731
9732 Rd = inst.operands[0].reg;
9733 Rn = (inst.operands[1].present
9734 ? inst.operands[1].reg : Rd);
9735 Rm = inst.operands[2].reg;
9736
9737 constraint ((Rd == REG_PC), BAD_PC);
9738 constraint ((Rn == REG_PC), BAD_PC);
9739 constraint ((Rm == REG_PC), BAD_PC);
9740
9741 inst.instruction |= Rd << 16;
9742 inst.instruction |= Rn << 0;
9743 inst.instruction |= Rm << 8;
9744 }
9745
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emits no bytes in ARM state.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_pred_insn_type (IT_INSN);
      /* Seed the predication mask with the sentinel bit so later
	 instructions can track their position in the IT block.  */
      now_pred.mask = (inst.instruction & 0xf) | 0x10;
      now_pred.cc = inst.operands[0].imm;
    }
}
9762
9763 /* If there is only one register in the register list,
9764 then return its register number. Otherwise return -1. */
/* If there is only one register in the register list RANGE, return its
   register number; otherwise return -1.  An empty list also returns -1.  */
static int
only_one_reg_in_list (int range)
{
  int i;

  /* ffs () returns 0 for an empty list; the shift below would then be
     1 << -1, which is undefined behaviour, so bail out first.  */
  if (range == 0)
    return -1;

  i = ffs (range) - 1;
  /* Accept only a single set bit, and only for r0-r15.  */
  return (i > 15 || range != (1 << i)) ? -1 : i;
}
9771
static void
encode_ldmstm(int from_push_pop_mnem)
{
  /* Encode an LDM/STM (or PUSH/POP, when FROM_PUSH_POP_MNEM) with base
     register in operand 0 and register-list bitmask in operand 1.  */
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* The "^" suffix on the register list selects LDM/STM type 2 or 3.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
9827
static void
do_ldmstm (void)
{
  /* Plain LDM/STM mnemonic: never eligible for the single-register
     PUSH/POP (A2) encoding.  */
  encode_ldmstm (/*from_push_pop_mnem=*/false);
}
9833
9834 /* ARMv5TE load-consecutive (argument parse)
9835 Mode is like LDRH.
9836
9837 LDRccD R, mode
9838 STRccD R, mode. */
9839
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* r14 would make the pair r14:r15, and r15 cannot be transferred.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second register may be omitted; it is implicitly Rt + 1.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/false);
}
9875
static void
do_ldrex (void)
{
  /* LDREX Rt, [Rn]: only a plain base-register address is legal.  */
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
9907
static void
do_ldrexd (void)
{
  /* LDREXD Rt, Rt2, [Rn]: Rt even, Rt2 implicitly or explicitly Rt+1.  */
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9923
9924 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9925 which is not a multiple of four is UNPREDICTABLE. */
static void
check_ldr_r15_aligned (void)
{
  /* Diagnose "ldr pc, [pc, #imm]" where the literal offset is not a
     multiple of four (UNPREDICTABLE per the architecture).  */
  constraint (!(inst.operands[1].immisreg)
	      && (inst.operands[0].reg == REG_PC
	      && inst.operands[1].reg == REG_PC
	      && (inst.relocs[0].exp.X_add_number & 0x3)),
	      _("ldr to register 15 must be 4-byte aligned"));
}
9935
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* "ldr Rd, =const" may become a mov or a literal-pool load.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/false))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/false);
  check_ldr_r15_aligned ();
}
9946
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/true);
}
9965
9966 /* Halfword and signed-byte load/store operations. */
9967
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  /* Literal loads ("ldrh Rd, =const") may go via the literal pool;
     mode_3 selects the halfword/signed-byte addressing form.  */
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/true))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/false);
}
9978
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/true);
}
9997
9998 /* Co-processor register load/store.
9999 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
10000 static void
10001 do_lstc (void)
10002 {
10003 inst.instruction |= inst.operands[0].reg << 8;
10004 inst.instruction |= inst.operands[1].reg << 12;
10005 encode_arm_cp_address (2, true, true, 0);
10006 }
10007
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  /* Fields: Rd (16-19), Rm (0-3), Rs (8-11), Rn (12-15).  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
10022
static void
do_mov (void)
{
  /* MOV Rd, <shifter_operand>.  The Thumb-1 ALU group relocations are
     not representable in an ARM-state mov.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
10032
10033 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bool top;

  /* Bit 22 distinguishes MOVT (top) from MOVW.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.relocs[0].type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  /* A constant with no relocation is encoded directly here; otherwise
     the relocation machinery fills in the immediate later.  */
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      imm = inst.relocs[0].exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
10054
static int
do_vfp_nsyn_mrs (void)
{
  /* Handle the VFP forms of MRS.  APSR_nzcv destination (isvec) maps
     to fmstat; a VFP system register source maps to fmrx.  Returns
     FAIL if neither applies, so the caller can use the core encoding.  */
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
10073
10074 static int
10075 do_vfp_nsyn_msr (void)
10076 {
10077 if (inst.operands[0].isvec)
10078 do_vfp_nsyn_opcode ("fmxr");
10079 else
10080 return FAIL;
10081
10082 return SUCCESS;
10083 }
10084
static void
do_vmrs (void)
{
  /* VMRS Rt, <spec_reg>: validate the special-register number against
     the selected architecture, then encode it.  */
  unsigned Rt = inst.operands[0].reg;

  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  switch (inst.operands[1].reg)
    {
    /* MVFR2 is only valid for Armv8-A.  */
    case 5:
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));
      break;

    /* Check for new Armv8.1-M Mainline changes to <spec_reg>.  */
    case 1: /* fpscr.  */
      constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		    || ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _(BAD_FPU));
      break;

    case 14: /* fpcxt_ns.  */
    case 15: /* fpcxt_s.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main),
		  _("selected processor does not support instruction"));
      break;

    case  2: /* fpscr_nzcvqc.  */
    case 12: /* vpr.  */
    case 13: /* p0.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main)
		  || (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _("selected processor does not support instruction"));
      if (inst.operands[0].reg != 2
	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
      break;

    default:
      break;
    }

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
10145
/* Assemble VMSR <spec_reg>, <Rt>.  Operand 0 is the VFP/MVE system
   register, operand 1 is the core register Rt.  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  switch (inst.operands[0].reg)
    {
    /* MVFR2 is only valid for Armv8-A.  */
    case 5:
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));
      break;

    /* Check for new Armv8.1-M Mainline changes to <spec_reg>.  */
    case 1: /* fpcr.  */
      constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		    || ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _(BAD_FPU));
      break;

    case 14: /* fpcxt_ns.  */
    case 15: /* fpcxt_s.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main),
		  _("selected processor does not support instruction"));
      break;

    case 2: /* fpscr_nzcvqc.  */
    case 12: /* vpr.  */
    case 13: /* p0.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main)
		  || (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _("selected processor does not support instruction"));
      /* The spec_reg is operands[0] here; VPR (12) and P0 (13) are
	 MVE-only, so warn when they are used without MVE.  */
      if (inst.operands[0].reg != 2
	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
      break;

    default:
      break;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
10201
/* Assemble MRS Rd, {C|S|A}PSR or the banked-register form.  */
static void
do_mrs (void)
{
  unsigned br;

  /* The VFP-syntax (FMSTAT/FMRX) forms are handled separately.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form: the parsed operand already carries the
	 encoded register number.  NOTE(review): the 0x200/0xf0000 test
	 appears to sanity-check that the value came from the
	 banked-register parser — confirm against parse_operands.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
10230
/* Two possible forms:
      "{C|S}PSR_<field>, Rm",
      "{C|S}PSR_f, #expression".  */

static void
do_msr (void)
{
  /* The VFP system-register form (FMXR) takes precedence.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* The field mask (and SPSR bit) were encoded during operand parsing.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: leave the rotated-immediate encoding to the
	 fixup machinery.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
      inst.relocs[0].pc_rel = 0;
    }
}
10251
10252 static void
10253 do_mul (void)
10254 {
10255 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
10256
10257 if (!inst.operands[2].present)
10258 inst.operands[2].reg = inst.operands[0].reg;
10259 inst.instruction |= inst.operands[0].reg << 16;
10260 inst.instruction |= inst.operands[1].reg;
10261 inst.instruction |= inst.operands[2].reg << 8;
10262
10263 if (inst.operands[0].reg == inst.operands[1].reg
10264 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
10265 as_tsktsk (_("Rd and Rm should be different in mul"));
10266 }
10267
/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */

static void
do_mull (void)
{
  /* Operands: 0 = RdLo, 1 = RdHi, 2 = Rm, 3 = Rs.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
10292
/* Assemble NOP{cond} {#<hint>}.  On ARMv6K and later (or when a hint
   operand is given) emit the architectural hint encoding; otherwise
   leave the legacy "MOV r0, r0" pattern untouched.  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.
	 Keep only the condition field and overlay the hint opcode.  */
      inst.instruction &= 0xf0000000;
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
10306
/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
   Condition defaults to COND_ALWAYS.
   Error if Rd, Rn or Rm are R15.  */

static void
do_pkhbt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note Rn/Rm swap places in this form.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
10344
/* ARMv5TE: Preload-Cache
   MP Extensions: Preload for write

   PLD(W) <addr_mode>

   Syntactically, like LDR with B=1, W=0, L=1.  */

static void
do_pld (void)
{
  /* PLD only takes a memory operand; reject register/post-index/
     writeback forms before encoding the address.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/false);
}

/* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same addressing restrictions as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/false);
  /* PLI clears the P bit relative to the PLD encoding.  */
  inst.instruction &= ~PRE_INDEX;
}
10381
/* Assemble PUSH/POP {reglist} by rewriting it as the equivalent
   LDM/STM with SP! as the base register.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesize a writeback
     SP base register as operand 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/true);
}
10394
10395 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
10396 word at the specified address and the following word
10397 respectively.
10398 Unconditionally executed.
10399 Error if Rn is R15. */
10400
10401 static void
10402 do_rfe (void)
10403 {
10404 inst.instruction |= inst.operands[0].reg << 16;
10405 if (inst.operands[0].writeback)
10406 inst.instruction |= WRITE_BACK;
10407 }
10408
/* ARM V6 ssat (argument parse).  */

static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* SSAT encodes the saturate position as sat_imm = width - 1.  */
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 usat (argument parse).  */

static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* USAT encodes the saturate position directly (no -1 bias).  */
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;

  if (inst.operands[3].present)
    encode_arm_shift (3);
}

/* ARM V6 ssat16 (argument parse).  */

static void
do_ssat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
  inst.instruction |= inst.operands[2].reg;
}

/* ARM V6 usat16 (argument parse).  */
static void
do_usat16 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;
}
10452
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  */

static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* operand 0 imm is nonzero for BE; bit 9 selects big-endian.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
10469
/* Assemble the shift pseudo-instructions (LSL/LSR/ASR/ROR) as MOV with
   a shifted operand.  The Rm operand may be omitted, in which case it
   defaults to Rd.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)	/* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate form: the shift amount is resolved by the fixup.  */
    inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
10490
/* Assemble SMC #<imm4>.  The 4-bit immediate is range-checked here and
   encoded by the fixup.  */
static void
do_smc (void)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  constraint (value > 0xf, _("immediate too large (bigger than 0xF)"));

  inst.relocs[0].type = BFD_RELOC_ARM_SMC;
  inst.relocs[0].pc_rel = 0;
}

/* Assemble HVC #<imm16>; the immediate is encoded by the fixup.  */
static void
do_hvc (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_HVC;
  inst.relocs[0].pc_rel = 0;
}

/* Assemble SWI/SVC #<imm24>; the immediate is encoded by the fixup.  */
static void
do_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
  inst.relocs[0].pc_rel = 0;
}
10514
/* Assemble the ARM encoding of SETPAN #<imm1> (Privileged Access
   Never extension); the PAN value goes in bit 9.  */
static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}

/* Assemble the Thumb encoding of SETPAN #<imm1>; the PAN value goes
   in bit 3.  */
static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
10532
/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
   SMLAxy{cond} Rd,Rm,Rs,Rn
   SMLAWy{cond} Rd,Rm,Rs,Rn
   Error if any register is R15.  */

static void
do_smla (void)
{
  /* Operands: 0 = Rd, 1 = Rm, 2 = Rs, 3 = Rn (accumulator).  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}

/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
   Error if any register is R15.
   Warning if Rdlo == Rdhi.  */

static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}

/* ARM V5E (El Segundo) signed-multiply (argument parse)
   SMULxy{cond} Rd,Rm,Rs
   Error if any register is R15.  */

static void
do_smul (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
}
10575
/* ARM V6 srs (argument parse).  The variable fields in the encoding are
   the same for both ARM and Thumb-2.  */

static void
do_srs (void)
{
  int reg;

  /* The base register is optional and defaults to SP; any explicit
     base must be r13.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  /* operand 1 imm is the target mode number.  */
  inst.instruction |= inst.operands[1].imm;
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
10597
/* ARM V6 strex (argument parse).  Operands: 0 = Rd (status result),
   1 = Rt (value to store), 2 = [Rn] (address, offset must be zero).  */

static void
do_strex (void)
{
  /* Only a plain [Rn] address is legal: no post-index, writeback,
     register offset, shift, or negative offset.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* Rd must not overlap Rt or Rn (UNPREDICTABLE).  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
10623
/* Thumb STREXB/STREXH (argument parse): validate the address mode and
   register overlap, then defer to the common Rm/Rd/Rn encoder.  */
static void
do_t_strexbh (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  /* The status register must not overlap the value or address.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10638
/* ARM STREXD (argument parse).  Operands: 0 = Rd (status), 1 = Rt
   (even register of the pair), 2 = optional Rt2 (must be Rt+1),
   3 = [Rn] address register.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must not overlap the pair or the address.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
10660
/* ARM V8 STRL.  */
static void
do_stlex (void)
{
  /* Status register must not overlap value or address (UNPREDICTABLE).  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}

/* Thumb variant: same overlap check, different field placement.  */
static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10679
/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
   extends it to 32-bits, and adds the result to a value in another
   register.  You can specify a rotation by 0, 8, 16, or 24 bits
   before extracting the 16-bit value.
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxtah (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  /* Rotation is encoded in bits 10-11 (imm is the rotation / 8).  */
  inst.instruction |= inst.operands[3].imm << 10;
}

/* ARM V6 SXTH.

   SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

static void
do_sxth (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 10;
}
10710 \f
/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.  */

/* Single-precision one-operand insns: Sd, Sm.  */
static void
do_vfp_sp_monadic (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Single-precision two-operand insns: Sd, Sn, Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Single-precision compare-with-zero: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

/* Single-to-double conversion: Dd, Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Double-to-single conversion: Sd, Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
10752
/* Core register from single-precision register: Rd, Sn (FMRS).  */
static void
do_vfp_reg_from_sp (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

/* Two core registers from an SP register pair: Rd, Rn, {Sm, Sm+1}.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Single-precision register from core register: Sn, Rd (FMSR).  */
static void
do_vfp_sp_from_reg (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

/* SP register pair from two core registers: {Sm, Sm+1}, Rd, Rn.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
10794
/* Single-precision load/store: Sd, [Rn, #offset] (FLDS/FSTS).  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, false, true, 0);
}

/* Double-precision load/store: Dd, [Rn, #offset] (FLDD/FSTD).  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, false, true, 0);
}
10808
10809
/* Common encoder for single-precision load/store multiple
   (FLDMS/FSTMS family).  Operand 0 is the base register, operand 1
   carries the first register (reg) and the register count (imm).  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Only the increment-after form may omit writeback.  */
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}

/* Common encoder for double-precision load/store multiple
   (FLDMD/FSTMD and the FLDMX/FSTMX variants).  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* The offset field counts words, so double the register count; the
     X ("unknown precision") variants transfer one extra word.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
10843
/* Thin wrappers selecting the addressing mode for the VFP load/store
   multiple encoders above.  */

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

/* The X variants transfer an extra status word (FLDMX/FSTMX).  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
10879
/* Double-precision one-operand insns: Dd, Dm.  */
static void
do_vfp_dp_rd_rm (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

/* Encode operand 0 as Dn, operand 1 as Dd.  */
static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

/* Encode operand 0 as Dd, operand 1 as Dn.  */
static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

/* Double-precision two-operand insns: Dd, Dn, Dm.  */
static void
do_vfp_dp_rd_rn_rm (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

/* Only Dd is encoded.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

/* Encode operands as Dm, Dd, Dn respectively.  */
static void
do_vfp_dp_rm_rd_rn (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
10934
/* VFPv3 instructions.  */

/* VMOV.F32 Sd, #imm: the 8-bit encoded constant is split into the
   high nibble (bits 16-19) and low nibble (bits 0-3).  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

/* VMOV.F64 Dd, #imm: same immediate split as the SP form.  */
static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
10951
/* Encode the fraction-bits field of a VFPv3 fixed-point conversion.
   SRCSIZE is the source element size (16 or 32); operand 1 imm is the
   number of fraction bits requested, and the encoded value is
   srcsize - imm, split across bit 5 and bits 0-3.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  /* Low bit of immbits goes in bit 5, remaining bits in 0-3.  */
  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
10975
/* Fixed-point conversion wrappers: encode the destination register and
   delegate the fraction-bits encoding to vfp_conv.  */

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
11003 \f
/* FPA instructions.  Also in a logical order.  */

/* FPA compare: Fn, Fm.  */
static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}

/* FPA load/store multiple.  Operand 1 imm is the register count (1-4),
   encoded in the CP_T_X/CP_T_Y bits; operand 2 is the address.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register occupies 12 bytes in the extended format.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, true, true, 0);
}
11051 \f
/* iWMMXt instructions: strictly in alphabetical order.  */

/* TANDC/TORC/TANDCB etc.: destination must be r15 (flags).  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}

/* TEXTRC: Rd, #imm.  */
static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}

/* TEXTRM: Rd, wRn, #imm.  */
static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

/* TINSR: wRd, Rn, #imm.  */
static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}

/* TMIA: wRd, Rm, Rs.  */
static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* WALIGNI: wRd, wRn, wRm, #imm (alignment offset in bits 20-22).  */
static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}

/* WMERGE: wRd, wRn, wRm, #imm (merge offset in bits 21-23).  */
static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}

static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
11117
/* WLDRB/WLDRH/WSTRB/WSTRH: byte/halfword loads and stores use a
   scaled-by-2 coprocessor offset relocation.  */
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, true, false, reloc);
}

/* WLDRW/WSTRW: word loads/stores, including the control-register
   forms (TMCR/TMRC-style addressing).  */
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register form is unconditional (0xf condition).  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, true, true, 0);
}
11143
/* WLDRD/WSTRD: doubleword load/store.  iWMMXt2 additionally supports a
   register-offset addressing form, which needs a hand-built encoding.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Register-offset form: rebuild the addressing fields manually
	 (unconditional encoding, 0xf condition).  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, true, false, 0);
}
11166
11167 static void
11168 do_iwmmxt_wshufh (void)
11169 {
11170 inst.instruction |= inst.operands[0].reg << 12;
11171 inst.instruction |= inst.operands[1].reg << 16;
11172 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
11173 inst.instruction |= (inst.operands[2].imm & 0x0f);
11174 }
11175
11176 static void
11177 do_iwmmxt_wzero (void)
11178 {
11179 /* WZERO reg is an alias for WANDN reg, reg, reg. */
11180 inst.instruction |= inst.operands[0].reg;
11181 inst.instruction |= inst.operands[0].reg << 12;
11182 inst.instruction |= inst.operands[0].reg << 16;
11183 }
11184
/* Encode an iWMMXt shift-style instruction that takes either three
   registers or, on iWMMXt2 only, two registers and a 5-bit immediate.  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* A zero shift amount is rewritten as an equivalent instruction,
	   keyed on the size field in bits 20-23.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  The 5-bit amount is split: bit 4 of the
       immediate goes to insn bit 8, bits 0-3 stay in place; the
       condition field is forced to 0xF.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
11234 \f
11235 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
11236 operations first, then control, shift, and load/store. */
11237
11238 /* Insns like "foo X,Y,Z". */
11239
11240 static void
11241 do_mav_triple (void)
11242 {
11243 inst.instruction |= inst.operands[0].reg << 16;
11244 inst.instruction |= inst.operands[1].reg;
11245 inst.instruction |= inst.operands[2].reg << 12;
11246 }
11247
11248 /* Insns like "foo W,X,Y,Z".
11249 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
11250
11251 static void
11252 do_mav_quad (void)
11253 {
11254 inst.instruction |= inst.operands[0].reg << 5;
11255 inst.instruction |= inst.operands[1].reg << 12;
11256 inst.instruction |= inst.operands[2].reg << 16;
11257 inst.instruction |= inst.operands[3].reg;
11258 }
11259
11260 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
11261 static void
11262 do_mav_dspsc (void)
11263 {
11264 inst.instruction |= inst.operands[1].reg << 12;
11265 }
11266
11267 /* Maverick shift immediate instructions.
11268 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
11269 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
11270
11271 static void
11272 do_mav_shift (void)
11273 {
11274 int imm = inst.operands[2].imm;
11275
11276 inst.instruction |= inst.operands[0].reg << 12;
11277 inst.instruction |= inst.operands[1].reg << 16;
11278
11279 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
11280 Bits 5-7 of the insn should have bits 4-6 of the immediate.
11281 Bit 4 should be 0. */
11282 imm = (imm & 0xf) | ((imm & 0x70) << 1);
11283
11284 inst.instruction |= imm;
11285 }
11286 \f
11287 /* XScale instructions. Also sorted arithmetic before move. */
11288
11289 /* Xscale multiply-accumulate (argument parse)
11290 MIAcc acc0,Rm,Rs
11291 MIAPHcc acc0,Rm,Rs
11292 MIAxycc acc0,Rm,Rs. */
11293
11294 static void
11295 do_xsc_mia (void)
11296 {
11297 inst.instruction |= inst.operands[1].reg;
11298 inst.instruction |= inst.operands[2].reg << 12;
11299 }
11300
11301 /* Xscale move-accumulator-register (argument parse)
11302
11303 MARcc acc0,RdLo,RdHi. */
11304
11305 static void
11306 do_xsc_mar (void)
11307 {
11308 inst.instruction |= inst.operands[1].reg << 12;
11309 inst.instruction |= inst.operands[2].reg << 16;
11310 }
11311
11312 /* Xscale move-register-accumulator (argument parse)
11313
11314 MRAcc RdLo,RdHi,acc0. */
11315
11316 static void
11317 do_xsc_mra (void)
11318 {
11319 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
11320 inst.instruction |= inst.operands[0].reg << 12;
11321 inst.instruction |= inst.operands[1].reg << 16;
11322 }
11323 \f
11324 /* Encoding functions relevant only to Thumb. */
11325
/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  /* Register-specified shift amounts are not representable here.  */
  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with no shift amount bits set.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* Amount 0 is canonicalised to LSL; amount 32 (legal only for
	 the shifts not rejected above) is encoded as 0.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      /* The 5-bit amount is split across two fields: bits 2-4 of the
	 amount go to insn bits 12-14, bits 0-1 to insn bits 6-7.  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
11360
11361
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  If it is a store insn,
   reject PC in Rn.  */

static void
encode_thumb32_addr_mode (int i, bool is_t, bool is_d)
{
  const bool is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register always occupies bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #n}].  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  /* Optional LSL amount, limited to 0-3, goes in bits 4-5.  */
	  constraint (inst.relocs[0].exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.relocs[0].exp.X_add_number < 0
		      || inst.relocs[0].exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
	}
      inst.relocs[0].type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate pre-indexed form: [Rn, #imm] or [Rn, #imm]!.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form: [Rn], #imm — always implies writeback.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
11440
/* Table of Thumb instructions which exist in 16- and/or 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.
   Each X() entry is (mnemonic suffix, 16-bit opcode, 32-bit opcode);
   an all-zero 16-bit field means no 16-bit encoding exists.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_aut,   0000, f3af802d),			\
  X(_autg,  0000, fb500f00),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_bxaut, 0000, fb500f10),			\
  X(_cinc,  0000, ea509000),			\
  X(_cinv,  0000, ea50a000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cneg,  0000, ea50b000),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_csel,  0000, ea508000),			\
  X(_cset,  0000, ea5f900f),			\
  X(_csetm, 0000, ea5fa00f),			\
  X(_csinc, 0000, ea509000),			\
  X(_csinv, 0000, ea50a000),			\
  X(_csneg, 0000, ea50b000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_dlstp, 0000, f000e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_lctp,  0000, f00fe001),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_letp,  0000, f01fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pac,   0000, f3af801d),			\
  X(_pacbti,0000, f3af800d),			\
  X(_pacg,  0000, fb60f000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_wlstp, 0000, f000c001),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* Expand the table as an enum of T_MNEM_* codes.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Expand the table again as the array of 16-bit opcodes.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* And again as the array of 32-bit opcodes.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
11573
11574 /* Thumb instruction encoders, in alphabetical order. */
11575
11576 /* ADDW or SUBW. */
11577
11578 static void
11579 do_t_add_sub_w (void)
11580 {
11581 int Rd, Rn;
11582
11583 Rd = inst.operands[0].reg;
11584 Rn = inst.operands[1].reg;
11585
11586 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
11587 is the SP-{plus,minus}-immediate form of the instruction. */
11588 if (Rn == REG_SP)
11589 constraint (Rd == REG_PC, BAD_PC);
11590 else
11591 reject_bad_reg (Rd);
11592
11593 inst.instruction |= (Rn << 16) | (Rd << 8);
11594 inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
11595 }
11596
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equaling any of THUMB_OPCODE_add, adds, sub, or subs.  Handles both
   unified and divided syntax, narrow/wide selection, and relaxation.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      bool flags;
      bool narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate third operand.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || (inst.relocs[0].type
			  > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC))
		  {
		    if (inst.size_req == 2)
		      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
		    else
		      /* No explicit width: record the opcode for
			 relaxation.  */
		      inst.relax = opcode;
		  }
		}
	      else
		constraint (inst.size_req == 2, _("cannot honor width suffix"));
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Wide (32-bit) immediate encoding.  */
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Only SUBS PC, LR, #const is permitted here.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.relocs[0].exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.relocs[0].exp.X_add_number < 0
			      || inst.relocs[0].exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.relocs[0].exp.X_add_number;
		  inst.relocs[0].type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register third operand.  */
	  unsigned int value = inst.relocs[0].exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = false;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: only 16-bit encodings.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg)	/* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
11820
/* Thumb ADR: load a PC-relative address into Rd, choosing between the
   16-bit form, the 32-bit form, and deferred relaxation.  */
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.relocs[0].pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
      inst.relocs[0].exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.relocs[0].pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* For a defined Thumb function symbol, bump the addend so the
     resulting address has the Thumb bit set.  */
  if (inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number += 1;
}
11860
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bool narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = false;
	  if (inst.operands[2].shifted)
	    narrow = false;
	  if (inst.size_req == 4)
	    narrow = false;

	  /* The 16-bit form requires Rd and the first source to be the
	     same register.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
11949
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bool narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = false;
	  if (inst.operands[2].shifted)
	    narrow = false;
	  if (inst.size_req == 4)
	    narrow = false;

	  if (narrow)
	    {
	      /* Commutativity: the destination may match either source
		 in the 16-bit form.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
12050
12051 static void
12052 do_t_bfc (void)
12053 {
12054 unsigned Rd;
12055 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
12056 constraint (msb > 32, _("bit-field extends past end of register"));
12057 /* The instruction encoding stores the LSB and MSB,
12058 not the LSB and width. */
12059 Rd = inst.operands[0].reg;
12060 reject_bad_reg (Rd);
12061 inst.instruction |= Rd << 8;
12062 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
12063 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
12064 inst.instruction |= msb - 1;
12065 }
12066
12067 static void
12068 do_t_bfi (void)
12069 {
12070 int Rd, Rn;
12071 unsigned int msb;
12072
12073 Rd = inst.operands[0].reg;
12074 reject_bad_reg (Rd);
12075
12076 /* #0 in second position is alternative syntax for bfc, which is
12077 the same instruction but with REG_PC in the Rm field. */
12078 if (!inst.operands[1].isreg)
12079 Rn = REG_PC;
12080 else
12081 {
12082 Rn = inst.operands[1].reg;
12083 reject_bad_reg (Rn);
12084 }
12085
12086 msb = inst.operands[2].imm + inst.operands[3].imm;
12087 constraint (msb > 32, _("bit-field extends past end of register"));
12088 /* The instruction encoding stores the LSB and MSB,
12089 not the LSB and width. */
12090 inst.instruction |= Rd << 8;
12091 inst.instruction |= Rn << 16;
12092 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
12093 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
12094 inst.instruction |= msb - 1;
12095 }
12096
12097 static void
12098 do_t_bfx (void)
12099 {
12100 unsigned Rd, Rn;
12101
12102 Rd = inst.operands[0].reg;
12103 Rn = inst.operands[1].reg;
12104
12105 reject_bad_reg (Rd);
12106 reject_bad_reg (Rn);
12107
12108 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
12109 _("bit-field extends past end of register"));
12110 inst.instruction |= Rd << 8;
12111 inst.instruction |= Rn << 16;
12112 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
12113 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
12114 inst.instruction |= inst.operands[3].imm - 1;
12115 }
12116
12117 /* ARM V5 Thumb BLX (argument parse)
12118 BLX <target_addr> which is BLX(1)
12119 BLX <Rm> which is BLX(2)
12120 Unfortunately, there are two different opcodes for this mnemonic.
12121 So, the insns[].value is not used, and the code here zaps values
12122 into inst.instruction.
12123
12124 ??? How to take advantage of the additional two bits of displacement
12125 available in Thumb32 mode? Need new relocation? */
12126
12127 static void
12128 do_t_blx (void)
12129 {
12130 set_pred_insn_type_last ();
12131
12132 if (inst.operands[0].isreg)
12133 {
12134 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
12135 /* We have a register, so this is BLX(2). */
12136 inst.instruction |= inst.operands[0].reg << 3;
12137 }
12138 else
12139 {
12140 /* No register. This must be BLX(1). */
12141 inst.instruction = 0xf000e800;
12142 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
12143 }
12144 }
12145
/* Thumb B/Bcc: select a 16- or 32-bit encoding, apply IT-block rules,
   and emit the matching PC-relative relocation.  */
static void
do_t_branch (void)
{
  int opcode;
  int cond;
  bfd_reloc_code_real_type reloc;

  cond = inst.cond;
  set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);

  if (in_pred_block ())
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  if (unified_syntax
      && (inst.size_req == 4
	  || (inst.size_req != 2
	      && (inst.operands[0].hasreloc
		  || inst.relocs[0].exp.X_op == O_constant))))
    {
      /* 32-bit encoding.  */
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
		      _("selected architecture does not support "
			"wide conditional branch instruction"));

	  gas_assert (cond != 0xF);
	  /* Condition code goes in bits 22-25.  */
	  inst.instruction |= cond << 22;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      /* 16-bit encoding.  */
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }
  inst.relocs[0].type = reloc;
  inst.relocs[0].pc_rel = 1;
}
12207
12208 /* Actually do the work for Thumb state bkpt and hlt. The only difference
12209 between the two is the maximum immediate allowed - which is passed in
12210 RANGE. */
12211 static void
12212 do_t_bkpt_hlt1 (int range)
12213 {
12214 constraint (inst.cond != COND_ALWAYS,
12215 _("instruction is always unconditional"));
12216 if (inst.operands[0].present)
12217 {
12218 constraint (inst.operands[0].imm > range,
12219 _("immediate value out of range"));
12220 inst.instruction |= inst.operands[0].imm;
12221 }
12222
12223 set_pred_insn_type (NEUTRAL_IT_INSN);
12224 }
12225
static void
do_t_hlt (void)
{
  /* HLT takes a 6-bit immediate.  */
  do_t_bkpt_hlt1 (0x3f);
}
12231
static void
do_t_bkpt (void)
{
  /* BKPT takes an 8-bit immediate.  */
  do_t_bkpt_hlt1 (0xff);
}
12237
/* Thumb BL / BLX with a 23-bit displacement (argument parse).  Attaches
   the BRANCH23 relocation and applies the COFF interworking fix-up.  */
static void
do_t_branch23 (void)
{
  set_pred_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.relocs[0].type == BFD_RELOC_ARM_PLT32)
    inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_symbol
      = find_real_start (inst.relocs[0].exp.X_add_symbol);
#endif
}
12265
12266 static void
12267 do_t_bx (void)
12268 {
12269 set_pred_insn_type_last ();
12270 inst.instruction |= inst.operands[0].reg << 3;
12271 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
12272 should cause the alignment to be checked once it is known. This is
12273 because BX PC only works if the instruction is word aligned. */
12274 }
12275
12276 static void
12277 do_t_bxj (void)
12278 {
12279 int Rm;
12280
12281 set_pred_insn_type_last ();
12282 Rm = inst.operands[0].reg;
12283 reject_bad_reg (Rm);
12284 inst.instruction |= Rm << 16;
12285 }
12286
12287 static void
12288 do_t_clz (void)
12289 {
12290 unsigned Rd;
12291 unsigned Rm;
12292
12293 Rd = inst.operands[0].reg;
12294 Rm = inst.operands[1].reg;
12295
12296 reject_bad_reg (Rd);
12297 reject_bad_reg (Rm);
12298
12299 inst.instruction |= Rd << 8;
12300 inst.instruction |= Rm << 16;
12301 inst.instruction |= Rm;
12302 }
12303
12304 /* For the Armv8.1-M conditional instructions. */
static void
do_t_cond (void)
{
  unsigned Rd, Rn, Rm;
  signed int cond;

  /* These instructions take no condition suffix of their own.  */
  constraint (inst.cond != COND_ALWAYS, BAD_COND);

  Rd = inst.operands[0].reg;
  switch (inst.instruction)
    {
      /* Four-operand forms: Rd, Rn, Rm, <cond> taken verbatim.  */
    case T_MNEM_csinc:
    case T_MNEM_csinv:
    case T_MNEM_csneg:
    case T_MNEM_csel:
      Rn = inst.operands[1].reg;
      Rm = inst.operands[2].reg;
      cond = inst.operands[3].imm;
      constraint (Rn == REG_SP, BAD_SP);
      constraint (Rm == REG_SP, BAD_SP);
      break;

      /* Three-operand aliases: Rn is duplicated into Rm and the
	 condition is inverted.  */
    case T_MNEM_cinc:
    case T_MNEM_cinv:
    case T_MNEM_cneg:
      Rn = inst.operands[1].reg;
      cond = inst.operands[2].imm;
      /* Invert the last bit to invert the cond.  */
      cond = TOGGLE_BIT (cond, 0);
      constraint (Rn == REG_SP, BAD_SP);
      Rm = Rn;
      break;

      /* Two-operand aliases: both source registers are PC (i.e. the
	 zero-register encoding) and the condition is inverted.  */
    case T_MNEM_csetm:
    case T_MNEM_cset:
      cond = inst.operands[1].imm;
      /* Invert the last bit to invert the cond.  */
      cond = TOGGLE_BIT (cond, 0);
      Rn = REG_PC;
      Rm = REG_PC;
      break;

    default: abort ();
    }

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction = THUMB_OP32 (inst.instruction);
  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  inst.instruction |= cond << 4;
}
12357
/* Thumb CSDB: the table opcode is complete as-is; only record that the
   instruction must sit outside any IT/predication block.  */
static void
do_t_csdb (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
}
12363
/* Thumb CPS (argument parse): OR the interrupt-flag immediate into the
   base opcode.  Not permitted inside an IT block.  */
static void
do_t_cps (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction |= inst.operands[0].imm;
}
12370
/* Thumb CPSIE/CPSID (argument parse).  Uses the 32-bit encoding when a
   mode operand is present or .w was requested (and the architecture has
   the non-M v6 extension); otherwise falls back to the 16-bit form.  */
static void
do_t_cpsi (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Extract the imod field from the table opcode, then rebuild the
	 instruction as the T32 CPS encoding.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	/* Mode change requested: set the M bit and the mode number.  */
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
12398
12399 /* THUMB CPY instruction (argument parse). */
12400
12401 static void
12402 do_t_cpy (void)
12403 {
12404 if (inst.size_req == 4)
12405 {
12406 inst.instruction = THUMB_OP32 (T_MNEM_mov);
12407 inst.instruction |= inst.operands[0].reg << 8;
12408 inst.instruction |= inst.operands[1].reg;
12409 }
12410 else
12411 {
12412 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
12413 inst.instruction |= (inst.operands[0].reg & 0x7);
12414 inst.instruction |= inst.operands[1].reg << 3;
12415 }
12416 }
12417
12418 static void
12419 do_t_cbz (void)
12420 {
12421 set_pred_insn_type (OUTSIDE_PRED_INSN);
12422 constraint (inst.operands[0].reg > 7, BAD_HIREG);
12423 inst.instruction |= inst.operands[0].reg;
12424 inst.relocs[0].pc_rel = 1;
12425 inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH7;
12426 }
12427
/* Thumb DBG hint (argument parse): OR the 4-bit option immediate into
   the base opcode.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
12433
12434 static void
12435 do_t_div (void)
12436 {
12437 unsigned Rd, Rn, Rm;
12438
12439 Rd = inst.operands[0].reg;
12440 Rn = (inst.operands[1].present
12441 ? inst.operands[1].reg : Rd);
12442 Rm = inst.operands[2].reg;
12443
12444 reject_bad_reg (Rd);
12445 reject_bad_reg (Rn);
12446 reject_bad_reg (Rm);
12447
12448 inst.instruction |= Rd << 8;
12449 inst.instruction |= Rn << 16;
12450 inst.instruction |= Rm;
12451 }
12452
12453 static void
12454 do_t_hint (void)
12455 {
12456 if (unified_syntax && inst.size_req == 4)
12457 inst.instruction = THUMB_OP32 (inst.instruction);
12458 else
12459 inst.instruction = THUMB_OP16 (inst.instruction);
12460 }
12461
/* Thumb IT instruction (argument parse).  Records the predication state
   for the following block and fixes up the mask bits when the base
   condition is one of the negated forms.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_pred_insn_type (IT_INSN);
  /* Mask as parsed, with the trailing marker bit set.  */
  now_pred.mask = (inst.instruction & 0xf) | 0x10;
  now_pred.cc = cond;
  now_pred.warn_deprecated = false;
  now_pred.type = SCALAR_PRED;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the lowest set bit in the mask tells us how many
	 x/y/z slots are in use; flip the used slots' bits accordingly.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_pred.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_pred.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_pred.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_pred.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  /* Condition goes in bits <7:4> of the IT encoding.  */
  inst.instruction |= cond << 4;
}
12505
12506 /* Helper function used for both push/pop and ldm/stm. */
/* Shared encoder for Thumb-2 LDM/STM/PUSH/POP (and the CLRM-style case
   where DO_IO is false and there is no base register).  BASE is the base
   register (-1 when none), MASK the register list, WRITEBACK whether the
   base is updated.  Degenerate single-register lists are rewritten as
   plain LDR/STR.  */
static void
encode_thumb2_multi (bool do_io, int base, unsigned mask,
		     bool writeback)
{
  bool load, store;

  gas_assert (base != -1 || !do_io);
  /* Bit 20 of the table opcode distinguishes load from store.  */
  load = do_io && ((inst.instruction & (1 << 20)) != 0);
  store = do_io && !load;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if (do_io && (mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch: must end any IT block.  */
	    set_pred_insn_type_last ();
	}
    }
  else if (store)
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  /* A power-of-two mask means exactly one register in the list.  */
  if (do_io && ((mask & (mask - 1)) == 0))
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Re-purpose MASK as the register number field (bits <15:12>).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  if (do_io)
    inst.instruction |= base << 16;
}
12573
/* Thumb LDM/STM (argument parse).  In unified syntax, try the various
   16-bit encodings (plain LDMIA/STMIA, single-register LDR/STR, and the
   SP-based PUSH/POP forms) before falling back to the 32-bit multi
   encoding.  In pre-unified syntax only the classic 16-bit forms are
   accepted.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bool narrow;
      unsigned mask;

      narrow = false;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* The 16-bit STMIA always writes back; 16-bit LDMIA writes
		 back exactly when the base is absent from the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = true;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
							     : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = true;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP base: STMIA!/LDMIA! map onto PUSH/POP; without
		 writeback a single register maps onto the SP-relative
		 STR/LDR encodings.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = true;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = true;
		}
	    }
	}

      if (!narrow)
	{
	  /* No 16-bit form applied; use the generic 32-bit encoder.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_multi (true /* do_io */, inst.operands[0].reg,
			       inst.operands[1].imm,
			       inst.operands[0].writeback);
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
12702
/* Thumb LDREX (argument parse).  Only a plain [Rn {, #imm}] addressing
   mode is legal; the offset is handled via an unsigned 8-bit reloc.  */
static void
do_t_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
12718
/* Thumb LDREXD (argument parse).  The second destination defaults to
   Rt + 1 when omitted; the two destinations must differ.  */
static void
do_t_ldrexd (void)
{
  if (!inst.operands[1].present)
    {
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed as first register "
		    "when second register is omitted"));
      inst.operands[1].reg = inst.operands[0].reg + 1;
    }
  constraint (inst.operands[0].reg == inst.operands[1].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
}
12736
/* Thumb single-register load/store (argument parse).  Tries the various
   16-bit encodings (register offset, immediate offset, PC/SP-relative)
   before falling back to the 32-bit Thumb-2 encodings; also handles
   literal-pool loads and pre-unified syntax.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load that writes PC is a branch: it must end any IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_pred_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Literal/immediate operand: may become a literal-pool load
	     or a MOV-style synthesis.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/false))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- or SP-relative: switch to the dedicated opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.relocs[0].pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Narrow not forced: allow relaxation to the wide form.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/false, /*is_d=*/false);
      check_ldr_r15_aligned ();
      return;
    }

  /* Pre-unified (Thumb-1 only) syntax from here on.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/false))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC/SP-relative forms: word-sized loads/stores only.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert immediate-offset opcodes to their register-offset
     counterparts.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
12923
/* Thumb LDRD/STRD (argument parse).  The second transfer register
   defaults to Rt + 1 when omitted (LR and r12 excluded).  */
static void
do_t_ldstd (void)
{
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed here"));
      constraint (inst.operands[0].reg == REG_R12,
		  _("r12 not allowed here"));
    }

  if (inst.operands[2].writeback
      && (inst.operands[0].reg == inst.operands[2].reg
	  || inst.operands[1].reg == inst.operands[2].reg))
    as_warn (_("base register written back, and overlaps "
	       "one of transfer registers"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/false, /*is_d=*/true);
}
12946
/* Thumb LDRT/STRT-style unprivileged load/store (argument parse).  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/true, /*is_d=*/false);
}
12953
12954 static void
12955 do_t_mla (void)
12956 {
12957 unsigned Rd, Rn, Rm, Ra;
12958
12959 Rd = inst.operands[0].reg;
12960 Rn = inst.operands[1].reg;
12961 Rm = inst.operands[2].reg;
12962 Ra = inst.operands[3].reg;
12963
12964 reject_bad_reg (Rd);
12965 reject_bad_reg (Rn);
12966 reject_bad_reg (Rm);
12967 reject_bad_reg (Ra);
12968
12969 inst.instruction |= Rd << 8;
12970 inst.instruction |= Rn << 16;
12971 inst.instruction |= Rm;
12972 inst.instruction |= Ra << 12;
12973 }
12974
12975 static void
12976 do_t_mlal (void)
12977 {
12978 unsigned RdLo, RdHi, Rn, Rm;
12979
12980 RdLo = inst.operands[0].reg;
12981 RdHi = inst.operands[1].reg;
12982 Rn = inst.operands[2].reg;
12983 Rm = inst.operands[3].reg;
12984
12985 reject_bad_reg (RdLo);
12986 reject_bad_reg (RdHi);
12987 reject_bad_reg (Rn);
12988 reject_bad_reg (Rm);
12989
12990 inst.instruction |= RdLo << 12;
12991 inst.instruction |= RdHi << 8;
12992 inst.instruction |= Rn << 16;
12993 inst.instruction |= Rm;
12994 }
12995
/* Thumb MOV/MOVS/CMP (argument parse).  Chooses between the many narrow
   and wide encodings, synthesises shift instructions for shifted-operand
   moves, and enforces the SP/PC restrictions that differ per form.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* MOV with PC destination is a branch: must end any IT block.  */
  if (Rn == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      /* The destination field is at bit 8 for MOV-type encodings and at
	 bit 16 for CMP-type ones.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bool narrow;
      bool low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_pred_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = false;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				 "deprecated when r%u is the destination "
				 "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_pred_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.relocs[0].type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  if (inst.size_req == 2)
		    inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    /* Narrow not forced: allow relaxation to 32-bit.  */
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      /* The G0..G3 group relocations only fit Thumb-1 forms.  */
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bool flags = (inst.instruction == T_MNEM_movs);

	  if (in_pred_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = false;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = false;

	  if (Rn != Rm)
	    narrow = false;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_pred_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = false; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	/* Narrow register-to-register forms.  */
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Pre-unified syntax from here on.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  /* High registers: use the hi-reg MOV/CMP encodings via
	     do_t_cpy.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
13295
/* Encode the Thumb MOVW/MOVT instructions (16-bit immediate moves).
   Bit 23 of the instruction template distinguishes MOVT (":upper16:",
   top half) from MOVW (":lower16:").  */
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bool top;

  top = (inst.instruction & 0x00800000) != 0;
  if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
    {
      /* :lower16: is only meaningful on MOVW.  */
      constraint (top, _(":lower16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.relocs[0].type == BFD_RELOC_ARM_MOVT)
    {
      /* :upper16: is only meaningful on MOVT.  */
      constraint (!top, _(":upper16: not allowed in this instruction"));
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      /* Plain constant: scatter the 16-bit value into the T32
	 imm4:i:imm3:imm8 fields now; no fixup is needed.  */
      imm = inst.relocs[0].exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
13328
/* Encode Thumb MVN/MVNS/TST/CMN/CMP-style one-source data-processing
   instructions, choosing between 16-bit and 32-bit encodings.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN merely reject PC; the others also reject SP via
     reject_bad_reg.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN/MVNS put the destination at bit 8; the flag-setting
	 comparisons put the first operand at bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bool narrow;

      /* Decide whether a 16-bit encoding is possible/desirable.  */
      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = false;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = true;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Non-unified (old Thumb) syntax: only the 16-bit low-register
	 forms exist.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
13408
/* Encode the Thumb MRS instruction (read a special/banked register
   into Rd).  Falls through to the VFP VMRS handler first.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* VMRS (e.g. FPSCR) is handled elsewhere.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form: the parsed value packs the register
	 selector bits; validate and scatter them into the opcode.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698: The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bool m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
13456
/* Encode the Thumb MSR instruction (write Rn to a special register).
   The Thumb encoding has no immediate form; falls through to the VFP
   VMSR handler first.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* VMSR (e.g. FPSCR) is handled elsewhere.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* Operand 0 is either a banked register (isreg) or a PSR-flags
     immediate; either way the bits land in FLAGS.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698: The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bool m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the flag/selector bits into the opcode fields.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
13503
/* Encode Thumb MUL/MULS.  The 16-bit form requires the destination to
   overlap one of the two sources and all registers to be low.  */
static void
do_t_mul (void)
{
  bool narrow;
  unsigned Rd, Rn, Rm;

  /* Two-operand form: MUL Rd, Rm == MUL Rd, Rm, Rd.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = false;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = true;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      /* The 16-bit encoding destroys one source, so Rd must equal
	 one of them; the other goes in the Rm slot.  */
      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
13566
13567 static void
13568 do_t_mull (void)
13569 {
13570 unsigned RdLo, RdHi, Rn, Rm;
13571
13572 RdLo = inst.operands[0].reg;
13573 RdHi = inst.operands[1].reg;
13574 Rn = inst.operands[2].reg;
13575 Rm = inst.operands[3].reg;
13576
13577 reject_bad_reg (RdLo);
13578 reject_bad_reg (RdHi);
13579 reject_bad_reg (Rn);
13580 reject_bad_reg (Rm);
13581
13582 inst.instruction |= RdLo << 12;
13583 inst.instruction |= RdHi << 8;
13584 inst.instruction |= Rn << 16;
13585 inst.instruction |= Rm;
13586
13587 if (RdLo == RdHi)
13588 as_tsktsk (_("rdhi and rdlo must be different"));
13589 }
13590
/* Encode Thumb NOP and the NOP-with-hint forms.  */
static void
do_t_nop (void)
{
  /* NOP is neutral with respect to IT blocks.  */
  set_pred_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  /* Wide hint encoding (hints > 15 only exist in T32).  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    /* 0x46c0 is the traditional Thumb-1 NOP (MOV r8, r8).  */
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
13623
/* Encode Thumb NEG/NEGS (RSB from zero), choosing 16- or 32-bit form.  */
static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bool narrow;

      /* Inside an IT block the non-flag-setting form is narrow;
	 outside, the flag-setting form is.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = false;
      if (inst.size_req == 4)
	narrow = false;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      /* Old syntax: 16-bit low-register form only.  */
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
13664
/* Encode Thumb-2 ORN/ORNS (OR with complemented second operand);
   T32-only, so no 16-bit selection is needed.  */
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  /* Two-operand form: ORN Rd, op2 == ORN Rd, Rd, op2.  */
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Immediate form: switch to the modified-immediate encoding
	 and let the fixup machinery encode the constant.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
13698
/* Encode Thumb-2 PKHBT (pack halfword bottom/top); also used by
   do_t_pkhtb after operand adjustment.  */
static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      /* Optional shift amount: split into imm3 (14-12) and imm2 (7-6).  */
      unsigned int val = inst.relocs[0].exp.X_add_number;
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}
13724
13725 static void
13726 do_t_pkhtb (void)
13727 {
13728 if (!inst.operands[3].present)
13729 {
13730 unsigned Rtmp;
13731
13732 inst.instruction &= ~0x00000020;
13733
13734 /* PR 10168. Swap the Rm and Rn registers. */
13735 Rtmp = inst.operands[1].reg;
13736 inst.operands[1].reg = inst.operands[2].reg;
13737 inst.operands[2].reg = Rtmp;
13738 }
13739 do_t_pkhbt ();
13740 }
13741
13742 static void
13743 do_t_pld (void)
13744 {
13745 if (inst.operands[0].immisreg)
13746 reject_bad_reg (inst.operands[0].imm);
13747
13748 encode_thumb32_addr_mode (0, /*is_t=*/false, /*is_d=*/false);
13749 }
13750
/* Encode Thumb PUSH/POP, choosing among the 16-bit forms (low regs,
   optionally plus LR for push / PC for pop) and the 32-bit LDM/STM
   form.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    /* Only low registers: plain 16-bit encoding.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR (push) or PC (pop): 16-bit
	 encoding with the extra-register bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else needs the 32-bit multi-register form (base SP).  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_multi (true /* do_io */, 13, mask, true);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
13783
13784 static void
13785 do_t_clrm (void)
13786 {
13787 if (unified_syntax)
13788 encode_thumb2_multi (false /* do_io */, -1, inst.operands[0].imm, false);
13789 else
13790 {
13791 inst.error = _("invalid register list to push/pop instruction");
13792 return;
13793 }
13794 }
13795
/* Encode the Armv8.1-M VSCCLRM instruction (clear FP registers).
   The single- and double-precision register lists are encoded
   differently.  */
static void
do_t_vscclrm (void)
{
  if (inst.operands[0].issingle)
    {
      /* S-register list: D bit (22) plus Vd<4:1> at bit 12.  */
      inst.instruction |= (inst.operands[0].reg & 0x1) << 22;
      inst.instruction |= (inst.operands[0].reg & 0x1e) << 11;
      inst.instruction |= inst.operands[0].imm;
    }
  else
    {
      /* D-register list: top bit at 22, Vd<3:0> at bit 12; the count
	 is doubled since each D register is two words.  */
      inst.instruction |= (inst.operands[0].reg & 0x10) << 18;
      inst.instruction |= (inst.operands[0].reg & 0xf) << 12;
      inst.instruction |= 1 << 8;
      inst.instruction |= inst.operands[0].imm << 1;
    }
}
13813
13814 static void
13815 do_t_rbit (void)
13816 {
13817 unsigned Rd, Rm;
13818
13819 Rd = inst.operands[0].reg;
13820 Rm = inst.operands[1].reg;
13821
13822 reject_bad_reg (Rd);
13823 reject_bad_reg (Rm);
13824
13825 inst.instruction |= Rd << 8;
13826 inst.instruction |= Rm << 16;
13827 inst.instruction |= Rm;
13828 }
13829
/* Encode Thumb REV/REV16/REVSH (byte-reverse), 16- or 32-bit form.  */
static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      /* Low registers and no explicit .w: 16-bit encoding.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      /* 32-bit encoding duplicates Rm in two fields.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
13858
13859 static void
13860 do_t_rrx (void)
13861 {
13862 unsigned Rd, Rm;
13863
13864 Rd = inst.operands[0].reg;
13865 Rm = inst.operands[1].reg;
13866
13867 reject_bad_reg (Rd);
13868 reject_bad_reg (Rm);
13869
13870 inst.instruction |= Rd << 8;
13871 inst.instruction |= Rm;
13872 }
13873
/* Encode Thumb RSB/RSBS (reverse subtract).  RSB{S} Rd, Rs, #0 can be
   shrunk to the 16-bit NEG encoding.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bool narrow;

      /* Bit 20 is the S (flag-setting) bit of the T32 template.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (Rd > 7 || Rs > 7)
	narrow = false;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = false;

      /* The 16-bit form only exists for an immediate of zero.  */
      if (inst.relocs[0].exp.X_op != O_constant
	  || inst.relocs[0].exp.X_add_number != 0)
	narrow = false;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.relocs[0].type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* 32-bit modified-immediate form.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
13928
/* Encode Thumb SETEND (set data endianness).  */
static void
do_t_setend (void)
{
  /* SETEND is deprecated from ARMv8 onwards; warn if requested.  */
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* Not permitted inside an IT block.  */
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  /* Bit 3 selects big-endian.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
13940
/* Encode the Thumb shift instructions (ASR/LSL/LSR/ROR, with or
   without flag setting), by register or by immediate, choosing
   between 16-bit and 32-bit encodings.  */
static void
do_t_shift (void)
{
  /* Two-operand form: SHIFT Rd, src == SHIFT Rd, Rd, src.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bool narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Decide whether a 16-bit encoding is possible: low registers,
	 correct IT-block/flag relationship, no immediate ROR, and
	 for register shifts Rd must equal Rn.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = false;
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = false;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = false;
      if (inst.size_req == 4)
	narrow = false;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 32-bit register-shift encoding.  */
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Immediate shifts are encoded as MOV{S} Rd, Rn, SHIFT #n.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.relocs[0].type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 16-bit register-shift encoding (Rd == Rn).  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* 16-bit immediate-shift encoding; the amount is filled
		 in by the BFD_RELOC_ARM_THUMB_SHIFT fixup.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Old (non-unified) syntax: only 16-bit low-register forms.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
14088
14089 static void
14090 do_t_simd (void)
14091 {
14092 unsigned Rd, Rn, Rm;
14093
14094 Rd = inst.operands[0].reg;
14095 Rn = inst.operands[1].reg;
14096 Rm = inst.operands[2].reg;
14097
14098 reject_bad_reg (Rd);
14099 reject_bad_reg (Rn);
14100 reject_bad_reg (Rm);
14101
14102 inst.instruction |= Rd << 8;
14103 inst.instruction |= Rn << 16;
14104 inst.instruction |= Rm;
14105 }
14106
14107 static void
14108 do_t_simd2 (void)
14109 {
14110 unsigned Rd, Rn, Rm;
14111
14112 Rd = inst.operands[0].reg;
14113 Rm = inst.operands[1].reg;
14114 Rn = inst.operands[2].reg;
14115
14116 reject_bad_reg (Rd);
14117 reject_bad_reg (Rn);
14118 reject_bad_reg (Rm);
14119
14120 inst.instruction |= Rd << 8;
14121 inst.instruction |= Rn << 16;
14122 inst.instruction |= Rm;
14123 }
14124
/* Encode Thumb SMC (secure monitor call) with a 4-bit immediate.  */
static void
do_t_smc (void)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  constraint (value > 0xf, _("immediate too large (bigger than 0xF)"));

  /* The immediate is encoded directly; no fixup is emitted.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0x000f) << 16;

  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_pred_insn_type_last ();
}
14141
14142 static void
14143 do_t_hvc (void)
14144 {
14145 unsigned int value = inst.relocs[0].exp.X_add_number;
14146
14147 inst.relocs[0].type = BFD_RELOC_UNUSED;
14148 inst.instruction |= (value & 0x0fff);
14149 inst.instruction |= (value & 0xf000) << 4;
14150 }
14151
/* Shared encoder for Thumb-2 SSAT/USAT.  BIAS is subtracted from the
   saturate-position operand (1 for SSAT, 0 for USAT).  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      /* Optional shift on the source register.  */
      offsetT shift_amount = inst.relocs[0].exp.X_add_number;

      inst.relocs[0].type = BFD_RELOC_UNUSED;

      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Shift amount split as imm3 (14-12) : imm2 (7-6).  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
14189
/* Encode Thumb-2 SSAT: bias 1 so the saturate position is stored
   as imm - 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
14195
14196 static void
14197 do_t_ssat16 (void)
14198 {
14199 unsigned Rd, Rn;
14200
14201 Rd = inst.operands[0].reg;
14202 Rn = inst.operands[2].reg;
14203
14204 reject_bad_reg (Rd);
14205 reject_bad_reg (Rn);
14206
14207 inst.instruction |= Rd << 8;
14208 inst.instruction |= inst.operands[1].imm - 1;
14209 inst.instruction |= Rn << 16;
14210 }
14211
/* Encode Thumb-2 STREX: only a plain register base with an optional
   immediate offset is accepted.  */
static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset is filled in by an unsigned 8-bit-scaled fixup.  */
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
14228
14229 static void
14230 do_t_strexd (void)
14231 {
14232 if (!inst.operands[2].present)
14233 inst.operands[2].reg = inst.operands[1].reg + 1;
14234
14235 constraint (inst.operands[0].reg == inst.operands[1].reg
14236 || inst.operands[0].reg == inst.operands[2].reg
14237 || inst.operands[0].reg == inst.operands[3].reg,
14238 BAD_OVERLAP);
14239
14240 inst.instruction |= inst.operands[0].reg;
14241 inst.instruction |= inst.operands[1].reg << 12;
14242 inst.instruction |= inst.operands[2].reg << 8;
14243 inst.instruction |= inst.operands[3].reg << 16;
14244 }
14245
14246 static void
14247 do_t_sxtah (void)
14248 {
14249 unsigned Rd, Rn, Rm;
14250
14251 Rd = inst.operands[0].reg;
14252 Rn = inst.operands[1].reg;
14253 Rm = inst.operands[2].reg;
14254
14255 reject_bad_reg (Rd);
14256 reject_bad_reg (Rn);
14257 reject_bad_reg (Rm);
14258
14259 inst.instruction |= Rd << 8;
14260 inst.instruction |= Rn << 16;
14261 inst.instruction |= Rm;
14262 inst.instruction |= inst.operands[3].imm << 4;
14263 }
14264
/* Encode Thumb SXTH/SXTB/UXTH/UXTB-style extend instructions,
   choosing 16- or 32-bit form; only the 32-bit form supports a
   rotation.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      /* Low registers, no rotation: 16-bit encoding.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation selector in bits 5-4.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
14300
/* Encode Thumb SWI/SVC: the immediate is applied later through a
   BFD_RELOC_ARM_SWI fixup rather than being encoded here.  */
static void
do_t_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
}
14306
/* Encode Thumb-2 TBB/TBH (table branch).  Bit 4 of the template
   distinguishes TBH (halfword, shifted index) from TBB.  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* Table branches must be last in an IT block.  */
  set_pred_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* Pre-ARMv8, SP is not permitted as the base register.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only TBH takes an (LSL #1) shifted index.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
14329
/* Encode Thumb UDF (permanently undefined), 16- or 32-bit form.  */
static void
do_t_udf (void)
{
  /* The immediate defaults to zero.  */
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      /* 32-bit form: 16-bit immediate split as imm4 (19-16) : imm12.  */
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  set_pred_insn_type (NEUTRAL_IT_INSN);
}
14352
14353
/* Encode Thumb-2 USAT: bias 0, so the saturate position is stored
   unmodified (unlike SSAT's imm - 1).  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
14359
14360 static void
14361 do_t_usat16 (void)
14362 {
14363 unsigned Rd, Rn;
14364
14365 Rd = inst.operands[0].reg;
14366 Rn = inst.operands[2].reg;
14367
14368 reject_bad_reg (Rd);
14369 reject_bad_reg (Rn);
14370
14371 inst.instruction |= Rd << 8;
14372 inst.instruction |= inst.operands[1].imm;
14373 inst.instruction |= Rn << 16;
14374 }
14375
14376 /* Checking the range of the branch offset (VAL) with NBITS bits
14377 and IS_SIGNED signedness. Also checks the LSB to be 0. */
14378 static int
14379 v8_1_branch_value_check (int val, int nbits, int is_signed)
14380 {
14381 gas_assert (nbits > 0 && nbits <= 32);
14382 if (is_signed)
14383 {
14384 int cmp = (1 << (nbits - 1));
14385 if ((val < -cmp) || (val >= cmp) || (val & 0x01))
14386 return FAIL;
14387 }
14388 else
14389 {
14390 if ((val <= 0) || (val >= (1 << nbits)) || (val & 0x1))
14391 return FAIL;
14392 }
14393 return SUCCESS;
14394 }
14395
14396 /* For branches in Armv8.1-M Mainline. */
/* Encode the Armv8.1-M Mainline branch-future instructions
   (BF, BFX, BFL, BFLX, BFCSEL).  Operand 0 is the branch-future label:
   encoded immediately when it resolved to a constant, otherwise
   deferred through a BRANCH5 relocation.  The original mnemonic is
   saved in INSN before inst.instruction is rewritten to the 32-bit
   Thumb opcode, and the remaining operands are encoded per-mnemonic
   below.  */
static void
do_t_branch_future (void)
{
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);
  if (inst.operands[0].hasreloc == 0)
    {
      /* Label offset known now: 5-bit unsigned, even; bits 4:1 of the
	 offset go to bits 26:23 of the instruction.  */
      if (v8_1_branch_value_check (inst.operands[0].imm, 5, false) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23;
    }
  else
    {
      inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH5;
      inst.relocs[0].pc_rel = 1;
    }

  switch (insn)
    {
    case T_MNEM_bf:
      if (inst.operands[1].hasreloc == 0)
	{
	  /* 17-bit signed branch target, split into immA (bits 16:12),
	     immB (bits 11:2) and immC (bit 1) fields.  */
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 17, true) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  int immA = (val & 0x0001f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF17;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfl:
      if (inst.operands[1].hasreloc == 0)
	{
	  /* Same field layout as BF but with a 19-bit signed target
	     (immA widens to bits 18:12).  */
	  int val = inst.operands[1].imm;
	  if (v8_1_branch_value_check (inst.operands[1].imm, 19, true) == FAIL)
	    as_bad (BAD_BRANCH_OFF);

	  int immA = (val & 0x0007f000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF19;
	  inst.relocs[1].pc_rel = 1;
	}
      break;

    case T_MNEM_bfcsel:
      /* Operand 1: 13-bit branch target, no range check here (only the
	 field split) — presumably validated elsewhere or by the fixup;
	 TODO confirm.  */
      if (inst.operands[1].hasreloc == 0)
	{
	  int val = inst.operands[1].imm;
	  int immA = (val & 0x00001000) >> 12;
	  int immB = (val & 0x00000ffc) >> 2;
	  int immC = (val & 0x00000002) >> 1;
	  inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	}
      else
	{
	  inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF13;
	  inst.relocs[1].pc_rel = 1;
	}

      /* Operand 2: the "else" label.  When both operand 0 and operand 2
	 are constants, their difference must be 2 (T bit clear) or 4
	 (T bit set); mixing a resolved operand with an unresolved one is
	 rejected, since the fixup needs both or neither.  */
      if (inst.operands[2].hasreloc == 0)
	{
	  constraint ((inst.operands[0].hasreloc != 0), BAD_ARGS);
	  int val2 = inst.operands[2].imm;
	  int val0 = inst.operands[0].imm & 0x1f;
	  int diff = val2 - val0;
	  if (diff == 4)
	    inst.instruction |= 1 << 17; /* T bit.  */
	  else if (diff != 2)
	    as_bad (_("out of range label-relative fixup value"));
	}
      else
	{
	  constraint ((inst.operands[0].hasreloc == 0), BAD_ARGS);
	  inst.relocs[2].type = BFD_RELOC_THUMB_PCREL_BFCSEL;
	  inst.relocs[2].pc_rel = 1;
	}

      /* Operand 3: the condition, encoded in bits 21:18.  An outer IT
	 condition is not allowed.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= (inst.operands[3].imm & 0xf) << 18;
      break;

    case T_MNEM_bfx:
    case T_MNEM_bflx:
      /* Register forms: branch target in a register, bits 19:16.  */
      inst.instruction |= inst.operands[1].reg << 16;
      break;

    default: abort ();
    }
}
14504
14505 /* Helper function for do_t_loloop to handle relocations. */
14506 static void
14507 v8_1_loop_reloc (int is_le)
14508 {
14509 if (inst.relocs[0].exp.X_op == O_constant)
14510 {
14511 int value = inst.relocs[0].exp.X_add_number;
14512 value = (is_le) ? -value : value;
14513
14514 if (v8_1_branch_value_check (value, 12, false) == FAIL)
14515 as_bad (BAD_BRANCH_OFF);
14516
14517 int imml, immh;
14518
14519 immh = (value & 0x00000ffc) >> 2;
14520 imml = (value & 0x00000002) >> 1;
14521
14522 inst.instruction |= (imml << 11) | (immh << 1);
14523 }
14524 else
14525 {
14526 inst.relocs[0].type = BFD_RELOC_ARM_THUMB_LOOP12;
14527 inst.relocs[0].pc_rel = 1;
14528 }
14529 }
14530
14531 /* For shifts with four operands in MVE. */
14532 static void
14533 do_mve_scalar_shift1 (void)
14534 {
14535 unsigned int value = inst.operands[2].imm;
14536
14537 inst.instruction |= inst.operands[0].reg << 16;
14538 inst.instruction |= inst.operands[1].reg << 8;
14539
14540 /* Setting the bit for saturation. */
14541 inst.instruction |= ((value == 64) ? 0: 1) << 7;
14542
14543 /* Assuming Rm is already checked not to be 11x1. */
14544 constraint (inst.operands[3].reg == inst.operands[0].reg, BAD_OVERLAP);
14545 constraint (inst.operands[3].reg == inst.operands[1].reg, BAD_OVERLAP);
14546 inst.instruction |= inst.operands[3].reg << 12;
14547 }
14548
14549 /* For shifts in MVE. */
14550 static void
14551 do_mve_scalar_shift (void)
14552 {
14553 if (!inst.operands[2].present)
14554 {
14555 inst.operands[2] = inst.operands[1];
14556 inst.operands[1].reg = 0xf;
14557 }
14558
14559 inst.instruction |= inst.operands[0].reg << 16;
14560 inst.instruction |= inst.operands[1].reg << 8;
14561
14562 if (inst.operands[2].isreg)
14563 {
14564 /* Assuming Rm is already checked not to be 11x1. */
14565 constraint (inst.operands[2].reg == inst.operands[0].reg, BAD_OVERLAP);
14566 constraint (inst.operands[2].reg == inst.operands[1].reg, BAD_OVERLAP);
14567 inst.instruction |= inst.operands[2].reg << 12;
14568 }
14569 else
14570 {
14571 /* Assuming imm is already checked as [1,32]. */
14572 unsigned int value = inst.operands[2].imm;
14573 inst.instruction |= (value & 0x1c) << 10;
14574 inst.instruction |= (value & 0x03) << 6;
14575 /* Change last 4 bits from 0xd to 0xf. */
14576 inst.instruction |= 0x2;
14577 }
14578 }
14579
/* MVE instruction encoder helpers.  */

/* Base opcode values for MVE mnemonics; the encoder functions OR the
   register, size and immediate fields into these templates.  */

#define M_MNEM_vabav	0xee800f01
#define M_MNEM_vmladav	0xeef00e00
#define M_MNEM_vmladava	0xeef00e20
#define M_MNEM_vmladavx	0xeef01e00
#define M_MNEM_vmladavax	0xeef01e20
#define M_MNEM_vmlsdav	0xeef00e01
#define M_MNEM_vmlsdava	0xeef00e21
#define M_MNEM_vmlsdavx	0xeef01e01
#define M_MNEM_vmlsdavax	0xeef01e21
#define M_MNEM_vmullt	0xee011e00
#define M_MNEM_vmullb	0xee010e00
#define M_MNEM_vctp	0xf000e801
/* Interleaving element-wise stores/loads (VST2x/VST4x, VLD2x/VLD4x).  */
#define M_MNEM_vst20	0xfc801e00
#define M_MNEM_vst21	0xfc801e20
#define M_MNEM_vst40	0xfc801e01
#define M_MNEM_vst41	0xfc801e21
#define M_MNEM_vst42	0xfc801e41
#define M_MNEM_vst43	0xfc801e61
#define M_MNEM_vld20	0xfc901e00
#define M_MNEM_vld21	0xfc901e20
#define M_MNEM_vld40	0xfc901e01
#define M_MNEM_vld41	0xfc901e21
#define M_MNEM_vld42	0xfc901e41
#define M_MNEM_vld43	0xfc901e61
/* Contiguous vector stores/loads by element size.  */
#define M_MNEM_vstrb	0xec000e00
#define M_MNEM_vstrh	0xec000e10
#define M_MNEM_vstrw	0xec000e40
#define M_MNEM_vstrd	0xec000e50
#define M_MNEM_vldrb	0xec100e00
#define M_MNEM_vldrh	0xec100e10
#define M_MNEM_vldrw	0xec100e40
#define M_MNEM_vldrd	0xec100e50
#define M_MNEM_vmovlt	0xeea01f40
#define M_MNEM_vmovlb	0xeea00f40
#define M_MNEM_vmovnt	0xfe311e81
#define M_MNEM_vmovnb	0xfe310e81
#define M_MNEM_vadc	0xee300f00
#define M_MNEM_vadci	0xee301f00
#define M_MNEM_vbrsr	0xfe011e60
#define M_MNEM_vaddlv	0xee890f00
#define M_MNEM_vaddlva	0xee890f20
#define M_MNEM_vaddv	0xeef10f00
#define M_MNEM_vaddva	0xeef10f20
/* Incrementing/decrementing (wrapping) dup instructions.  */
#define M_MNEM_vddup	0xee011f6e
#define M_MNEM_vdwdup	0xee011f60
#define M_MNEM_vidup	0xee010f6e
#define M_MNEM_viwdup	0xee010f60
#define M_MNEM_vmaxv	0xeee20f00
#define M_MNEM_vmaxav	0xeee00f00
#define M_MNEM_vminv	0xeee20f80
#define M_MNEM_vminav	0xeee00f80
/* Long multiply-accumulate across vector (and exchanging/accumulating
   variants).  */
#define M_MNEM_vmlaldav	0xee800e00
#define M_MNEM_vmlaldava	0xee800e20
#define M_MNEM_vmlaldavx	0xee801e00
#define M_MNEM_vmlaldavax	0xee801e20
#define M_MNEM_vmlsldav	0xee800e01
#define M_MNEM_vmlsldava	0xee800e21
#define M_MNEM_vmlsldavx	0xee801e01
#define M_MNEM_vmlsldavax	0xee801e21
#define M_MNEM_vrmlaldavhx	0xee801f00
#define M_MNEM_vrmlaldavhax	0xee801f20
#define M_MNEM_vrmlsldavh	0xfe800e01
#define M_MNEM_vrmlsldavha	0xfe800e21
#define M_MNEM_vrmlsldavhx	0xfe801e01
#define M_MNEM_vrmlsldavhax	0xfe801e21
/* Saturating/rounding narrowing moves and shifts.  */
#define M_MNEM_vqmovnt	0xee331e01
#define M_MNEM_vqmovnb	0xee330e01
#define M_MNEM_vqmovunt	0xee311e81
#define M_MNEM_vqmovunb	0xee310e81
#define M_MNEM_vshrnt	0xee801fc1
#define M_MNEM_vshrnb	0xee800fc1
#define M_MNEM_vrshrnt	0xfe801fc1
#define M_MNEM_vqshrnt	0xee801f40
#define M_MNEM_vqshrnb	0xee800f40
#define M_MNEM_vqshrunt	0xee801fc0
#define M_MNEM_vqshrunb	0xee800fc0
#define M_MNEM_vrshrnb	0xfe800fc1
#define M_MNEM_vqrshrnt	0xee801f41
#define M_MNEM_vqrshrnb	0xee800f41
#define M_MNEM_vqrshrunt	0xfe801fc0
#define M_MNEM_vqrshrunb	0xfe800fc0

/* Bfloat16 instruction encoder helpers.  */
#define B_MNEM_vfmat 0xfc300850
#define B_MNEM_vfmab 0xfc300810
14666
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row per overloaded Neon mnemonic: the base encoding for the
   integer, float-or-polynomial, and scalar-or-immediate forms
   respectively (N_INV where a form does not exist).  */
struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};

/* Map overloaded Neon opcodes to their respective encodings.  The X()
   macro is redefined below to expand this table first into the
   N_MNEM_* enumeration and then into neon_enc_tab[], so the two stay
   in sync by construction.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vabdl,	0x0800700, N_INV,     N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vaddl,	0x0800000, N_INV,     N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vsubl,	0x0800200, N_INV,     N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV,     0x0000c10, N_INV),		\
  X(vfms,	N_INV,     0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,	0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,	0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,	0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,	0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,	0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,	0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,	0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,	0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,	0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,	0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,	0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,	0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,	0x3ba0380, N_INV,     N_INV)

enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};

/* Do not use these macros; instead, use NEON_ENCODE defined below.
   Each selects one column of neon_enc_tab for the N_MNEM_* value held
   in the low 28 bits of X; the SINGLE/DOUBLE/FPV8 variants also carry
   over the condition/top bits from X itself.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction (currently an N_MNEM_* code) with the chosen
   encoding column and mark the instruction as Neon, which also arms the
   check_neon_suffixes diagnostic below.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)
14800
/* Diagnose a Neon type suffix (e.g. ".s32") given on an instruction
   that was never marked as Neon via NEON_ENCODE.  Note: expands to a
   statement that may execute `return' from the enclosing function, so
   it is only usable inside the void do_* encoder functions.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
14811
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   (The table below also uses H — a half-precision view of an S<n>
   register — and P, which always matches in neon_select_shape;
   presumably a predicate/VPR-style operand — TODO confirm.)

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(4, (R, R, Q, Q), QUAD),		\
  X(4, (Q, R, R, I), QUAD),		\
  X(4, (R, R, S, S), QUAD),		\
  X(4, (S, S, R, R), QUAD),		\
  X(3, (Q, R, I), QUAD),		\
  X(3, (I, Q, Q), QUAD),		\
  X(3, (I, Q, R), QUAD),		\
  X(3, (R, Q, Q), QUAD),		\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(3, (Q, Q, R), QUAD),		\
  X(3, (R, R, Q), QUAD),		\
  X(2, (R, Q), QUAD),			\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (P, F, I), SINGLE),		\
  X(3, (P, D, I), DOUBLE),		\
  X(3, (P, Q, I), QUAD),		\
  X(4, (P, F, F, I), SINGLE),		\
  X(4, (P, D, D, I), DOUBLE),		\
  X(4, (P, Q, Q, I), QUAD),		\
  X(5, (P, F, F, F, I), SINGLE),	\
  X(5, (P, D, D, D, I), DOUBLE),	\
  X(5, (P, Q, Q, Q, I), QUAD),		\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(4, (D, D, S, I), DOUBLE),		\
  X(4, (Q, Q, S, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
  /* Used for MVE tail predicated loop instructions.  */\
  X(2, (R, R), QUAD),			\
  /* Half float shape supported so far.  */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED),		\
  X (3, (D, H, H), MIXED),		\
  X (3, (D, H, S), MIXED)

/* Glue an operand-letter tuple into a single NS_* identifier.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D
#define S5(A,B,C,D,E)	NS_##A##B##C##D##E

#define X(N, L, C) S##N L

/* Shape enumeration (NS_DDD, NS_QQI, ...) plus NS_NULL meaning
   "no shape matched".  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
#undef S5
14927
/* Broad classification of each shape, indexed by enum neon_shape;
   used e.g. by neon_quad to decide whether the Q bit is set.  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
14945
/* One enumerator per operand letter used in NEON_SHAPE_DEF.  Keep in
   the same order as neon_shape_el_size below.  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L,
  SE_P
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  16,	/* SE_H.  */
  32,	/* SE_F.  */
  64,	/* SE_D.  */
  128,	/* SE_Q.  */
  0,	/* SE_I.  */
  32,	/* SE_S.  */
  32,	/* SE_R.  */
  0,	/* SE_L.  */
  0	/* SE_P.  */
};

/* Per-shape operand count and element kinds, indexed by enum
   neon_shape; drives the matching loop in neon_select_shape.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }
#define S5(A,B,C,D,E)	{ SE_##A, SE_##B, SE_##C, SE_##D, SE_##E }

#define X(N, L, C) { N, S##N L }

static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
#undef S5
14996
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.

   Note that the modifier bits N_DBL..N_SIZ deliberately reuse the same
   values as N_S8..N_SIZ's low-bit range; they are only interpreted as
   modifiers when N_EQK is also set.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64  = 0x0200000,
  N_BF16 = 0x0400000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Convenience unions of the masks above.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)
#define N_I_MVE	   (N_I8 | N_I16 | N_I32)
#define N_F_MVE	   (N_F16 | N_F32)
#define N_SU_MVE   (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
15061
15062 /* Select a "shape" for the current instruction (describing register types or
15063 sizes) from a list of alternatives. Return NS_NULL if the current instruction
15064 doesn't fit. For non-polymorphic shapes, checking is usually done as a
15065 function of operand parsing, so this function doesn't need to be called.
15066 Shapes should be listed in order of decreasing length. */
15067
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn until one matches all operands;
     the argument list must be terminated with NS_NULL.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32 s2, s2, #29:	NS_HFI
		 vcvt.f16.s32 s2, s2:		NS_HF
	      */
	    case SE_H:
	      /* Half precision: a single-precision VFP register with a
		 16-bit type specifier from any of the three sources
		 described above.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      /* Single precision: as SE_H but a 32-bit (or absent)
		 type specifier.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      /* Neon D register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      /* Plain ARM core register.  */
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      /* Neon Q register.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      /* Immediate: neither register nor scalar.  */
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      /* Scalar (element of a vector register).  */
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_P:
	    case SE_L:
	      /* These element kinds always match.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
15205
15206 /* True if SHAPE is predominantly a quadword operation (most of the time, this
15207 means the Q bit should be set). */
15208
15209 static int
15210 neon_quad (enum neon_shape shape)
15211 {
15212 return neon_shape_class[shape] == SC_QUAD;
15213 }
15214
15215 static void
15216 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
15217 unsigned *g_size)
15218 {
15219 /* Allow modification to be made to types which are constrained to be
15220 based on the key element, based on bits set alongside N_EQK. */
15221 if ((typebits & N_EQK) != 0)
15222 {
15223 if ((typebits & N_HLF) != 0)
15224 *g_size /= 2;
15225 else if ((typebits & N_DBL) != 0)
15226 *g_size *= 2;
15227 if ((typebits & N_SGN) != 0)
15228 *g_type = NT_signed;
15229 else if ((typebits & N_UNS) != 0)
15230 *g_type = NT_unsigned;
15231 else if ((typebits & N_INT) != 0)
15232 *g_type = NT_integer;
15233 else if ((typebits & N_FLT) != 0)
15234 *g_type = NT_float;
15235 else if ((typebits & N_SIZ) != 0)
15236 *g_type = NT_untyped;
15237 }
15238 }
15239
15240 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
15241 operand type, i.e. the single type specified in a Neon instruction when it
15242 is the only one given. */
15243
15244 static struct neon_type_el
15245 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
15246 {
15247 struct neon_type_el dest = *key;
15248
15249 gas_assert ((thisarg & N_EQK) != 0);
15250
15251 neon_modify_type_size (thisarg, &dest.type, &dest.size);
15252
15253 return dest;
15254 }
15255
15256 /* Convert Neon type and size into compact bitmask representation. */
15257
15258 static enum neon_type_mask
15259 type_chk_of_el_type (enum neon_el_type type, unsigned size)
15260 {
15261 switch (type)
15262 {
15263 case NT_untyped:
15264 switch (size)
15265 {
15266 case 8: return N_8;
15267 case 16: return N_16;
15268 case 32: return N_32;
15269 case 64: return N_64;
15270 default: ;
15271 }
15272 break;
15273
15274 case NT_integer:
15275 switch (size)
15276 {
15277 case 8: return N_I8;
15278 case 16: return N_I16;
15279 case 32: return N_I32;
15280 case 64: return N_I64;
15281 default: ;
15282 }
15283 break;
15284
15285 case NT_float:
15286 switch (size)
15287 {
15288 case 16: return N_F16;
15289 case 32: return N_F32;
15290 case 64: return N_F64;
15291 default: ;
15292 }
15293 break;
15294
15295 case NT_poly:
15296 switch (size)
15297 {
15298 case 8: return N_P8;
15299 case 16: return N_P16;
15300 case 64: return N_P64;
15301 default: ;
15302 }
15303 break;
15304
15305 case NT_signed:
15306 switch (size)
15307 {
15308 case 8: return N_S8;
15309 case 16: return N_S16;
15310 case 32: return N_S32;
15311 case 64: return N_S64;
15312 default: ;
15313 }
15314 break;
15315
15316 case NT_unsigned:
15317 switch (size)
15318 {
15319 case 8: return N_U8;
15320 case 16: return N_U16;
15321 case 32: return N_U32;
15322 case 64: return N_U64;
15323 default: ;
15324 }
15325 break;
15326
15327 case NT_bfloat:
15328 if (size == 16) return N_BF16;
15329 break;
15330
15331 default: ;
15332 }
15333
15334 return N_UTYP;
15335 }
15336
15337 /* Convert compact Neon bitmask type representation to a type and size. Only
15338 handles the case where a single bit is set in the mask. */
15339
/* Inverse of type_chk_of_el_type: decode a mask with a single type bit
   set back into *TYPE and *SIZE.  Masks containing N_EQK (modifier
   context) are rejected.  If multiple bits are set, the first matching
   group in the fixed test order below wins, so callers must pass a
   single-bit mask for a meaningful answer.  Returns SUCCESS or FAIL.  */
static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
		     enum neon_type_mask mask)
{
  if ((mask & N_EQK) != 0)
    return FAIL;

  /* First recover the element size from the bit's group...  */
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16 | N_BF16))
	   != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
    *size = 64;
  else
    return FAIL;

  /* ...then the element type.  */
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F_ALL)) != 0)
    *type = NT_float;
  else if ((mask & (N_BF16)) != 0)
    *type = NT_bfloat;
  else
    return FAIL;

  return SUCCESS;
}
15378
15379 /* Modify a bitmask of allowed types. This is only needed for type
15380 relaxation. */
15381
15382 static unsigned
15383 modify_types_allowed (unsigned allowed, unsigned mods)
15384 {
15385 unsigned size;
15386 enum neon_el_type type;
15387 unsigned destmask;
15388 int i;
15389
15390 destmask = 0;
15391
15392 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
15393 {
15394 if (el_type_of_type_chk (&type, &size,
15395 (enum neon_type_mask) (allowed & i)) == SUCCESS)
15396 {
15397 neon_modify_type_size (mods, &type, &size);
15398 destmask |= type_chk_of_el_type (type, size);
15399 }
15400 }
15401
15402 return destmask;
15403 }
15404
15405 /* Check type and return type classification.
15406 The manual states (paraphrase): If one datatype is given, it indicates the
15407 type given in:
15408 - the second operand, if there is one
15409 - the operand, if there is no second operand
15410 - the result, if there are no operands.
15411 This isn't quite good enough though, so we use a concept of a "key" datatype
15412 which is set on a per-instruction basis, which is the one which matters when
15413 only one data type is written.
15414 Note: this function has side-effects (e.g. filling in missing operands). All
15415 Neon instructions should call it before performing bit encoding. */
15416
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  Each ELS argument is an N_* mask of
     allowed types; the one tagged N_KEY is the "key" operand.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* Types may be given on the mnemonic or on operands, but not both.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 records the key operand's type/size and its allowed-type
     mask; pass 1 validates every operand against that key (after
     applying any N_EQK relaxation modifiers).  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (BAD_SIMD_TYPE);
		      return badtype;
		    }
		}
	      else
		{
		  /* N_EQK operand: must equal the key's type/size after
		     applying the modifier bits (halving, doubling, &c.).  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
15614
15615 /* Neon-style VFP instruction forwarding. */
15616
15617 /* Thumb VFP instructions have 0xE in the condition field. */
15618
15619 static void
15620 do_vfp_cond_or_thumb (void)
15621 {
15622 inst.is_neon = 1;
15623
15624 if (thumb_mode)
15625 inst.instruction |= 0xe0000000;
15626 else
15627 inst.instruction |= inst.cond << 28;
15628 }
15629
15630 /* Look up and encode a simple mnemonic, for use as a helper function for the
15631 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
15632 etc. It is assumed that operand parsing has already been done, and that the
15633 operands are in the form expected by the given opcode (this isn't necessarily
15634 the same as the form in which they were parsed, hence some massaging must
15635 take place before this function is called).
15636 Checks current arch version against that in the looked-up opcode. */
15637
15638 static void
15639 do_vfp_nsyn_opcode (const char *opname)
15640 {
15641 const struct asm_opcode *opcode;
15642
15643 opcode = (const struct asm_opcode *) str_hash_find (arm_ops_hsh, opname);
15644
15645 if (!opcode)
15646 abort ();
15647
15648 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
15649 thumb_mode ? *opcode->tvariant : *opcode->avariant),
15650 _(BAD_FPU));
15651
15652 inst.is_neon = 1;
15653
15654 if (thumb_mode)
15655 {
15656 inst.instruction = opcode->tvalue;
15657 opcode->tencode ();
15658 }
15659 else
15660 {
15661 inst.instruction = (inst.cond << 28) | opcode->avalue;
15662 opcode->aencode ();
15663 }
15664 }
15665
15666 static void
15667 do_vfp_nsyn_add_sub (enum neon_shape rs)
15668 {
15669 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
15670
15671 if (rs == NS_FFF || rs == NS_HHH)
15672 {
15673 if (is_add)
15674 do_vfp_nsyn_opcode ("fadds");
15675 else
15676 do_vfp_nsyn_opcode ("fsubs");
15677
15678 /* ARMv8.2 fp16 instruction. */
15679 if (rs == NS_HHH)
15680 do_scalar_fp16_v82_encode ();
15681 }
15682 else
15683 {
15684 if (is_add)
15685 do_vfp_nsyn_opcode ("faddd");
15686 else
15687 do_vfp_nsyn_opcode ("fsubd");
15688 }
15689 }
15690
15691 /* Check operand types to see if this is a VFP instruction, and if so call
15692 PFN (). */
15693
15694 static int
15695 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
15696 {
15697 enum neon_shape rs;
15698 struct neon_type_el et;
15699
15700 switch (args)
15701 {
15702 case 2:
15703 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15704 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15705 break;
15706
15707 case 3:
15708 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15709 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15710 N_F_ALL | N_KEY | N_VFP);
15711 break;
15712
15713 default:
15714 abort ();
15715 }
15716
15717 if (et.type != NT_invtype)
15718 {
15719 pfn (rs);
15720 return SUCCESS;
15721 }
15722
15723 inst.error = NULL;
15724 return FAIL;
15725 }
15726
15727 static void
15728 do_vfp_nsyn_mla_mls (enum neon_shape rs)
15729 {
15730 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
15731
15732 if (rs == NS_FFF || rs == NS_HHH)
15733 {
15734 if (is_mla)
15735 do_vfp_nsyn_opcode ("fmacs");
15736 else
15737 do_vfp_nsyn_opcode ("fnmacs");
15738
15739 /* ARMv8.2 fp16 instruction. */
15740 if (rs == NS_HHH)
15741 do_scalar_fp16_v82_encode ();
15742 }
15743 else
15744 {
15745 if (is_mla)
15746 do_vfp_nsyn_opcode ("fmacd");
15747 else
15748 do_vfp_nsyn_opcode ("fnmacd");
15749 }
15750 }
15751
15752 static void
15753 do_vfp_nsyn_fma_fms (enum neon_shape rs)
15754 {
15755 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
15756
15757 if (rs == NS_FFF || rs == NS_HHH)
15758 {
15759 if (is_fma)
15760 do_vfp_nsyn_opcode ("ffmas");
15761 else
15762 do_vfp_nsyn_opcode ("ffnmas");
15763
15764 /* ARMv8.2 fp16 instruction. */
15765 if (rs == NS_HHH)
15766 do_scalar_fp16_v82_encode ();
15767 }
15768 else
15769 {
15770 if (is_fma)
15771 do_vfp_nsyn_opcode ("ffmad");
15772 else
15773 do_vfp_nsyn_opcode ("ffnmad");
15774 }
15775 }
15776
15777 static void
15778 do_vfp_nsyn_mul (enum neon_shape rs)
15779 {
15780 if (rs == NS_FFF || rs == NS_HHH)
15781 {
15782 do_vfp_nsyn_opcode ("fmuls");
15783
15784 /* ARMv8.2 fp16 instruction. */
15785 if (rs == NS_HHH)
15786 do_scalar_fp16_v82_encode ();
15787 }
15788 else
15789 do_vfp_nsyn_opcode ("fmuld");
15790 }
15791
15792 static void
15793 do_vfp_nsyn_abs_neg (enum neon_shape rs)
15794 {
15795 int is_neg = (inst.instruction & 0x80) != 0;
15796 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
15797
15798 if (rs == NS_FF || rs == NS_HH)
15799 {
15800 if (is_neg)
15801 do_vfp_nsyn_opcode ("fnegs");
15802 else
15803 do_vfp_nsyn_opcode ("fabss");
15804
15805 /* ARMv8.2 fp16 instruction. */
15806 if (rs == NS_HH)
15807 do_scalar_fp16_v82_encode ();
15808 }
15809 else
15810 {
15811 if (is_neg)
15812 do_vfp_nsyn_opcode ("fnegd");
15813 else
15814 do_vfp_nsyn_opcode ("fabsd");
15815 }
15816 }
15817
15818 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
15819 insns belong to Neon, and are handled elsewhere. */
15820
15821 static void
15822 do_vfp_nsyn_ldm_stm (int is_dbmode)
15823 {
15824 int is_ldm = (inst.instruction & (1 << 20)) != 0;
15825 if (is_ldm)
15826 {
15827 if (is_dbmode)
15828 do_vfp_nsyn_opcode ("fldmdbs");
15829 else
15830 do_vfp_nsyn_opcode ("fldmias");
15831 }
15832 else
15833 {
15834 if (is_dbmode)
15835 do_vfp_nsyn_opcode ("fstmdbs");
15836 else
15837 do_vfp_nsyn_opcode ("fstmias");
15838 }
15839 }
15840
15841 static void
15842 do_vfp_nsyn_sqrt (void)
15843 {
15844 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15845 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15846
15847 if (rs == NS_FF || rs == NS_HH)
15848 {
15849 do_vfp_nsyn_opcode ("fsqrts");
15850
15851 /* ARMv8.2 fp16 instruction. */
15852 if (rs == NS_HH)
15853 do_scalar_fp16_v82_encode ();
15854 }
15855 else
15856 do_vfp_nsyn_opcode ("fsqrtd");
15857 }
15858
15859 static void
15860 do_vfp_nsyn_div (void)
15861 {
15862 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15863 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15864 N_F_ALL | N_KEY | N_VFP);
15865
15866 if (rs == NS_FFF || rs == NS_HHH)
15867 {
15868 do_vfp_nsyn_opcode ("fdivs");
15869
15870 /* ARMv8.2 fp16 instruction. */
15871 if (rs == NS_HHH)
15872 do_scalar_fp16_v82_encode ();
15873 }
15874 else
15875 do_vfp_nsyn_opcode ("fdivd");
15876 }
15877
15878 static void
15879 do_vfp_nsyn_nmul (void)
15880 {
15881 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15882 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15883 N_F_ALL | N_KEY | N_VFP);
15884
15885 if (rs == NS_FFF || rs == NS_HHH)
15886 {
15887 NEON_ENCODE (SINGLE, inst);
15888 do_vfp_sp_dyadic ();
15889
15890 /* ARMv8.2 fp16 instruction. */
15891 if (rs == NS_HHH)
15892 do_scalar_fp16_v82_encode ();
15893 }
15894 else
15895 {
15896 NEON_ENCODE (DOUBLE, inst);
15897 do_vfp_dp_rd_rn_rm ();
15898 }
15899 do_vfp_cond_or_thumb ();
15900
15901 }
15902
15903 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
15904 (0, 1, 2, 3). */
15905
static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based index of the least significant set bit, so
     8/16/32/64 map to 4/5/6/7; subtracting 4 yields 0/1/2/3.  */
  unsigned bit = (unsigned) ffs (x);
  return bit - 4;
}
15911
/* Extract the low four bits of a register number.  */
#define LOW4(R) ((R) & 0xf)
/* Extract bit 4 of a register number (the "high" D/N/M bit).  */
#define HI1(R) (((R) >> 4) & 1)
/* Extract the low bit of a register number.  */
#define LOW1(R) ((R) & 0x1)
/* Extract bits 4:1 of a register number.  */
#define HI4(R) (((R) >> 1) & 0xf)
15916
/* Map the condition code held in inst.operands[0].imm onto the 3-bit
   MVE VCMP/VPT condition encoding, validating it against the element
   type ET.  Reports an error and returns 0 for unsupported
   type/condition combinations.  */

static unsigned
mve_get_vcmp_vpt_cond (struct neon_type_el et)
{
  switch (et.type)
    {
    default:
      first_error (BAD_EL_TYPE);
      return 0;
    case NT_float:
      /* Float comparisons accept eq, ne, ge, lt, gt and le.  */
      switch (inst.operands[0].imm)
	{
	default:
	  first_error (_("invalid condition"));
	  return 0;
	case 0x0:
	  /* eq.  */
	  return 0;
	case 0x1:
	  /* ne.  */
	  return 1;
	case 0xa:
	  /* ge.  */
	  return 4;
	case 0xb:
	  /* lt.  */
	  return 5;
	case 0xc:
	  /* gt.  */
	  return 6;
	case 0xd:
	  /* le.  */
	  return 7;
	}
    case NT_integer:
      /* only accept eq and ne.  */
      if (inst.operands[0].imm > 1)
	{
	  first_error (_("invalid condition"));
	  return 0;
	}
      return inst.operands[0].imm;
    case NT_unsigned:
      /* Unsigned comparisons accept cs (0x2) and hi (0x8) only.  */
      if (inst.operands[0].imm == 0x2)
	return 2;
      else if (inst.operands[0].imm == 0x8)
	return 3;
      else
	{
	  first_error (_("invalid condition"));
	  return 0;
	}
    case NT_signed:
      /* Signed comparisons accept ge, lt, gt and le.  */
      switch (inst.operands[0].imm)
	{
	default:
	  first_error (_("invalid condition"));
	  return 0;
	case 0xa:
	  /* ge.  */
	  return 4;
	case 0xb:
	  /* lt.  */
	  return 5;
	case 0xc:
	  /* gt.  */
	  return 6;
	case 0xd:
	  /* le.  */
	  return 7;
	}
    }
  /* Should be unreachable.  */
  abort ();
}
15991
15992 /* For VCTP (create vector tail predicate) in MVE. */
static void
do_mve_vctp (void)
{
  int dt = 0;
  unsigned size = 0x0;

  /* Record whether this instruction sits inside a VPT block.  */
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* This is a typical MVE instruction which has no type but have size 8, 16,
     32 and 64. For instructions with no type, inst.vectype.el[j].type is set
     to NT_untyped and size is updated in inst.vectype.el[j].size.  */
  if ((inst.operands[0].present) && (inst.vectype.el[0].type == NT_untyped))
    dt = inst.vectype.el[0].size;

  /* Setting this does not indicate an actual NEON instruction, but only
     indicates that the mnemonic accepts neon-style type suffixes.  */
  inst.is_neon = 1;

  /* Translate the element size into the two-bit field at bits 21:20.
     Size 8 keeps the initial encoding of 0.  */
  switch (dt)
    {
    case 8:
      break;
    case 16:
      size = 0x1; break;
    case 32:
      size = 0x2; break;
    case 64:
      size = 0x3; break;
    default:
      first_error (_("Type is not allowed for this instruction"));
    }
  inst.instruction |= size << 20;
  inst.instruction |= inst.operands[0].reg << 16;
}
16030
/* Encode an MVE VPT instruction (vector predicate-then block start).  */

static void
do_mve_vpt (void)
{
  /* We are dealing with a vector predicated block.  */
  if (inst.operands[0].present)
    {
      enum neon_shape rs = neon_select_shape (NS_IQQ, NS_IQR, NS_NULL);
      struct neon_type_el et
	= neon_check_type (3, rs, N_EQK, N_KEY | N_F_MVE | N_I_MVE | N_SU_32,
			   N_EQK);

      unsigned fcond = mve_get_vcmp_vpt_cond (et);

      constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

      if (et.type == NT_invtype)
	return;

      if (et.type == NT_float)
	{
	  /* Floating-point VPT needs the MVE FP extension and only
	     supports 16- and 32-bit elements.  */
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
		      BAD_FPU);
	  constraint (et.size != 16 && et.size != 32, BAD_EL_TYPE);
	  inst.instruction |= (et.size == 16) << 28;
	  inst.instruction |= 0x3 << 20;
	}
      else
	{
	  constraint (et.size != 8 && et.size != 16 && et.size != 32,
		      BAD_EL_TYPE);
	  inst.instruction |= 1 << 28;
	  inst.instruction |= neon_logbits (et.size) << 20;
	}

      /* Second comparison operand: either a Q register or a GPR.  The
	 three fcond bits are scattered over the encoding.  */
      if (inst.operands[2].isquad)
	{
	  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
	  inst.instruction |= LOW4 (inst.operands[2].reg);
	  inst.instruction |= (fcond & 0x2) >> 1;
	}
      else
	{
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  inst.instruction |= 1 << 6;
	  inst.instruction |= (fcond & 0x2) << 4;
	  inst.instruction |= inst.operands[2].reg;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= (fcond & 0x4) << 10;
      inst.instruction |= (fcond & 0x1) << 7;

    }
  set_pred_insn_type (VPT_INSN);
  now_pred.cc = 0;
  /* Seed the predication mask from the mask bits already encoded in
     the instruction (bit 22 and bits 15:13).  */
  now_pred.mask = ((inst.instruction & 0x00400000) >> 19)
		  | ((inst.instruction & 0xe000) >> 13);
  now_pred.warn_deprecated = false;
  now_pred.type = VECTOR_PRED;
  inst.is_neon = 1;
}
16092
/* Encode an MVE VCMP instruction (vector compare against a Q register
   or a GPR, writing the VPR).  */

static void
do_mve_vcmp (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
  if (!inst.operands[1].isreg || !inst.operands[1].isquad)
    first_error (_(reg_expected_msgs[REG_TYPE_MQ]));
  if (!inst.operands[2].present)
    first_error (_("MVE vector or ARM register expected"));
  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

  /* Deal with 'else' conditional MVE's vcmp, it will be parsed as vcmpe.  */
  if ((inst.instruction & 0xffffffff) == N_MNEM_vcmpe
      && inst.operands[1].isquad)
    {
      inst.instruction = N_MNEM_vcmp;
      inst.cond = 0x10;
    }

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  enum neon_shape rs = neon_select_shape (NS_IQQ, NS_IQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_KEY | N_F_MVE | N_I_MVE | N_SU_32,
		       N_EQK);

  /* PC is only allowed as the ZR pseudo-register in the GPR form.  */
  constraint (rs == NS_IQR && inst.operands[2].reg == REG_PC
	      && !inst.operands[2].iszr, BAD_PC);

  unsigned fcond = mve_get_vcmp_vpt_cond (et);

  /* Base opcode; the fcond bits are scattered over the encoding.  */
  inst.instruction = 0xee010f00;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= (fcond & 0x4) << 10;
  inst.instruction |= (fcond & 0x1) << 7;
  if (et.type == NT_float)
    {
      /* Floating-point compares need the MVE FP extension.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
		  BAD_FPU);
      inst.instruction |= (et.size == 16) << 28;
      inst.instruction |= 0x3 << 20;
    }
  else
    {
      inst.instruction |= 1 << 28;
      inst.instruction |= neon_logbits (et.size) << 20;
    }
  if (inst.operands[2].isquad)
    {
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      inst.instruction |= (fcond & 0x2) >> 1;
      inst.instruction |= LOW4 (inst.operands[2].reg);
    }
  else
    {
      if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
      inst.instruction |= 1 << 6;
      inst.instruction |= (fcond & 0x2) << 4;
      inst.instruction |= inst.operands[2].reg;
    }

  inst.is_neon = 1;
  return;
}
16160
16161 static void
16162 do_mve_vmaxa_vmina (void)
16163 {
16164 if (inst.cond > COND_ALWAYS)
16165 inst.pred_insn_type = INSIDE_VPT_INSN;
16166 else
16167 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16168
16169 enum neon_shape rs = neon_select_shape (NS_QQ, NS_NULL);
16170 struct neon_type_el et
16171 = neon_check_type (2, rs, N_EQK, N_KEY | N_S8 | N_S16 | N_S32);
16172
16173 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16174 inst.instruction |= neon_logbits (et.size) << 18;
16175 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16176 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16177 inst.instruction |= LOW4 (inst.operands[1].reg);
16178 inst.is_neon = 1;
16179 }
16180
16181 static void
16182 do_mve_vfmas (void)
16183 {
16184 enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
16185 struct neon_type_el et
16186 = neon_check_type (3, rs, N_F_MVE | N_KEY, N_EQK, N_EQK);
16187
16188 if (inst.cond > COND_ALWAYS)
16189 inst.pred_insn_type = INSIDE_VPT_INSN;
16190 else
16191 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16192
16193 if (inst.operands[2].reg == REG_SP)
16194 as_tsktsk (MVE_BAD_SP);
16195 else if (inst.operands[2].reg == REG_PC)
16196 as_tsktsk (MVE_BAD_PC);
16197
16198 inst.instruction |= (et.size == 16) << 28;
16199 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16200 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16201 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16202 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16203 inst.instruction |= inst.operands[2].reg;
16204 inst.is_neon = 1;
16205 }
16206
/* Encode MVE VIDUP/VDDUP (and the wrapping VIWDUP/VDWDUP forms).  */

static void
do_mve_viddup (void)
{
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  unsigned imm = inst.relocs[0].exp.X_add_number;
  constraint (imm != 1 && imm != 2 && imm != 4 && imm != 8,
	      _("immediate must be either 1, 2, 4 or 8"));

  enum neon_shape rs;
  struct neon_type_el et;
  unsigned Rm;
  if (inst.instruction == M_MNEM_vddup || inst.instruction == M_MNEM_vidup)
    {
      /* Non-wrapping form: no wrap register, so Rm gets the reserved
	 value 0b111 (encoded at bits 3:1 below).  */
      rs = neon_select_shape (NS_QRI, NS_NULL);
      et = neon_check_type (2, rs, N_KEY | N_U8 | N_U16 | N_U32, N_EQK);
      Rm = 7;
    }
  else
    {
      /* Wrapping form: the wrap register must be an odd GPR other than
	 SP/PC; only its top four bits are encoded.  */
      constraint ((inst.operands[2].reg % 2) != 1, BAD_EVEN);
      if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
      else if (inst.operands[2].reg == REG_PC)
	first_error (BAD_PC);

      rs = neon_select_shape (NS_QRRI, NS_NULL);
      et = neon_check_type (3, rs, N_KEY | N_U8 | N_U16 | N_U32, N_EQK, N_EQK);
      Rm = inst.operands[2].reg >> 1;
    }
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* The immediate 1/2/4/8 is encoded as two bits: bit 7 selects
     {4,8} vs {1,2}, bit 0 selects {2,8} vs {1,4}.  */
  inst.instruction |= (imm > 2) << 7;
  inst.instruction |= Rm << 1;
  inst.instruction |= (imm == 2 || imm == 8);
  inst.is_neon = 1;
}
16249
16250 static void
16251 do_mve_vmlas (void)
16252 {
16253 enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
16254 struct neon_type_el et
16255 = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
16256
16257 if (inst.operands[2].reg == REG_PC)
16258 as_tsktsk (MVE_BAD_PC);
16259 else if (inst.operands[2].reg == REG_SP)
16260 as_tsktsk (MVE_BAD_SP);
16261
16262 if (inst.cond > COND_ALWAYS)
16263 inst.pred_insn_type = INSIDE_VPT_INSN;
16264 else
16265 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16266
16267 inst.instruction |= (et.type == NT_unsigned) << 28;
16268 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16269 inst.instruction |= neon_logbits (et.size) << 20;
16270 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16271 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16272 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16273 inst.instruction |= inst.operands[2].reg;
16274 inst.is_neon = 1;
16275 }
16276
/* Encode MVE VSHLL (vector shift left long).  A shift equal to the
   element size uses a distinct encoding from smaller shifts.  */

static void
do_mve_vshll (void)
{
  struct neon_type_el et
    = neon_check_type (2, NS_QQI, N_EQK, N_S8 | N_U8 | N_S16 | N_U16 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate value out of range"));

  if ((unsigned)imm == et.size)
    {
      /* Shift by the full element width: dedicated encoding with the
	 size in bits 19:18.  */
      inst.instruction |= neon_logbits (et.size) << 18;
      inst.instruction |= 0x110001;
    }
  else
    {
      /* Smaller shift: size+imm forms the immediate field at 21:16.  */
      inst.instruction |= (et.size + imm) << 16;
      inst.instruction |= 0x800140;
    }

  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;
}
16310
16311 static void
16312 do_mve_vshlc (void)
16313 {
16314 if (inst.cond > COND_ALWAYS)
16315 inst.pred_insn_type = INSIDE_VPT_INSN;
16316 else
16317 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16318
16319 if (inst.operands[1].reg == REG_PC)
16320 as_tsktsk (MVE_BAD_PC);
16321 else if (inst.operands[1].reg == REG_SP)
16322 as_tsktsk (MVE_BAD_SP);
16323
16324 int imm = inst.operands[2].imm;
16325 constraint (imm < 1 || imm > 32, _("immediate value out of range"));
16326
16327 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16328 inst.instruction |= (imm & 0x1f) << 16;
16329 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16330 inst.instruction |= inst.operands[1].reg;
16331 inst.is_neon = 1;
16332 }
16333
/* Encode the MVE narrowing shift-right family (VSHRN, VRSHRN, VQSHRN,
   VQRSHRN, VQSHRUN, VQRSHRUN — top and bottom variants).  */

static void
do_mve_vshrn (void)
{
  unsigned types;
  /* The allowed element types depend on which mnemonic this is:
     plain/rounding narrows take any integer, saturating narrows take
     signed or unsigned, and the "u" (unsigned-result) saturating
     narrows take signed sources only.  */
  switch (inst.instruction)
    {
    case M_MNEM_vshrnt:
    case M_MNEM_vshrnb:
    case M_MNEM_vrshrnt:
    case M_MNEM_vrshrnb:
      types = N_I16 | N_I32;
      break;
    case M_MNEM_vqshrnt:
    case M_MNEM_vqshrnb:
    case M_MNEM_vqrshrnt:
    case M_MNEM_vqrshrnb:
      types = N_U16 | N_U32 | N_S16 | N_S32;
      break;
    case M_MNEM_vqshrunt:
    case M_MNEM_vqshrunb:
    case M_MNEM_vqrshrunt:
    case M_MNEM_vqrshrunb:
      types = N_S16 | N_S32;
      break;
    default:
      abort ();
    }

  struct neon_type_el et = neon_check_type (2, NS_QQI, N_EQK, types | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  unsigned Qd = inst.operands[0].reg;
  unsigned Qm = inst.operands[1].reg;
  unsigned imm = inst.operands[2].imm;
  constraint (imm < 1 || ((unsigned) imm) > (et.size / 2),
	      et.size == 16
	      ? _("immediate operand expected in the range [1,8]")
	      : _("immediate operand expected in the range [1,16]"))
;
  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (Qd) << 22;
  /* The shift amount is encoded as (size - imm) in bits 21:16.  */
  inst.instruction |= (et.size - imm) << 16;
  inst.instruction |= LOW4 (Qd) << 12;
  inst.instruction |= HI1 (Qm) << 5;
  inst.instruction |= LOW4 (Qm);
  inst.is_neon = 1;
}
16385
16386 static void
16387 do_mve_vqmovn (void)
16388 {
16389 struct neon_type_el et;
16390 if (inst.instruction == M_MNEM_vqmovnt
16391 || inst.instruction == M_MNEM_vqmovnb)
16392 et = neon_check_type (2, NS_QQ, N_EQK,
16393 N_U16 | N_U32 | N_S16 | N_S32 | N_KEY);
16394 else
16395 et = neon_check_type (2, NS_QQ, N_EQK, N_S16 | N_S32 | N_KEY);
16396
16397 if (inst.cond > COND_ALWAYS)
16398 inst.pred_insn_type = INSIDE_VPT_INSN;
16399 else
16400 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16401
16402 inst.instruction |= (et.type == NT_unsigned) << 28;
16403 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16404 inst.instruction |= (et.size == 32) << 18;
16405 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16406 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16407 inst.instruction |= LOW4 (inst.operands[1].reg);
16408 inst.is_neon = 1;
16409 }
16410
16411 static void
16412 do_mve_vpsel (void)
16413 {
16414 neon_select_shape (NS_QQQ, NS_NULL);
16415
16416 if (inst.cond > COND_ALWAYS)
16417 inst.pred_insn_type = INSIDE_VPT_INSN;
16418 else
16419 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16420
16421 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16422 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16423 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16424 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16425 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16426 inst.instruction |= LOW4 (inst.operands[2].reg);
16427 inst.is_neon = 1;
16428 }
16429
16430 static void
16431 do_mve_vpnot (void)
16432 {
16433 if (inst.cond > COND_ALWAYS)
16434 inst.pred_insn_type = INSIDE_VPT_INSN;
16435 else
16436 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16437 }
16438
16439 static void
16440 do_mve_vmaxnma_vminnma (void)
16441 {
16442 enum neon_shape rs = neon_select_shape (NS_QQ, NS_NULL);
16443 struct neon_type_el et
16444 = neon_check_type (2, rs, N_EQK, N_F_MVE | N_KEY);
16445
16446 if (inst.cond > COND_ALWAYS)
16447 inst.pred_insn_type = INSIDE_VPT_INSN;
16448 else
16449 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16450
16451 inst.instruction |= (et.size == 16) << 28;
16452 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16453 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16454 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16455 inst.instruction |= LOW4 (inst.operands[1].reg);
16456 inst.is_neon = 1;
16457 }
16458
/* Encode MVE VCMUL (vector complex multiply with rotation).  */

static void
do_mve_vcmul (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQI, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_F_MVE | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));

  /* For 32-bit elements the destination must not overlap a source.  */
  if (et.size == 32 && (inst.operands[0].reg == inst.operands[1].reg
			|| inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  inst.instruction |= (et.size == 32) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* The rotation is encoded in two bits: bit 12 selects {180,270},
     bit 0 selects {90,270}.  */
  inst.instruction |= (rot > 90) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= (rot == 90 || rot == 270);
  inst.is_neon = 1;
}
16490
16491 /* To handle the Low Overhead Loop instructions
16492 in Armv8.1-M Mainline and MVE. */
static void
do_t_loloop (void)
{
  /* Remember which mnemonic this was; inst.instruction is about to be
     replaced by the 32-bit Thumb encoding.  */
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);

  /* LCTP has no operands to encode.  */
  if (insn == T_MNEM_lctp)
    return;

  set_pred_insn_type (MVE_OUTSIDE_PRED_INSN);

  /* The tail-predicated variants take a Neon-style size suffix that
     lands in bits 21:20.  */
  if (insn == T_MNEM_wlstp || insn == T_MNEM_dlstp)
    {
      struct neon_type_el et
       = neon_check_type (2, NS_RR, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
      inst.instruction |= neon_logbits (et.size) << 20;
      inst.is_neon = 1;
    }

  switch (insn)
    {
    case T_MNEM_letp:
      constraint (!inst.operands[0].present,
		  _("expected LR"));
      /* fall through.  */
    case T_MNEM_le:
      /* le <label>.  */
      if (!inst.operands[0].present)
	inst.instruction |= 1 << 21;

      v8_1_loop_reloc (true);
      break;

    case T_MNEM_wls:
    case T_MNEM_wlstp:
      v8_1_loop_reloc (false);
      /* fall through.  */
    case T_MNEM_dlstp:
    case T_MNEM_dls:
      constraint (inst.operands[1].isreg != 1, BAD_ARGS);

      /* The tail-predicated forms forbid PC outright; the others only
	 warn about PC and SP.  */
      if (insn == T_MNEM_wlstp || insn == T_MNEM_dlstp)
	constraint (inst.operands[1].reg == REG_PC, BAD_PC);
      else if (inst.operands[1].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      if (inst.operands[1].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);

      inst.instruction |= (inst.operands[1].reg << 16);
      break;

    default:
      abort ();
    }
}
16549
16550
/* Assemble VCMP/VCMPE.  A non-register first operand means this is
   really an MVE compare and is handed off to do_mve_vcmp; otherwise we
   encode the VFP register-register or compare-with-zero forms.  */
static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (!inst.operands[0].isreg)
    {
      /* MVE form: dispatch and stop; nothing below applies.  */
      do_mve_vcmp ();
      return;
    }
  else
    {
      constraint (inst.operands[2].present, BAD_SYNTAX);
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
		  BAD_FPU);
    }

  if (inst.operands[1].isreg)
    {
      /* Register-register compare: half, single or double precision.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare with an immediate (which must be zero): rewrite the
	 pseudo-mnemonic into the corresponding compare-with-zero one.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
16617
16618 static void
16619 nsyn_insert_sp (void)
16620 {
16621 inst.operands[1] = inst.operands[0];
16622 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
16623 inst.operands[0].reg = REG_SP;
16624 inst.operands[0].isreg = 1;
16625 inst.operands[0].writeback = 1;
16626 inst.operands[0].present = 1;
16627 }
16628
16629 /* Fix up Neon data-processing instructions, ORing in the correct bits for
16630 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
16631
16632 static void
16633 neon_dp_fixup (struct arm_it* insn)
16634 {
16635 unsigned int i = insn->instruction;
16636 insn->is_neon = 1;
16637
16638 if (thumb_mode)
16639 {
16640 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
16641 if (i & (1 << 24))
16642 i |= 1 << 28;
16643
16644 i &= ~(1 << 24);
16645
16646 i |= 0xef000000;
16647 }
16648 else
16649 i |= 0xf2000000;
16650
16651 insn->instruction = i;
16652 }
16653
/* Encode an MVE vector/scalar (QQR shape) instruction: Qd, Qn, Rm.
   The generic Neon opcode left in inst.instruction identifies which
   mnemonic we are assembling; it is replaced wholesale with the MVE
   base opcode before the register fields are ORed in.  FP selects the
   floating-point table, U the unsigned variant, SIZE the element size
   in bits.  */
static void
mve_encode_qqr (int size, int U, int fp)
{
  /* SP/PC as the scalar operand are unpredictable, not invalid.  */
  if (inst.operands[2].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[2].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  if (fp)
    {
      /* Map the generic Neon opcode to the MVE FP base opcode.  */
      /* vadd.  */
      if (((unsigned)inst.instruction) == 0xd00)
	inst.instruction = 0xee300f40;
      /* vsub.  */
      else if (((unsigned)inst.instruction) == 0x200d00)
	inst.instruction = 0xee301f40;
      /* vmul.  */
      else if (((unsigned)inst.instruction) == 0x1000d10)
	inst.instruction = 0xee310e60;

      /* Setting size which is 1 for F16 and 0 for F32.  */
      inst.instruction |= (size == 16) << 28;
    }
  else
    {
      /* Map the generic Neon opcode to the MVE integer base opcode.  */
      /* vadd.  */
      if (((unsigned)inst.instruction) == 0x800)
	inst.instruction = 0xee010f40;
      /* vsub.  */
      else if (((unsigned)inst.instruction) == 0x1000800)
	inst.instruction = 0xee011f40;
      /* vhadd.  */
      else if (((unsigned)inst.instruction) == 0)
	inst.instruction = 0xee000f40;
      /* vhsub.  */
      else if (((unsigned)inst.instruction) == 0x200)
	inst.instruction = 0xee001f40;
      /* vmla.  */
      else if (((unsigned)inst.instruction) == 0x900)
	inst.instruction = 0xee010e40;
      /* vmul.  */
      else if (((unsigned)inst.instruction) == 0x910)
	inst.instruction = 0xee011e60;
      /* vqadd.  */
      else if (((unsigned)inst.instruction) == 0x10)
	inst.instruction = 0xee000f60;
      /* vqsub.  */
      else if (((unsigned)inst.instruction) == 0x210)
	inst.instruction = 0xee001f60;
      /* vqrdmlah.  */
      else if (((unsigned)inst.instruction) == 0x3000b10)
	inst.instruction = 0xee000e40;
      /* vqdmulh.  */
      else if (((unsigned)inst.instruction) == 0x0000b00)
	inst.instruction = 0xee010e60;
      /* vqrdmulh.  */
      else if (((unsigned)inst.instruction) == 0x1000b00)
	inst.instruction = 0xfe010e60;

      /* Set U-bit.  */
      inst.instruction |= U << 28;

      /* Setting bits for size.  */
      inst.instruction |= neon_logbits (size) << 20;
    }
  /* Qd in bits 22 and 15:12, Qn in bits 7 and 19:16, Rm in bits 3:0.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= inst.operands[2].reg;
  inst.is_neon = 1;
}
16726
16727 static void
16728 mve_encode_rqq (unsigned bit28, unsigned size)
16729 {
16730 inst.instruction |= bit28 << 28;
16731 inst.instruction |= neon_logbits (size) << 20;
16732 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16733 inst.instruction |= inst.operands[0].reg << 12;
16734 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16735 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16736 inst.instruction |= LOW4 (inst.operands[2].reg);
16737 inst.is_neon = 1;
16738 }
16739
16740 static void
16741 mve_encode_qqq (int ubit, int size)
16742 {
16743
16744 inst.instruction |= (ubit != 0) << 28;
16745 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16746 inst.instruction |= neon_logbits (size) << 20;
16747 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16748 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16749 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16750 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16751 inst.instruction |= LOW4 (inst.operands[2].reg);
16752
16753 inst.is_neon = 1;
16754 }
16755
16756 static void
16757 mve_encode_rq (unsigned bit28, unsigned size)
16758 {
16759 inst.instruction |= bit28 << 28;
16760 inst.instruction |= neon_logbits (size) << 18;
16761 inst.instruction |= inst.operands[0].reg << 12;
16762 inst.instruction |= LOW4 (inst.operands[1].reg);
16763 inst.is_neon = 1;
16764 }
16765
16766 static void
16767 mve_encode_rrqq (unsigned U, unsigned size)
16768 {
16769 constraint (inst.operands[3].reg > 14, MVE_BAD_QREG);
16770
16771 inst.instruction |= U << 28;
16772 inst.instruction |= (inst.operands[1].reg >> 1) << 20;
16773 inst.instruction |= LOW4 (inst.operands[2].reg) << 16;
16774 inst.instruction |= (size == 32) << 16;
16775 inst.instruction |= inst.operands[0].reg << 12;
16776 inst.instruction |= HI1 (inst.operands[2].reg) << 7;
16777 inst.instruction |= inst.operands[3].reg;
16778 inst.is_neon = 1;
16779 }
16780
16781 /* Helper function for neon_three_same handling the operands. */
16782 static void
16783 neon_three_args (int isquad)
16784 {
16785 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16786 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16787 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16788 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16789 inst.instruction |= LOW4 (inst.operands[2].reg);
16790 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16791 inst.instruction |= (isquad != 0) << 6;
16792 inst.is_neon = 1;
16793 }
16794
16795 /* Encode insns with bit pattern:
16796
16797 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16798 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
16799
16800 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
16801 different meaning for some instruction. */
16802
16803 static void
16804 neon_three_same (int isquad, int ubit, int size)
16805 {
16806 neon_three_args (isquad);
16807 inst.instruction |= (ubit != 0) << 24;
16808 if (size != -1)
16809 inst.instruction |= neon_logbits (size) << 20;
16810
16811 neon_dp_fixup (&inst);
16812 }
16813
16814 /* Encode instructions of the form:
16815
16816 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
16817 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
16818
16819 Don't write size if SIZE == -1. */
16820
16821 static void
16822 neon_two_same (int qbit, int ubit, int size)
16823 {
16824 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16825 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16826 inst.instruction |= LOW4 (inst.operands[1].reg);
16827 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16828 inst.instruction |= (qbit != 0) << 6;
16829 inst.instruction |= (ubit != 0) << 24;
16830
16831 if (size != -1)
16832 inst.instruction |= neon_logbits (size) << 18;
16833
16834 neon_dp_fixup (&inst);
16835 }
16836
/* Flags for vfp_or_neon_is_neon, below.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Reject/rewrite condition codes.  */
  NEON_CHECK_ARCH = 2,	/* Require base Neon (fpu_neon_ext_v1).  */
  NEON_CHECK_ARCH8 = 4	/* Require Armv8 Neon (fpu_neon_ext_armv8).  */
};
16843
/* Call this function if an instruction which may have belonged to the VFP or
   Neon instruction sets, but turned out to be a Neon instruction (due to the
   operand types involved, etc.).  We have to check and/or fix-up a couple of
   things:

     - Make sure the user hasn't attempted to make a Neon instruction
       conditional.
     - Alter the value in the condition code field if necessary.
     - Make sure that the arch supports Neon instructions.

   Which of these operations take place depends on bits from enum
   vfp_or_neon_is_neon_bits.

   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
   current instruction's condition is COND_ALWAYS, the condition field is
   changed to inst.uncond_value.  This is necessary because instructions shared
   between VFP and Neon may be conditional for the VFP variants only, and the
   unconditional Neon version must have, e.g., 0xF in the condition field.  */

static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      /* Force the required unconditional encoding, if one is recorded.  */
      if (inst.uncond_value != -1u)
	inst.instruction |= inst.uncond_value << 28;
    }


  /* Architecture check: record the feature as used as a side effect.  */
  if (((check & NEON_CHECK_ARCH) && !mark_feature_used (&fpu_neon_ext_v1))
      || ((check & NEON_CHECK_ARCH8)
	  && !mark_feature_used (&fpu_neon_ext_armv8)))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
16889
16890
/* Return TRUE if the SIMD instruction is available for the current
   cpu_variant.  FP is set to TRUE if this is a SIMD floating-point
   instruction.  CHECK contains the set of bits to pass to
   vfp_or_neon_is_neon for the NEON specific checks.  Also classifies
   the instruction's predication (inst.pred_insn_type) as a side
   effect.  */

static bool
check_simd_pred_availability (int fp, unsigned check)
{
  if (inst.cond > COND_ALWAYS)
    {
      /* VPT-predicated: only valid with MVE.  */
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  inst.error = BAD_FPU;
	  return false;
	}
      inst.pred_insn_type = INSIDE_VPT_INSN;
    }
  else if (inst.cond < COND_ALWAYS)
    {
      /* Conditional: fine for MVE (outside a VPT block); otherwise fall
	 back to the Neon checks, which handle IT-block legality.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
      else if (vfp_or_neon_is_neon (check) == FAIL)
	return false;
    }
  else
    {
      /* Unconditional: need either the right MVE extension (FP or
	 integer) or a successful Neon availability check.  */
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, fp ? mve_fp_ext : mve_ext)
	  && vfp_or_neon_is_neon (check) == FAIL)
	return false;

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
    }
  return true;
}
16926
16927 /* Neon instruction encoders, in approximate order of appearance. */
16928
/* Dyadic integer operation on signed/unsigned elements up to 32 bits
   (N_SU_32).  MVE targets may additionally use the vector/scalar QQR
   shape.  */
static void
do_neon_dyadic_i_su (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
  else
    rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);

  et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY);


  if (rs != NS_QQR)
    neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
  else
    mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
}
16950
/* As do_neon_dyadic_i_su, but 64-bit elements are also permitted
   (N_SU_ALL for Neon, N_SU_MVE for MVE).  */
static void
do_neon_dyadic_i64_su (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;
  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_ALL | N_KEY);
    }
  if (rs == NS_QQR)
    mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
  else
    neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}
16973
/* Encode a Neon immediate-shift instruction.  IMMBITS is the
   already-computed shift-amount field; ET supplies the element size,
   whose encoding is split below.  UVAL is written to the U bit (bit 24)
   only when WRITE_UBIT is set.  */
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  /* SIZE here is the element size in bytes.  */
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* Split the byte size: its bit 3 goes to bit 7 (the L bit) and its
     low three bits to bits 21:19.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
16992
/* Assemble VSHL: immediate-shift form, MVE vector/scalar (QQR) form, or
   the three-register form.  */
static void
do_neon_shl (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  if (!inst.operands[2].isreg)
    {
      /* Shift by immediate.  */
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_MVE);
	}
      else
	{
	  rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
	}
      int imm = inst.operands[2].imm;

      /* A left shift of et.size or more bits cannot be encoded.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (false, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_MVE | N_KEY, N_EQK | N_EQK);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
	}


      if (rs == NS_QQR)
	{
	  /* MVE vector-by-scalar shift.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		       _("invalid instruction shape"));
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  inst.instruction = 0xee311e60;
	  inst.instruction |= (et.type == NT_unsigned) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= neon_logbits (et.size) << 18;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	}
      else
	{
	  unsigned int tmp;

	  /* VSHL/VQSHL 3-register variants have syntax such as:
	       vshl.xx Dd, Dm, Dn
	     whereas other 3-register operations encoded by neon_three_same have
	     syntax like:
	       vadd.xx Dd, Dn, Dm
	     (i.e. with Dn & Dm reversed).  Swap operands[1].reg and
	     operands[2].reg here.  */
	  tmp = inst.operands[2].reg;
	  inst.operands[2].reg = inst.operands[1].reg;
	  inst.operands[1].reg = tmp;
	  NEON_ENCODE (INTEGER, inst);
	  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
	}
    }
}
17072
/* Assemble VQSHL, mirroring the structure of do_neon_shl: immediate
   form, MVE vector/scalar (QQR) form, or three-register form.  */
static void
do_neon_qshl (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  if (!inst.operands[2].isreg)
    {
      /* Shift by immediate.  */
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_SU_MVE);
	}
      else
	{
	  rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
	}
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      /* Unlike VSHL, the U bit is written here (signed vs unsigned
	 saturation differ).  */
      neon_imm_shift (true, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_MVE | N_KEY, N_EQK | N_EQK);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
	}

      if (rs == NS_QQR)
	{
	  /* MVE vector-by-scalar saturating shift.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		       _("invalid instruction shape"));
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  inst.instruction = 0xee311ee0;
	  inst.instruction |= (et.type == NT_unsigned) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= neon_logbits (et.size) << 18;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	}
      else
	{
	  unsigned int tmp;

	  /* See note in do_neon_shl.  */
	  tmp = inst.operands[2].reg;
	  inst.operands[2].reg = inst.operands[1].reg;
	  inst.operands[1].reg = tmp;
	  NEON_ENCODE (INTEGER, inst);
	  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
	}
    }
}
17146
/* Assemble VRSHL/VQRSHL (rounding shifts): MVE vector/scalar (QQR) form
   or the Neon three-register form.  */
static void
do_neon_rshl (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_ALL | N_KEY);
    }

  unsigned int tmp;

  if (rs == NS_QQR)
    {
      if (inst.operands[2].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      else if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);

      constraint (inst.operands[0].reg != inst.operands[1].reg,
		  _("invalid instruction shape"));

      /* The generic opcode distinguishes the two mnemonics sharing this
	 encoder.  */
      if (inst.instruction == 0x0000510)
	/* We are dealing with vqrshl.  */
	inst.instruction = 0xee331ee0;
      else
	/* We are dealing with vrshl.  */
	inst.instruction = 0xee331e60;

      inst.instruction |= (et.type == NT_unsigned) << 28;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= neon_logbits (et.size) << 18;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= inst.operands[2].reg;
      inst.is_neon = 1;
    }
  else
    {
      /* Operand order is reversed relative to other three-same insns;
	 see the note in do_neon_shl.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
17200
/* Find the cmode encoding for a Neon bitwise-immediate operation
   (VBIC/VORR immediate forms).  On success, returns the cmode value and
   stores the 8-bit immediate payload in *IMMBITS; returns FAIL (after
   reporting an error) if IMMEDIATE cannot be represented at SIZE.  */
static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit element: the immediate must fit in one of the four byte
	 positions; cmode selects which byte is significant.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Otherwise try to fall back to a 16-bit repeating pattern.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit element: the immediate must fit in one of the two bytes.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
17256
/* Assemble the Neon/MVE bitwise logic operations: the three-register
   forms, and the immediate forms of VBIC/VORR (with VAND/VORN
   implemented as pseudo-instructions by inverting the immediate).  */
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      if (rs == NS_QQQ
	  && !check_simd_pred_availability (false,
					    NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
      else if (rs != NS_QQQ
	       && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	first_error (BAD_FPU);

      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form; the immediate may be in operand 1 (two-operand
	 syntax) or operand 2 (three-operand syntax).  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      /* Because neon_select_shape makes the second operand a copy of the first
	 if the second operand is not present.  */
      if (rs == NS_QQI
	  && !check_simd_pred_availability (false,
					    NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
      else if (rs != NS_QQI
	       && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	first_error (BAD_FPU);

      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	et = neon_check_type (2, rs, N_I32 | N_I16 | N_KEY, N_EQK);
      else
	et = neon_check_type (2, rs, N_I8 | N_I16 | N_I32 | N_I64 | N_F32
			      | N_KEY, N_EQK);

      if (et.type == NT_invtype)
	return;
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;


      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      /* neon_cmode_for_logic_imm has already reported the error.  */
      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
17365
17366 static void
17367 do_neon_bitfield (void)
17368 {
17369 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
17370 neon_check_type (3, rs, N_IGNORE_TYPE);
17371 neon_three_same (neon_quad (rs), 0, -1);
17372 }
17373
/* Encode a dyadic operation whose U bit depends on the element type.
   UBIT_MEANING is the element type for which the U bit is set; TYPES is
   the set of key types accepted and DESTBITS any extra type bits
   required of the destination.  */
static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
		  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
					    types | N_KEY);
  if (et.type == NT_float)
    {
      NEON_ENCODE (FLOAT, inst);
      if (rs == NS_QQR)
	mve_encode_qqr (et.size, 0, 1);
      else
	/* Only the fp16 variant writes the size field.  */
	neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
    }
  else
    {
      NEON_ENCODE (INTEGER, inst);
      if (rs == NS_QQR)
	mve_encode_qqr (et.size, et.type == ubit_meaning, 0);
      else
	neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}
17398
17399
/* Dyadic operation on signed/unsigned/float 32-bit types, D registers
   only.  */
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
17407
/* Dyadic operation on integer/float 32-bit types, D registers only.  */
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
17415
/* Encode an MVE VSTR/VLDR with a vector base plus immediate offset,
   i.e. the [Qn, #imm]{!} addressing forms.  Only 32- and 64-bit
   accesses support this mode.  */
static void
do_mve_vstr_vldr_QI (int size, int elsize, int load)
{
  constraint (size < 32, BAD_ADDR_MODE);
  constraint (size != elsize, BAD_EL_TYPE);
  constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
  constraint (!inst.operands[1].preind, BAD_ADDR_MODE);
  constraint (load && inst.operands[0].reg == inst.operands[1].reg,
	      _("destination register and offset register may not be the"
		" same"));

  /* Encode the offset as a magnitude plus add/subtract bit.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }
  /* The immediate is stored scaled down by the access size, in a 7-bit
     field.  */
  constraint ((imm % (size / 8) != 0)
	      || imm > (0x7f << neon_logbits (size)),
	      (size == 32) ? _("immediate must be a multiple of 4 in the"
			       " range of +/-[0,508]")
			   : _("immediate must be a multiple of 8 in the"
			       " range of +/-[0,1016]"));
  inst.instruction |= 0x11 << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= 1 << 12;
  inst.instruction |= (size == 64) << 8;
  /* Clear the low byte before inserting the scaled offset.  */
  inst.instruction &= 0xffffff00;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= imm >> neon_logbits (size);
}
17452
/* Encode an MVE VSTR/VLDR with a scalar base plus vector offset,
   i.e. the [Rn, Qm {, UXTW #shift}] addressing forms.  The optional
   shift is packed into the upper bits of operands[1].imm by the
   parser.  */
static void
do_mve_vstr_vldr_RQ (int size, int elsize, int load)
{
  /* Offset-shift amount lives above bit 5 of the parsed operand.  */
  unsigned os = inst.operands[1].imm >> 5;
  unsigned type = inst.vectype.el[0].type;
  constraint (os != 0 && size == 8,
	      _("can not shift offsets when accessing less than half-word"));
  constraint (os && os != neon_logbits (size),
	      _("shift immediate must be 1, 2 or 3 for half-word, word"
		" or double-word accesses respectively"));
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  /* Element type must be consistent with the access size.  */
  switch (size)
    {
    case 8:
      constraint (elsize >= 64, BAD_EL_TYPE);
      break;
    case 16:
      constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
      break;
    case 32:
    case 64:
      constraint (elsize != size, BAD_EL_TYPE);
      break;
    default:
      break;
    }
  constraint (inst.operands[1].writeback || !inst.operands[1].preind,
	      BAD_ADDR_MODE);
  if (load)
    {
      constraint (inst.operands[0].reg == (inst.operands[1].imm & 0x1f),
		  _("destination register and offset register may not be"
		    " the same"));
      /* A full-width load must not be explicitly signed; a widening load
	 must say whether it sign- or zero-extends.  */
      constraint (size == elsize && type == NT_signed, BAD_EL_TYPE);
      constraint (size != elsize && type != NT_unsigned && type != NT_signed,
		  BAD_EL_TYPE);
      inst.instruction |= ((size == elsize) || (type == NT_unsigned)) << 28;
    }
  else
    {
      constraint (type != NT_untyped, BAD_EL_TYPE);
    }

  inst.instruction |= 1 << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= neon_logbits (elsize) << 7;
  inst.instruction |= HI1 (inst.operands[1].imm) << 5;
  inst.instruction |= LOW4 (inst.operands[1].imm);
  inst.instruction |= !!os;
}
17507
/* Encode an MVE VSTR/VLDR with a scalar base plus immediate offset,
   i.e. the [Rn {, #imm}]{!} and post-indexed addressing forms.  A
   SIZE/ELSIZE mismatch selects the widening/narrowing encodings.  */
static void
do_mve_vstr_vldr_RI (int size, int elsize, int load)
{
  enum neon_el_type type = inst.vectype.el[0].type;

  constraint (size >= 64, BAD_ADDR_MODE);
  /* Element type must be consistent with the access size.  */
  switch (size)
    {
    case 16:
      constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
      break;
    case 32:
      constraint (elsize != size, BAD_EL_TYPE);
      break;
    default:
      break;
    }
  if (load)
    {
      /* A widening load must say whether it sign- or zero-extends.  */
      constraint (elsize != size && type != NT_unsigned
		  && type != NT_signed, BAD_EL_TYPE);
    }
  else
    {
      /* A narrowing store must be untyped.  */
      constraint (elsize != size && type != NT_untyped, BAD_EL_TYPE);
    }

  /* Encode the offset as a magnitude plus add/subtract bit.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }

  /* The immediate is stored scaled down by the access size, in a 7-bit
     field.  */
  if ((imm % (size / 8) != 0) || imm > (0x7f << neon_logbits (size)))
    {
      switch (size)
	{
	case 8:
	  constraint (1, _("immediate must be in the range of +/-[0,127]"));
	  break;
	case 16:
	  constraint (1, _("immediate must be a multiple of 2 in the"
			   " range of +/-[0,254]"));
	  break;
	case 32:
	  constraint (1, _("immediate must be a multiple of 4 in the"
			   " range of +/-[0,508]"));
	  break;
	}
    }

  if (size != elsize)
    {
      /* Widening/narrowing form: restricted register ranges.  */
      constraint (inst.operands[1].reg > 7, BAD_HIREG);
      constraint (inst.operands[0].reg > 14,
		  _("MVE vector register in the range [Q0..Q7] expected"));
      inst.instruction |= (load && type == NT_unsigned) << 28;
      inst.instruction |= (size == 16) << 19;
      inst.instruction |= neon_logbits (elsize) << 7;
    }
  else
    {
      /* Full-width form: PC base is unpredictable; SP only with
	 writeback.  */
      if (inst.operands[1].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
	as_tsktsk (MVE_BAD_SP);
      inst.instruction |= 1 << 12;
      inst.instruction |= neon_logbits (size) << 7;
    }
  inst.instruction |= inst.operands[1].preind << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* Clear the low seven bits before inserting the scaled offset.  */
  inst.instruction &= 0xffffff80;
  inst.instruction |= imm >> neon_logbits (size);

}
17589
/* Top-level handler for the MVE VLDR{B,H,W,D}/VSTR{B,H,W,D} mnemonics.
   Works out the access size and load/store direction from the mnemonic,
   then dispatches to the encoder for the addressing mode actually parsed:
   [Qn, #imm], [Rn, Qm {, UXTW #n}] or [Rn {, #imm}].  */

static void
do_mve_vstr_vldr (void)
{
  unsigned size;
  int load = 0;

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* Derive the access size (and whether this is a load) from the
     mnemonic.  */
  switch (inst.instruction)
    {
    default:
      gas_assert (0);
      break;
    case M_MNEM_vldrb:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrb:
      size = 8;
      break;
    case M_MNEM_vldrh:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrh:
      size = 16;
      break;
    case M_MNEM_vldrw:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrw:
      size = 32;
      break;
    case M_MNEM_vldrd:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrd:
      size = 64;
      break;
    }
  unsigned elsize = inst.vectype.el[0].size;

  if (inst.operands[1].isquad)
    {
      /* We are dealing with [Q, imm]{!} cases.  */
      do_mve_vstr_vldr_QI (size, elsize, load);
    }
  else
    {
      if (inst.operands[1].immisreg == 2)
	{
	  /* We are dealing with [R, Q, {UXTW #os}] cases.  */
	  do_mve_vstr_vldr_RQ (size, elsize, load);
	}
      else if (!inst.operands[1].immisreg)
	{
	  /* We are dealing with [R, Imm]{!}/[R], Imm cases.  */
	  do_mve_vstr_vldr_RI (size, elsize, load);
	}
      else
	constraint (1, BAD_ADDR_MODE);
    }

  inst.is_neon = 1;
}
17656
/* Encode the MVE interleaving structure load/store family (the VLDn/VSTn
   mnemonics).  Only [Rn] / [Rn]! addressing with no offset is accepted.  */

static void
do_mve_vst_vld (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  /* No immediate, no register offset: base register addressing only.  */
  constraint (!inst.operands[1].preind || inst.relocs[0].exp.X_add_symbol != 0
	      || inst.relocs[0].exp.X_add_number != 0
	      || inst.operands[1].immisreg != 0,
	      BAD_ADDR_MODE);
  constraint (inst.vectype.el[0].size > 32, BAD_EL_TYPE);
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);
  else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
    as_tsktsk (MVE_BAD_SP);


  /* These instructions are one of the "exceptions" mentioned in
     handle_pred_state.  They are MVE instructions that are not VPT compatible
     and do not accept a VPT code, thus appending such a code is a syntax
     error.  */
  if (inst.cond > COND_ALWAYS)
    first_error (BAD_SYNTAX);
  /* If we append a scalar condition code we can set this to
     MVE_OUTSIDE_PRED_INSN as it will also lead to a syntax error.  */
  else if (inst.cond < COND_ALWAYS)
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
  else
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  /* D, W, Rn, Vd and the element-size field.  */
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= neon_logbits (inst.vectype.el[0].size) << 7;
  inst.is_neon = 1;
}
17694
17695 static void
17696 do_mve_vaddlv (void)
17697 {
17698 enum neon_shape rs = neon_select_shape (NS_RRQ, NS_NULL);
17699 struct neon_type_el et
17700 = neon_check_type (3, rs, N_EQK, N_EQK, N_S32 | N_U32 | N_KEY);
17701
17702 if (et.type == NT_invtype)
17703 first_error (BAD_EL_TYPE);
17704
17705 if (inst.cond > COND_ALWAYS)
17706 inst.pred_insn_type = INSIDE_VPT_INSN;
17707 else
17708 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
17709
17710 constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);
17711
17712 inst.instruction |= (et.type == NT_unsigned) << 28;
17713 inst.instruction |= inst.operands[1].reg << 19;
17714 inst.instruction |= inst.operands[0].reg << 12;
17715 inst.instruction |= inst.operands[2].reg;
17716 inst.is_neon = 1;
17717 }
17718
17719 static void
17720 do_neon_dyadic_if_su (void)
17721 {
17722 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
17723 struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
17724 N_SUF_32 | N_KEY);
17725
17726 constraint ((inst.instruction == ((unsigned) N_MNEM_vmax)
17727 || inst.instruction == ((unsigned) N_MNEM_vmin))
17728 && et.type == NT_float
17729 && !ARM_CPU_HAS_FEATURE (cpu_variant,fpu_neon_ext_v1), BAD_FPU);
17730
17731 if (!check_simd_pred_availability (et.type == NT_float,
17732 NEON_CHECK_ARCH | NEON_CHECK_CC))
17733 return;
17734
17735 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
17736 }
17737
/* Encode integer/float VADD/VSUB, preferring the plain VFP form where it
   applies, otherwise the NEON or MVE three-register form.  */

static void
do_neon_addsub_if_i (void)
{
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
      && try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK,
					    N_EQK, N_IF_32 | N_I64 | N_KEY);

  /* The vector-plus-scalar (QQR) form has no 64-bit variant.  */
  constraint (rs == NS_QQR && et.size == 64, BAD_FPU);
  /* If we are parsing Q registers and the element types match MVE, which NEON
     also supports, then we must check whether this is an instruction that can
     be used by both MVE/NEON.  This distinction can be made based on whether
     they are predicated or not.  */
  if ((rs == NS_QQQ || rs == NS_QQR) && et.size != 64)
    {
      if (!check_simd_pred_availability (et.type == NT_float,
					 NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
    }
  else
    {
      /* If they are either in a D register or are using an unsupported.  */
      if (rs != NS_QQR
	  && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
    }

  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
17772
17773 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
17774 result to be:
17775 V<op> A,B (A is operand 0, B is operand 2)
17776 to mean:
17777 V<op> A,B,A
17778 not:
17779 V<op> A,B,B
17780 so handle that case specially. */
17781
17782 static void
17783 neon_exchange_operands (void)
17784 {
17785 if (inst.operands[1].present)
17786 {
17787 void *scratch = xmalloc (sizeof (inst.operands[0]));
17788
17789 /* Swap operands[1] and operands[2]. */
17790 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
17791 inst.operands[1] = inst.operands[2];
17792 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
17793 free (scratch);
17794 }
17795 else
17796 {
17797 inst.operands[1] = inst.operands[2];
17798 inst.operands[2] = inst.operands[0];
17799 }
17800 }
17801
/* Encode a NEON comparison.  With a register as the second source this is a
   three-same operation over REGTYPES (operands 1 and 2 are exchanged first
   when INVERT is set); with an immediate it is the compare-against-#0 form
   over IMMTYPES.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
						N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      /* Vd, Vm, Q bit, float flag and element-size field.  */
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
17829
/* Comparison with the operands in source order.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, false);
}
17835
/* Comparison with the register operands exchanged before encoding.  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, true);
}
17841
/* Equality comparison over 32-bit integer and float element types.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, false);
}
17847
17848 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
17849 scalars, which are encoded in 5 bits, M : Rm.
17850 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
17851 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
17852 index in M.
17853
17854 Dot Product instructions are similar to multiply instructions except elsize
17855 should always be 32.
17856
17857 This function translates SCALAR, which is GAS's internal encoding of indexed
17858 scalar register, to raw encoding. There is also register and index range
17859 check based on ELSIZE. */
17860
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in bits [2:0], index in bit 3 (plus M).  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in bits [3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Anything else (bad size, register or index) is out of range.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
17886
17887 /* Encode multiply / multiply-accumulate scalar instructions. */
17888
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Translate the GAS-internal indexed-scalar operand into its raw M:Vm
     encoding (range-checked for ET.SIZE).  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  /* Vd, Vn, the scalar, float flag, size field and U bit.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
17911
/* Encode VMLA/VMLS-style multiply-accumulate: VFP form if it applies,
   otherwise the NEON by-scalar form, the MVE vector-by-GPR form, or the
   plain three-register form.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (!check_simd_pred_availability (false, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[2].isscalar)
    {
      /* NEON by-scalar form; not available on MVE.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else if (!inst.operands[2].isvec)
    {
      /* MVE vector-by-GPR form.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);

      enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);

      neon_dyadic_misc (NT_unsigned, N_SU_MVE, 0);
    }
  else
    {
      /* NEON three-register form; not available on MVE.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
17947
/* Encode the BF16 VFMAB/VFMAT instructions (vector or indexed-scalar
   forms).  Any mnemonic other than VFMAB is treated as VFMAT.  */

static void
do_bfloat_vfma (void)
{
  constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
  constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
  enum neon_shape rs;
  int t_bit = 0;

  if (inst.instruction != B_MNEM_vfmab)
    {
      t_bit = 1;
      inst.instruction = B_MNEM_vfmat;
    }

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form: index 0..3, register Q0..Q7.  */
      rs = neon_select_shape (NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);

      inst.instruction |= (1 << 25);
      int idx = inst.operands[2].reg & 0xf;
      constraint (!(idx < 4), _("index must be in the range 0 to 3"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 8),
		  _("indexed register must be less than 8"));
      neon_three_args (t_bit);
      /* The two index bits are split across the encoding.  */
      inst.instruction |= ((idx & 1) << 3);
      inst.instruction |= ((idx & 2) << 4);
    }
  else
    {
      /* Plain three-register vector form.  */
      rs = neon_select_shape (NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
      neon_three_args (t_bit);
    }

}
17985
/* Encode VFMA/VFMS: the plain VFP form if it applies, then the MVE
   vector-by-scalar (QQR) form, otherwise the generic three-same
   encoding.  */

static void
do_neon_fmac (void)
{
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_fma)
      && try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (!check_simd_pred_availability (true, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
    {
      enum neon_shape rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs, N_F_MVE | N_KEY, N_EQK,
						N_EQK);

      if (rs == NS_QQR)
	{
	  /* MVE vector-by-scalar: SP/PC as the scalar only warn.  */
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  /* Fixed base opcode plus size, Vd, Vn and the scalar Rm.  */
	  inst.instruction = 0xee310e40;
	  inst.instruction |= (et.size == 16) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[1].reg) << 6;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	  return;
	}
    }
  else
    {
      constraint (!inst.operands[2].isvec, BAD_FPU);
    }

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
18028
18029 static void
18030 do_mve_vfma (void)
18031 {
18032 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_bf16) &&
18033 inst.cond == COND_ALWAYS)
18034 {
18035 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
18036 inst.instruction = N_MNEM_vfma;
18037 inst.pred_insn_type = INSIDE_VPT_INSN;
18038 inst.cond = 0xf;
18039 return do_neon_fmac();
18040 }
18041 else
18042 {
18043 do_bfloat_vfma();
18044 }
18045 }
18046
18047 static void
18048 do_neon_tst (void)
18049 {
18050 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
18051 struct neon_type_el et = neon_check_type (3, rs,
18052 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
18053 neon_three_same (neon_quad (rs), 0, et.size);
18054 }
18055
18056 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
18057 same types as the MAC equivalents. The polynomial type for this instruction
18058 is encoded the same as the integer type. */
18059
static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (!check_simd_pred_availability (false, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[2].isscalar)
    {
      /* By-scalar form is NEON-only; reuse the MAC scalar path.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      do_neon_mac_maybe_scalar ();
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  /* MVE accepts integer and (with the FP extension) float types,
	     and additionally the vector-by-GPR (QQR) shape.  */
	  enum neon_shape rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
	  struct neon_type_el et
	    = neon_check_type (3, rs, N_EQK, N_EQK, N_I_MVE | N_F_MVE | N_KEY);
	  if (et.type == NT_float)
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
			BAD_FPU);

	  neon_dyadic_misc (NT_float, N_I_MVE | N_F_MVE, 0);
	}
      else
	{
	  /* NEON three-register form; the polynomial P8 type is encoded
	     like the integer types.  */
	  constraint (!inst.operands[2].isvec, BAD_FPU);
	  neon_dyadic_misc (NT_poly,
			    N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
	}
    }
}
18095
/* Encode VQDMULH/VQRDMULH: NEON by-scalar form, or the three-register /
   vector-by-GPR form (MVE additionally accepts S8 and the QQR shape).  */

static void
do_neon_qdmulh (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  if (inst.operands[2].isscalar)
    {
      /* By-scalar form is NEON-only.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	}

      NEON_ENCODE (INTEGER, inst);
      if (rs == NS_QQR)
	mve_encode_qqr (et.size, 0, 0);
      else
	/* The U bit (rounding) comes from bit mask.  */
	neon_three_same (neon_quad (rs), 0, et.size);
    }
}
18136
18137 static void
18138 do_mve_vaddv (void)
18139 {
18140 enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
18141 struct neon_type_el et
18142 = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
18143
18144 if (et.type == NT_invtype)
18145 first_error (BAD_EL_TYPE);
18146
18147 if (inst.cond > COND_ALWAYS)
18148 inst.pred_insn_type = INSIDE_VPT_INSN;
18149 else
18150 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18151
18152 constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);
18153
18154 mve_encode_rq (et.type == NT_unsigned, et.size);
18155 }
18156
/* Encode the MVE VHCADD (halving complex add with rotate) instruction.
   The rotation immediate must be 90 or 270.  */

static void
do_mve_vhcadd (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQI, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));

  if (et.size == 32 && inst.operands[0].reg == inst.operands[2].reg)
    as_tsktsk (_("Warning: 32-bit element size and same first and third "
		 "operand makes instruction UNPREDICTABLE"));

  mve_encode_qqq (0, et.size);
  /* rot == 270 selects the other rotation encoding.  */
  inst.instruction |= (rot == 270) << 12;
  inst.is_neon = 1;
}
18180
/* Encode MVE VQDMULL (saturating doubling multiply long), in either the
   three-register (QQQ) or vector-by-GPR (QQR) shape.  */

static void
do_mve_vqdmull (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);

  /* With 32-bit elements the destination overlapping a source is
     unpredictable.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || (rs == NS_QQQ && inst.operands[0].reg == inst.operands[2].reg)))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (rs == NS_QQQ)
    {
      mve_encode_qqq (et.size == 32, 64);
      inst.instruction |= 1;
    }
  else
    {
      mve_encode_qqr (64, et.size == 32, 0);
      inst.instruction |= 0x3 << 5;
    }
}
18209
18210 static void
18211 do_mve_vadc (void)
18212 {
18213 enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
18214 struct neon_type_el et
18215 = neon_check_type (3, rs, N_KEY | N_I32, N_EQK, N_EQK);
18216
18217 if (et.type == NT_invtype)
18218 first_error (BAD_EL_TYPE);
18219
18220 if (inst.cond > COND_ALWAYS)
18221 inst.pred_insn_type = INSIDE_VPT_INSN;
18222 else
18223 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18224
18225 mve_encode_qqq (0, 64);
18226 }
18227
18228 static void
18229 do_mve_vbrsr (void)
18230 {
18231 enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
18232 struct neon_type_el et
18233 = neon_check_type (3, rs, N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
18234
18235 if (inst.cond > COND_ALWAYS)
18236 inst.pred_insn_type = INSIDE_VPT_INSN;
18237 else
18238 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18239
18240 mve_encode_qqr (et.size, 0, 0);
18241 }
18242
18243 static void
18244 do_mve_vsbc (void)
18245 {
18246 neon_check_type (3, NS_QQQ, N_EQK, N_EQK, N_I32 | N_KEY);
18247
18248 if (inst.cond > COND_ALWAYS)
18249 inst.pred_insn_type = INSIDE_VPT_INSN;
18250 else
18251 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18252
18253 mve_encode_qqq (1, 64);
18254 }
18255
18256 static void
18257 do_mve_vmulh (void)
18258 {
18259 enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
18260 struct neon_type_el et
18261 = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
18262
18263 if (inst.cond > COND_ALWAYS)
18264 inst.pred_insn_type = INSIDE_VPT_INSN;
18265 else
18266 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18267
18268 mve_encode_qqq (et.type == NT_unsigned, et.size);
18269 }
18270
18271 static void
18272 do_mve_vqdmlah (void)
18273 {
18274 enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
18275 struct neon_type_el et
18276 = neon_check_type (3, rs, N_EQK, N_EQK, N_S_32 | N_KEY);
18277
18278 if (inst.cond > COND_ALWAYS)
18279 inst.pred_insn_type = INSIDE_VPT_INSN;
18280 else
18281 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18282
18283 mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
18284 }
18285
18286 static void
18287 do_mve_vqdmladh (void)
18288 {
18289 enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
18290 struct neon_type_el et
18291 = neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
18292
18293 if (inst.cond > COND_ALWAYS)
18294 inst.pred_insn_type = INSIDE_VPT_INSN;
18295 else
18296 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18297
18298 mve_encode_qqq (0, et.size);
18299 }
18300
18301
/* Handle VMULLT/VMULLB.  An unpredicated, unconditional VMULLT on a
   non-MVE target (or with a non-QQQ shape) is really a NEON VMUL with the
   "ll"/"t" part being a condition suffix, so fall back to do_neon_mul for
   those cases.  */

static void
do_mve_vmull (void)
{

  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_DDS,
					  NS_QQS, NS_QQQ, NS_QQR, NS_NULL);
  if (inst.cond == COND_ALWAYS
      && ((unsigned)inst.instruction) == M_MNEM_vmullt)
    {

      if (rs == NS_QQQ)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	    goto neon_vmul;
	}
      else
	goto neon_vmul;
    }

  constraint (rs != NS_QQQ, BAD_FPU);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SU_32 | N_P8 | N_P16 | N_KEY);

  /* We are dealing with MVE's vmullt.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (et.type == NT_poly)
    mve_encode_qqq (neon_logbits (et.size), 64);
  else
    mve_encode_qqq (et.type == NT_unsigned, et.size);

  return;

 neon_vmul:
  /* Reinterpret as NEON VMUL with "lt" taken as the condition suffix.  */
  inst.instruction = N_MNEM_vmul;
  inst.cond = 0xb;
  if (thumb_mode)
    inst.pred_insn_type = INSIDE_IT_INSN;
  do_neon_mul ();
}
18350
/* Encode the MVE VABAV (absolute difference and accumulate across vector)
   instruction, shape RQQ.  */

static void
do_mve_vabav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);

  if (rs == NS_NULL)
    return;

  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  struct neon_type_el et = neon_check_type (2, NS_NULL, N_EQK, N_KEY | N_S8
					    | N_S16 | N_S32 | N_U8 | N_U16
					    | N_U32);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rqq (et.type == NT_unsigned, et.size);
}
18373
/* Encode the MVE VMLADAV/VMLSDAV family (multiply-accumulate across
   vector into a GPR, shape RQQ).  The exchange ("x") and subtracting
   variants only accept signed element types.  */

static void
do_mve_vmladav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
					    N_EQK, N_EQK, N_SU_MVE | N_KEY);

  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmladavx
	  || inst.instruction == M_MNEM_vmladavax
	  || inst.instruction == M_MNEM_vmlsdav
	  || inst.instruction == M_MNEM_vmlsdava
	  || inst.instruction == M_MNEM_vmlsdavx
	  || inst.instruction == M_MNEM_vmlsdavax))
    first_error (BAD_SIMD_TYPE);

  constraint (inst.operands[2].reg > 14,
	      _("MVE vector register in the range [Q0..Q7] expected"));

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The 8-bit-element flag lives in a different bit for the VMLSDAV
     variants than for VMLADAV.  */
  if (inst.instruction == M_MNEM_vmlsdav
      || inst.instruction == M_MNEM_vmlsdava
      || inst.instruction == M_MNEM_vmlsdavx
      || inst.instruction == M_MNEM_vmlsdavax)
    inst.instruction |= (et.size == 8) << 28;
  else
    inst.instruction |= (et.size == 8) << 8;

  mve_encode_rqq (et.type == NT_unsigned, 64);
  inst.instruction |= (et.size == 32) << 16;
}
18409
/* Encode the MVE VMLALDAV/VMLSLDAV family (long multiply-accumulate
   across vector into a GPR pair, shape RRQQ).  The subtracting variants
   only accept signed element types.  */

static void
do_mve_vmlaldav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (4, rs, N_EQK, N_EQK, N_EQK,
		       N_S16 | N_S32 | N_U16 | N_U32 | N_KEY);

  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmlsldav
	  || inst.instruction == M_MNEM_vmlsldava
	  || inst.instruction == M_MNEM_vmlsldavx
	  || inst.instruction == M_MNEM_vmlsldavax))
    first_error (BAD_SIMD_TYPE);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rrqq (et.type == NT_unsigned, et.size);
}
18432
/* Encode the MVE VRMLALDAVH/VRMLSLDAVH family (rounding long
   multiply-accumulate across vector returning the high half, shape
   RRQQ).  */

static void
do_mve_vrmlaldavh (void)
{
  struct neon_type_el et;
  if (inst.instruction == M_MNEM_vrmlsldavh
      || inst.instruction == M_MNEM_vrmlsldavha
      || inst.instruction == M_MNEM_vrmlsldavhx
      || inst.instruction == M_MNEM_vrmlsldavhax)
    {
      /* Subtracting variants: signed only; SP is merely a warning here.  */
      et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK, N_S32 | N_KEY);
      if (inst.operands[1].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
    }
  else
    {
      if (inst.instruction == M_MNEM_vrmlaldavhx
	  || inst.instruction == M_MNEM_vrmlaldavhax)
	et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK, N_S32 | N_KEY);
      else
	et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK,
			      N_U32 | N_S32 | N_KEY);
      /* vrmlaldavh's encoding with SP as the second, odd, GPR operand may alias
	 with vmax/min instructions, making the use of SP in assembly really
	 nonsensical, so instead of issuing a warning like we do for other uses
	 of SP for the odd register operand we error out.  */
      constraint (inst.operands[1].reg == REG_SP, BAD_SP);
    }

  /* Make sure we still check the second operand is an odd one and that PC is
     disallowed.  This because we are parsing for any GPR operand, to be able
     to distinguish between giving a warning or an error for SP as described
     above.  */
  constraint ((inst.operands[1].reg % 2) != 1, BAD_EVEN);
  constraint (inst.operands[1].reg == REG_PC, BAD_PC);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rrqq (et.type == NT_unsigned, 0);
}
18475
18476
18477 static void
18478 do_mve_vmaxnmv (void)
18479 {
18480 enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
18481 struct neon_type_el et
18482 = neon_check_type (2, rs, N_EQK, N_F_MVE | N_KEY);
18483
18484 if (inst.cond > COND_ALWAYS)
18485 inst.pred_insn_type = INSIDE_VPT_INSN;
18486 else
18487 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18488
18489 if (inst.operands[0].reg == REG_SP)
18490 as_tsktsk (MVE_BAD_SP);
18491 else if (inst.operands[0].reg == REG_PC)
18492 as_tsktsk (MVE_BAD_PC);
18493
18494 mve_encode_rq (et.size == 16, 64);
18495 }
18496
/* Encode the MVE VMAXV/VMINV family (max/min across vector into a GPR).
   The plain variants accept signed and unsigned elements; the others
   (presumably the VMAXAV/VMINAV absolute variants) are signed only.  */

static void
do_mve_vmaxv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
  struct neon_type_el et;

  if (inst.instruction == M_MNEM_vmaxv || inst.instruction == M_MNEM_vminv)
    et = neon_check_type (2, rs, N_EQK, N_SU_MVE | N_KEY);
  else
    et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* SP/PC as the scalar destination only warn.  */
  if (inst.operands[0].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[0].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  mve_encode_rq (et.type == NT_unsigned, et.size);
}
18520
18521
/* Encode VQRDMLAH/VQRDMLSH: the ARMv8.1 AdvSIMD forms (by-scalar or
   three-register) on NEON targets, or the vector-by-GPR form on MVE.  */

static void
do_neon_qrdmlah (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* Check we're on the correct architecture.  */
      if (!mark_feature_used (&fpu_neon_ext_armv8))
	inst.error
	  = _("instruction form not available on this architecture.");
      else if (!mark_feature_used (&fpu_neon_ext_v8_1))
	{
	  as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
	  record_feature_use (&fpu_neon_ext_v8_1);
	}
      if (inst.operands[2].isscalar)
	{
	  /* NEON by-scalar form.  */
	  enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
	  struct neon_type_el et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	  NEON_ENCODE (SCALAR, inst);
	  neon_mul_mac (et, neon_quad (rs));
	}
      else
	{
	  /* NEON three-register form.  */
	  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  struct neon_type_el et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	  NEON_ENCODE (INTEGER, inst);
	  /* The U bit (rounding) comes from bit mask.  */
	  neon_three_same (neon_quad (rs), 0, et.size);
	}
    }
  else
    {
      /* MVE vector-by-GPR form.  */
      enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
      struct neon_type_el et
	= neon_check_type (3, rs, N_EQK, N_EQK, N_S_32 | N_KEY);

      NEON_ENCODE (INTEGER, inst);
      mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
    }
}
18566
18567 static void
18568 do_neon_fcmp_absolute (void)
18569 {
18570 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
18571 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
18572 N_F_16_32 | N_KEY);
18573 /* Size field comes from bit mask. */
18574 neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
18575 }
18576
/* Inverted absolute comparison: exchange the source operands, then encode
   as do_neon_fcmp_absolute.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
18583
18584 static void
18585 do_neon_step (void)
18586 {
18587 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
18588 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
18589 N_F_16_32 | N_KEY);
18590 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
18591 }
18592
/* Encode VABS/VNEG: the plain VFP form if it applies, otherwise the
   two-register NEON/MVE form over S32 and float types.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  /* Vd, Vm, Q bit, float flag and element-size field.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
18619
static void
do_neon_sli (void)
{
  /* VSLI: shift left and insert.  Valid shift counts are 0..size-1.  */
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* MVE form: Q registers only, no 64-bit elements.  */
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
    }


  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  /* Shift amount is encoded directly (unlike VSRI, which encodes
     size - imm).  */
  neon_imm_shift (false, 0, neon_quad (rs), et, imm);
}
18645
static void
do_neon_sri (void)
{
  /* VSRI: shift right and insert.  Valid shift counts are 1..size.  */
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* MVE form: Q registers only, no 64-bit elements.  */
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
    }

  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  /* Right shifts are encoded as size - imm.  */
  neon_imm_shift (false, 0, neon_quad (rs), et, et.size - imm);
}
18670
static void
do_neon_qshlu_imm (void)
{
  /* VQSHLU: saturating shift left by immediate, signed input producing an
     unsigned result.  Valid shift counts are 0..size-1.  */
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* MVE form: Q registers only, no 64-bit elements.  */
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK | N_UNS,
			    N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
    }

  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (false, 0, neon_quad (rs), et, imm);
}
18701
18702 static void
18703 do_neon_qmovn (void)
18704 {
18705 struct neon_type_el et = neon_check_type (2, NS_DQ,
18706 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
18707 /* Saturating move where operands can be signed or unsigned, and the
18708 destination has the same signedness. */
18709 NEON_ENCODE (INTEGER, inst);
18710 if (et.type == NT_unsigned)
18711 inst.instruction |= 0xc0;
18712 else
18713 inst.instruction |= 0x80;
18714 neon_two_same (0, 1, et.size / 2);
18715 }
18716
static void
do_neon_qmovun (void)
{
  /* VQMOVUN: saturating narrowing move producing an unsigned result.  */
  struct neon_type_el et = neon_check_type (2, NS_DQ,
					    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
18726
static void
do_neon_rshift_sat_narrow (void)
{
  /* VQ{R}SHRN: saturating (rounding) shift right and narrow.  */
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
					    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* Right shifts encode size - imm in the immediate field.  */
  neon_imm_shift (true, et.type == NT_unsigned, 0, et, et.size - imm);
}
18753
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* VQ{R}SHRUN: saturating (rounding) shift right and narrow, signed
     input producing an unsigned result.  */
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
					    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (true, 1, 0, et, et.size - imm);
}
18783
18784 static void
18785 do_neon_movn (void)
18786 {
18787 struct neon_type_el et = neon_check_type (2, NS_DQ,
18788 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
18789 NEON_ENCODE (INTEGER, inst);
18790 neon_two_same (0, 1, et.size / 2);
18791 }
18792
static void
do_neon_rshift_narrow (void)
{
  /* V{R}SHRN: (rounding) shift right and narrow.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
					    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm> */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* Right shifts encode size - imm in the immediate field.  */
  neon_imm_shift (false, 0, 0, et, et.size - imm);
}
18817
static void
do_neon_shll (void)
{
  /* VSHLL: shift left long (widening).  A shift by exactly the element
     size has a dedicated "maximum shift" encoding; other counts use the
     ordinary immediate-shift encoding.  */
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
					    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
			    N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (true, et.type == NT_unsigned, 0, et, imm);
    }
}
18847
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

/* X-macro table of every conversion the VCVT group can perform.  Each
   CVT_VAR row lists: flavour name, destination type bits, source type bits,
   register-class bits, and the pre-UAL VFP opcode names for the bitshift,
   plain, and round-to-zero forms (NULL where no such VFP spelling exists).
   The table is expanded several times below with different definitions of
   CVT_VAR; `whole_reg' and `key' are resolved at the expansion site (see
   get_neon_cvt_flavour).  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
     Compared with single/double precision variants, only the co-processor \
     field is different, so the encoding flow is reused here.  */ \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  CVT_VAR (bf16_f32, N_BF16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

/* First expansion: each row becomes one enumerator.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* Flavours from f32_f64 onward use VFP rather than Neon encodings
     (tested in do_neon_cvt_1).  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
18900
static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
  /* Determine which conversion this instruction performs: try each CVT_VAR
     row against the parsed operand types in table order and return the
     first flavour that type-checks, or neon_cvt_flavour_invalid.  */
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  /* `whole_reg' and `key' are referenced by the CVT_FLAVOUR_VAR rows
     expanded below.  */
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
18926
/* Rounding/conversion mode selected by the VCVT* mnemonic variants
   (see the do_neon_cvt* wrappers below).  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,	/* VCVTA.  */
  neon_cvt_mode_n,	/* VCVTN.  */
  neon_cvt_mode_p,	/* VCVTP.  */
  neon_cvt_mode_m,	/* VCVTM.  */
  neon_cvt_mode_z,	/* Plain VCVT: round towards zero (see PR11109
			   handling in do_neon_cvt_1).  */
  neon_cvt_mode_x,	/* VCVTR.  */
  neon_cvt_mode_r	/* NOTE(review): not used in this part of the file;
			   presumably the VRINTR-style mode — confirm at the
			   use sites elsewhere.  */
};
18937
/* Neon-syntax VFP conversions.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  /* Assemble a VCVT written in Neon syntax via the pre-UAL VFP opcode
     names: pick the BSN (bitshift) or CN (plain) column of the CVT_VAR
     table for this flavour and hand it to do_vfp_nsyn_opcode.  */
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The bitshift forms reuse the destination as first source;
	     shuffle the immediate into operand 1 for the VFP encoder.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
18991
static void
do_vfp_nsyn_cvtz (void)
{
  /* Round-towards-zero VCVT via the ZN column of the CVT_VAR table
     (ftosizs and friends); does nothing if the flavour has no
     round-to-zero VFP spelling.  */
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
19008
static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  /* FP v8 (ARMv8) scalar VCVT{A,N,P,M} encodings: flavour selects the
     sz (source precision) and op (signedness) fields, mode selects rm.  */
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
		_(BAD_FP16));

  set_pred_insn_type (OUTSIDE_PRED_INSN);

  /* sz = 1 only for f64 sources; op = 1 for signed results.  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f16:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f16:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      ||flavour == neon_cvt_flavour_u32_f16)
    do_scalar_fp16_v82_encode ();
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  inst.instruction |= 0xf0000000;
  inst.is_neon = true;
}
19084
static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  /* Central worker for the VCVT* family: picks between the VFP, Neon and
     MVE encodings based on the operand shape and the type flavour.  */
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  if ((rs == NS_FD || rs == NS_QQI) && mode == neon_cvt_mode_n
      && ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* We are dealing with vcvt with the 'ne' condition.  */
      inst.cond = 0x1;
      inst.instruction = N_MNEM_vcvt;
      do_neon_cvt_1 (neon_cvt_mode_z);
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_QQI:
      if (mode == neon_cvt_mode_z
	  && (flavour == neon_cvt_flavour_f16_s16
	      || flavour == neon_cvt_flavour_f16_u16
	      || flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_f32_u32
	      || flavour == neon_cvt_flavour_f32_s32
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (true,
					     NEON_CHECK_CC | NEON_CHECK_ARCH))
	    return;
	}
      /* fall through.  */
    case NS_DDI:
      {
	/* Fixed-point conversion (immediate shift count present).  */
	unsigned immbits;
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if ((rs != NS_QQI || !ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	    && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	  {
	    /* MVE restricts the shift count to 1..element-size.  */
	    constraint (inst.operands[2].present && inst.operands[2].imm == 0,
			_("immediate value out of range"));
	    switch (flavour)
	      {
	      case neon_cvt_flavour_f16_s16:
	      case neon_cvt_flavour_f16_u16:
	      case neon_cvt_flavour_s16_f16:
	      case neon_cvt_flavour_u16_f16:
		constraint (inst.operands[2].imm > 16,
			    _("immediate value out of range"));
		break;
	      case neon_cvt_flavour_f32_u32:
	      case neon_cvt_flavour_f32_s32:
	      case neon_cvt_flavour_s32_f32:
	      case neon_cvt_flavour_u32_f32:
		constraint (inst.operands[2].imm > 32,
			    _("immediate value out of range"));
		break;
	      default:
		inst.error = BAD_FPU;
		return;
	      }
	  }

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	/* Flavours below s16_f16 are the 32-bit-element ones (see the
	   CVT_VAR table order).  */
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* NOTE(review): bit 21 was already set just above; this OR is
	       redundant but harmless.  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_QQ:
      if ((mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	   || mode == neon_cvt_mode_m || mode == neon_cvt_mode_p)
	  && (flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (true,
					     NEON_CHECK_CC | NEON_CHECK_ARCH8))
	    return;
	}
      else if (mode == neon_cvt_mode_z
	       && (flavour == neon_cvt_flavour_f16_s16
		   || flavour == neon_cvt_flavour_f16_u16
		   || flavour == neon_cvt_flavour_s16_f16
		   || flavour == neon_cvt_flavour_u16_f16
		   || flavour == neon_cvt_flavour_f32_u32
		   || flavour == neon_cvt_flavour_f32_s32
		   || flavour == neon_cvt_flavour_s32_f32
		   || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (true,
					     NEON_CHECK_CC | NEON_CHECK_ARCH))
	    return;
	}
      /* fall through.  */
    case NS_DD:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* Rounding-mode variants (VCVT{A,N,P,M}).  */
	  NEON_ENCODE (FLOAT, inst);
	  if (!check_simd_pred_availability (true,
					     NEON_CHECK_CC | NEON_CHECK_ARCH8))
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
	int_encode:
	  {
	    /* Plain integer <-> float conversion (also reached via goto
	       from the fixed-point case when the immediate is #0).  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	      {
		if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
		  return;
	      }

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

      /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      if (rs == NS_DQ)
	{
	  if (flavour == neon_cvt_flavour_bf16_f32)
	    {
	      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH8) == FAIL)
		return;
	      constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
	      /* VCVT.bf16.f32.  */
	      inst.instruction = 0x11b60640;
	    }
	  else
	    /* VCVT.f16.f32.  */
	    inst.instruction = 0x3b60600;
	}
      else
	/* VCVT.f32.f16.  */
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
19372
/* Entry point for the VCVTR mnemonic.  */

static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
19378
/* Entry point for the plain VCVT mnemonic (round towards zero).  */

static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
19384
/* Entry point for the VCVTA mnemonic.  */

static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
19390
/* Entry point for the VCVTN mnemonic.  */

static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
19396
/* Entry point for the VCVTP mnemonic.  */

static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
19402
/* Entry point for the VCVTM mnemonic.  */

static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
19408
19409 static void
19410 do_neon_cvttb_2 (bool t, bool to, bool is_double)
19411 {
19412 if (is_double)
19413 mark_feature_used (&fpu_vfp_ext_armv8);
19414
19415 encode_arm_vfp_reg (inst.operands[0].reg,
19416 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
19417 encode_arm_vfp_reg (inst.operands[1].reg,
19418 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
19419 inst.instruction |= to ? 0x10000 : 0;
19420 inst.instruction |= t ? 0x80 : 0;
19421 inst.instruction |= is_double ? 0x100 : 0;
19422 do_vfp_cond_or_thumb ();
19423 }
19424
static void
do_neon_cvttb_1 (bool t)
{
  /* Shared handler for VCVTB (t = false) and VCVTT (t = true): dispatch
     between the MVE vector form and the various scalar half-precision
     VFP forms based on the operand shape and element types.  */
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_QQ, NS_QQI, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (rs == NS_QQ || rs == NS_QQI)
    {
      /* MVE vector form.  */
      int single_to_half = 0;
      if (!check_simd_pred_availability (true, NEON_CHECK_ARCH))
	return;

      enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	  && (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_f16_s16
	      || flavour == neon_cvt_flavour_f16_u16
	      || flavour == neon_cvt_flavour_u32_f32
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_f32_s32
	      || flavour == neon_cvt_flavour_f32_u32))
	{
	  /* Same-width int <-> float: re-dispatch as a plain MVE VCVT.  */
	  inst.cond = 0xf;
	  inst.instruction = N_MNEM_vcvt;
	  set_pred_insn_type (INSIDE_VPT_INSN);
	  do_neon_cvt_1 (neon_cvt_mode_z);
	  return;
	}
      else if (rs == NS_QQ && flavour == neon_cvt_flavour_f32_f16)
	single_to_half = 1;
      else if (rs == NS_QQ && flavour != neon_cvt_flavour_f16_f32)
	{
	  first_error (BAD_FPU);
	  return;
	}

      /* MVE VCVTB/VCVTT between f16 and f32 vectors.  */
      inst.instruction = 0xee3f0e01;
      inst.instruction |= single_to_half << 28;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 13;
      inst.instruction |= t << 12;
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 1;
      inst.is_neon = 1;
    }
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* Scalar f32 -> f16.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/true, /*is_double=*/false);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* Scalar f16 -> f32.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/false, /*is_double=*/false);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* Scalar f64 -> f16.  */
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/true, /*is_double=*/true);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* Scalar f16 -> f64.  */
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/false, /*is_double=*/true);
    }
  else if (neon_check_type (2, rs, N_BF16 | N_VFP, N_F32).type != NT_invtype)
    {
      /* Scalar f32 -> bf16 (requires the bf16 extension).  */
      constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
      inst.error = NULL;
      inst.instruction |= (1 << 8);
      inst.instruction &= ~(1 << 9);
      do_neon_cvttb_2 (t, /*to=*/true, /*is_double=*/false);
    }
  else
    return;
}
19515
/* Entry point for the VCVTB mnemonic (bottom half).  */

static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (false);
}
19521
19522
/* Entry point for the VCVTT mnemonic (top half).  */

static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (true);
}
19528
static void
neon_move_immediate (void)
{
  /* Encode the immediate forms of VMOV/VMVN.  If the immediate cannot be
     represented directly, try the inverted immediate with the opposite
     instruction (VMOV <-> VMVN).  */
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
					    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  /* A 64-bit immediate carries its high half in the reg field.  */
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite the OP bit: neon_cmode_for_move_imm may have flipped it.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
19580
static void
do_neon_mvn (void)
{
  /* VMVN: bitwise NOT of a register, or move of an inverted immediate
     (handled by neon_move_immediate).  */
  if (!check_simd_pred_availability (false, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[1].isreg)
    {
      enum neon_shape rs;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	/* MVE form: Q registers only.  */
	rs = neon_select_shape (NS_QQ, NS_NULL);
      else
	rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      if (rs == NS_NULL)
	return;

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* MVE has no D-register immediate form.  */
      constraint (!inst.operands[1].isreg && !inst.operands[0].isquad, BAD_FPU);
    }
}
19618
19619 /* Encode instructions of form:
19620
19621 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
19622 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
19623
19624 static void
19625 neon_mixed_length (struct neon_type_el et, unsigned size)
19626 {
19627 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
19628 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
19629 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
19630 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
19631 inst.instruction |= LOW4 (inst.operands[2].reg);
19632 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
19633 inst.instruction |= (et.type == NT_unsigned) << 24;
19634 inst.instruction |= neon_logbits (size) << 20;
19635
19636 neon_dp_fixup (&inst);
19637 }
19638
/* Encode VADDL/VSUBL/VABDL.  For Neon these are widening (Qd = Dn op Dm);
   for MVE they are only accepted inside an IT block with LE/LT conditions,
   where they are re-dispatched as VADD/VSUB/VABD.  */
static void
do_neon_dyadic_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_QDD, NS_HHH, NS_FFF, NS_DDD, NS_NULL);
  if (rs == NS_QDD)
    {
      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH | NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (INTEGER, inst);
      /* FIXME: Type checking for lengthening op.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
      neon_mixed_length (et, et.size);
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (inst.cond == 0xf || inst.cond == 0x10))
    {
      /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
	 in an IT block with le/lt conditions.  */

      /* Map the parsed {e,t} markers onto the LE/LT condition codes.  */
      if (inst.cond == 0xf)
	inst.cond = 0xb;
      else if (inst.cond == 0x10)
	inst.cond = 0xd;

      inst.pred_insn_type = INSIDE_IT_INSN;

      /* Re-dispatch under the non-widening mnemonic.  */
      if (inst.instruction == N_MNEM_vaddl)
	{
	  inst.instruction = N_MNEM_vadd;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vsubl)
	{
	  inst.instruction = N_MNEM_vsub;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vabdl)
	{
	  inst.instruction = N_MNEM_vabd;
	  do_neon_dyadic_if_su ();
	}
    }
  else
    first_error (BAD_FPU);
}
19686
19687 static void
19688 do_neon_abal (void)
19689 {
19690 struct neon_type_el et = neon_check_type (3, NS_QDD,
19691 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
19692 neon_mixed_length (et, et.size);
19693 }
19694
19695 static void
19696 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
19697 {
19698 if (inst.operands[2].isscalar)
19699 {
19700 struct neon_type_el et = neon_check_type (3, NS_QDS,
19701 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
19702 NEON_ENCODE (SCALAR, inst);
19703 neon_mul_mac (et, et.type == NT_unsigned);
19704 }
19705 else
19706 {
19707 struct neon_type_el et = neon_check_type (3, NS_QDD,
19708 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
19709 NEON_ENCODE (INTEGER, inst);
19710 neon_mixed_length (et, et.size);
19711 }
19712 }
19713
/* Long MAC (e.g. VMLAL-style): the scalar variant permits 16/32-bit
   signed/unsigned elements, the register variant any of N_SU_32.  */
static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
19719
19720 /* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
19721 internal SCALAR. QUAD_P is 1 if it's for Q format, otherwise it's 0. */
19722
/* Like neon_scalar_for_mul, this function generates the Rm encoding from
   GAS's internal SCALAR.  QUAD_P is 1 for the Q-form encoding, 0 for the
   D form.  Reports an error and returns 0 if the scalar is out of range.  */
static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned reg = NEON_SCALAR_REG (scalar);
  unsigned idx = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      /* Q form: register in bits [2:0], index split across bits 3 and 5.  */
      if (reg <= 7 && idx <= 3)
	return ((reg & 0x7)
		| ((idx & 0x1) << 3)
		| (((idx >> 1) & 0x1) << 5));
    }
  else if (reg <= 15 && idx <= 1)
    /* D form: register split across bits 5 and [2:0], index in bit 3.  */
    return (((reg & 0x1) << 5)
	    | ((reg >> 1) & 0x7)
	    | ((idx & 0x1) << 3));

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
19752
/* Encode VFMAL/VFMSL (FP16 multiply-accumulate long).  SUBTYPE is 0 for
   VFMAL and 1 for VFMSL.  The third operand may be a scalar index.  */
static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding. 'size"
     field (bits[21:20]) has different meaning. For scalar index variant, it's
     used to differentiate add and subtract, otherwise it's with fixed value
     0x2.  */
  int size = -1;

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }


  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* "opcode" from template has included "ubit", so simply pass 0 here. Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup. Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
19831
/* Encode VFMAL (subtype 0 of the shared FP16 long-MAC encoder).  */
static void
do_neon_vfmal (void)
{
  do_neon_fmac_maybe_scalar_long (0);
}
19837
/* Encode VFMSL (subtype 1 of the shared FP16 long-MAC encoder).  */
static void
do_neon_vfmsl (void)
{
  do_neon_fmac_maybe_scalar_long (1);
}
19843
19844 static void
19845 do_neon_dyadic_wide (void)
19846 {
19847 struct neon_type_el et = neon_check_type (3, NS_QQD,
19848 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
19849 neon_mixed_length (et, et.size);
19850 }
19851
/* Encode narrowing dyadic forms (Dd, Qn, Qm).  */
static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  /* The size field encodes the narrowed element width.  */
  neon_mixed_length (et, et.size / 2);
}
19862
/* Saturating long multiply forms: both scalar and register variants
   accept only S16/S32 elements.  */
static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
19868
/* Encode VMULL, dispatching to the long-MAC encoder when the multiplier is
   a scalar.  Polynomial types (P8, and P64 on ARMv8 crypto) get the POLY
   encoding.  */
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Use 32 so neon_logbits produces the 0b10 size encoding.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
19900
/* Encode VEXT.  The parsed immediate is in elements; it is converted to a
   byte offset here and range-checked against the register width.  */
static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Extraction point in bytes.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
19922
/* Encode VREV16/VREV32/VREV64 (element reversal within regions).  */
static void
do_neon_rev (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    rs = neon_select_shape (NS_QQ, NS_NULL);
  else
    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);

  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext) && elsize == 64
      && inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("Warning: 64-bit element size and same destination and source"
		 " operands makes instruction UNPREDICTABLE"));

  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
19954
/* Encode VDUP: replicate either a vector scalar (Dm[x]) or an ARM core
   register into all lanes of a D/Q vector.  */
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar source variant (Neon only).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1),
		  BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Scalar index, shifted into position above the size bits.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* Core-register source variant.  */
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      if (rs == NS_QR)
	{
	  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH))
	    return;
	}
      else
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1),
		    BAD_FPU);

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  if (inst.operands[1].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[1].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);
	}

      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
20024
/* Encode the MVE VMOV variants moving two GPRs to/from two vector lanes
   (cases 16/17 of the VMOV table below).  TOQ is nonzero when moving into
   the Q register.  */
static void
do_mve_mov (int toQ)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  /* Operand positions depend on direction.  */
  unsigned Rt = 0, Rt2 = 1, Q0 = 2, Q1 = 3;
  if (toQ)
    {
      Q0 = 0;
      Q1 = 1;
      Rt = 2;
      Rt2 = 3;
    }

  constraint (inst.operands[Q0].reg != inst.operands[Q1].reg + 2,
	      _("Index one must be [2,3] and index two must be two less than"
		" index one."));
  constraint (!toQ && inst.operands[Rt].reg == inst.operands[Rt2].reg,
	      _("Destination registers may not be the same"));
  constraint (inst.operands[Rt].reg == REG_SP
	      || inst.operands[Rt2].reg == REG_SP,
	      BAD_SP);
  constraint (inst.operands[Rt].reg == REG_PC
	      || inst.operands[Rt2].reg == REG_PC,
	      BAD_PC);

  inst.instruction = 0xec000f00;
  /* NOTE(review): the /32 and %4 below suggest the scalar operand packs
     register and lane index into one value — confirm against the parser.  */
  inst.instruction |= HI1 (inst.operands[Q1].reg / 32) << 23;
  inst.instruction |= !!toQ << 20;
  inst.instruction |= inst.operands[Rt2].reg << 16;
  inst.instruction |= LOW4 (inst.operands[Q1].reg / 32) << 13;
  inst.instruction |= (inst.operands[Q1].reg % 4) << 4;
  inst.instruction |= inst.operands[Rt].reg;
}
20062
20063 static void
20064 do_mve_movn (void)
20065 {
20066 if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20067 return;
20068
20069 if (inst.cond > COND_ALWAYS)
20070 inst.pred_insn_type = INSIDE_VPT_INSN;
20071 else
20072 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
20073
20074 struct neon_type_el et = neon_check_type (2, NS_QQ, N_EQK, N_I16 | N_I32
20075 | N_KEY);
20076
20077 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
20078 inst.instruction |= (neon_logbits (et.size) - 1) << 18;
20079 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
20080 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
20081 inst.instruction |= LOW4 (inst.operands[1].reg);
20082 inst.is_neon = 1;
20083
20084 }
20085
20086 /* VMOV has particularly many variations. It can be one of:
20087 0. VMOV<c><q> <Qd>, <Qm>
20088 1. VMOV<c><q> <Dd>, <Dm>
20089 (Register operations, which are VORR with Rm = Rn.)
20090 2. VMOV<c><q>.<dt> <Qd>, #<imm>
20091 3. VMOV<c><q>.<dt> <Dd>, #<imm>
20092 (Immediate loads.)
20093 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
20094 (ARM register to scalar.)
20095 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
20096 (Two ARM registers to vector.)
20097 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
20098 (Scalar to ARM register.)
20099 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
20100 (Vector to two ARM registers.)
20101 8. VMOV.F32 <Sd>, <Sm>
20102 9. VMOV.F64 <Dd>, <Dm>
20103 (VFP register moves.)
20104 10. VMOV.F32 <Sd>, #imm
20105 11. VMOV.F64 <Dd>, #imm
20106 (VFP float immediate load.)
20107 12. VMOV <Rd>, <Sm>
20108 (VFP single to ARM reg.)
20109 13. VMOV <Sd>, <Rm>
20110 (ARM reg to VFP single.)
20111 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
20112 (Two ARM regs to two VFP singles.)
20113 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
20114 (Two VFP singles to two ARM regs.)
20115 16. VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]>
20116 17. VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2>
20117 18. VMOV<c>.<dt> <Rt>, <Qn[idx]>
20118 19. VMOV<c>.<dt> <Qd[idx]>, <Rt>
20119
20120 These cases can be disambiguated using neon_select_shape, except cases 1/9
20121 and 3/11 which depend on the operand type too.
20122
20123 All the encoded bits are hardcoded by this function.
20124
20125 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
20126 Cases 5, 7 may be used with VFPv2 and above.
20127
20128 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
20129 can specify a type where it doesn't make sense to, and is ignored). */
20130
/* Encode every form of VMOV — see the variant table in the comment above
   for the case numbering used throughout this function.  */
static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRSS, NS_SSRR, NS_RRFF, NS_FFRR,
					  NS_DRR, NS_RRD, NS_QQ, NS_DD, NS_QI,
					  NS_DI, NS_SR, NS_RS, NS_FF, NS_FI,
					  NS_RF, NS_FR, NS_HR, NS_RH, NS_HI,
					  NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;

      /* In MVE we interpret the following instructions as same, so ignoring
	 the following type (float) and size (64) checks.
	 a: VMOV<c><q> <Dd>, <Dm>
	 b: VMOV<c><q>.F64 <Dd>, <Dm>.  */
      if ((et.type == NT_float && et.size == 64)
	  || (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)))
	{
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (!check_simd_pred_availability (false,
					   NEON_CHECK_CC | NEON_CHECK_ARCH))
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR takes its source twice (Rn == Rm).  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (!check_simd_pred_availability (false,
					 NEON_CHECK_CC | NEON_CHECK_ARCH))
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	if (et.size != 32)
	  {
	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		&& vfp_or_neon_is_neon (NEON_CHECK_ARCH) == FAIL)
	      return;
	  }
	else
	  {
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
			&& !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
			_(BAD_FPU));
	  }

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	  {
	    if (inst.operands[1].reg == REG_SP)
	      as_tsktsk (MVE_BAD_SP);
	    else if (inst.operands[1].reg == REG_PC)
	      as_tsktsk (MVE_BAD_PC);
	  }
	/* D registers hold 64 bits, Q registers 128.  */
	unsigned size = inst.operands[0].isscalar == 1 ? 64 : 128;

	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= size / et.size, _("scalar index out of range"));


	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	bcdebits |= (x & ((1 << (3-logsize)) - 1)) << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= ((bcdebits >> 2) & 3) << 21;
	inst.instruction |= (x >> (3-logsize)) << 16;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	if (et.size != 32)
	  {
	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		&& vfp_or_neon_is_neon (NEON_CHECK_CC
					| NEON_CHECK_ARCH) == FAIL)
	      return;
	  }
	else
	  {
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
			&& !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
			_(BAD_FPU));
	  }

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	  {
	    if (inst.operands[0].reg == REG_SP)
	      as_tsktsk (MVE_BAD_SP);
	    else if (inst.operands[0].reg == REG_PC)
	      as_tsktsk (MVE_BAD_PC);
	  }

	/* D registers hold 64 bits, Q registers 128.  */
	unsigned size = inst.operands[1].isscalar == 1 ? 64 : 128;

	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= size / et.size, _("scalar index out of range"));

	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= (x & ((1 << (3-logsize)) - 1)) << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
	inst.instruction |= (x >> (3-logsize)) << 16;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_HI:
    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      if (!inst.operands[1].immisfloat)
	{
	  unsigned new_imm;
	  /* Immediate has to fit in 8 bits so float is enough.  */
	  float imm = (float) inst.operands[1].imm;
	  memcpy (&new_imm, &imm, sizeof (float));
	  /* But the assembly may have been written to provide an integer
	     bit pattern that equates to a float, so check that the
	     conversion has worked.  */
	  if (is_quarter_float (new_imm))
	    {
	      if (is_quarter_float (inst.operands[1].imm))
		as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));

	      inst.operands[1].imm = new_imm;
	      inst.operands[1].immisfloat = 1;
	    }
	}

      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);

	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
	  if (rs == NS_HI)
	    do_scalar_fp16_v82_encode ();
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RH:
    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_RH)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_HR:
    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      /* ARMv8.2 fp16 vmov.f16 instruction.  */
      if (rs == NS_HR)
	do_scalar_fp16_v82_encode ();
      break;

    case NS_RRSS:
      do_mve_mov (0);
      break;
    case NS_SSRR:
      do_mve_mov (1);
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
		  _(BAD_FPU));
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
20463
/* Encode MVE VMOVL.  If the operands do not match the Qd, Qm form, fall
   back to encoding a predicated VMOV instead.  */
static void
do_mve_movl (void)
{
  if (!(inst.operands[0].present && inst.operands[0].isquad
      && inst.operands[1].present && inst.operands[1].isquad
      && !inst.operands[2].present))
    {
      /* Not the VMOVL shape: re-dispatch as VMOV with an LE condition.  */
      inst.instruction = 0;
      inst.cond = 0xb;
      if (thumb_mode)
	set_pred_insn_type (INSIDE_IT_INSN);
      do_neon_mov ();
      return;
    }

  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  if (inst.cond != COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;

  struct neon_type_el et = neon_check_type (2, NS_QQ, N_EQK, N_S8 | N_U8
					    | N_S16 | N_U16 | N_KEY);

  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= (neon_logbits (et.size) + 1) << 19;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;
}
20496
/* Encode V{R}SHR (right shift by immediate).  A shift of zero is
   assembled as a VMOV of the source register.  */
static void
do_neon_rshift_round_imm (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  struct neon_type_el et;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_SU_MVE | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
    }
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  /* The immediate field holds et.size - imm for right shifts.  */
  neon_imm_shift (true, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
20531
/* Encode scalar VMOV.F16 <Hd>, <Hm> (ARMv8.2 FP16).  */
static void
do_neon_movhf (void)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
  constraint (rs != NS_HH, _("invalid suffix"));

  if (inst.cond != COND_ALWAYS)
    {
      if (thumb_mode)
	{
	  /* Thumb: warn but carry on; ARM: hard error below.  */
	  as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
		     " the behaviour is UNPREDICTABLE"));
	}
      else
	{
	  inst.error = BAD_COND;
	  return;
	}
    }

  do_vfp_sp_monadic ();

  /* Force the unconditional (0xf) top nibble used by this encoding.  */
  inst.is_neon = 1;
  inst.instruction |= 0xf0000000;
}
20557
20558 static void
20559 do_neon_movl (void)
20560 {
20561 struct neon_type_el et = neon_check_type (2, NS_QD,
20562 N_EQK | N_DBL, N_SU_32 | N_KEY);
20563 unsigned sizebits = et.size >> 3;
20564 inst.instruction |= sizebits << 19;
20565 neon_two_same (0, et.type == NT_unsigned, -1);
20566 }
20567
20568 static void
20569 do_neon_trn (void)
20570 {
20571 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20572 struct neon_type_el et = neon_check_type (2, rs,
20573 N_EQK, N_8 | N_16 | N_32 | N_KEY);
20574 NEON_ENCODE (INTEGER, inst);
20575 neon_two_same (neon_quad (rs), 1, et.size);
20576 }
20577
/* Encode VZIP/VUZP.  The 32-bit D-register form is assembled as VTRN.32
   instead.  */
static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
20593
/* Encode saturating absolute-value/negate (VQABS/VQNEG family), signed
   8/16/32-bit elements.  MVE restricts the shape to Q registers.  */

static void
do_neon_sat_abs_neg (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  enum neon_shape rs;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    rs = neon_select_shape (NS_QQ, NS_NULL);
  else
    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
20609
/* Encode pairwise-long two-register instructions, signed or unsigned
   8/16/32-bit elements.  */

static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}
20619
/* Encode reciprocal / reciprocal-sqrt estimate instructions.  The type
   may be f16/f32 float or u32 integer; bit 8 distinguishes float.  */

static void
do_neon_recip_est (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (neon_quad (rs), 1, et.size);
}
20629
/* Encode VCLS (count leading sign bits), signed 8/16/32-bit elements.
   MVE restricts the shape to Q registers.  */

static void
do_neon_cls (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    rs = neon_select_shape (NS_QQ, NS_NULL);
  else
    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
20646
/* Encode VCLZ (count leading zeros), any-signedness 8/16/32-bit
   integer elements.  MVE restricts the shape to Q registers.  */

static void
do_neon_clz (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  enum neon_shape rs;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    rs = neon_select_shape (NS_QQ, NS_NULL);
  else
    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
20663
/* Encode VCNT (population count); only 8-bit integer elements exist.  */

static void
do_neon_cnt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_INT, N_8 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
20672
/* Encode VSWP (vector swap).  Untyped, so no size field is encoded
   (size argument -1).  */

static void
do_neon_swp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  if (rs == NS_NULL)
    return;
  neon_two_same (neon_quad (rs), 1, -1);
}
20681
/* Encode VTBL/VTBX (table lookup): Dd, {1-4 consecutive D regs}, Dm.
   List length minus one is placed in bits [9:8].  */

static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  /* Dd -> [15:12]+[22], first table register -> [19:16]+[7],
     index register Dm -> [3:0]+[5].  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
20705
/* Encode VLDM/VSTM (and the decrement-before VLDMDB/VSTMDB forms).
   Requires at least single-precision VFP (FPv1xD) or MVE.  */

static void
do_neon_ldm_stm (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each double register transfers two words.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      /* Single-precision register lists go through the VFP path.  */
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  /* Base register, writeback flag, and first list register.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
20738
/* Shared operand validation for VPUSH/VPOP: needs FPv1xD, and the
   register list must hold 1-32 single or 1-16 double registers.  */

static void
do_vfp_nsyn_push_pop_check (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd), _(BAD_FPU));

  if (inst.operands[1].issingle)
    {
      constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 32,
		  _("register list must contain at least 1 and at most 32 registers"));
    }
  else
    {
      constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
		  _("register list must contain at least 1 and at most 16 registers"));
    }
}
20755
20756 static void
20757 do_vfp_nsyn_pop (void)
20758 {
20759 nsyn_insert_sp ();
20760
20761 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20762 return do_vfp_nsyn_opcode ("vldm");
20763
20764 do_vfp_nsyn_push_pop_check ();
20765
20766 if (inst.operands[1].issingle)
20767 do_vfp_nsyn_opcode ("fldmias");
20768 else
20769 do_vfp_nsyn_opcode ("fldmiad");
20770 }
20771
20772 static void
20773 do_vfp_nsyn_push (void)
20774 {
20775 nsyn_insert_sp ();
20776
20777 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20778 return do_vfp_nsyn_opcode ("vstmdb");
20779
20780 do_vfp_nsyn_push_pop_check ();
20781
20782 if (inst.operands[1].issingle)
20783 do_vfp_nsyn_opcode ("fstmdbs");
20784 else
20785 do_vfp_nsyn_opcode ("fstmdbd");
20786 }
20787
/* Encode VLDR/VSTR by forwarding to the FLDS/FSTS (single) or
   FLDD/FSTD (double) VFP encoders; bit 20 of the template selects
   load vs store.  */

static void
do_neon_ldr_str (void)
{
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
20824
/* Encode the Thumb VLDR/VSTR (system register) form.  Operand 0 is the
   system register number, operand 1 the memory operand.  */

static void
do_t_vldr_vstr_sysreg (void)
{
  int fp_vldr_bitno = 20, sysreg_vldr_bitno = 20;
  bool is_vldr = ((inst.instruction & (1 << fp_vldr_bitno)) != 0);

  /* Use of PC is UNPREDICTABLE.  */
  if (inst.operands[1].reg == REG_PC)
    inst.error = _("Use of PC here is UNPREDICTABLE");

  /* Only immediate-offset addressing is supported by this form.  */
  if (inst.operands[1].immisreg)
    inst.error = _("instruction does not accept register index");

  if (!inst.operands[1].isreg)
    inst.error = _("instruction does not accept PC-relative addressing");

  /* The (word-scaled) offset field is 7 bits wide.  */
  if (abs (inst.operands[1].imm) >= (1 << 7))
    inst.error = _("immediate value out of range");

  inst.instruction = 0xec000f80;
  if (is_vldr)
    inst.instruction |= 1 << sysreg_vldr_bitno;
  encode_arm_cp_address (1, true, false, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM);
  /* System register number: low three bits at [15:13], bit 3 at 22.  */
  inst.instruction |= (inst.operands[0].imm & 0x7) << 13;
  inst.instruction |= (inst.operands[0].imm & 0x8) << 19;
}
20851
20852 static void
20853 do_vldr_vstr (void)
20854 {
20855 bool sysreg_op = !inst.operands[0].isreg;
20856
20857 /* VLDR/VSTR (System Register). */
20858 if (sysreg_op)
20859 {
20860 if (!mark_feature_used (&arm_ext_v8_1m_main))
20861 as_bad (_("Instruction not permitted on this architecture"));
20862
20863 do_t_vldr_vstr_sysreg ();
20864 }
20865 /* VLDR/VSTR. */
20866 else
20867 {
20868 if (!mark_feature_used (&fpu_vfp_ext_v1xd)
20869 && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20870 as_bad (_("Instruction not permitted on this architecture"));
20871 do_neon_ldr_str ();
20872 }
20873 }
20874
20875 /* "interleave" version also handles non-interleaving register VLD1/VST1
20876 instructions. */
20877
static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
      -1, -1, 0x8, 0x9, -1, -1, 0x3, -1,  /* VLD2 / VST2.  */
      -1, -1, -1, -1, 0x4, 0x5, -1, -1,   /* VLD3 / VST3.  */
      -1, -1, -1, -1, -1, -1, 0x0, 0x1    /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Alignment hint (<align> in bytes*8) is encoded in bits [5:4];
     the legal values depend on the list length.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      BAD_EL_TYPE);

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
20943
20944 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
20945 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
20946 otherwise. The variable arguments are a list of pairs of legal (size, align)
20947 values, terminated with -1. */
20948
20949 static int
20950 neon_alignment_bit (int size, int align, int *do_alignment, ...)
20951 {
20952 va_list ap;
20953 int result = FAIL, thissize, thisalign;
20954
20955 if (!inst.operands[1].immisalign)
20956 {
20957 *do_alignment = 0;
20958 return SUCCESS;
20959 }
20960
20961 va_start (ap, do_alignment);
20962
20963 do
20964 {
20965 thissize = va_arg (ap, int);
20966 if (thissize == -1)
20967 break;
20968 thisalign = va_arg (ap, int);
20969
20970 if (size == thissize && align == thisalign)
20971 result = SUCCESS;
20972 }
20973 while (result != SUCCESS);
20974
20975 va_end (ap);
20976
20977 if (result == SUCCESS)
20978 *do_alignment = 1;
20979 else
20980 first_error (_("unsupported alignment for instruction"));
20981
20982 return result;
20983 }
20984
/* Encode single-lane VLD<n>/VST<n> forms.  <n> minus one is taken from
   bits [9:8] of the initial template.  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Legal alignment values and their encodings differ per <n>.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
				       16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
21069
21070 /* Encode single n-element structure to all lanes VLD<n> instructions. */
21071
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* <n> minus one lives in bits [9:8] of the template.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with :128 alignment use the 0x3 size code.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_alignment << 4;
}
21144
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
21147
21148 static void
21149 do_neon_ldx_stx (void)
21150 {
21151 if (inst.operands[1].isreg)
21152 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
21153
21154 switch (NEON_LANE (inst.operands[0].imm))
21155 {
21156 case NEON_INTERLEAVE_LANES:
21157 NEON_ENCODE (INTERLV, inst);
21158 do_neon_ld_st_interleave ();
21159 break;
21160
21161 case NEON_ALL_LANES:
21162 NEON_ENCODE (DUP, inst);
21163 if (inst.instruction == N_INV)
21164 {
21165 first_error ("only loads support such operands");
21166 break;
21167 }
21168 do_neon_ld_dup ();
21169 break;
21170
21171 default:
21172 NEON_ENCODE (LANE, inst);
21173 do_neon_ld_st_lane ();
21174 }
21175
21176 /* L bit comes from bit mask. */
21177 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
21178 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
21179 inst.instruction |= inst.operands[1].reg << 16;
21180
21181 if (inst.operands[1].postind)
21182 {
21183 int postreg = inst.operands[1].imm & 0xf;
21184 constraint (!inst.operands[1].immisreg,
21185 _("post-index must be a register"));
21186 constraint (postreg == 0xd || postreg == 0xf,
21187 _("bad register for post-index"));
21188 inst.instruction |= postreg;
21189 }
21190 else
21191 {
21192 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
21193 constraint (inst.relocs[0].exp.X_op != O_constant
21194 || inst.relocs[0].exp.X_add_number != 0,
21195 BAD_ADDR_MODE);
21196
21197 if (inst.operands[1].writeback)
21198 {
21199 inst.instruction |= 0xd;
21200 }
21201 else
21202 inst.instruction |= 0xf;
21203 }
21204
21205 if (thumb_mode)
21206 inst.instruction |= 0xf9000000;
21207 else
21208 inst.instruction |= 0xf4000000;
21209 }
21210
21211 /* FP v8. */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  /* Bit 8 selects the double-precision variant.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  /* FP v8 scalar instructions live in the unconditional (0xF) space.  */
  inst.instruction |= 0xf0000000;
}
21239
/* Encode VSEL* (FP v8 conditional select); only the three-operand VFP
   shape is valid.  */

static void
do_vsel (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
    first_error (_("invalid instruction shape"));
}
21248
/* Encode VMAXNM/VMINNM: try the scalar VFP form first, otherwise fall
   back to the Neon/MVE vector form on f16/f32 elements.  */

static void
do_vmaxnm (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  if (!check_simd_pred_availability (true, NEON_CHECK_CC | NEON_CHECK_ARCH8))
    return;

  neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
}
21263
/* Common encoder for the VRINT* family.  MODE selects the rounding
   behaviour.  Scalar (VFP) shapes are tried first; failing the type
   check, the vector (Neon) encoding is used.  */

static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_pred_insn_type (OUTSIDE_PRED_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* Rounding-mode bits; the a/n/p/m modes use the unconditional
	 (0xF) encoding space.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      if (!check_simd_pred_availability (true,
					 NEON_CHECK_CC | NEON_CHECK_ARCH8))
	return;

      NEON_ENCODE (FLOAT, inst);

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* Rounding mode in bits [9:7] of the Neon encoding.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
21355
/* VRINT* entry points: each selects the rounding mode implied by its
   mnemonic suffix and defers to do_vrint_1.  */

static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
21397
/* Validate and encode the scalar operand (Dm[x]) of VCMLA.  For 16-bit
   elements the lane (0-1) is folded into bit 4 of the register number,
   and only D0-D15 are allowed; for 32-bit elements only lane 0 is
   valid.  On an out-of-range scalar an error is reported and 0 is
   returned.  */

static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned lane = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16)
    {
      if (lane < 2 && reg < 16)
	return reg | (lane << 4);
    }
  else if (elsize == 32)
    {
      if (lane == 0)
	return reg;
    }

  first_error (_("scalar out of range"));
  return 0;
}
21412
/* Encode VCMLA (complex multiply-accumulate with rotation).  Operand 2
   may be a scalar (indexed) or a full vector; the rotation (0/90/180/270)
   comes from the trailing immediate.  */

static void
do_vcmla (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext)
	      && (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8)
		  || !mark_feature_used (&arm_ext_v8_3)), (BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  /* Encode the rotation as 0-3.  */
  rot /= 90;

  if (!check_simd_pred_availability (true,
				     NEON_CHECK_ARCH8 | NEON_CHECK_CC))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed form; not available under MVE.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	first_error (_("invalid instruction shape"));
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      enum neon_shape rs;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	rs = neon_select_shape (NS_QQQI, NS_NULL);
      else
	rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);

      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      /* MVE 32-bit VCMLA with destination overlapping a source is
	 UNPREDICTABLE.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext) && size == 32
	  && (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg))
	as_tsktsk (BAD_MVE_SRCDEST);

      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
21472
/* Encode VCADD (complex add with rotation 90 or 270).  Neon accepts
   f16/f32; MVE additionally accepts integer element types via a
   different encoding.  */

static void
do_vcadd (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	      && (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8)
		  || !mark_feature_used (&arm_ext_v8_3)), (BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));

  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs;
  struct neon_type_el et;
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16 | N_F32);
    }
  else
    {
      rs = neon_select_shape (NS_QQQI, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16 | N_F32 | N_I8
			    | N_I16 | N_I32);
      if (et.size == 32 && inst.operands[0].reg == inst.operands[2].reg)
	as_tsktsk (_("Warning: 32-bit element size and same first and third "
		     "operand makes instruction UNPREDICTABLE"));
    }

  if (et.type == NT_invtype)
    return;

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH8 | NEON_CHECK_CC))
    return;

  if (et.type == NT_float)
    {
      /* Float form shares the Neon three-same skeleton.  */
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc800800;
      inst.instruction |= (rot == 270) << 24;
      inst.instruction |= (et.size == 32) << 20;
    }
  else
    {
      /* Integer form is MVE-only and encoded from scratch.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      inst.instruction = 0xfe000f00;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= neon_logbits (et.size) << 20;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= (rot == 270) << 12;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.is_neon = 1;
    }
}
21531
21532 /* Dot Product instructions encoding support. */
21533
/* Encode a dot-product instruction in either the three-same or the
   scalar-indexed form.  UNSIGNED_P selects the signedness ('U') bit.  */

static void
do_neon_dotproduct (int unsigned_p)
{
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
21588
21589 /* Dot Product instructions for signed integer. */
21590
static void
do_neon_dotproduct_s (void)
{
  /* Signed dot product.  Plain call rather than `return expr;' -- a
     return statement with an expression is not valid ISO C in a
     function returning void (C99 6.8.6.4).  */
  do_neon_dotproduct (0);
}
21596
21597 /* Dot Product instructions for unsigned integer. */
21598
static void
do_neon_dotproduct_u (void)
{
  /* Unsigned dot product.  Plain call rather than `return expr;' -- a
     return statement with an expression is not valid ISO C in a
     function returning void (C99 6.8.6.4).  */
  do_neon_dotproduct (1);
}
21604
/* Encode VUSDOT in either the scalar-indexed form (bit 25 set, index in
   bit 5) or the vector form (bit 21 set).  */

static void
do_vusdot (void)
{
  enum neon_shape rs;
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);

      inst.instruction |= (1 << 25);
      /* GAS packs the lane index into the low nibble of the scalar
	 operand; the register number is in the upper bits.  */
      int idx = inst.operands[2].reg & 0xf;
      constraint ((idx != 1 && idx != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      inst.instruction |= (idx << 5);
    }
  else
    {
      inst.instruction |= (1 << 21);
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);
      neon_three_args (rs == NS_QQQ);
    }
}
21632
21633 static void
21634 do_vsudot (void)
21635 {
21636 enum neon_shape rs;
21637 set_pred_insn_type (OUTSIDE_PRED_INSN);
21638 if (inst.operands[2].isscalar)
21639 {
21640 rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
21641 neon_check_type (3, rs, N_EQK, N_EQK, N_U8 | N_KEY);
21642
21643 inst.instruction |= (1 << 25);
21644 int idx = inst.operands[2].reg & 0xf;
21645 constraint ((idx != 1 && idx != 0), _("index must be 0 or 1"));
21646 inst.operands[2].reg >>= 4;
21647 constraint (!(inst.operands[2].reg < 16),
21648 _("indexed register must be less than 16"));
21649 neon_three_args (rs == NS_QQS);
21650 inst.instruction |= (idx << 5);
21651 }
21652 }
21653
21654 static void
21655 do_vsmmla (void)
21656 {
21657 enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
21658 neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);
21659
21660 set_pred_insn_type (OUTSIDE_PRED_INSN);
21661
21662 neon_three_args (1);
21663
21664 }
21665
21666 static void
21667 do_vummla (void)
21668 {
21669 enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
21670 neon_check_type (3, rs, N_EQK, N_EQK, N_U8 | N_KEY);
21671
21672 set_pred_insn_type (OUTSIDE_PRED_INSN);
21673
21674 neon_three_args (1);
21675
21676 }
21677
21678 static void
21679 check_cde_operand (size_t idx, int is_dual)
21680 {
21681 unsigned Rx = inst.operands[idx].reg;
21682 bool isvec = inst.operands[idx].isvec;
21683 if (is_dual == 0 && thumb_mode)
21684 constraint (
21685 !((Rx <= 14 && Rx != 13) || (Rx == REG_PC && isvec)),
21686 _("Register must be r0-r14 except r13, or APSR_nzcv."));
21687 else
21688 constraint ( !((Rx <= 10 && Rx % 2 == 0 )),
21689 _("Register must be an even register between r0-r10."));
21690 }
21691
21692 static bool
21693 cde_coproc_enabled (unsigned coproc)
21694 {
21695 switch (coproc)
21696 {
21697 case 0: return mark_feature_used (&arm_ext_cde0);
21698 case 1: return mark_feature_used (&arm_ext_cde1);
21699 case 2: return mark_feature_used (&arm_ext_cde2);
21700 case 3: return mark_feature_used (&arm_ext_cde3);
21701 case 4: return mark_feature_used (&arm_ext_cde4);
21702 case 5: return mark_feature_used (&arm_ext_cde5);
21703 case 6: return mark_feature_used (&arm_ext_cde6);
21704 case 7: return mark_feature_used (&arm_ext_cde7);
21705 default: return false;
21706 }
21707 }
21708
/* Bit position of the coprocessor number in CDE encodings.  */
#define cde_coproc_pos 8
/* Validate and encode the coprocessor operand (operand 0) of a CDE
   instruction: it must be 0-7 and the corresponding CDE coprocessor
   must be enabled.  */
static void
cde_handle_coproc (void)
{
  unsigned coproc = inst.operands[0].reg;
  constraint (coproc > 7, _("CDE Coprocessor must be in range 0-7"));
  constraint (!(cde_coproc_enabled (coproc)), BAD_CDE_COPROC);
  inst.instruction |= coproc << cde_coproc_pos;
}
#undef cde_coproc_pos
21719
21720 static void
21721 cxn_handle_predication (bool is_accum)
21722 {
21723 if (is_accum && conditional_insn ())
21724 set_pred_insn_type (INSIDE_IT_INSN);
21725 else if (conditional_insn ())
21726 /* conditional_insn essentially checks for a suffix, not whether the
21727 instruction is inside an IT block or not.
21728 The non-accumulator versions should not have suffixes. */
21729 inst.error = BAD_SYNTAX;
21730 else
21731 set_pred_insn_type (OUTSIDE_PRED_INSN);
21732 }
21733
/* Encode a CX1-family custom instruction (cx1, cx1a, cx1d, cx1da).
   IS_DUAL is 1 for the dual-destination (cx1d*) forms, 0 otherwise;
   IS_ACCUM is true for the accumulating (cx1*a) forms, which may be
   conditional.  Requires the CDE extension.  */

static void
do_custom_instruction_1 (int is_dual, bool is_accum)
{

  constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));

  unsigned imm, Rd;

  Rd = inst.operands[1].reg;
  check_cde_operand (1, is_dual);

  if (is_dual == 1)
    {
      constraint (inst.operands[2].reg != Rd + 1,
                  _("cx1d requires consecutive destination registers."));
      imm = inst.operands[3].imm;
    }
  else if (is_dual == 0)
    imm = inst.operands[2].imm;
  else
    abort ();

  inst.instruction |= Rd << 12;
  /* Scatter the 13-bit immediate (0x1fff) across the encoding's three
     immediate fields.  */
  inst.instruction |= (imm & 0x1F80) << 9;
  inst.instruction |= (imm & 0x0040) << 1;
  inst.instruction |= (imm & 0x003f);

  cde_handle_coproc ();
  cxn_handle_predication (is_accum);
}
21764
/* Encode a CX2-family custom instruction (cx2, cx2a, cx2d, cx2da):
   destination register(s), one source register Rn, and an immediate.
   IS_DUAL is 1 for the dual-destination (cx2d*) forms; IS_ACCUM is true
   for the accumulating (cx2*a) forms.  Requires the CDE extension.  */

static void
do_custom_instruction_2 (int is_dual, bool is_accum)
{

  constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));

  unsigned imm, Rd, Rn;

  Rd = inst.operands[1].reg;

  if (is_dual == 1)
    {
      constraint (inst.operands[2].reg != Rd + 1,
                  _("cx2d requires consecutive destination registers."));
      imm = inst.operands[4].imm;
      Rn = inst.operands[3].reg;
    }
  else if (is_dual == 0)
    {
      imm = inst.operands[3].imm;
      Rn = inst.operands[2].reg;
    }
  else
    abort ();

  /* Rn is checked with the single-register rules even in the dual forms,
     hence the explicit 0 for its is_dual argument.  */
  check_cde_operand (2 + is_dual, /* is_dual = */0);
  check_cde_operand (1, is_dual);

  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;

  /* Scatter the 10-bit immediate (0x3ff) across the encoding's three
     immediate fields.  */
  inst.instruction |= (imm & 0x0380) << 13;
  inst.instruction |= (imm & 0x0040) << 1;
  inst.instruction |= (imm & 0x003f);

  cde_handle_coproc ();
  cxn_handle_predication (is_accum);
}
21803
/* Encode a CX3-family custom instruction (cx3, cx3a, cx3d, cx3da):
   destination register(s), two source registers Rn/Rm, and an immediate.
   IS_DUAL is 1 for the dual-destination (cx3d*) forms; IS_ACCUM is true
   for the accumulating (cx3*a) forms.  Requires the CDE extension.  */

static void
do_custom_instruction_3 (int is_dual, bool is_accum)
{

  constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));

  unsigned imm, Rd, Rn, Rm;

  Rd = inst.operands[1].reg;

  if (is_dual == 1)
    {
      constraint (inst.operands[2].reg != Rd + 1,
                  _("cx3d requires consecutive destination registers."));
      imm = inst.operands[5].imm;
      Rn = inst.operands[3].reg;
      Rm = inst.operands[4].reg;
    }
  else if (is_dual == 0)
    {
      imm = inst.operands[4].imm;
      Rn = inst.operands[2].reg;
      Rm = inst.operands[3].reg;
    }
  else
    abort ();

  check_cde_operand (1, is_dual);
  /* Rn and Rm use the single-register rules even in the dual forms.  */
  check_cde_operand (2 + is_dual, /* is_dual = */0);
  check_cde_operand (3 + is_dual, /* is_dual = */0);

  /* Note: unlike CX1/CX2, Rd sits in the low bits here and Rm at bit 12.  */
  inst.instruction |= Rd;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm << 12;

  /* Scatter the 6-bit immediate (0x3f) across the encoding's three
     immediate fields.  */
  inst.instruction |= (imm & 0x0038) << 17;
  inst.instruction |= (imm & 0x0004) << 5;
  inst.instruction |= (imm & 0x0003) << 4;

  cde_handle_coproc ();
  cxn_handle_predication (is_accum);
}
21846
/* CX1: single destination, no accumulate.  */
static void
do_cx1 (void)
{
  do_custom_instruction_1 (/* is_dual = */ 0, /* is_accum = */ 0);
}
21852
/* CX1A: single destination, accumulating.  */
static void
do_cx1a (void)
{
  do_custom_instruction_1 (/* is_dual = */ 0, /* is_accum = */ 1);
}
21858
/* CX1D: dual destination, no accumulate.  */
static void
do_cx1d (void)
{
  do_custom_instruction_1 (/* is_dual = */ 1, /* is_accum = */ 0);
}
21864
/* CX1DA: dual destination, accumulating.  */
static void
do_cx1da (void)
{
  do_custom_instruction_1 (/* is_dual = */ 1, /* is_accum = */ 1);
}
21870
/* CX2: single destination, no accumulate.  */
static void
do_cx2 (void)
{
  do_custom_instruction_2 (/* is_dual = */ 0, /* is_accum = */ 0);
}
21876
/* CX2A: single destination, accumulating.  */
static void
do_cx2a (void)
{
  do_custom_instruction_2 (/* is_dual = */ 0, /* is_accum = */ 1);
}
21882
/* CX2D: dual destination, no accumulate.  */
static void
do_cx2d (void)
{
  do_custom_instruction_2 (/* is_dual = */ 1, /* is_accum = */ 0);
}
21888
/* CX2DA: dual destination, accumulating.  */
static void
do_cx2da (void)
{
  do_custom_instruction_2 (/* is_dual = */ 1, /* is_accum = */ 1);
}
21894
/* CX3: single destination, no accumulate.  */
static void
do_cx3 (void)
{
  do_custom_instruction_3 (/* is_dual = */ 0, /* is_accum = */ 0);
}
21900
/* CX3A: single destination, accumulating.  */
static void
do_cx3a (void)
{
  do_custom_instruction_3 (/* is_dual = */ 0, /* is_accum = */ 1);
}
21906
/* CX3D: dual destination, no accumulate.  */
static void
do_cx3d (void)
{
  do_custom_instruction_3 (/* is_dual = */ 1, /* is_accum = */ 0);
}
21912
/* CX3DA: dual destination, accumulating.  */
static void
do_cx3da (void)
{
  do_custom_instruction_3 (/* is_dual = */ 1, /* is_accum = */ 1);
}
21918
/* Encode vector register REGNUM into the destination fields: the upper
   four bits go at bit 12, the low bit at bit 22.  */
static void
vcx_assign_vec_d (unsigned regnum)
{
  inst.instruction |= HI4 (regnum) << 12;
  inst.instruction |= LOW1 (regnum) << 22;
}
21925
/* Encode vector register REGNUM into the M fields: the upper four bits
   go at bit 0, the low bit at bit 5.  */
static void
vcx_assign_vec_m (unsigned regnum)
{
  inst.instruction |= HI4 (regnum);
  inst.instruction |= LOW1 (regnum) << 5;
}
21932
/* Encode vector register REGNUM into the N fields: the upper four bits
   go at bit 16, the low bit at bit 7.  */
static void
vcx_assign_vec_n (unsigned regnum)
{
  inst.instruction |= HI4 (regnum) << 16;
  inst.instruction |= LOW1 (regnum) << 7;
}
21939
/* The register bank a VCX instruction operates on.  */
enum vcx_reg_type {
  q_reg,   /* MVE Q registers.  */
  d_reg,   /* Double-precision D registers.  */
  s_reg    /* Single-precision S registers.  */
};
21945
21946 static enum vcx_reg_type
21947 vcx_get_reg_type (enum neon_shape ns)
21948 {
21949 gas_assert (ns == NS_PQI
21950 || ns == NS_PDI
21951 || ns == NS_PFI
21952 || ns == NS_PQQI
21953 || ns == NS_PDDI
21954 || ns == NS_PFFI
21955 || ns == NS_PQQQI
21956 || ns == NS_PDDDI
21957 || ns == NS_PFFFI);
21958 if (ns == NS_PQI || ns == NS_PQQI || ns == NS_PQQQI)
21959 return q_reg;
21960 if (ns == NS_PDI || ns == NS_PDDI || ns == NS_PDDDI)
21961 return d_reg;
21962 return s_reg;
21963 }
21964
21965 #define vcx_size_pos 24
21966 #define vcx_vec_pos 6
21967 static unsigned
21968 vcx_handle_shape (enum vcx_reg_type reg_type)
21969 {
21970 unsigned mult = 2;
21971 if (reg_type == q_reg)
21972 inst.instruction |= 1 << vcx_vec_pos;
21973 else if (reg_type == d_reg)
21974 inst.instruction |= 1 << vcx_size_pos;
21975 else
21976 mult = 1;
21977 /* NOTE:
21978 The documentation says that the Q registers are encoded as 2*N in the D:Vd
21979 bits (or equivalent for N and M registers).
21980 Similarly the D registers are encoded as N in D:Vd bits.
21981 While the S registers are encoded as N in the Vd:D bits.
21982
21983 Taking into account the maximum values of these registers we can see a
21984 nicer pattern for calculation:
21985 Q -> 7, D -> 15, S -> 31
21986
21987 If we say that everything is encoded in the Vd:D bits, then we can say
21988 that Q is encoded as 4*N, and D is encoded as 2*N.
21989 This way the bits will end up the same, and calculation is simpler.
21990 (calculation is now:
21991 1. Multiply by a number determined by the register letter.
21992 2. Encode resulting number in Vd:D bits.)
21993
21994 This is made a little more complicated by automatic handling of 'Q'
21995 registers elsewhere, which means the register number is already 2*N where
21996 N is the number the user wrote after the register letter.
21997 */
21998 return mult;
21999 }
22000 #undef vcx_vec_pos
22001 #undef vcx_size_pos
22002
22003 static void
22004 vcx_ensure_register_in_range (unsigned R, enum vcx_reg_type reg_type)
22005 {
22006 if (reg_type == q_reg)
22007 {
22008 gas_assert (R % 2 == 0);
22009 constraint (R >= 16, _("'q' register must be in range 0-7"));
22010 }
22011 else if (reg_type == d_reg)
22012 constraint (R >= 16, _("'d' register must be in range 0-15"));
22013 else
22014 constraint (R >= 32, _("'s' register must be in range 0-31"));
22015 }
22016
/* Dispatch table for encoding VCX register operands: index 0 writes the
   destination (D) fields, 1 the M fields, 2 the N fields.  */
static void (*vcx_assign_vec[3]) (unsigned) = {
  vcx_assign_vec_d,
  vcx_assign_vec_m,
  vcx_assign_vec_n
};
22022
22023 static void
22024 vcx_handle_register_arguments (unsigned num_registers,
22025 enum vcx_reg_type reg_type)
22026 {
22027 unsigned R, i;
22028 unsigned reg_mult = vcx_handle_shape (reg_type);
22029 for (i = 0; i < num_registers; i++)
22030 {
22031 R = inst.operands[i+1].reg;
22032 vcx_ensure_register_in_range (R, reg_type);
22033 if (num_registers == 3 && i > 0)
22034 {
22035 if (i == 2)
22036 vcx_assign_vec[1] (R * reg_mult);
22037 else
22038 vcx_assign_vec[2] (R * reg_mult);
22039 continue;
22040 }
22041 vcx_assign_vec[i](R * reg_mult);
22042 }
22043 }
22044
22045 static void
22046 vcx_handle_insn_block (enum vcx_reg_type reg_type)
22047 {
22048 if (reg_type == q_reg)
22049 if (inst.cond > COND_ALWAYS)
22050 inst.pred_insn_type = INSIDE_VPT_INSN;
22051 else
22052 inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
22053 else if (inst.cond == COND_ALWAYS)
22054 inst.pred_insn_type = OUTSIDE_PRED_INSN;
22055 else
22056 inst.error = BAD_NOT_IT;
22057 }
22058
/* Checks and encoding steps common to all VCX instructions: the CDE
   extension must be present, the coprocessor operand valid, the NUM_ARGS
   vector-register operands (per matched shape RS) in range, and the
   predication rules for the register bank satisfied.  Q-register forms
   additionally require MVE; S/D forms require MVE or Armv8-M FP.  */
static void
vcx_handle_common_checks (unsigned num_args, enum neon_shape rs)
{
  constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));
  cde_handle_coproc ();
  enum vcx_reg_type reg_type = vcx_get_reg_type (rs);
  vcx_handle_register_arguments (num_args, reg_type);
  vcx_handle_insn_block (reg_type);
  if (reg_type == q_reg)
    constraint (!mark_feature_used (&mve_ext),
		_("vcx instructions with Q registers require MVE"));
  else
    constraint (!(ARM_FSET_CPU_SUBSET (armv8m_fp, cpu_variant)
		  && mark_feature_used (&armv8m_fp))
		&& !mark_feature_used (&mve_ext),
		_("vcx instructions with S or D registers require either MVE"
		  " or Armv8-M floating point extension."));
}
22077
/* Encode VCX1: one vector register and an immediate.  The immediate is
   12 bits in the Q-register form and 11 bits otherwise; it is scattered
   across four fields of the encoding.  */
static void
do_vcx1 (void)
{
  enum neon_shape rs = neon_select_shape (NS_PQI, NS_PDI, NS_PFI, NS_NULL);
  vcx_handle_common_checks (1, rs);

  unsigned imm = inst.operands[2].imm;
  inst.instruction |= (imm & 0x03f);
  inst.instruction |= (imm & 0x040) << 1;
  inst.instruction |= (imm & 0x780) << 9;
  /* Bit 11 of the immediate is only available in the Q form.  */
  if (rs != NS_PQI)
    constraint (imm >= 2048,
		_("vcx1 with S or D registers takes immediate within 0-2047"));
  inst.instruction |= (imm & 0x800) << 13;
}
22093
/* Encode VCX2: two vector registers and an immediate.  The immediate is
   7 bits in the Q-register form and 6 bits otherwise.  */
static void
do_vcx2 (void)
{
  enum neon_shape rs = neon_select_shape (NS_PQQI, NS_PDDI, NS_PFFI, NS_NULL);
  vcx_handle_common_checks (2, rs);

  unsigned imm = inst.operands[3].imm;
  inst.instruction |= (imm & 0x01) << 4;
  inst.instruction |= (imm & 0x02) << 6;
  inst.instruction |= (imm & 0x3c) << 14;
  /* Bit 6 of the immediate is only available in the Q form.  */
  if (rs != NS_PQQI)
    constraint (imm >= 64,
		_("vcx2 with S or D registers takes immediate within 0-63"));
  inst.instruction |= (imm & 0x40) << 18;
}
22109
22110 static void
22111 do_vcx3 (void)
22112 {
22113 enum neon_shape rs = neon_select_shape (NS_PQQQI, NS_PDDDI, NS_PFFFI, NS_NULL);
22114 vcx_handle_common_checks (3, rs);
22115
22116 unsigned imm = inst.operands[4].imm;
22117 inst.instruction |= (imm & 0x1) << 4;
22118 inst.instruction |= (imm & 0x6) << 19;
22119 if (rs != NS_PQQQI)
22120 constraint (imm >= 8,
22121 _("vcx2 with S or D registers takes immediate within 0-7"));
22122 inst.instruction |= (imm & 0x8) << 21;
22123 }
22124
/* Crypto v1 instructions.  */

/* Encode a two-operand crypto instruction with Q registers.  ELTTYPE is
   the required element type of the source operand; OP selects the
   particular instruction (placed at bit 6), or -1 when the opcode needs
   no op field.  The leading byte differs between Thumb and ARM.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  /* neon_check_type may have set a delayed error; clear it since we
     returned above on actual failure.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
22150
/* Encode a three-operand crypto instruction with Q registers and 32-bit
   elements.  U and OP select the particular instruction; the size field
   passed to neon_three_same is 8 << OP.  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  /* Clear any delayed error left by neon_check_type.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
22165
/* AESE: crypto 2-op, 8-bit elements, op field 0.  */
static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}
22171
/* AESD: crypto 2-op, 8-bit elements, op field 1.  */
static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}
22177
/* AESMC: crypto 2-op, 8-bit elements, op field 2.  */
static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}
22183
/* AESIMC: crypto 2-op, 8-bit elements, op field 3.  */
static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}
22189
/* SHA1C: crypto 3-op, u=0, op=0.  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}
22195
/* SHA1P: crypto 3-op, u=0, op=1.  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}
22201
/* SHA1M: crypto 3-op, u=0, op=2.  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}
22207
/* SHA1SU0: crypto 3-op, u=0, op=3.  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}
22213
/* SHA256H: crypto 3-op, u=1, op=0.  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}
22219
/* SHA256H2: crypto 3-op, u=1, op=1.  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}
22225
/* SHA256SU1: crypto 3-op, u=1, op=2.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}
22231
/* SHA1H: crypto 2-op, 32-bit elements, no op field.  */
static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}
22237
/* SHA1SU1: crypto 2-op, 32-bit elements, op field 0.  */
static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}
22243
/* SHA256SU0: crypto 2-op, 32-bit elements, op field 1.  */
static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
22249
/* Encode a CRC32 instruction.  POLY selects the polynomial variant
   (0 for crc32*, 1 for crc32c*); SZ is the size field (0, 1 or 2 for the
   byte/halfword/word forms, per the callers below).  Field positions
   differ between the ARM and Thumb encodings.  Use of r15 is
   unpredictable and warned about.  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
}
22267
/* CRC32B: poly 0, byte.  */
static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}
22273
/* CRC32H: poly 0, halfword.  */
static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}
22279
/* CRC32W: poly 0, word.  */
static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}
22285
/* CRC32CB: poly 1, byte.  */
static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}
22291
/* CRC32CH: poly 1, halfword.  */
static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}
22297
/* CRC32CW: poly 1, word.  */
static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
22303
/* VJCVT: F64 source to S32 destination.  Requires Armv8 VFP; encoded via
   the SP/DP convert path with the usual cond-or-thumb fixup.  */
static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
22313
22314 static void
22315 do_vdot (void)
22316 {
22317 enum neon_shape rs;
22318 constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
22319 set_pred_insn_type (OUTSIDE_PRED_INSN);
22320 if (inst.operands[2].isscalar)
22321 {
22322 rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
22323 neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
22324
22325 inst.instruction |= (1 << 25);
22326 int idx = inst.operands[2].reg & 0xf;
22327 constraint ((idx != 1 && idx != 0), _("index must be 0 or 1"));
22328 inst.operands[2].reg >>= 4;
22329 constraint (!(inst.operands[2].reg < 16),
22330 _("indexed register must be less than 16"));
22331 neon_three_args (rs == NS_QQS);
22332 inst.instruction |= (idx << 5);
22333 }
22334 else
22335 {
22336 rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
22337 neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
22338 neon_three_args (rs == NS_QQQ);
22339 }
22340 }
22341
22342 static void
22343 do_vmmla (void)
22344 {
22345 enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
22346 neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
22347
22348 constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
22349 set_pred_insn_type (OUTSIDE_PRED_INSN);
22350
22351 neon_three_args (1);
22352 }
22353
/* Encode a no-operand PACBTI instruction: just select the 32-bit Thumb
   form of the opcode.  */
static void
do_t_pacbti (void)
{
  inst.instruction = THUMB_OP32 (inst.instruction);
}
22359
/* Encode a three-register PACBTI instruction: Rd at bit 12, Rn at bit 16,
   Rm in the low bits.  Requires the PACBTI extension.  */
static void
do_t_pacbti_nonop (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, pacbti_ext),
	      _(BAD_PACBTI));

  inst.instruction = THUMB_OP32 (inst.instruction);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
}
22371
/* Encode PACG: like the other three-register PACBTI forms but with Rd at
   bit 8 instead of bit 12.  Requires the PACBTI extension.  */
static void
do_t_pacbti_pacg (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, pacbti_ext),
	      _(BAD_PACBTI));

  inst.instruction = THUMB_OP32 (inst.instruction);
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
}
22383
22384 \f
22385 /* Overall per-instruction processing. */
22386
22387 /* We need to be able to fix up arbitrary expressions in some statements.
22388 This is so that we can handle symbols that are an arbitrary distance from
22389 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
22390 which returns part of an address in a form which will be valid for
22391 a data instruction. We do this by pushing the expression into a symbol
22392 in the expr_section, and creating a fix for that. */
22393
22394 static void
22395 fix_new_arm (fragS * frag,
22396 int where,
22397 short int size,
22398 expressionS * exp,
22399 int pc_rel,
22400 int reloc)
22401 {
22402 fixS * new_fix;
22403
22404 switch (exp->X_op)
22405 {
22406 case O_constant:
22407 if (pc_rel)
22408 {
22409 /* Create an absolute valued symbol, so we have something to
22410 refer to in the object file. Unfortunately for us, gas's
22411 generic expression parsing will already have folded out
22412 any use of .set foo/.type foo %function that may have
22413 been used to set type information of the target location,
22414 that's being specified symbolically. We have to presume
22415 the user knows what they are doing. */
22416 char name[16 + 8];
22417 symbolS *symbol;
22418
22419 sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
22420
22421 symbol = symbol_find_or_make (name);
22422 S_SET_SEGMENT (symbol, absolute_section);
22423 symbol_set_frag (symbol, &zero_address_frag);
22424 S_SET_VALUE (symbol, exp->X_add_number);
22425 exp->X_op = O_symbol;
22426 exp->X_add_symbol = symbol;
22427 exp->X_add_number = 0;
22428 }
22429 /* FALLTHROUGH */
22430 case O_symbol:
22431 case O_add:
22432 case O_subtract:
22433 new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
22434 (enum bfd_reloc_code_real) reloc);
22435 break;
22436
22437 default:
22438 new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
22439 pc_rel, (enum bfd_reloc_code_real) reloc);
22440 break;
22441 }
22442
22443 /* Mark whether the fix is to a THUMB instruction, or an ARM
22444 instruction. */
22445 new_fix->tc_fix_data = thumb_mode;
22446 }
22447
/* Create a frag for an instruction requiring relaxation, using the
   pending relocation expression in inst.relocs[0] as the relaxation
   target.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Reduce the target expression to symbol + offset; anything more
     complex is wrapped in an expression symbol.  */
  switch (inst.relocs[0].exp.X_op)
    {
    case O_symbol:
      sym = inst.relocs[0].exp.X_add_symbol;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.relocs[0].exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
22479
/* Write a 32-bit thumb instruction to buf, as two 16-bit halfwords with
   the most significant halfword first.  */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
22487
/* Emit the instruction currently held in INST: report any pending error
   (STR is the source line, for diagnostics), handle the relaxation case,
   write the encoded bytes into the current frag, create fix-ups for any
   pending relocations, and emit debug info.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb: two halfwords, most significant first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-word ARM encoding: the same word is written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    {
      if (inst.relocs[r].type != BFD_RELOC_UNUSED)
	fix_new_arm (frag_now, to - frag_now->fr_literal,
		     inst.size, & inst.relocs[r].exp, inst.relocs[r].pc_rel,
		     inst.relocs[r].type);
    }

  dwarf2_emit_insn (inst.size);
}
22538
22539 static char *
22540 output_it_inst (int cond, int mask, char * to)
22541 {
22542 unsigned long instruction = 0xbf00;
22543
22544 mask &= 0xf;
22545 instruction |= mask;
22546 instruction |= cond << 4;
22547
22548 if (to == NULL)
22549 {
22550 to = frag_more (2);
22551 #ifdef OBJ_ELF
22552 dwarf2_emit_insn (2);
22553 #endif
22554 }
22555
22556 md_number_to_chars (to, instruction, 2);
22557
22558 return to;
22559 }
22560
/* Tag values used in struct asm_opcode's tag field, classifying how a
   mnemonic accepts (or rejects) a condition code.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a scalar
			   conditional suffix, others place 0xF where the
			   condition field would be, others take a vector
			   conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
22595
22596 /* Subroutine of md_assemble, responsible for looking up the primary
22597 opcode from the mnemonic the user wrote. STR points to the
22598 beginning of the mnemonic.
22599
22600 This is not simply a hash table lookup, because of conditional
22601 variants. Most instructions have conditional variants, which are
22602 expressed with a _conditional affix_ to the mnemonic. If we were
22603 to encode each conditional variant as a literal string in the opcode
22604 table, it would have approximately 20,000 entries.
22605
22606 Most mnemonics take this affix as a suffix, and in unified syntax,
22607 'most' is upgraded to 'all'. However, in the divided syntax, some
22608 instructions take the affix as an infix, notably the s-variants of
22609 the arithmetic instructions. Of those instructions, all but six
22610 have the infix appear after the third character of the mnemonic.
22611
22612 Accordingly, the algorithm for looking up primary opcodes given
22613 an identifier is:
22614
22615 1. Look up the identifier in the opcode table.
22616 If we find a match, go to step U.
22617
22618 2. Look up the last two characters of the identifier in the
22619 conditions table. If we find a match, look up the first N-2
22620 characters of the identifier in the opcode table. If we
22621 find a match, go to step CE.
22622
22623 3. Look up the fourth and fifth characters of the identifier in
22624 the conditions table. If we find a match, extract those
22625 characters from the identifier, and look up the remaining
22626 characters in the opcode table. If we find a match, go
22627 to step CM.
22628
22629 4. Fail.
22630
22631 U. Examine the tag field of the opcode structure, in case this is
22632 one of the six instructions with its conditional infix in an
22633 unusual place. If it is, the tag tells us where to find the
22634 infix; look it up in the conditions table and set inst.cond
22635 accordingly. Otherwise, this is an unconditional instruction.
22636 Again set inst.cond accordingly. Return the opcode structure.
22637
22638 CE. Examine the tag field to make sure this is an instruction that
22639 should receive a conditional suffix. If it is not, fail.
22640 Otherwise, set inst.cond from the suffix we already looked up,
22641 and return the opcode structure.
22642
22643 CM. Examine the tag field to make sure this is an instruction that
22644 should receive a conditional infix after the third character.
22645 If it is not, fail. Otherwise, undo the edits to the current
22646 line of input and proceed as for case CE. */
22647
22648 static const struct asm_opcode *
22649 opcode_lookup (char **str)
22650 {
22651 char *end, *base;
22652 char *affix;
22653 const struct asm_opcode *opcode;
22654 const struct asm_cond *cond;
22655 char save[2];
22656
22657 /* Scan up to the end of the mnemonic, which must end in white space,
22658 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
22659 for (base = end = *str; *end != '\0'; end++)
22660 if (*end == ' ' || *end == '.')
22661 break;
22662
22663 if (end == base)
22664 return NULL;
22665
22666 /* Handle a possible width suffix and/or Neon type suffix. */
22667 if (end[0] == '.')
22668 {
22669 int offset = 2;
22670
22671 /* The .w and .n suffixes are only valid if the unified syntax is in
22672 use. */
22673 if (unified_syntax && end[1] == 'w')
22674 inst.size_req = 4;
22675 else if (unified_syntax && end[1] == 'n')
22676 inst.size_req = 2;
22677 else
22678 offset = 0;
22679
22680 inst.vectype.elems = 0;
22681
22682 *str = end + offset;
22683
22684 if (end[offset] == '.')
22685 {
22686 /* See if we have a Neon type suffix (possible in either unified or
22687 non-unified ARM syntax mode). */
22688 if (parse_neon_type (&inst.vectype, str) == FAIL)
22689 return NULL;
22690 }
22691 else if (end[offset] != '\0' && end[offset] != ' ')
22692 return NULL;
22693 }
22694 else
22695 *str = end;
22696
22697 /* Look for unaffixed or special-case affixed mnemonic. */
22698 opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
22699 end - base);
22700 cond = NULL;
22701 if (opcode)
22702 {
22703 /* step U */
22704 if (opcode->tag < OT_odd_infix_0)
22705 {
22706 inst.cond = COND_ALWAYS;
22707 return opcode;
22708 }
22709
22710 if (warn_on_deprecated && unified_syntax)
22711 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
22712 affix = base + (opcode->tag - OT_odd_infix_0);
22713 cond = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, affix, 2);
22714 gas_assert (cond);
22715
22716 inst.cond = cond->value;
22717 return opcode;
22718 }
22719 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
22720 {
22721 /* Cannot have a conditional suffix on a mnemonic of less than a character.
22722 */
22723 if (end - base < 2)
22724 return NULL;
22725 affix = end - 1;
22726 cond = (const struct asm_cond *) str_hash_find_n (arm_vcond_hsh, affix, 1);
22727 opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
22728 affix - base);
22729 /* If this opcode can not be vector predicated then don't accept it with a
22730 vector predication code. */
22731 if (opcode && !opcode->mayBeVecPred)
22732 opcode = NULL;
22733 }
22734 if (!opcode || !cond)
22735 {
22736 /* Cannot have a conditional suffix on a mnemonic of less than two
22737 characters. */
22738 if (end - base < 3)
22739 return NULL;
22740
22741 /* Look for suffixed mnemonic. */
22742 affix = end - 2;
22743 cond = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, affix, 2);
22744 opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
22745 affix - base);
22746 }
22747
22748 if (opcode && cond)
22749 {
22750 /* step CE */
22751 switch (opcode->tag)
22752 {
22753 case OT_cinfix3_legacy:
22754 /* Ignore conditional suffixes matched on infix only mnemonics. */
22755 break;
22756
22757 case OT_cinfix3:
22758 case OT_cinfix3_deprecated:
22759 case OT_odd_infix_unc:
22760 if (!unified_syntax)
22761 return NULL;
22762 /* Fall through. */
22763
22764 case OT_csuffix:
22765 case OT_csuffixF:
22766 case OT_csuf_or_in3:
22767 inst.cond = cond->value;
22768 return opcode;
22769
22770 case OT_unconditional:
22771 case OT_unconditionalF:
22772 if (thumb_mode)
22773 inst.cond = cond->value;
22774 else
22775 {
22776 /* Delayed diagnostic. */
22777 inst.error = BAD_COND;
22778 inst.cond = COND_ALWAYS;
22779 }
22780 return opcode;
22781
22782 default:
22783 return NULL;
22784 }
22785 }
22786
22787 /* Cannot have a usual-position infix on a mnemonic of less than
22788 six characters (five would be a suffix). */
22789 if (end - base < 6)
22790 return NULL;
22791
22792 /* Look for infixed mnemonic in the usual position. */
22793 affix = base + 3;
22794 cond = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, affix, 2);
22795 if (!cond)
22796 return NULL;
22797
22798 memcpy (save, affix, 2);
22799 memmove (affix, affix + 2, (end - affix) - 2);
22800 opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
22801 (end - base) - 2);
22802 memmove (affix + 2, affix, (end - affix) - 2);
22803 memcpy (affix, save, 2);
22804
22805 if (opcode
22806 && (opcode->tag == OT_cinfix3
22807 || opcode->tag == OT_cinfix3_deprecated
22808 || opcode->tag == OT_csuf_or_in3
22809 || opcode->tag == OT_cinfix3_legacy))
22810 {
22811 /* Step CM. */
22812 if (warn_on_deprecated && unified_syntax
22813 && (opcode->tag == OT_cinfix3
22814 || opcode->tag == OT_cinfix3_deprecated))
22815 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
22816
22817 inst.cond = cond->value;
22818 return opcode;
22819 }
22820
22821 return NULL;
22822 }
22823
22824 /* This function generates an initial IT instruction, leaving its block
22825 virtually open for the new instructions. Eventually,
22826 the mask will be updated by now_pred_add_mask () each time
22827 a new instruction needs to be included in the IT block.
22828 Finally, the block is closed with close_automatic_it_block ().
22829 The block closure can be requested either from md_assemble (),
22830 a tencode (), or due to a label hook. */
22831
22832 static void
22833 new_automatic_it_block (int cond)
22834 {
22835 now_pred.state = AUTOMATIC_PRED_BLOCK;
22836 now_pred.mask = 0x18;
22837 now_pred.cc = cond;
22838 now_pred.block_length = 1;
22839 mapping_state (MAP_THUMB);
22840 now_pred.insn = output_it_inst (cond, now_pred.mask, NULL);
22841 now_pred.warn_deprecated = false;
22842 now_pred.insn_cond = true;
22843 }
22844
22845 /* Close an automatic IT block.
22846 See comments in new_automatic_it_block (). */
22847
22848 static void
22849 close_automatic_it_block (void)
22850 {
22851 now_pred.mask = 0x10;
22852 now_pred.block_length = 0;
22853 }
22854
22855 /* Update the mask of the current automatically-generated IT
22856 instruction. See comments in new_automatic_it_block (). */
22857
22858 static void
22859 now_pred_add_mask (int cond)
22860 {
22861 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
22862 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
22863 | ((bitvalue) << (nbit)))
22864 const int resulting_bit = (cond & 1);
22865
22866 now_pred.mask &= 0xf;
22867 now_pred.mask = SET_BIT_VALUE (now_pred.mask,
22868 resulting_bit,
22869 (5 - now_pred.block_length));
22870 now_pred.mask = SET_BIT_VALUE (now_pred.mask,
22871 1,
22872 ((5 - now_pred.block_length) - 1));
22873 output_it_inst (now_pred.cc, now_pred.mask, now_pred.insn);
22874
22875 #undef CLEAR_BIT
22876 #undef SET_BIT_VALUE
22877 }
22878
22879 /* The IT blocks handling machinery is accessed through the these functions:
22880 it_fsm_pre_encode () from md_assemble ()
22881 set_pred_insn_type () optional, from the tencode functions
22882 set_pred_insn_type_last () ditto
22883 in_pred_block () ditto
22884 it_fsm_post_encode () from md_assemble ()
22885 force_automatic_it_block_close () from label handling functions
22886
22887 Rationale:
22888 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
22889 initializing the IT insn type with a generic initial value depending
22890 on the inst.condition.
22891 2) During the tencode function, two things may happen:
22892 a) The tencode function overrides the IT insn type by
22893 calling either set_pred_insn_type (type) or
22894 set_pred_insn_type_last ().
22895 b) The tencode function queries the IT block state by
22896 calling in_pred_block () (i.e. to determine narrow/not narrow mode).
22897
22898 Both set_pred_insn_type and in_pred_block run the internal FSM state
22899 handling function (handle_pred_state), because: a) setting the IT insn
22900 type may incur in an invalid state (exiting the function),
22901 and b) querying the state requires the FSM to be updated.
22902 Specifically we want to avoid creating an IT block for conditional
22903 branches, so it_fsm_pre_encode is actually a guess and we can't
22904 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
22906 Because of this, if set_pred_insn_type and in_pred_block have to be
22907 used, set_pred_insn_type has to be called first.
22908
22909 set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
22910 that determines the insn IT type depending on the inst.cond code.
22911 When a tencode () routine encodes an instruction that can be
22912 either outside an IT block, or, in the case of being inside, has to be
22913 the last one, set_pred_insn_type_last () will determine the proper
22914 IT instruction type based on the inst.cond code. Otherwise,
22915 set_pred_insn_type can be called for overriding that logic or
22916 for covering other cases.
22917
22918 Calling handle_pred_state () may not transition the IT block state to
22919 OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
22920 still queried. Instead, if the FSM determines that the state should
22921 be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
22922 after the tencode () function: that's what it_fsm_post_encode () does.
22923
22924 Since in_pred_block () calls the state handling function to get an
22925 updated state, an error may occur (due to invalid insns combination).
22926 In that case, inst.error is set.
22927 Therefore, inst.error has to be checked after the execution of
22928 the tencode () routine.
22929
22930 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
22931 any pending state change (if any) that didn't take place in
22932 handle_pred_state () as explained above. */
22933
22934 static void
22935 it_fsm_pre_encode (void)
22936 {
22937 if (inst.cond != COND_ALWAYS)
22938 inst.pred_insn_type = INSIDE_IT_INSN;
22939 else
22940 inst.pred_insn_type = OUTSIDE_PRED_INSN;
22941
22942 now_pred.state_handled = 0;
22943 }
22944
22945 /* IT state FSM handling function. */
22946 /* MVE instructions and non-MVE instructions are handled differently because of
22947 the introduction of VPT blocks.
22948 Specifications say that any non-MVE instruction inside a VPT block is
22949 UNPREDICTABLE, with the exception of the BKPT instruction. Whereas most MVE
22950 instructions are deemed to be UNPREDICTABLE if inside an IT block. For the
22951 few exceptions we have MVE_UNPREDICABLE_INSN.
22952 The error messages provided depending on the different combinations possible
22953 are described in the cases below:
22954 For 'most' MVE instructions:
22955 1) In an IT block, with an IT code: syntax error
22956 2) In an IT block, with a VPT code: error: must be in a VPT block
22957 3) In an IT block, with no code: warning: UNPREDICTABLE
22958 4) In a VPT block, with an IT code: syntax error
22959 5) In a VPT block, with a VPT code: OK!
22960 6) In a VPT block, with no code: error: missing code
22961 7) Outside a pred block, with an IT code: error: syntax error
22962 8) Outside a pred block, with a VPT code: error: should be in a VPT block
22963 9) Outside a pred block, with no code: OK!
22964 For non-MVE instructions:
22965 10) In an IT block, with an IT code: OK!
22966 11) In an IT block, with a VPT code: syntax error
22967 12) In an IT block, with no code: error: missing code
22968 13) In a VPT block, with an IT code: error: should be in an IT block
22969 14) In a VPT block, with a VPT code: syntax error
22970 15) In a VPT block, with no code: UNPREDICTABLE
22971 16) Outside a pred block, with an IT code: error: should be in an IT block
22972 17) Outside a pred block, with a VPT code: syntax error
22973 18) Outside a pred block, with no code: OK!
22974 */
22975
22976
/* Advance the IT/VPT predication FSM for the instruction that has just
   been parsed.  Returns SUCCESS, or FAIL with inst.error set.  The
   "Case N" comments below refer to the enumeration in the comment
   above this function.  */
static int
handle_pred_state (void)
{
  now_pred.state_handled = 1;
  now_pred.insn_cond = false;

  switch (now_pred.state)
    {
    case OUTSIDE_PRED_BLOCK:
      switch (inst.pred_insn_type)
	{
	case MVE_UNPREDICABLE_INSN:
	case MVE_OUTSIDE_PRED_INSN:
	  if (inst.cond < COND_ALWAYS)
	    {
	      /* Case 7: Outside a pred block, with an IT code: error: syntax
		 error.  */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  /* Case 9:  Outside a pred block, with no code: OK!  */
	  break;
	case OUTSIDE_PRED_INSN:
	  if (inst.cond > COND_ALWAYS)
	    {
	      /* Case 17:  Outside a pred block, with a VPT code: syntax
		 error.  */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  /* Case 18:  Outside a pred block, with no code: OK!  */
	  break;

	case INSIDE_VPT_INSN:
	  /* Case 8: Outside a pred block, with a VPT code: error: should be in
	     a VPT block.  */
	  inst.error = BAD_OUT_VPT;
	  return FAIL;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (inst.cond < COND_ALWAYS)
	    {
	      /* Case 16: Outside a pred block, with an IT code: error: should
		 be in an IT block.  */
	      if (thumb_mode == 0)
		{
		  if (unified_syntax
		      && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		    as_tsktsk (_("Warning: conditional outside an IT block"\
				 " for Thumb."));
		}
	      else
		{
		  if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      /* Automatically generate the IT instruction.  */
		      new_automatic_it_block (inst.cond);
		      if (inst.pred_insn_type == INSIDE_IT_LAST_INSN)
			close_automatic_it_block ();
		    }
		  else
		    {
		      inst.error = BAD_OUT_IT;
		      return FAIL;
		    }
		}
	      break;
	    }
	  else if (inst.cond > COND_ALWAYS)
	    {
	      /* Case 17: Outside a pred block, with a VPT code: syntax
		 error.  */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  else
	    gas_assert (0);
	  /* Fall through -- not reached unless asserts are disabled.  */
	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case VPT_INSN:
	  if (inst.cond != COND_ALWAYS)
	    first_error (BAD_SYNTAX);
	  /* A VPT/VPST instruction opens a manually-managed block.  */
	  now_pred.state = MANUAL_PRED_BLOCK;
	  now_pred.block_length = 0;
	  now_pred.type = VECTOR_PRED;
	  now_pred.cc = 0;
	  break;
	case IT_INSN:
	  /* An explicit IT instruction opens a manually-managed block.  */
	  now_pred.state = MANUAL_PRED_BLOCK;
	  now_pred.block_length = 0;
	  now_pred.type = SCALAR_PRED;
	  break;
	}
      break;

    case AUTOMATIC_PRED_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.pred_insn_type)
	{
	case INSIDE_VPT_INSN:
	case VPT_INSN:
	case MVE_UNPREDICABLE_INSN:
	case MVE_OUTSIDE_PRED_INSN:
	  /* MVE insns never belong in an automatic IT block.  */
	  gas_assert (0);
	  /* Fall through -- not reached unless asserts are disabled.  */
	case OUTSIDE_PRED_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_pred_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_pred.block_length++;

	  if (now_pred.block_length > 4
	      || !now_pred_compatible (inst.cond))
	    {
	      /* Block full or condition mismatch: close it, and unless
		 this is an "if last" insn, start a fresh block.  */
	      force_automatic_it_block_close ();
	      if (inst.pred_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_pred.insn_cond = true;
	      now_pred_add_mask (inst.cond);
	    }

	  if (now_pred.state == AUTOMATIC_PRED_BLOCK
	      && (inst.pred_insn_type == INSIDE_IT_LAST_INSN
		  || inst.pred_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  /* Unconditional even inside a block (e.g. BKPT), but still
	     occupies a slot.  */
	  now_pred.block_length++;
	  now_pred.insn_cond = true;

	  if (now_pred.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_pred_add_mask (now_pred.cc & 1);
	  break;

	case IT_INSN:
	  /* An explicit IT takes over: switch to manual management.  */
	  close_automatic_it_block ();
	  now_pred.state = MANUAL_PRED_BLOCK;
	  break;
	}
      break;

    case MANUAL_PRED_BLOCK:
      {
	unsigned int cond;
	int is_last;
	if (now_pred.type == SCALAR_PRED)
	  {
	    /* Check conditional suffixes.  */
	    cond = now_pred.cc ^ ((now_pred.mask >> 4) & 1) ^ 1;
	    now_pred.mask <<= 1;
	    now_pred.mask &= 0x1f;
	    is_last = (now_pred.mask == 0x10);
	  }
	else
	  {
	    now_pred.cc ^= (now_pred.mask >> 4);
	    cond = now_pred.cc + 0xf;
	    now_pred.mask <<= 1;
	    now_pred.mask &= 0x1f;
	    is_last = now_pred.mask == 0x10;
	  }
	now_pred.insn_cond = true;

	switch (inst.pred_insn_type)
	  {
	  case OUTSIDE_PRED_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 12: In an IT block, with no code: error: missing
		       code.  */
		    inst.error = BAD_NOT_IT;
		    return FAIL;
		  }
		else if (inst.cond > COND_ALWAYS)
		  {
		    /* Case 11: In an IT block, with a VPT code: syntax
		       error.  */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else if (thumb_mode)
		  {
		    /* This is for some special cases where a non-MVE
		       instruction is not allowed in an IT block, such as cbz,
		       but are put into one with a condition code.
		       You could argue this should be a syntax error, but we
		       gave the 'not allowed in IT block' diagnostic in the
		       past so we will keep doing so.  */
		    inst.error = BAD_NOT_IT;
		    return FAIL;
		  }
		break;
	      }
	    else
	      {
		/* Case 15: In a VPT block, with no code: UNPREDICTABLE.  */
		as_tsktsk (MVE_NOT_VPT);
		return SUCCESS;
	      }
	  case MVE_OUTSIDE_PRED_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 3: In an IT block, with no code: warning:
		       UNPREDICTABLE.  */
		    as_tsktsk (MVE_NOT_IT);
		    return SUCCESS;
		  }
		else if (inst.cond < COND_ALWAYS)
		  {
		    /* Case 1: In an IT block, with an IT code: syntax
		       error.  */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else
		  gas_assert (0);
	      }
	    else
	      {
		if (inst.cond < COND_ALWAYS)
		  {
		    /* Case 4: In a VPT block, with an IT code: syntax
		       error.  */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 6: In a VPT block, with no code: error: missing
		       code.  */
		    inst.error = BAD_NOT_VPT;
		    return FAIL;
		  }
		else
		  {
		    gas_assert (0);
		  }
	      }
	    /* Fall through -- not reached unless asserts are disabled.  */
	  case MVE_UNPREDICABLE_INSN:
	    as_tsktsk (now_pred.type == SCALAR_PRED ? MVE_NOT_IT : MVE_NOT_VPT);
	    return SUCCESS;
	  case INSIDE_IT_INSN:
	    if (inst.cond > COND_ALWAYS)
	      {
		/* Case 11: In an IT block, with a VPT code: syntax error.  */
		/* Case 14: In a VPT block, with a VPT code: syntax error.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	    else if (now_pred.type == SCALAR_PRED)
	      {
		/* Case 10: In an IT block, with an IT code: OK!  */
		if (cond != inst.cond)
		  {
		    inst.error = now_pred.type == SCALAR_PRED ? BAD_IT_COND :
		      BAD_VPT_COND;
		    return FAIL;
		  }
	      }
	    else
	      {
		/* Case 13: In a VPT block, with an IT code: error: should be
		   in an IT block.  */
		inst.error = BAD_OUT_IT;
		return FAIL;
	      }
	    break;

	  case INSIDE_VPT_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		/* Case 2: In an IT block, with a VPT code: error: must be in a
		   VPT block.  */
		inst.error = BAD_OUT_VPT;
		return FAIL;
	      }
	    /* Case 5:  In a VPT block, with a VPT code: OK!  */
	    else if (cond != inst.cond)
	      {
		inst.error = BAD_VPT_COND;
		return FAIL;
	      }
	    break;
	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (now_pred.type == VECTOR_PRED || inst.cond > COND_ALWAYS)
	      {
		/* Case 4: In a VPT block, with an IT code: syntax error.  */
		/* Case 11: In an IT block, with a VPT code: syntax error.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	    else if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		/* This insn must be the last one of its IT block.  */
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in a IT or VPT
	       block.  */
	    break;

	  case IT_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		/* An IT instruction inside an IT block.  */
		inst.error = BAD_IT_IT;
		return FAIL;
	      }
	    /* fall through.  */
	  case VPT_INSN:
	    if (inst.cond == COND_ALWAYS)
	      {
		/* Executing a VPT/VPST instruction inside an IT block or a
		   VPT/VPST/IT instruction inside a VPT block is
		   UNPREDICTABLE.  */
		if (now_pred.type == SCALAR_PRED)
		  as_tsktsk (MVE_NOT_IT);
		else
		  as_tsktsk (MVE_NOT_VPT);
		return SUCCESS;
	      }
	    else
	      {
		/* VPT/VPST do not accept condition codes.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	  }
      }
      break;
    }

  return SUCCESS;
}
23343
/* A pattern/mask pair describing a class of 16-bit Thumb instructions,
   used to detect instructions that are performance deprecated inside
   an IT block.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Expected opcode bits after masking.  */
  unsigned long mask;		/* Which opcode bits are compared.  */
  const char* description;	/* Translatable name for diagnostics.  */
};
23350
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Scanned by it_fsm_post_encode ().  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode.  'tvalue' is used at the stage this check
     happens.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }		/* Sentinel: mask == 0 terminates the scan.  */
};
23365
23366 static void
23367 it_fsm_post_encode (void)
23368 {
23369 int is_last;
23370
23371 if (!now_pred.state_handled)
23372 handle_pred_state ();
23373
23374 if (now_pred.insn_cond
23375 && warn_on_restrict_it
23376 && !now_pred.warn_deprecated
23377 && warn_on_deprecated
23378 && (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
23379 || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8r))
23380 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
23381 {
23382 if (inst.instruction >= 0x10000)
23383 {
23384 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
23385 "performance deprecated in ARMv8-A and ARMv8-R"));
23386 now_pred.warn_deprecated = true;
23387 }
23388 else
23389 {
23390 const struct depr_insn_mask *p = depr_it_insns;
23391
23392 while (p->mask != 0)
23393 {
23394 if ((inst.instruction & p->mask) == p->pattern)
23395 {
23396 as_tsktsk (_("IT blocks containing 16-bit Thumb "
23397 "instructions of the following class are "
23398 "performance deprecated in ARMv8-A and "
23399 "ARMv8-R: %s"), p->description);
23400 now_pred.warn_deprecated = true;
23401 break;
23402 }
23403
23404 ++p;
23405 }
23406 }
23407
23408 if (now_pred.block_length > 1)
23409 {
23410 as_tsktsk (_("IT blocks containing more than one conditional "
23411 "instruction are performance deprecated in ARMv8-A and "
23412 "ARMv8-R"));
23413 now_pred.warn_deprecated = true;
23414 }
23415 }
23416
23417 is_last = (now_pred.mask == 0x10);
23418 if (is_last)
23419 {
23420 now_pred.state = OUTSIDE_PRED_BLOCK;
23421 now_pred.mask = 0;
23422 }
23423 }
23424
23425 static void
23426 force_automatic_it_block_close (void)
23427 {
23428 if (now_pred.state == AUTOMATIC_PRED_BLOCK)
23429 {
23430 close_automatic_it_block ();
23431 now_pred.state = OUTSIDE_PRED_BLOCK;
23432 now_pred.mask = 0;
23433 }
23434 }
23435
23436 static int
23437 in_pred_block (void)
23438 {
23439 if (!now_pred.state_handled)
23440 handle_pred_state ();
23441
23442 return now_pred.state != OUTSIDE_PRED_BLOCK;
23443 }
23444
23445 /* Whether OPCODE only has T32 encoding. Since this function is only used by
23446 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
23447 here, hence the "known" in the function name. */
23448
23449 static bool
23450 known_t32_only_insn (const struct asm_opcode *opcode)
23451 {
23452 /* Original Thumb-1 wide instruction. */
23453 if (opcode->tencode == do_t_blx
23454 || opcode->tencode == do_t_branch23
23455 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
23456 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
23457 return true;
23458
23459 /* Wide-only instruction added to ARMv8-M Baseline. */
23460 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
23461 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
23462 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
23463 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
23464 return true;
23465
23466 return false;
23467 }
23468
23469 /* Whether wide instruction variant can be used if available for a valid OPCODE
23470 in ARCH. */
23471
23472 static bool
23473 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
23474 {
23475 if (known_t32_only_insn (opcode))
23476 return true;
23477
23478 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
23479 of variant T3 of B.W is checked in do_t_branch. */
23480 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
23481 && opcode->tencode == do_t_branch)
23482 return true;
23483
23484 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
23485 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
23486 && opcode->tencode == do_t_mov_cmp
23487 /* Make sure CMP instruction is not affected. */
23488 && opcode->aencode == do_mov)
23489 return true;
23490
23491 /* Wide instruction variants of all instructions with narrow *and* wide
23492 variants become available with ARMv6t2. Other opcodes are either
23493 narrow-only or wide-only and are thus available if OPCODE is valid. */
23494 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
23495 return true;
23496
23497 /* OPCODE with narrow only instruction variant or wide variant not
23498 available. */
23499 return false;
23500 }
23501
/* Assemble the single instruction in STR.  This is gas's per-statement
   entry point for the ARM target: look up the mnemonic, dispatch to the
   Thumb or ARM operand parser and encoder, run the IT/VPT predication
   FSM hooks around encoding, and emit the result via output_inst ().  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Start from a clean per-instruction state; no relocations yet.  */
  memset (&inst, '\0', sizeof (inst));
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  /* P is advanced past the mnemonic and any width/type suffix.  */
  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1u;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/true))
	{
	  /* Prepare the pred_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* Instruction values 0xe800..0xffff would be ambiguous between
	     a 16-bit insn and the first half of a 32-bit one.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      /* NOTE: check_neon_suffixes is a statement-like macro defined
	 elsewhere in this file.  */
      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bool is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/false))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  /* Write out the assembled instruction.  */
  output_inst (str);
}
23696
23697 static void
23698 check_pred_blocks_finished (void)
23699 {
23700 #ifdef OBJ_ELF
23701 asection *sect;
23702
23703 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
23704 if (seg_info (sect)->tc_segment_info_data.current_pred.state
23705 == MANUAL_PRED_BLOCK)
23706 {
23707 if (now_pred.type == SCALAR_PRED)
23708 as_warn (_("section '%s' finished with an open IT block."),
23709 sect->name);
23710 else
23711 as_warn (_("section '%s' finished with an open VPT/VPST block."),
23712 sect->name);
23713 }
23714 #else
23715 if (now_pred.state == MANUAL_PRED_BLOCK)
23716 {
23717 if (now_pred.type == SCALAR_PRED)
23718 as_warn (_("file finished with an open IT block."));
23719 else
23720 as_warn (_("file finished with an open VPT/VPST block."));
23721 }
23722 #endif
23723 }
23724
23725 /* Various frobbings of labels and their addresses. */
23726
void
arm_start_line_hook (void)
{
  /* Forget the label from the previous line: md_assemble () only
     re-anchors a label recorded since the start of the current line.  */
  last_label_seen = NULL;
}
23732
/* Hook run whenever a label SYM is defined.  Remember it so
   md_assemble () can re-anchor it, record its Thumb/interworking state,
   close any open automatic IT block, and mark Thumb function entry
   points for ARM/Thumb interworking.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any automatically-generated IT block.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_section_flags (now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = false;
    }

  /* Inform the DWARF-2 line-number machinery about this label.  */
  dwarf2_emit_label (sym);
}
23791
23792 bool
23793 arm_data_in_code (void)
23794 {
23795 if (thumb_mode && startswith (input_line_pointer + 1, "data:"))
23796 {
23797 *input_line_pointer = '/';
23798 input_line_pointer += 5;
23799 *input_line_pointer = 0;
23800 return true;
23801 }
23802
23803 return false;
23804 }
23805
23806 char *
23807 arm_canonicalize_symbol_name (char * name)
23808 {
23809 int len;
23810
23811 if (thumb_mode && (len = strlen (name)) > 5
23812 && streq (name + len - 5, "/data"))
23813 *(name + len - 5) = 0;
23814
23815 return name;
23816 }
23817 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* Expand to one struct reg_entry initializer: the name as a string,
   its number (or packed encoding), its REG_TYPE_<t>, 'true' marking
   it as a built-in (not .req-defined) name, and zero for the
   remaining member.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true, 0 }
/* Entry whose name is the prefix P with the number N appended.  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* As REGNUM, but the stored number is doubled; used for Neon Q
   registers below — presumably because Qn overlays D(2n).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Sixteen consecutive entries <p>0 .. <p>15.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* The upper half, <p>16 .. <p>31.  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* Sixteen entries with doubled numbers (Neon Q registers).  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* Banked LR/SP/SPSR triple for one processor mode, in both cases.
   NOTE(review): the 768 (0x300) tag with the bank index in bits 16+
   and SPSR_BIT selecting the SPSR form appears to match the banked
   register encoding consumed by the REG_TYPE_RNB users — confirm.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Defining the new Zero register from ARMv8.1-M.  */
  REGDEF(zr,15,ZR),
  REGDEF(ZR,15,ZR),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  NOTE(review): the 512 (0x200) tag with the
     bank index in bits 16+ appears to mark a banked register, with
     SPSR_BIT selecting the SPSR variant — confirm against the
     REG_TYPE_RNB consumers.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),
  REGDEF(fpscr_nzcvqc,2,VFC), REGDEF(FPSCR_nzcvqc,2,VFC),
  REGDEF(vpr,12,VFC), REGDEF(VPR,12,VFC),
  REGDEF(fpcxt_ns,14,VFC), REGDEF(FPCXT_NS,14,VFC),
  REGDEF(fpcxt_s,15,VFC), REGDEF(FPCXT_S,15,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),

  /* DWARF ABI defines RA_AUTH_CODE to 143.  It also reserves 134-142 for future
     expansion.  RA_AUTH_CODE here is given the value 143 % 134 to make it easy
     for tc_arm_regname_to_dw2regnum to translate to DWARF reg number using
     134 + reg_number should the range 134 to 142 be used for more pseudo regs
     in the future.  This also helps fit RA_AUTH_CODE into a bitmask.  */
  REGDEF(ra_auth_code,9,PSEUDO),
};
#undef REGDEF
#undef REGNUM
#undef REGSET
23982
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Every ordering of the f/s/x/c flag
   letters is listed explicitly, so the user may write the field
   letters in any order.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
24061
/* Table of V7M psr names.  Both lower- and uppercase spellings are
   accepted for each register; the value is the encoding inserted in
   the instruction — NOTE(review): presumably the M-profile MSR/MRS
   SYSm field, with 0x80+ for the _NS (Non-Secure) aliases; confirm
   against the encoding code.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	   0x0 }, {"APSR",	   0x0 },
  {"iapsr",	   0x1 }, {"IAPSR",	   0x1 },
  {"eapsr",	   0x2 }, {"EAPSR",	   0x2 },
  {"psr",	   0x3 }, {"PSR",	   0x3 },
  {"xpsr",	   0x3 }, {"XPSR",	   0x3 }, {"xPSR",	  3 },
  {"ipsr",	   0x5 }, {"IPSR",	   0x5 },
  {"epsr",	   0x6 }, {"EPSR",	   0x6 },
  {"iepsr",	   0x7 }, {"IEPSR",	   0x7 },
  {"msp",	   0x8 }, {"MSP",	   0x8 },
  {"psp",	   0x9 }, {"PSP",	   0x9 },
  {"msplim",	   0xa }, {"MSPLIM",	   0xa },
  {"psplim",	   0xb }, {"PSPLIM",	   0xb },
  {"primask",	   0x10}, {"PRIMASK",	   0x10},
  {"basepri",	   0x11}, {"BASEPRI",	   0x11},
  {"basepri_max",  0x12}, {"BASEPRI_MAX",  0x12},
  {"faultmask",	   0x13}, {"FAULTMASK",	   0x13},
  {"control",	   0x14}, {"CONTROL",	   0x14},
  {"msp_ns",	   0x88}, {"MSP_NS",	   0x88},
  {"psp_ns",	   0x89}, {"PSP_NS",	   0x89},
  {"msplim_ns",	   0x8a}, {"MSPLIM_NS",	   0x8a},
  {"psplim_ns",	   0x8b}, {"PSPLIM_NS",	   0x8b},
  {"primask_ns",   0x90}, {"PRIMASK_NS",   0x90},
  {"basepri_ns",   0x91}, {"BASEPRI_NS",   0x91},
  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
  {"control_ns",   0x94}, {"CONTROL_NS",   0x94},
  {"sp_ns",	   0x98}, {"SP_NS",	   0x98 }
};
24092
/* Table of all shift-in-operand names, in both cases.  Note that
   "asl" is accepted as a synonym for "lsl": both map to
   SHIFT_LSL.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX },
  { "uxtw", SHIFT_UXTW}, { "UXTW", SHIFT_UXTW}
};
24104
/* Table of all explicit relocation names.  Each relocation is
   accepted in both lower- and uppercase spellings.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
  { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
  { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
  { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
  { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
  { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
  { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC },      { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
  { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC },    { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
  /* Fix: the uppercase spelling of gottpoff_fdpic was previously only
     accepted with the typo "GOTTPOFF_FDIC", so the correct spelling
     was rejected.  Accept the correct spelling...  */
  { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC },   { "GOTTPOFF_FDPIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
  /* ...and keep the historical typo so any sources written against it
     continue to assemble.  */
  { "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
};
#endif
24138
/* Table of all conditional affixes.  The value is the condition-code
   nibble placed in the instruction; note the synonym pairs
   "cs"/"hs" (0x2) and "cc"/"ul"/"lo" (0x3).  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
/* Extra pseudo-condition suffixes "t" and "e" (presumably
   then/else within VPT-style predication blocks — confirm against
   the parser); their values lie outside the 4-bit range used by
   CONDS above.  */
static const struct asm_cond vconds[] =
{
    {"t", 0xf},
    {"e", 0x10}
};
24163
/* Emit matched lower- and uppercase entries for one barrier option:
   the name, its 4-bit encoding, and the architecture feature that
   must be present for the option to be accepted.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of memory-barrier option names and encodings; the *LD
   (load-only) options require ARMv8, the rest only the barrier
   extension.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
24189
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.
   Each OPSn(...) expands to a brace-enclosed initializer for an
   operand-type array, with each argument prefixed by OP_.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
24214
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en, 0 }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Thumb-only variants of TCE and TUE.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* mov instructions that are shared between coprocessor and MVE.  */
#define mcCE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##ae, 0 }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p)		\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,	\
    THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p)		\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* MVE instruction: conditional suffix (including F variants), overloaded
   M_MNEM opcode enumerator for both ARM and Thumb, flagged as
   MVE-predicable.  */
#define mCEF(mnem, op, nops, ops, enc)				\
  { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }


/* nCEF but for MVE predicated instructions.  */
#define mnCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* nCE but for MVE predicated instructions.  */
#define mnCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NUF but for potentially MVE predicated instructions.  */
#define MNUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nUF but for potentially MVE predicated instructions.  */
#define mnUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* ToC but for potentially MVE predicated instructions.  */
#define mToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 1 }

/* NCE but for MVE predicated instructions.  */
#define MNCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NCEF but for MVE predicated instructions.  */
#define MNCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)
#define do_0 0
24425
24426 static const struct asm_opcode insns[] =
24427 {
24428 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
24429 #define THUMB_VARIANT & arm_ext_v4t
24430 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
24431 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
24432 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
24433 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
24434 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
24435 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
24436 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
24437 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
24438 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
24439 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
24440 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
24441 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
24442 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
24443 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
24444 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
24445 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
24446
24447 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
24448 for setting PSR flag bits. They are obsolete in V6 and do not
24449 have Thumb equivalents. */
24450 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
24451 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
24452 CL("tstp", 110f000, 2, (RR, SH), cmp),
24453 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
24454 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
24455 CL("cmpp", 150f000, 2, (RR, SH), cmp),
24456 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
24457 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
24458 CL("cmnp", 170f000, 2, (RR, SH), cmp),
24459
24460 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
24461 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
24462 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
24463 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
24464
24465 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
24466 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
24467 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
24468 OP_RRnpc),
24469 OP_ADDRGLDR),ldst, t_ldst),
24470 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
24471
24472 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24473 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24474 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24475 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24476 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24477 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24478
24479 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
24480 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
24481
24482 /* Pseudo ops. */
24483 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
24484 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
24485 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
24486 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
24487
24488 /* Thumb-compatibility pseudo ops. */
24489 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
24490 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
24491 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
24492 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
24493 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
24494 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
24495 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
24496 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
24497 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
24498 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
24499 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
24500 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
24501
24502 /* These may simplify to neg. */
24503 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
24504 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
24505
24506 #undef THUMB_VARIANT
24507 #define THUMB_VARIANT & arm_ext_os
24508
24509 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
24510 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
24511
24512 #undef THUMB_VARIANT
24513 #define THUMB_VARIANT & arm_ext_v6
24514
24515 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
24516
24517 /* V1 instructions with no Thumb analogue prior to V6T2. */
24518 #undef THUMB_VARIANT
24519 #define THUMB_VARIANT & arm_ext_v6t2
24520
24521 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
24522 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
24523 CL("teqp", 130f000, 2, (RR, SH), cmp),
24524
24525 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
24526 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
24527 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
24528 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
24529
24530 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24531 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24532
24533 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24534 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24535
24536 /* V1 instructions with no Thumb analogue at all. */
24537 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
24538 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
24539
24540 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
24541 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
24542 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
24543 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
24544 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
24545 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
24546 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
24547 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
24548
24549 #undef ARM_VARIANT
24550 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
24551 #undef THUMB_VARIANT
24552 #define THUMB_VARIANT & arm_ext_v4t
24553
24554 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
24555 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
24556
24557 #undef THUMB_VARIANT
24558 #define THUMB_VARIANT & arm_ext_v6t2
24559
24560 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
24561 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
24562
24563 /* Generic coprocessor instructions. */
24564 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
24565 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24566 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24567 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24568 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24569 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
24570 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
24571
24572 #undef ARM_VARIANT
24573 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
24574
24575 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
24576 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
24577
24578 #undef ARM_VARIANT
24579 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
24580 #undef THUMB_VARIANT
24581 #define THUMB_VARIANT & arm_ext_msr
24582
24583 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
24584 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
24585
24586 #undef ARM_VARIANT
24587 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
24588 #undef THUMB_VARIANT
24589 #define THUMB_VARIANT & arm_ext_v6t2
24590
24591 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24592 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24593 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24594 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24595 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24596 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24597 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24598 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24599
24600 #undef ARM_VARIANT
24601 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
24602 #undef THUMB_VARIANT
24603 #define THUMB_VARIANT & arm_ext_v4t
24604
24605 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24606 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24607 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24608 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24609 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24610 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24611
24612 #undef ARM_VARIANT
24613 #define ARM_VARIANT & arm_ext_v4t_5
24614
24615 /* ARM Architecture 4T. */
24616 /* Note: bx (and blx) are required on V5, even if the processor does
24617 not support Thumb. */
24618 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
24619
24620 #undef ARM_VARIANT
24621 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
24622 #undef THUMB_VARIANT
24623 #define THUMB_VARIANT & arm_ext_v5t
24624
24625 /* Note: blx has 2 variants; the .value coded here is for
24626 BLX(2). Only this variant has conditional execution. */
24627 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
24628 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
24629
24630 #undef THUMB_VARIANT
24631 #define THUMB_VARIANT & arm_ext_v6t2
24632
24633 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
24634 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24635 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24636 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24637 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
24638 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
24639 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
24640 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
24641
24642 #undef ARM_VARIANT
24643 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
24644 #undef THUMB_VARIANT
24645 #define THUMB_VARIANT & arm_ext_v5exp
24646
24647 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24648 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24649 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24650 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24651
24652 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24653 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
24654
24655 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
24656 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
24657 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
24658 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
24659
24660 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24661 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24662 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24663 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24664
24665 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24666 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24667
24668 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
24669 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
24670 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
24671 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
24672
24673 #undef ARM_VARIANT
24674 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
24675 #undef THUMB_VARIANT
24676 #define THUMB_VARIANT & arm_ext_v6t2
24677
24678 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
24679 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
24680 ldrd, t_ldstd),
24681 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
24682 ADDRGLDRS), ldrd, t_ldstd),
24683
24684 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24685 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24686
24687 #undef ARM_VARIANT
24688 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
24689
24690 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
24691
24692 #undef ARM_VARIANT
24693 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
24694 #undef THUMB_VARIANT
24695 #define THUMB_VARIANT & arm_ext_v6
24696
24697 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
24698 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
24699 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
24700 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
24701 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
24702 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24703 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24704 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24705 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24706 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
24707
24708 #undef THUMB_VARIANT
24709 #define THUMB_VARIANT & arm_ext_v6t2_v8m
24710
24711 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
24712 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24713 strex, t_strex),
24714 #undef THUMB_VARIANT
24715 #define THUMB_VARIANT & arm_ext_v6t2
24716
24717 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24718 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24719
24720 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
24721 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
24722
24723 /* ARM V6 not included in V7M. */
24724 #undef THUMB_VARIANT
24725 #define THUMB_VARIANT & arm_ext_v6_notm
24726 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
24727 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
24728 UF(rfeib, 9900a00, 1, (RRw), rfe),
24729 UF(rfeda, 8100a00, 1, (RRw), rfe),
24730 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
24731 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
24732 UF(rfefa, 8100a00, 1, (RRw), rfe),
24733 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
24734 UF(rfeed, 9900a00, 1, (RRw), rfe),
24735 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
24736 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
24737 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
24738 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
24739 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
24740 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
24741 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
24742 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
24743 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
24744 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
24745
24746 /* ARM V6 not included in V7M (e.g. integer SIMD). */
24747 #undef THUMB_VARIANT
24748 #define THUMB_VARIANT & arm_ext_v6_dsp
24749 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
24750 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
24751 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24752 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24753 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24754 /* Old name for QASX. */
24755 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24756 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24757 /* Old name for QSAX. */
24758 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24759 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24760 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24761 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24762 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24763 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24764 /* Old name for SASX. */
24765 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24766 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24767 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24768 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24769 /* Old name for SHASX. */
24770 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24771 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24772 /* Old name for SHSAX. */
24773 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24774 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24775 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24776 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24777 /* Old name for SSAX. */
24778 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24779 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24780 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24781 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24782 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24783 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24784 /* Old name for UASX. */
24785 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24786 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24787 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24788 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24789 /* Old name for UHASX. */
24790 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24791 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24792 /* Old name for UHSAX. */
24793 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24794 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24795 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24796 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24797 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24798 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24799 /* Old name for UQASX. */
24800 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24801 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24802 /* Old name for UQSAX. */
24803 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24804 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24805 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24806 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24807 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24808 /* Old name for USAX. */
24809 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24810 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24811 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24812 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24813 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24814 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24815 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24816 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24817 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24818 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
24819 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
24820 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24821 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24822 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24823 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24824 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24825 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24826 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24827 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24828 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24829 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24830 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24831 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24832 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24833 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24834 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24835 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24836 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24837 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24838 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
24839 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
24840 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
24841 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24842 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
24843
24844 #undef ARM_VARIANT
24845 #define ARM_VARIANT & arm_ext_v6k_v6t2
24846 #undef THUMB_VARIANT
24847 #define THUMB_VARIANT & arm_ext_v6k_v6t2
24848
24849 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
24850 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
24851 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
24852 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
24853
24854 #undef THUMB_VARIANT
24855 #define THUMB_VARIANT & arm_ext_v6_notm
24856 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
24857 ldrexd, t_ldrexd),
24858 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
24859 RRnpcb), strexd, t_strexd),
24860
24861 #undef THUMB_VARIANT
24862 #define THUMB_VARIANT & arm_ext_v6t2_v8m
24863 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
24864 rd_rn, rd_rn),
24865 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
24866 rd_rn, rd_rn),
24867 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24868 strex, t_strexbh),
24869 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24870 strex, t_strexbh),
24871 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
24872
24873 #undef ARM_VARIANT
24874 #define ARM_VARIANT & arm_ext_sec
24875 #undef THUMB_VARIANT
24876 #define THUMB_VARIANT & arm_ext_sec
24877
24878 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
24879
24880 #undef ARM_VARIANT
24881 #define ARM_VARIANT & arm_ext_virt
24882 #undef THUMB_VARIANT
24883 #define THUMB_VARIANT & arm_ext_virt
24884
24885 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
24886 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
24887
24888 #undef ARM_VARIANT
24889 #define ARM_VARIANT & arm_ext_pan
24890 #undef THUMB_VARIANT
24891 #define THUMB_VARIANT & arm_ext_pan
24892
24893 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
24894
24895 #undef ARM_VARIANT
24896 #define ARM_VARIANT & arm_ext_v6t2
24897 #undef THUMB_VARIANT
24898 #define THUMB_VARIANT & arm_ext_v6t2
24899
24900 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
24901 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
24902 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
24903 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
24904
24905 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
24906 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
24907
24908 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24909 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24910 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24911 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24912
24913 #undef ARM_VARIANT
24914 #define ARM_VARIANT & arm_ext_v3
24915 #undef THUMB_VARIANT
24916 #define THUMB_VARIANT & arm_ext_v6t2
24917
24918 TUE("csdb", 320f014, f3af8014, 0, (), noargs, t_csdb),
24919 TUF("ssbb", 57ff040, f3bf8f40, 0, (), noargs, t_csdb),
24920 TUF("pssbb", 57ff044, f3bf8f44, 0, (), noargs, t_csdb),
24921
24922 #undef ARM_VARIANT
24923 #define ARM_VARIANT & arm_ext_v6t2
24924 #undef THUMB_VARIANT
24925 #define THUMB_VARIANT & arm_ext_v6t2_v8m
24926 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
24927 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
24928
24929 /* Thumb-only instructions. */
24930 #undef ARM_VARIANT
24931 #define ARM_VARIANT NULL
24932 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
24933 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
24934
24935 /* ARM does not really have an IT instruction, so always allow it.
24936 The opcode is copied from Thumb in order to allow warnings in
24937 -mimplicit-it=[never | arm] modes. */
24938 #undef ARM_VARIANT
24939 #define ARM_VARIANT & arm_ext_v1
24940 #undef THUMB_VARIANT
24941 #define THUMB_VARIANT & arm_ext_v6t2
24942
24943 TUE("it", bf08, bf08, 1, (COND), it, t_it),
24944 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
24945 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
24946 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
24947 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
24948 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
24949 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
24950 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
24951 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
24952 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
24953 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
24954 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
24955 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
24956 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
24957 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
24958 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
24959 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
24960 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
24961
24962 /* Thumb2 only instructions. */
24963 #undef ARM_VARIANT
24964 #define ARM_VARIANT NULL
24965
24966 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
24967 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
24968 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
24969 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
24970 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
24971 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
24972
24973 /* Hardware division instructions. */
24974 #undef ARM_VARIANT
24975 #define ARM_VARIANT & arm_ext_adiv
24976 #undef THUMB_VARIANT
24977 #define THUMB_VARIANT & arm_ext_div
24978
24979 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
24980 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
24981
24982 /* ARM V6M/V7 instructions. */
24983 #undef ARM_VARIANT
24984 #define ARM_VARIANT & arm_ext_barrier
24985 #undef THUMB_VARIANT
24986 #define THUMB_VARIANT & arm_ext_barrier
24987
24988 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
24989 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
24990 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
24991
24992 /* ARM V7 instructions. */
24993 #undef ARM_VARIANT
24994 #define ARM_VARIANT & arm_ext_v7
24995 #undef THUMB_VARIANT
24996 #define THUMB_VARIANT & arm_ext_v7
24997
24998 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
24999 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
25000
25001 #undef ARM_VARIANT
25002 #define ARM_VARIANT & arm_ext_mp
25003 #undef THUMB_VARIANT
25004 #define THUMB_VARIANT & arm_ext_mp
25005
25006 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
25007
25008 /* AArchv8 instructions. */
25009 #undef ARM_VARIANT
25010 #define ARM_VARIANT & arm_ext_v8
25011
25012 /* Instructions shared between armv8-a and armv8-m. */
25013 #undef THUMB_VARIANT
25014 #define THUMB_VARIANT & arm_ext_atomics
25015
25016 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
25017 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
25018 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
25019 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
25020 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
25021 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
25022 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
25023 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
25024 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
25025 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
25026 stlex, t_stlex),
25027 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
25028 stlex, t_stlex),
25029 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
25030 stlex, t_stlex),
25031 #undef THUMB_VARIANT
25032 #define THUMB_VARIANT & arm_ext_v8
25033
25034 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
25035 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
25036 ldrexd, t_ldrexd),
25037 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
25038 strexd, t_strexd),
25039 #undef THUMB_VARIANT
25040 #define THUMB_VARIANT & arm_ext_v8r
25041 #undef ARM_VARIANT
25042 #define ARM_VARIANT & arm_ext_v8r
25043
25044 /* ARMv8-R instructions. */
25045 TUF("dfb", 57ff04c, f3bf8f4c, 0, (), noargs, noargs),
25046
25047 /* Defined in V8 but is in undefined encoding space for earlier
25048 architectures. However earlier architectures are required to treat
25049 this instruction as a semihosting trap as well. Hence while not explicitly
25050 defined as such, it is in fact correct to define the instruction for all
25051 architectures. */
25052 #undef THUMB_VARIANT
25053 #define THUMB_VARIANT & arm_ext_v1
25054 #undef ARM_VARIANT
25055 #define ARM_VARIANT & arm_ext_v1
25056 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
25057
25058 /* ARMv8 T32 only. */
25059 #undef ARM_VARIANT
25060 #define ARM_VARIANT NULL
25061 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
25062 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
25063 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
25064
25065 /* FP for ARMv8. */
25066 #undef ARM_VARIANT
25067 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
25068 #undef THUMB_VARIANT
25069 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
25070
25071 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
25072 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
25073 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
25074 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
25075 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
25076 mnCE(vrintz, _vrintr, 2, (RNSDQMQ, oRNSDQMQ), vrintz),
25077 mnCE(vrintx, _vrintr, 2, (RNSDQMQ, oRNSDQMQ), vrintx),
25078 mnUF(vrinta, _vrinta, 2, (RNSDQMQ, oRNSDQMQ), vrinta),
25079 mnUF(vrintn, _vrinta, 2, (RNSDQMQ, oRNSDQMQ), vrintn),
25080 mnUF(vrintp, _vrinta, 2, (RNSDQMQ, oRNSDQMQ), vrintp),
25081 mnUF(vrintm, _vrinta, 2, (RNSDQMQ, oRNSDQMQ), vrintm),
25082
25083 /* Crypto v1 extensions. */
25084 #undef ARM_VARIANT
25085 #define ARM_VARIANT & fpu_crypto_ext_armv8
25086 #undef THUMB_VARIANT
25087 #define THUMB_VARIANT & fpu_crypto_ext_armv8
25088
25089 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
25090 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
25091 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
25092 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
25093 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
25094 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
25095 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
25096 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
25097 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
25098 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
25099 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
25100 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
25101 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
25102 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
25103
25104 #undef ARM_VARIANT
25105 #define ARM_VARIANT & arm_ext_crc
25106 #undef THUMB_VARIANT
25107 #define THUMB_VARIANT & arm_ext_crc
25108 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
25109 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
25110 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
25111 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
25112 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
25113 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
25114
25115 /* ARMv8.2 RAS extension. */
25116 #undef ARM_VARIANT
25117 #define ARM_VARIANT & arm_ext_ras
25118 #undef THUMB_VARIANT
25119 #define THUMB_VARIANT & arm_ext_ras
25120 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
25121
25122 #undef ARM_VARIANT
25123 #define ARM_VARIANT & arm_ext_v8_3
25124 #undef THUMB_VARIANT
25125 #define THUMB_VARIANT & arm_ext_v8_3
25126 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
25127
25128 #undef ARM_VARIANT
25129 #define ARM_VARIANT & fpu_neon_ext_dotprod
25130 #undef THUMB_VARIANT
25131 #define THUMB_VARIANT & fpu_neon_ext_dotprod
25132 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
25133 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
25134
25135 #undef ARM_VARIANT
25136 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
25137 #undef THUMB_VARIANT
25138 #define THUMB_VARIANT NULL
25139
25140 cCE("wfs", e200110, 1, (RR), rd),
25141 cCE("rfs", e300110, 1, (RR), rd),
25142 cCE("wfc", e400110, 1, (RR), rd),
25143 cCE("rfc", e500110, 1, (RR), rd),
25144
25145 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
25146 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
25147 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
25148 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
25149
25150 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
25151 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
25152 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
25153 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
25154
25155 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
25156 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
25157 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
25158 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
25159 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
25160 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
25161 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
25162 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
25163 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
25164 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
25165 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
25166 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
25167
25168 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
25169 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
25170 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
25171 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
25172 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
25173 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
25174 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
25175 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
25176 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
25177 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
25178 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
25179 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
25180
25181 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
25182 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
25183 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
25184 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
25185 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
25186 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
25187 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
25188 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
25189 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
25190 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
25191 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
25192 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
25193
25194 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
25195 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
25196 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
25197 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
25198 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
25199 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
25200 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
25201 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
25202 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
25203 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
25204 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
25205 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
25206
25207 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
25208 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
25209 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
25210 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
25211 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
25212 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
25213 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
25214 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
25215 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
25216 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
25217 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
25218 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
25219
25220 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
25221 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
25222 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
25223 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
25224 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
25225 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
25226 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
25227 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
25228 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
25229 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
25230 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
25231 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
25232
25233 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
25234 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
25235 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
25236 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
25237 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
25238 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
25239 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
25240 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
25241 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
25242 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
25243 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
25244 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
25245
25246 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
25247 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
25248 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
25249 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
25250 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
25251 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
25252 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
25253 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
25254 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
25255 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
25256 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
25257 cCL("expez",	e788160, 2, (RF, RF_IF),      rd_rm),  /* Was "expdz" — a duplicate of the entry above (e7081e0); e788160 is the extended-precision zero-rounding variant, so the mnemonic must be "expez" per the s/sp/sm/sz/d/dp/dm/dz/e/ep/em/ez suffix pattern used by every other FPA monadic group.  */
25258
25259 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
25260 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
25261 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
25262 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
25263 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
25264 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
25265 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
25266 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
25267 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
25268 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
25269 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
25270 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
25271
25272 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
25273 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
25274 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
25275 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
25276 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
25277 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
25278 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
25279 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
25280 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
25281 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
25282 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
25283 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
25284
25285 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
25286 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
25287 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
25288 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
25289 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
25290 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
25291 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
25292 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
25293 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
25294 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
25295 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
25296 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
25297
25298 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
25299 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
25300 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
25301 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
25302 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
25303 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
25304 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
25305 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
25306 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
25307 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
25308 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
25309 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
25310
25311 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
25312 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
25313 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
25314 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
25315 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
25316 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
25317 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
25318 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
25319 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
25320 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
25321 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
25322 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
25323
25324 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
25325 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
25326 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
25327 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
25328 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
25329 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
25330 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
25331 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
25332 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
25333 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
25334 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
25335 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
25336
25337 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
25338 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
25339 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
25340 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
25341 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
25342 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
25343 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
25344 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
25345 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
25346 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
25347 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
25348 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
25349
25350 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
25351 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
25352 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
25353 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
25354 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
25355 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
25356 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
25357 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
25358 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
25359 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
25360 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
25361 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
25362
25363 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
25364 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
25365 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
25366 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
25367 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
25368 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25369 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25370 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25371 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
25372 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
25373 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
25374 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
25375
25376 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
25377 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
25378 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
25379 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
25380 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
25381 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25382 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25383 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25384 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
25385 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
25386 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
25387 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
25388
25389 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
25390 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
25391 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
25392 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
25393 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
25394 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25395 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25396 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25397 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
25398 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
25399 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
25400 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
25401
25402 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
25403 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
25404 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
25405 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
25406 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
25407 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25408 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25409 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25410 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
25411 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
25412 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
25413 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
25414
25415 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
25416 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
25417 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
25418 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
25419 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
25420 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25421 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25422 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25423 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
25424 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
25425 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
25426 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
25427
25428 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
25429 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
25430 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
25431 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
25432 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
25433 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25434 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25435 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25436 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
25437 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
25438 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
25439 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
25440
25441 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
25442 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
25443 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
25444 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
25445 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
25446 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25447 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25448 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25449 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
25450 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
25451 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
25452 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
25453
25454 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
25455 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
25456 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
25457 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
25458 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
25459 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25460 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25461 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25462 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
25463 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
25464 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
25465 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
25466
25467 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
25468 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
25469 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
25470 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
25471 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
25472 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25473 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25474 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25475 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
25476 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
25477 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
25478 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
25479
25480 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
25481 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
25482 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
25483 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
25484 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
25485 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25486 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25487 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25488 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
25489 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
25490 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
25491 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
25492
25493 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
25494 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
25495 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
25496 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
25497 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
25498 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25499 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25500 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25501 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
25502 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
25503 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
25504 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
25505
25506 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
25507 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
25508 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
25509 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
25510 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
25511 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25512 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25513 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25514 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
25515 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
25516 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
25517 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
25518
25519 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
25520 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
25521 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
25522 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
25523 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
25524 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25525 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25526 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25527 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
25528 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
25529 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
25530 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
25531
25532 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
25533 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
25534 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
25535 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
25536
25537 cCL("flts", e000110, 2, (RF, RR), rn_rd),
25538 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
25539 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
25540 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
25541 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
25542 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
25543 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
25544 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
25545 cCL("flte", e080110, 2, (RF, RR), rn_rd),
25546 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
25547 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
25548 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
25549
25550 /* The implementation of the FIX instruction is broken on some
25551 assemblers, in that it accepts a precision specifier as well as a
25552 rounding specifier, despite the fact that this is meaningless.
25553 To be more compatible, we accept it as well, though of course it
25554 does not set any bits. */
25555 cCE("fix", e100110, 2, (RR, RF), rd_rm),
25556 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
25557 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
25558 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
25559 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
25560 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
25561 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
25562 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
25563 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
25564 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
25565 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
25566 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
25567 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
25568
25569 /* Instructions that were new with the real FPA, call them V2. */
25570 #undef ARM_VARIANT
25571 #define ARM_VARIANT & fpu_fpa_ext_v2
25572
25573 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25574 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25575 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25576 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25577 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25578 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25579
25580 #undef ARM_VARIANT
25581 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
25582 #undef THUMB_VARIANT
25583 #define THUMB_VARIANT & arm_ext_v6t2
25584 mcCE(vmrs, ef00a10, 2, (APSR_RR, RVC), vmrs),
25585 mcCE(vmsr, ee00a10, 2, (RVC, RR), vmsr),
25586 mcCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
25587 mcCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
25588 mcCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
25589 mcCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
25590
25591 /* Memory operations. */
25592 mcCE(fldmias, c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
25593 mcCE(fldmdbs, d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
25594 mcCE(fstmias, c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
25595 mcCE(fstmdbs, d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
25596 #undef THUMB_VARIANT
25597
25598 /* Moves and type conversions. */
25599 cCE("fmstat", ef1fa10, 0, (), noargs),
25600 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
25601 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
25602 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
25603 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
25604 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
25605 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
25606 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
25607 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
25608
25609 /* Memory operations. */
25610 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
25611 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
25612 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
25613 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
25614 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
25615 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
25616 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
25617 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
25618 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
25619 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
25620 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
25621 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
25622
25623 /* Monadic operations. */
25624 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
25625 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
25626 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
25627
25628 /* Dyadic operations. */
25629 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25630 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25631 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25632 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25633 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25634 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25635 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25636 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25637 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
25638
25639 /* Comparisons. */
25640 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
25641 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
25642 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
25643 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
25644
25645 /* Double precision load/store are still present on single precision
25646 implementations. */
25647 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
25648 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
25649 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
25650 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
25651 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
25652 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
25653 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
25654 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
25655
25656 #undef ARM_VARIANT
25657 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
25658
25659 /* Moves and type conversions. */
25660 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
25661 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
25662 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
25663 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
25664 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
25665 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
25666 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
25667 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
25668 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
25669 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
25670 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
25671 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
25672
25673 /* Monadic operations. */
25674 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
25675 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
25676 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
25677
25678 /* Dyadic operations. */
25679 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25680 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25681 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25682 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25683 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25684 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25685 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25686 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25687 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
25688
25689 /* Comparisons. */
25690 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
25691 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
25692 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
25693 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
25694
25695 /* Instructions which may belong to either the Neon or VFP instruction sets.
25696 Individual encoder functions perform additional architecture checks. */
25697 #undef ARM_VARIANT
25698 #define ARM_VARIANT & fpu_vfp_ext_v1xd
25699 #undef THUMB_VARIANT
25700 #define THUMB_VARIANT & arm_ext_v6t2
25701
25702 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25703 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25704 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25705 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25706 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25707 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25708
25709 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
25710 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
25711
25712 #undef THUMB_VARIANT
25713 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
25714
25715 /* These mnemonics are unique to VFP. */
25716 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
25717 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
25718 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25719 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25720 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25721 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
25722
25723 /* Mnemonics shared by Neon and VFP. */
25724 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
25725
25726 mnCEF(vcvt, _vcvt, 3, (RNSDQMQ, RNSDQMQ, oI32z), neon_cvt),
25727 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
25728 MNCEF(vcvtb, eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtb),
25729 MNCEF(vcvtt, eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtt),
25730
25731
25732 /* NOTE: All VMOV encoding is special-cased! */
25733 NCE(vmovq, 0, 1, (VMOV), neon_mov),
25734
25735 #undef THUMB_VARIANT
25736 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
25737 by different feature bits. Since we are setting the Thumb guard, we can
25738 require Thumb-1 which makes it a nop guard and set the right feature bit in
25739 do_vldr_vstr (). */
25740 #define THUMB_VARIANT & arm_ext_v4t
25741 NCE(vldr, d100b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
25742 NCE(vstr, d000b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
25743
25744 #undef ARM_VARIANT
25745 #define ARM_VARIANT & arm_ext_fp16
25746 #undef THUMB_VARIANT
25747 #define THUMB_VARIANT & arm_ext_fp16
25748 /* New instructions added from v8.2, allowing the extraction and insertion of
25749 the upper 16 bits of a 32-bit vector register. */
25750 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
25751 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
25752
25753 /* New backported fma/fms instructions optional in v8.2. */
25754 NUF (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
25755 NUF (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
25756
25757 #undef THUMB_VARIANT
25758 #define THUMB_VARIANT & fpu_neon_ext_v1
25759 #undef ARM_VARIANT
25760 #define ARM_VARIANT & fpu_neon_ext_v1
25761
25762 /* Data processing with three registers of the same length. */
25763 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
25764 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
25765 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
25766 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
25767 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
25768 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
25769 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
25770 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
25771 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
25772 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
25773 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
25774 /* If not immediate, fall back to neon_dyadic_i64_su.
25775 shl should accept I8 I16 I32 I64,
25776 qshl should accept S8 S16 S32 S64 U8 U16 U32 U64. */
25777 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl),
25778 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl),
25779 /* Logic ops, types optional & ignored. */
25780 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
25781 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
25782 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
25783 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
25784 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
25785 /* Bitfield ops, untyped. */
25786 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25787 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
25788 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25789 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
25790 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25791 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
25792 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
25793 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
25794 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
25795 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
25796 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
25797 back to neon_dyadic_if_su. */
25798 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
25799 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
25800 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
25801 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
25802 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
25803 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
25804 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
25805 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
25806 /* Comparison. Type I8 I16 I32 F32. */
25807 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
25808 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
25809 /* As above, D registers only. */
25810 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
25811 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
25812 /* Int and float variants, signedness unimportant. */
25813 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
25814 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
25815 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
25816 /* Add/sub take types I8 I16 I32 I64 F32. */
25817 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
25818 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
25819 /* vtst takes sizes 8, 16, 32. */
25820 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
25821 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
25822 /* VMUL takes I8 I16 I32 F32 P8. */
25823 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
25824 /* VQD{R}MULH takes S16 S32. */
25825 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
25826 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
25827 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
25828 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
25829 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
25830 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
25831 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
25832 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
25833 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
25834 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
25835 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
25836 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
25837 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
25838 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
25839 /* ARM v8.1 extension. */
25840 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
25841 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
25842 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
25843
25844 /* Two address, int/float. Types S8 S16 S32 F32. */
25845 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
25846 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
25847
25848 /* Data processing with two registers and a shift amount. */
25849 /* Right shifts, and variants with rounding.
25850 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
25851 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
25852 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
25853 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
25854 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
25855 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
25856 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
25857 /* Shift and insert. Sizes accepted 8 16 32 64. */
25858 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
25859 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
25860 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
25861 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
25862 /* Right shift immediate, saturating & narrowing, with rounding variants.
25863 Types accepted S16 S32 S64 U16 U32 U64. */
25864 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
25865 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
25866 /* As above, unsigned. Types accepted S16 S32 S64. */
25867 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
25868 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
25869 /* Right shift narrowing. Types accepted I16 I32 I64. */
25870 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
25871 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
25872 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
25873 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
25874 /* CVT with optional immediate for fixed-point variant. */
25875 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
25876
25877 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
25878
25879 /* Data processing, three registers of different lengths. */
25880 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
25881 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
25882 /* If not scalar, fall back to neon_dyadic_long.
25883 Vector types as above, scalar types S16 S32 U16 U32. */
25884 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
25885 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
25886 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
25887 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
25888 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
25889 /* Dyadic, narrowing insns. Types I16 I32 I64. */
25890 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
25891 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
25892 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
25893 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
25894 /* Saturating doubling multiplies. Types S16 S32. */
25895 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25896 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25897 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25898 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
25899 S16 S32 U16 U32. */
25900 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
25901
25902 /* Extract. Size 8. */
25903 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
25904 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
25905
25906 /* Two registers, miscellaneous. */
25907 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
25908 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
25909 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
25910 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
25911 /* Vector replicate. Sizes 8 16 32. */
25912 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
25913 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
25914 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
25915 /* VMOVN. Types I16 I32 I64. */
25916 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
25917 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
25918 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
25919 /* VQMOVUN. Types S16 S32 S64. */
25920 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
25921 /* VZIP / VUZP. Sizes 8 16 32. */
25922 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
25923 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
25924 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
25925 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
25926 /* VQABS / VQNEG. Types S8 S16 S32. */
25927 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
25928 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
25929 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
25930 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
25931 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
25932 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
25933 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
25934 /* Reciprocal estimates. Types U32 F16 F32. */
25935 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
25936 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
25937 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
25938 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
25939 /* VCLS. Types S8 S16 S32. */
25940 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
25941 /* VCLZ. Types I8 I16 I32. */
25942 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
25943 /* VCNT. Size 8. */
25944 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
25945 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
25946 /* Two address, untyped. */
25947 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
25948 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
25949 /* VTRN. Sizes 8 16 32. */
25950 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
25951 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
25952
25953 /* Table lookup. Size 8. */
25954 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
25955 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
25956
25957 #undef THUMB_VARIANT
25958 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
25959 #undef ARM_VARIANT
25960 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
25961
25962 /* Neon element/structure load/store. */
25963 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
25964 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
25965 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
25966 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
25967 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
25968 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
25969 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
25970 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
25971
25972 #undef THUMB_VARIANT
25973 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
25974 #undef ARM_VARIANT
25975 #define ARM_VARIANT & fpu_vfp_ext_v3xd
25976 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
25977 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
25978 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
25979 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
25980 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
25981 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
25982 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
25983 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
25984 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
25985
25986 #undef THUMB_VARIANT
25987 #define THUMB_VARIANT & fpu_vfp_ext_v3
25988 #undef ARM_VARIANT
25989 #define ARM_VARIANT & fpu_vfp_ext_v3
25990
25991 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
25992 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
25993 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
25994 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
25995 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
25996 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
25997 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
25998 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
25999 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
26000
26001 #undef ARM_VARIANT
26002 #define ARM_VARIANT & fpu_vfp_ext_fma
26003 #undef THUMB_VARIANT
26004 #define THUMB_VARIANT & fpu_vfp_ext_fma
26005 /* Mnemonics shared by Neon, VFP, MVE and BF16. These are included in the
26006 VFP FMA variant; NEON and VFP FMA always includes the NEON
26007 FMA instructions. */
26008 mnCEF(vfma, _vfma, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_fmac),
26009 TUF ("vfmat", c300850, fc300850, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), mve_vfma, mve_vfma),
26010 mnCEF(vfms, _vfms, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ), neon_fmac),
26011
26012 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
26013 the v form should always be used. */
26014 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
26015 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
26016 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
26017 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
26018 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
26019 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
26020
26021 #undef THUMB_VARIANT
26022 #undef ARM_VARIANT
26023 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
26024
26025 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
26026 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
26027 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
26028 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
26029 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
26030 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
26031 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
26032 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
26033
26034 #undef ARM_VARIANT
26035 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
26036
26037 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
26038 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
26039 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
26040 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
26041 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
26042 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
26043 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
26044 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
26045 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
26046 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
26047 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
26048 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
26049 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
26050 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
26051 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
26052 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
26053 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
26054 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
26055 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
26056 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
26057 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
26058 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
26059 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
26060 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
26061 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
26062 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
26063 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
26064 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
26065 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
26066 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
26067 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
26068 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
26069 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
26070 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
26071 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
26072 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
26073 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
26074 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26075 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26076 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26077 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26078 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26079 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26080 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26081 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26082 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26083 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
26084 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26085 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26086 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26087 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26088 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26089 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26090 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26091 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26092 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26093 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26094 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26095 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26096 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26097 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26098 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26099 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26100 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26101 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26102 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26103 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
26104 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
26105 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
26106 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
26107 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26108 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26109 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26110 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26111 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26112 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26113 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26114 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26115 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26116 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26117 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26118 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26119 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26120 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26121 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26122 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26123 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26124 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26125 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
26126 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26127 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26128 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26129 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26130 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26131 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26132 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26133 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26134 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26135 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26136 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26137 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26138 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26139 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26140 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26141 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26142 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26143 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26144 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26145 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26146 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26147 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
26148 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26149 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26150 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26151 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26152 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26153 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26154 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26155 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26156 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26157 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26158 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26159 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26160 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26161 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26162 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26163 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26164 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26165 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
26166 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
26167 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
26168 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
26169 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
26170 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26171 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26172 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26173 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26174 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26175 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26176 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26177 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26178 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26179 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
26180 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
26181 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
26182 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
26183 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
26184 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
26185 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26186 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26187 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26188 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
26189 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
26190 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
26191 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
26192 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
26193 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
26194 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26195 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26196 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26197 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26198 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
26199
26200 #undef ARM_VARIANT
26201 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
26202
26203 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
26204 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
26205 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
26206 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
26207 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
26208 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
26209 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26210 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26211 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26212 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26213 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26214 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26215 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26216 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26217 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26218 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26219 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26220 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26221 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26222 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26223 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
26224 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26225 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26226 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26227 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26228 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26229 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26230 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26231 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26232 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26233 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26234 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26235 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26236 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26237 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26238 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26239 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26240 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26241 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26242 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26243 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26244 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26245 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26246 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26247 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26248 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26249 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26250 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26251 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26252 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26253 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26254 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26255 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26256 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26257 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26258 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26259 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
26260
26261 #undef ARM_VARIANT
26262 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
26263
26264 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
26265 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
26266 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
26267 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
26268 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
26269 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
26270 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
26271 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
26272 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
26273 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
26274 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
26275 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
26276 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
26277 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
26278 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
26279 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
26280 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
26281 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
26282 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
26283 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
26284 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
26285 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
26286 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
26287 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
26288 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
26289 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
26290 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
26291 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
26292 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
26293 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
26294 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
26295 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
26296 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
26297 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
26298 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
26299 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
26300 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
26301 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
26302 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
26303 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
26304 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
26305 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
26306 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
26307 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
26308 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
26309 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
26310 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
26311 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
26312 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
26313 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
26314 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
26315 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
26316 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
26317 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
26318 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
26319 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
26320 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
26321 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
26322 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
26323 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
26324 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
26325 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
26326 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
26327 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
26328 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
26329 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
26330 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
26331 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
26332 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
26333 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
26334 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
26335 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
26336 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
26337 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
26338 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
26339 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
26340
26341 /* ARMv8.5-A instructions. */
26342 #undef ARM_VARIANT
26343 #define ARM_VARIANT & arm_ext_sb
26344 #undef THUMB_VARIANT
26345 #define THUMB_VARIANT & arm_ext_sb
26346 TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
26347
26348 #undef ARM_VARIANT
26349 #define ARM_VARIANT & arm_ext_predres
26350 #undef THUMB_VARIANT
26351 #define THUMB_VARIANT & arm_ext_predres
26352 CE("cfprctx", e070f93, 1, (RRnpc), rd),
26353 CE("dvprctx", e070fb3, 1, (RRnpc), rd),
26354 CE("cpprctx", e070ff3, 1, (RRnpc), rd),
26355
26356 /* ARMv8-M instructions. */
26357 #undef ARM_VARIANT
26358 #define ARM_VARIANT NULL
26359 #undef THUMB_VARIANT
26360 #define THUMB_VARIANT & arm_ext_v8m
26361 ToU("sg", e97fe97f, 0, (), noargs),
26362 ToC("blxns", 4784, 1, (RRnpc), t_blx),
26363 ToC("bxns", 4704, 1, (RRnpc), t_bx),
26364 ToC("tt", e840f000, 2, (RRnpc, RRnpc), tt),
26365 ToC("ttt", e840f040, 2, (RRnpc, RRnpc), tt),
26366 ToC("tta", e840f080, 2, (RRnpc, RRnpc), tt),
26367 ToC("ttat", e840f0c0, 2, (RRnpc, RRnpc), tt),
26368
26369 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
26370 instructions behave as nop if no VFP is present. */
26371 #undef THUMB_VARIANT
26372 #define THUMB_VARIANT & arm_ext_v8m_main
26373 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
26374 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
26375
26376 /* Armv8.1-M Mainline instructions. */
26377 #undef THUMB_VARIANT
26378 #define THUMB_VARIANT & arm_ext_v8_1m_main
26379 toU("aut", _aut, 3, (R12, LR, SP), t_pacbti),
26380 toU("autg", _autg, 3, (RR, RR, RR), t_pacbti_nonop),
26381 ToU("bti", f3af800f, 0, (), noargs),
26382 toU("bxaut", _bxaut, 3, (RR, RR, RR), t_pacbti_nonop),
26383 toU("pac", _pac, 3, (R12, LR, SP), t_pacbti),
26384 toU("pacbti", _pacbti, 3, (R12, LR, SP), t_pacbti),
26385 toU("pacg", _pacg, 3, (RR, RR, RR), t_pacbti_pacg),
26386 toU("cinc", _cinc, 3, (RRnpcsp, RR_ZR, COND), t_cond),
26387 toU("cinv", _cinv, 3, (RRnpcsp, RR_ZR, COND), t_cond),
26388 toU("cneg", _cneg, 3, (RRnpcsp, RR_ZR, COND), t_cond),
26389 toU("csel", _csel, 4, (RRnpcsp, RR_ZR, RR_ZR, COND), t_cond),
26390 toU("csetm", _csetm, 2, (RRnpcsp, COND), t_cond),
26391 toU("cset", _cset, 2, (RRnpcsp, COND), t_cond),
26392 toU("csinc", _csinc, 4, (RRnpcsp, RR_ZR, RR_ZR, COND), t_cond),
26393 toU("csinv", _csinv, 4, (RRnpcsp, RR_ZR, RR_ZR, COND), t_cond),
26394 toU("csneg", _csneg, 4, (RRnpcsp, RR_ZR, RR_ZR, COND), t_cond),
26395
26396 toC("bf", _bf, 2, (EXPs, EXPs), t_branch_future),
26397 toU("bfcsel", _bfcsel, 4, (EXPs, EXPs, EXPs, COND), t_branch_future),
26398 toC("bfx", _bfx, 2, (EXPs, RRnpcsp), t_branch_future),
26399 toC("bfl", _bfl, 2, (EXPs, EXPs), t_branch_future),
26400 toC("bflx", _bflx, 2, (EXPs, RRnpcsp), t_branch_future),
26401
26402 toU("dls", _dls, 2, (LR, RRnpcsp), t_loloop),
26403 toU("wls", _wls, 3, (LR, RRnpcsp, EXP), t_loloop),
26404 toU("le", _le, 2, (oLR, EXP), t_loloop),
26405
26406 ToC("clrm", e89f0000, 1, (CLRMLST), t_clrm),
26407 ToC("vscclrm", ec9f0a00, 1, (VRSDVLST), t_vscclrm),
26408
26409 #undef THUMB_VARIANT
26410 #define THUMB_VARIANT & mve_ext
26411 ToC("lsll", ea50010d, 3, (RRe, RRo, RRnpcsp_I32), mve_scalar_shift),
26412 ToC("lsrl", ea50011f, 3, (RRe, RRo, I32), mve_scalar_shift),
26413 ToC("asrl", ea50012d, 3, (RRe, RRo, RRnpcsp_I32), mve_scalar_shift),
26414 ToC("uqrshll", ea51010d, 4, (RRe, RRo, I48_I64, RRnpcsp), mve_scalar_shift1),
26415 ToC("sqrshrl", ea51012d, 4, (RRe, RRo, I48_I64, RRnpcsp), mve_scalar_shift1),
26416 ToC("uqshll", ea51010f, 3, (RRe, RRo, I32), mve_scalar_shift),
26417 ToC("urshrl", ea51011f, 3, (RRe, RRo, I32), mve_scalar_shift),
26418 ToC("srshrl", ea51012f, 3, (RRe, RRo, I32), mve_scalar_shift),
26419 ToC("sqshll", ea51013f, 3, (RRe, RRo, I32), mve_scalar_shift),
26420 ToC("uqrshl", ea500f0d, 2, (RRnpcsp, RRnpcsp), mve_scalar_shift),
26421 ToC("sqrshr", ea500f2d, 2, (RRnpcsp, RRnpcsp), mve_scalar_shift),
26422 ToC("uqshl", ea500f0f, 2, (RRnpcsp, I32), mve_scalar_shift),
26423 ToC("urshr", ea500f1f, 2, (RRnpcsp, I32), mve_scalar_shift),
26424 ToC("srshr", ea500f2f, 2, (RRnpcsp, I32), mve_scalar_shift),
26425 ToC("sqshl", ea500f3f, 2, (RRnpcsp, I32), mve_scalar_shift),
26426
26427 ToC("vpt", ee410f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26428 ToC("vptt", ee018f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26429 ToC("vpte", ee418f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26430 ToC("vpttt", ee014f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26431 ToC("vptte", ee01cf00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26432 ToC("vptet", ee41cf00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26433 ToC("vptee", ee414f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26434 ToC("vptttt", ee012f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26435 ToC("vpttte", ee016f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26436 ToC("vpttet", ee01ef00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26437 ToC("vpttee", ee01af00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26438 ToC("vptett", ee41af00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26439 ToC("vptete", ee41ef00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26440 ToC("vpteet", ee416f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26441 ToC("vpteee", ee412f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26442
26443 ToC("vpst", fe710f4d, 0, (), mve_vpt),
26444 ToC("vpstt", fe318f4d, 0, (), mve_vpt),
26445 ToC("vpste", fe718f4d, 0, (), mve_vpt),
26446 ToC("vpsttt", fe314f4d, 0, (), mve_vpt),
26447 ToC("vpstte", fe31cf4d, 0, (), mve_vpt),
26448 ToC("vpstet", fe71cf4d, 0, (), mve_vpt),
26449 ToC("vpstee", fe714f4d, 0, (), mve_vpt),
26450 ToC("vpstttt", fe312f4d, 0, (), mve_vpt),
26451 ToC("vpsttte", fe316f4d, 0, (), mve_vpt),
26452 ToC("vpsttet", fe31ef4d, 0, (), mve_vpt),
26453 ToC("vpsttee", fe31af4d, 0, (), mve_vpt),
26454 ToC("vpstett", fe71af4d, 0, (), mve_vpt),
26455 ToC("vpstete", fe71ef4d, 0, (), mve_vpt),
26456 ToC("vpsteet", fe716f4d, 0, (), mve_vpt),
26457 ToC("vpsteee", fe712f4d, 0, (), mve_vpt),
26458
26459 /* MVE and MVE FP only. */
26460 mToC("vhcadd", ee000f00, 4, (RMQ, RMQ, RMQ, EXPi), mve_vhcadd),
26461 mCEF(vctp, _vctp, 1, (RRnpc), mve_vctp),
26462 mCEF(vadc, _vadc, 3, (RMQ, RMQ, RMQ), mve_vadc),
26463 mCEF(vadci, _vadci, 3, (RMQ, RMQ, RMQ), mve_vadc),
26464 mToC("vsbc", fe300f00, 3, (RMQ, RMQ, RMQ), mve_vsbc),
26465 mToC("vsbci", fe301f00, 3, (RMQ, RMQ, RMQ), mve_vsbc),
26466 mCEF(vmullb, _vmullb, 3, (RMQ, RMQ, RMQ), mve_vmull),
26467 mCEF(vabav, _vabav, 3, (RRnpcsp, RMQ, RMQ), mve_vabav),
26468 mCEF(vmladav, _vmladav, 3, (RRe, RMQ, RMQ), mve_vmladav),
26469 mCEF(vmladava, _vmladava, 3, (RRe, RMQ, RMQ), mve_vmladav),
26470 mCEF(vmladavx, _vmladavx, 3, (RRe, RMQ, RMQ), mve_vmladav),
26471 mCEF(vmladavax, _vmladavax, 3, (RRe, RMQ, RMQ), mve_vmladav),
26472 mCEF(vmlav, _vmladav, 3, (RRe, RMQ, RMQ), mve_vmladav),
26473 mCEF(vmlava, _vmladava, 3, (RRe, RMQ, RMQ), mve_vmladav),
26474 mCEF(vmlsdav, _vmlsdav, 3, (RRe, RMQ, RMQ), mve_vmladav),
26475 mCEF(vmlsdava, _vmlsdava, 3, (RRe, RMQ, RMQ), mve_vmladav),
26476 mCEF(vmlsdavx, _vmlsdavx, 3, (RRe, RMQ, RMQ), mve_vmladav),
26477 mCEF(vmlsdavax, _vmlsdavax, 3, (RRe, RMQ, RMQ), mve_vmladav),
26478
26479 mCEF(vst20, _vst20, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
26480 mCEF(vst21, _vst21, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
26481 mCEF(vst40, _vst40, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
26482 mCEF(vst41, _vst41, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
26483 mCEF(vst42, _vst42, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
26484 mCEF(vst43, _vst43, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
26485 mCEF(vld20, _vld20, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
26486 mCEF(vld21, _vld21, 2, (MSTRLST2, ADDRMVE), mve_vst_vld),
26487 mCEF(vld40, _vld40, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
26488 mCEF(vld41, _vld41, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
26489 mCEF(vld42, _vld42, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
26490 mCEF(vld43, _vld43, 2, (MSTRLST4, ADDRMVE), mve_vst_vld),
26491 mCEF(vstrb, _vstrb, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
26492 mCEF(vstrh, _vstrh, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
26493 mCEF(vstrw, _vstrw, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
26494 mCEF(vstrd, _vstrd, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
26495 mCEF(vldrb, _vldrb, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
26496 mCEF(vldrh, _vldrh, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
26497 mCEF(vldrw, _vldrw, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
26498 mCEF(vldrd, _vldrd, 2, (RMQ, ADDRMVE), mve_vstr_vldr),
26499
26500 mCEF(vmovnt, _vmovnt, 2, (RMQ, RMQ), mve_movn),
26501 mCEF(vmovnb, _vmovnb, 2, (RMQ, RMQ), mve_movn),
26502 mCEF(vbrsr, _vbrsr, 3, (RMQ, RMQ, RR), mve_vbrsr),
26503 mCEF(vaddlv, _vaddlv, 3, (RRe, RRo, RMQ), mve_vaddlv),
26504 mCEF(vaddlva, _vaddlva, 3, (RRe, RRo, RMQ), mve_vaddlv),
26505 mCEF(vaddv, _vaddv, 2, (RRe, RMQ), mve_vaddv),
26506 mCEF(vaddva, _vaddva, 2, (RRe, RMQ), mve_vaddv),
26507 mCEF(vddup, _vddup, 3, (RMQ, RRe, EXPi), mve_viddup),
26508 mCEF(vdwdup, _vdwdup, 4, (RMQ, RRe, RR, EXPi), mve_viddup),
26509 mCEF(vidup, _vidup, 3, (RMQ, RRe, EXPi), mve_viddup),
26510 mCEF(viwdup, _viwdup, 4, (RMQ, RRe, RR, EXPi), mve_viddup),
26511 mToC("vmaxa", ee330e81, 2, (RMQ, RMQ), mve_vmaxa_vmina),
26512 mToC("vmina", ee331e81, 2, (RMQ, RMQ), mve_vmaxa_vmina),
26513 mCEF(vmaxv, _vmaxv, 2, (RR, RMQ), mve_vmaxv),
26514 mCEF(vmaxav, _vmaxav, 2, (RR, RMQ), mve_vmaxv),
26515 mCEF(vminv, _vminv, 2, (RR, RMQ), mve_vmaxv),
26516 mCEF(vminav, _vminav, 2, (RR, RMQ), mve_vmaxv),
26517
26518 mCEF(vmlaldav, _vmlaldav, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26519 mCEF(vmlaldava, _vmlaldava, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26520 mCEF(vmlaldavx, _vmlaldavx, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26521 mCEF(vmlaldavax, _vmlaldavax, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26522 mCEF(vmlalv, _vmlaldav, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26523 mCEF(vmlalva, _vmlaldava, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26524 mCEF(vmlsldav, _vmlsldav, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26525 mCEF(vmlsldava, _vmlsldava, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26526 mCEF(vmlsldavx, _vmlsldavx, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26527 mCEF(vmlsldavax, _vmlsldavax, 4, (RRe, RRo, RMQ, RMQ), mve_vmlaldav),
26528 mToC("vrmlaldavh", ee800f00, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26529 mToC("vrmlaldavha",ee800f20, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26530 mCEF(vrmlaldavhx, _vrmlaldavhx, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26531 mCEF(vrmlaldavhax, _vrmlaldavhax, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26532 mToC("vrmlalvh", ee800f00, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26533 mToC("vrmlalvha", ee800f20, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26534 mCEF(vrmlsldavh, _vrmlsldavh, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26535 mCEF(vrmlsldavha, _vrmlsldavha, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26536 mCEF(vrmlsldavhx, _vrmlsldavhx, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26537 mCEF(vrmlsldavhax, _vrmlsldavhax, 4, (RRe, RR, RMQ, RMQ), mve_vrmlaldavh),
26538
26539 mToC("vmlas", ee011e40, 3, (RMQ, RMQ, RR), mve_vmlas),
26540 mToC("vmulh", ee010e01, 3, (RMQ, RMQ, RMQ), mve_vmulh),
26541 mToC("vrmulh", ee011e01, 3, (RMQ, RMQ, RMQ), mve_vmulh),
26542 mToC("vpnot", fe310f4d, 0, (), mve_vpnot),
26543 mToC("vpsel", fe310f01, 3, (RMQ, RMQ, RMQ), mve_vpsel),
26544
26545 mToC("vqdmladh", ee000e00, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
26546 mToC("vqdmladhx", ee001e00, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
26547 mToC("vqrdmladh", ee000e01, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
26548 mToC("vqrdmladhx",ee001e01, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
26549 mToC("vqdmlsdh", fe000e00, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
26550 mToC("vqdmlsdhx", fe001e00, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
26551 mToC("vqrdmlsdh", fe000e01, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
26552 mToC("vqrdmlsdhx",fe001e01, 3, (RMQ, RMQ, RMQ), mve_vqdmladh),
26553 mToC("vqdmlah", ee000e60, 3, (RMQ, RMQ, RR), mve_vqdmlah),
26554 mToC("vqdmlash", ee001e60, 3, (RMQ, RMQ, RR), mve_vqdmlah),
26555 mToC("vqrdmlash", ee001e40, 3, (RMQ, RMQ, RR), mve_vqdmlah),
26556 mToC("vqdmullt", ee301f00, 3, (RMQ, RMQ, RMQRR), mve_vqdmull),
26557 mToC("vqdmullb", ee300f00, 3, (RMQ, RMQ, RMQRR), mve_vqdmull),
26558 mCEF(vqmovnt, _vqmovnt, 2, (RMQ, RMQ), mve_vqmovn),
26559 mCEF(vqmovnb, _vqmovnb, 2, (RMQ, RMQ), mve_vqmovn),
26560 mCEF(vqmovunt, _vqmovunt, 2, (RMQ, RMQ), mve_vqmovn),
26561 mCEF(vqmovunb, _vqmovunb, 2, (RMQ, RMQ), mve_vqmovn),
26562
26563 mCEF(vshrnt, _vshrnt, 3, (RMQ, RMQ, I32z), mve_vshrn),
26564 mCEF(vshrnb, _vshrnb, 3, (RMQ, RMQ, I32z), mve_vshrn),
26565 mCEF(vrshrnt, _vrshrnt, 3, (RMQ, RMQ, I32z), mve_vshrn),
26566 mCEF(vrshrnb, _vrshrnb, 3, (RMQ, RMQ, I32z), mve_vshrn),
26567 mCEF(vqshrnt, _vqrshrnt, 3, (RMQ, RMQ, I32z), mve_vshrn),
26568 mCEF(vqshrnb, _vqrshrnb, 3, (RMQ, RMQ, I32z), mve_vshrn),
26569 mCEF(vqshrunt, _vqrshrunt, 3, (RMQ, RMQ, I32z), mve_vshrn),
26570 mCEF(vqshrunb, _vqrshrunb, 3, (RMQ, RMQ, I32z), mve_vshrn),
26571 mCEF(vqrshrnt, _vqrshrnt, 3, (RMQ, RMQ, I32z), mve_vshrn),
26572 mCEF(vqrshrnb, _vqrshrnb, 3, (RMQ, RMQ, I32z), mve_vshrn),
26573 mCEF(vqrshrunt, _vqrshrunt, 3, (RMQ, RMQ, I32z), mve_vshrn),
26574 mCEF(vqrshrunb, _vqrshrunb, 3, (RMQ, RMQ, I32z), mve_vshrn),
26575
26576 mToC("vshlc", eea00fc0, 3, (RMQ, RR, I32z), mve_vshlc),
26577 mToC("vshllt", ee201e00, 3, (RMQ, RMQ, I32), mve_vshll),
26578 mToC("vshllb", ee200e00, 3, (RMQ, RMQ, I32), mve_vshll),
26579
26580 toU("dlstp", _dlstp, 2, (LR, RR), t_loloop),
26581 toU("wlstp", _wlstp, 3, (LR, RR, EXP), t_loloop),
26582 toU("letp", _letp, 2, (LR, EXP), t_loloop),
26583 toU("lctp", _lctp, 0, (), t_loloop),
26584
26585 #undef THUMB_VARIANT
26586 #define THUMB_VARIANT & mve_fp_ext
26587 mToC("vcmul", ee300e00, 4, (RMQ, RMQ, RMQ, EXPi), mve_vcmul),
26588 mToC("vfmas", ee311e40, 3, (RMQ, RMQ, RR), mve_vfmas),
26589 mToC("vmaxnma", ee3f0e81, 2, (RMQ, RMQ), mve_vmaxnma_vminnma),
26590 mToC("vminnma", ee3f1e81, 2, (RMQ, RMQ), mve_vmaxnma_vminnma),
26591 mToC("vmaxnmv", eeee0f00, 2, (RR, RMQ), mve_vmaxnmv),
26592 mToC("vmaxnmav",eeec0f00, 2, (RR, RMQ), mve_vmaxnmv),
26593 mToC("vminnmv", eeee0f80, 2, (RR, RMQ), mve_vmaxnmv),
26594 mToC("vminnmav",eeec0f80, 2, (RR, RMQ), mve_vmaxnmv),
26595
26596 #undef ARM_VARIANT
26597 #define ARM_VARIANT & fpu_vfp_ext_v1
26598 #undef THUMB_VARIANT
26599 #define THUMB_VARIANT & arm_ext_v6t2
26600
26601 mcCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
26602
26603 #undef ARM_VARIANT
26604 #define ARM_VARIANT & fpu_vfp_ext_v1xd
26605
26606 mnCEF(vmla, _vmla, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mac_maybe_scalar),
26607 mnCEF(vmul, _vmul, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mul),
26608 MNCE(vmov, 0, 1, (VMOV), neon_mov),
26609 mcCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
26610 mcCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
26611 mcCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
26612
26613 mCEF(vmullt, _vmullt, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ), mve_vmull),
26614 mnCEF(vadd, _vadd, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_addsub_if_i),
26615 mnCEF(vsub, _vsub, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_addsub_if_i),
26616
26617 MNCEF(vabs, 1b10300, 2, (RNSDQMQ, RNSDQMQ), neon_abs_neg),
26618 MNCEF(vneg, 1b10380, 2, (RNSDQMQ, RNSDQMQ), neon_abs_neg),
26619
26620 mCEF(vmovlt, _vmovlt, 1, (VMOV), mve_movl),
26621 mCEF(vmovlb, _vmovlb, 1, (VMOV), mve_movl),
26622
26623 mnCE(vcmp, _vcmp, 3, (RVSD_COND, RSVDMQ_FI0, oRMQRZ), vfp_nsyn_cmp),
26624 mnCE(vcmpe, _vcmpe, 3, (RVSD_COND, RSVDMQ_FI0, oRMQRZ), vfp_nsyn_cmp),
26625
26626 #undef ARM_VARIANT
26627 #define ARM_VARIANT & fpu_vfp_ext_v2
26628
26629 mcCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
26630 mcCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
26631 mcCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
26632 mcCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
26633
26634 #undef ARM_VARIANT
26635 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
26636 mnUF(vcvta, _vcvta, 2, (RNSDQMQ, oRNSDQMQ), neon_cvta),
26637 mnUF(vcvtp, _vcvta, 2, (RNSDQMQ, oRNSDQMQ), neon_cvtp),
26638 mnUF(vcvtn, _vcvta, 3, (RNSDQMQ, oRNSDQMQ, oI32z), neon_cvtn),
26639 mnUF(vcvtm, _vcvta, 2, (RNSDQMQ, oRNSDQMQ), neon_cvtm),
26640 mnUF(vmaxnm, _vmaxnm, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ), vmaxnm),
26641 mnUF(vminnm, _vminnm, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ), vmaxnm),
26642
26643 #undef ARM_VARIANT
26644 #define ARM_VARIANT & fpu_neon_ext_v1
26645 mnUF(vabd, _vabd, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26646 mnUF(vabdl, _vabdl, 3, (RNQMQ, RNDMQ, RNDMQ), neon_dyadic_long),
26647 mnUF(vaddl, _vaddl, 3, (RNSDQMQ, oRNSDMQ, RNSDMQR), neon_dyadic_long),
26648 mnUF(vsubl, _vsubl, 3, (RNSDQMQ, oRNSDMQ, RNSDMQR), neon_dyadic_long),
26649 mnUF(vand, _vand, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26650 mnUF(vbic, _vbic, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26651 mnUF(vorr, _vorr, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26652 mnUF(vorn, _vorn, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26653 mnUF(veor, _veor, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_logic),
26654 MNUF(vcls, 1b00400, 2, (RNDQMQ, RNDQMQ), neon_cls),
26655 MNUF(vclz, 1b00480, 2, (RNDQMQ, RNDQMQ), neon_clz),
26656 mnCE(vdup, _vdup, 2, (RNDQMQ, RR_RNSC), neon_dup),
26657 MNUF(vhadd, 00000000, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i_su),
26658 MNUF(vrhadd, 00000100, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_i_su),
26659 MNUF(vhsub, 00000200, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i_su),
26660 mnUF(vmin, _vmin, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26661 mnUF(vmax, _vmax, 3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26662 MNUF(vqadd, 0000010, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i64_su),
26663 MNUF(vqsub, 0000210, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i64_su),
26664 mnUF(vmvn, _vmvn, 2, (RNDQMQ, RNDQMQ_Ibig), neon_mvn),
26665 MNUF(vqabs, 1b00700, 2, (RNDQMQ, RNDQMQ), neon_sat_abs_neg),
26666 MNUF(vqneg, 1b00780, 2, (RNDQMQ, RNDQMQ), neon_sat_abs_neg),
26667 mnUF(vqrdmlah, _vqrdmlah,3, (RNDQMQ, oRNDQMQ, RNDQ_RNSC_RR), neon_qrdmlah),
26668 mnUF(vqdmulh, _vqdmulh, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_RNSC_RR), neon_qdmulh),
26669 mnUF(vqrdmulh, _vqrdmulh,3, (RNDQMQ, oRNDQMQ, RNDQMQ_RNSC_RR), neon_qdmulh),
26670 MNUF(vqrshl, 0000510, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_rshl),
26671 MNUF(vrshl, 0000500, 3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_rshl),
26672 MNUF(vshr, 0800010, 3, (RNDQMQ, oRNDQMQ, I64z), neon_rshift_round_imm),
26673 MNUF(vrshr, 0800210, 3, (RNDQMQ, oRNDQMQ, I64z), neon_rshift_round_imm),
26674 MNUF(vsli, 1800510, 3, (RNDQMQ, oRNDQMQ, I63), neon_sli),
26675 MNUF(vsri, 1800410, 3, (RNDQMQ, oRNDQMQ, I64z), neon_sri),
26676 MNUF(vrev64, 1b00000, 2, (RNDQMQ, RNDQMQ), neon_rev),
26677 MNUF(vrev32, 1b00080, 2, (RNDQMQ, RNDQMQ), neon_rev),
26678 MNUF(vrev16, 1b00100, 2, (RNDQMQ, RNDQMQ), neon_rev),
26679 mnUF(vshl, _vshl, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_I63b_RR), neon_shl),
26680 mnUF(vqshl, _vqshl, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_I63b_RR), neon_qshl),
26681 MNUF(vqshlu, 1800610, 3, (RNDQMQ, oRNDQMQ, I63), neon_qshlu_imm),
26682
26683 #undef ARM_VARIANT
26684 #define ARM_VARIANT & arm_ext_v8_3
26685 #undef THUMB_VARIANT
26686 #define THUMB_VARIANT & arm_ext_v6t2_v8m
26687 MNUF (vcadd, 0, 4, (RNDQMQ, RNDQMQ, RNDQMQ, EXPi), vcadd),
26688 MNUF (vcmla, 0, 4, (RNDQMQ, RNDQMQ, RNDQMQ_RNSC, EXPi), vcmla),
26689
26690 #undef ARM_VARIANT
26691 #define ARM_VARIANT &arm_ext_bf16
26692 #undef THUMB_VARIANT
26693 #define THUMB_VARIANT &arm_ext_bf16
26694 TUF ("vdot", c000d00, fc000d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), vdot, vdot),
26695 TUF ("vmmla", c000c40, fc000c40, 3, (RNQ, RNQ, RNQ), vmmla, vmmla),
26696 TUF ("vfmab", c300810, fc300810, 3, (RNDQ, RNDQ, RNDQ_RNSC), bfloat_vfma, bfloat_vfma),
26697
26698 #undef ARM_VARIANT
26699 #define ARM_VARIANT &arm_ext_i8mm
26700 #undef THUMB_VARIANT
26701 #define THUMB_VARIANT &arm_ext_i8mm
26702 TUF ("vsmmla", c200c40, fc200c40, 3, (RNQ, RNQ, RNQ), vsmmla, vsmmla),
26703 TUF ("vummla", c200c50, fc200c50, 3, (RNQ, RNQ, RNQ), vummla, vummla),
26704 TUF ("vusmmla", ca00c40, fca00c40, 3, (RNQ, RNQ, RNQ), vsmmla, vsmmla),
26705 TUF ("vusdot", c800d00, fc800d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), vusdot, vusdot),
26706 TUF ("vsudot", c800d10, fc800d10, 3, (RNDQ, RNDQ, RNSC), vsudot, vsudot),
26707
26708 #undef ARM_VARIANT
26709 #undef THUMB_VARIANT
26710 #define THUMB_VARIANT &arm_ext_cde
26711 ToC ("cx1", ee000000, 3, (RCP, APSR_RR, I8191), cx1),
26712 ToC ("cx1a", fe000000, 3, (RCP, APSR_RR, I8191), cx1a),
26713 ToC ("cx1d", ee000040, 4, (RCP, RR, APSR_RR, I8191), cx1d),
26714 ToC ("cx1da", fe000040, 4, (RCP, RR, APSR_RR, I8191), cx1da),
26715
26716 ToC ("cx2", ee400000, 4, (RCP, APSR_RR, APSR_RR, I511), cx2),
26717 ToC ("cx2a", fe400000, 4, (RCP, APSR_RR, APSR_RR, I511), cx2a),
26718 ToC ("cx2d", ee400040, 5, (RCP, RR, APSR_RR, APSR_RR, I511), cx2d),
26719 ToC ("cx2da", fe400040, 5, (RCP, RR, APSR_RR, APSR_RR, I511), cx2da),
26720
26721 ToC ("cx3", ee800000, 5, (RCP, APSR_RR, APSR_RR, APSR_RR, I63), cx3),
26722 ToC ("cx3a", fe800000, 5, (RCP, APSR_RR, APSR_RR, APSR_RR, I63), cx3a),
26723 ToC ("cx3d", ee800040, 6, (RCP, RR, APSR_RR, APSR_RR, APSR_RR, I63), cx3d),
26724 ToC ("cx3da", fe800040, 6, (RCP, RR, APSR_RR, APSR_RR, APSR_RR, I63), cx3da),
26725
26726 mToC ("vcx1", ec200000, 3, (RCP, RNSDMQ, I4095), vcx1),
26727 mToC ("vcx1a", fc200000, 3, (RCP, RNSDMQ, I4095), vcx1),
26728
26729 mToC ("vcx2", ec300000, 4, (RCP, RNSDMQ, RNSDMQ, I127), vcx2),
26730 mToC ("vcx2a", fc300000, 4, (RCP, RNSDMQ, RNSDMQ, I127), vcx2),
26731
26732 mToC ("vcx3", ec800000, 5, (RCP, RNSDMQ, RNSDMQ, RNSDMQ, I15), vcx3),
26733 mToC ("vcx3a", fc800000, 5, (RCP, RNSDMQ, RNSDMQ, RNSDMQ, I15), vcx3),
26734 };
26735
26736 #undef ARM_VARIANT
26737 #undef THUMB_VARIANT
26738 #undef TCE
26739 #undef TUE
26740 #undef TUF
26741 #undef TCC
26742 #undef cCE
26743 #undef cCL
26744 #undef C3E
26745 #undef C3
26746 #undef CE
26747 #undef CM
26748 #undef CL
26749 #undef UE
26750 #undef UF
26751 #undef UT
26752 #undef NUF
26753 #undef nUF
26754 #undef NCE
26755 #undef nCE
26756 #undef OPS0
26757 #undef OPS1
26758 #undef OPS2
26759 #undef OPS3
26760 #undef OPS4
26761 #undef OPS5
26762 #undef OPS6
26763 #undef do_0
26764 #undef ToC
26765 #undef toC
26766 #undef ToU
26767 #undef toU
26768 \f
26769 /* MD interface: bits in the object file. */
26770
26771 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
26772 for use in the a.out file, and stores them in the array pointed to by buf.
26773 This knows about the endian-ness of the target machine and does
26774 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
26775 2 (short) and 4 (long) Floating numbers are put out as a series of
26776 LITTLENUMS (shorts, here at least). */
26777
26778 void
26779 md_number_to_chars (char * buf, valueT val, int n)
26780 {
26781 if (target_big_endian)
26782 number_to_chars_bigendian (buf, val, n);
26783 else
26784 number_to_chars_littleendian (buf, val, n);
26785 }
26786
26787 static valueT
26788 md_chars_to_number (char * buf, int n)
26789 {
26790 valueT result = 0;
26791 unsigned char * where = (unsigned char *) buf;
26792
26793 if (target_big_endian)
26794 {
26795 while (n--)
26796 {
26797 result <<= 8;
26798 result |= (*where++ & 255);
26799 }
26800 }
26801 else
26802 {
26803 while (n--)
26804 {
26805 result <<= 8;
26806 result |= (where[n] & 255);
26807 }
26808 }
26809
26810 return result;
26811 }
26812
26813 /* MD interface: Sections. */
26814
/* Calculate the maximum variable size (i.e., excluding fr_fix)
   that an rs_machine_dependent frag may reach.  Called by the generic
   relaxation machinery to bound how much a frag can grow.  */

unsigned int
arm_frag_max_var (fragS *fragp)
{
  /* We only use rs_machine_dependent for variable-size Thumb instructions,
     which are either THUMB_SIZE (2) or INSN_SIZE (4).

     Note that we generate relaxable instructions even for cases that don't
     really need it, like an immediate that's a trivial constant.  So we're
     overestimating the instruction size for some of those cases.  Rather
     than putting more intelligence here, it would probably be better to
     avoid generating a relaxation frag in the first place when it can be
     determined up front that a short instruction will suffice.  */

  gas_assert (fragp->fr_type == rs_machine_dependent);
  /* Worst case: the frag relaxes to a 32-bit Thumb instruction.  */
  return INSN_SIZE;
}
26834
/* Estimate the size of a frag before relaxing.  Assume everything fits in
   2 bytes (the 16-bit Thumb encoding); the relaxation pass will grow the
   frag to 4 bytes where the wide encoding turns out to be needed.  */

int
md_estimate_size_before_relax (fragS * fragp,
			       segT segtype ATTRIBUTE_UNUSED)
{
  /* fr_var holds the variable part of the frag's size; start optimistic.  */
  fragp->fr_var = 2;
  return 2;
}
26845
/* Convert a machine dependent frag.  Called once relaxation has settled:
   fr_var is now either 2 (the 16-bit Thumb encoding already in the frag
   holds) or 4 (the instruction must be rewritten in its 32-bit Thumb-2
   form).  In the 32-bit case the register fields are transplanted from
   the old 16-bit opcode into the wide encoding; in both cases a fixup is
   emitted so the immediate/offset field is filled in at write time.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The variable part starts right after the fixed part of the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* Re-read the 16-bit opcode emitted at assembly time; its register
     fields are reused when building the 32-bit replacement below.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* NOTE(review): top nibble 4 or 9 appears to select the
	     PC-/SP-relative 16-bit forms, which keep Rd in bits 8-10;
	     the other forms keep Rd in bits 0-2 and Rn in bits 3-5 —
	     confirm against the Thumb encoding tables.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the explicit PC-relative literal load is PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      /* Thumb bits should be set in the frag handling so we process them
	 after all symbols have been seen.  PR gas/25235.  */
      if (exp.X_op == O_symbol
	  && exp.X_add_symbol != NULL
	  && S_IS_DEFINED (exp.X_add_symbol)
	  && THUMB_IS_FUNC (exp.X_add_symbol))
	exp.X_add_number |= 1;

      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Carry the destination register into the wide encoding.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Compensate for the PC offset of the narrow encoding.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs keep Rd in bits 8-10 of the 16-bit form and place it
	     at bit 8 of the wide form; cmp/cmn place the register at Rn
	     (bit 16), hence the extra shift of 8.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Move the condition field into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  /* Carry the destination register over.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Transplant Rd and Rn from the narrow encoding.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 is the S (flag-setting) bit in the wide encoding.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  /* Emit a fixup covering the variable part so the offset/immediate is
     resolved (or a relocation emitted) at write time.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
27027
27028 /* Return the size of a relaxable immediate operand instruction.
27029 SHIFT and SIZE specify the form of the allowable immediate. */
27030 static int
27031 relax_immediate (fragS *fragp, int size, int shift)
27032 {
27033 offsetT offset;
27034 offsetT mask;
27035 offsetT low;
27036
27037 /* ??? Should be able to do better than this. */
27038 if (fragp->fr_symbol)
27039 return 4;
27040
27041 low = (1 << shift) - 1;
27042 mask = (1 << (shift + size)) - (1 << shift);
27043 offset = fragp->fr_offset;
27044 /* Force misaligned offsets to 32-bit variant. */
27045 if (offset & low)
27046 return 4;
27047 if (offset & ~mask)
27048 return 4;
27049 return 2;
27050 }
27051
/* Get the address of a symbol during relaxation.  FRAGP is the frag
   containing the instruction that references the symbol; STRETCH is
   the cumulative amount by which frags processed so far in this pass
   have grown (or shrunk, if negative).  Returns the symbol's predicted
   address for this relaxation pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch toward zero to the alignment
		 boundary; an alignment frag absorbs up to that much
		 movement.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* If the alignment absorbed the entire stretch, nothing
		 past this frag moves at all.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the (possibly reduced) stretch if the symbol's frag
	 actually lies after FRAGP in the chain.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
27101
27102 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
27103 load. */
27104 static int
27105 relax_adr (fragS *fragp, asection *sec, long stretch)
27106 {
27107 addressT addr;
27108 offsetT val;
27109
27110 /* Assume worst case for symbols not known to be in the same section. */
27111 if (fragp->fr_symbol == NULL
27112 || !S_IS_DEFINED (fragp->fr_symbol)
27113 || sec != S_GET_SEGMENT (fragp->fr_symbol)
27114 || S_IS_WEAK (fragp->fr_symbol)
27115 || THUMB_IS_FUNC (fragp->fr_symbol))
27116 return 4;
27117
27118 val = relaxed_symbol_addr (fragp, stretch);
27119 addr = fragp->fr_address + fragp->fr_fix;
27120 addr = (addr + 4) & ~3;
27121 /* Force misaligned targets to 32-bit variant. */
27122 if (val & 3)
27123 return 4;
27124 val -= addr;
27125 if (val < 0 || val > 1020)
27126 return 4;
27127 return 2;
27128 }
27129
27130 /* Return the size of a relaxable add/sub immediate instruction. */
27131 static int
27132 relax_addsub (fragS *fragp, asection *sec)
27133 {
27134 char *buf;
27135 int op;
27136
27137 buf = fragp->fr_literal + fragp->fr_fix;
27138 op = bfd_get_16(sec->owner, buf);
27139 if ((op & 0xf) == ((op >> 4) & 0xf))
27140 return relax_immediate (fragp, 8, 0);
27141 else
27142 return relax_immediate (fragp, 3, 0);
27143 }
27144
27145 /* Return TRUE iff the definition of symbol S could be pre-empted
27146 (overridden) at link or load time. */
27147 static bool
27148 symbol_preemptible (symbolS *s)
27149 {
27150 /* Weak symbols can always be pre-empted. */
27151 if (S_IS_WEAK (s))
27152 return true;
27153
27154 /* Non-global symbols cannot be pre-empted. */
27155 if (! S_IS_EXTERNAL (s))
27156 return false;
27157
27158 #ifdef OBJ_ELF
27159 /* In ELF, a global symbol can be marked protected, or private. In that
27160 case it can't be pre-empted (other definitions in the same link unit
27161 would violate the ODR). */
27162 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
27163 return false;
27164 #endif
27165
27166 /* Other global symbols might be pre-empted. */
27167 return true;
27168 }
27169
27170 /* Return the size of a relaxable branch instruction. BITS is the
27171 size of the offset field in the narrow instruction. */
27172
27173 static int
27174 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
27175 {
27176 addressT addr;
27177 offsetT val;
27178 offsetT limit;
27179
27180 /* Assume worst case for symbols not known to be in the same section. */
27181 if (!S_IS_DEFINED (fragp->fr_symbol)
27182 || sec != S_GET_SEGMENT (fragp->fr_symbol)
27183 || S_IS_WEAK (fragp->fr_symbol))
27184 return 4;
27185
27186 #ifdef OBJ_ELF
27187 /* A branch to a function in ARM state will require interworking. */
27188 if (S_IS_DEFINED (fragp->fr_symbol)
27189 && ARM_IS_FUNC (fragp->fr_symbol))
27190 return 4;
27191 #endif
27192
27193 if (symbol_preemptible (fragp->fr_symbol))
27194 return 4;
27195
27196 val = relaxed_symbol_addr (fragp, stretch);
27197 addr = fragp->fr_address + fragp->fr_fix + 4;
27198 val -= addr;
27199
27200 /* Offset is a signed value *2 */
27201 limit = 1 << bits;
27202 if (val >= limit || val < -limit)
27203 return 4;
27204 return 2;
27205 }
27206
27207
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* fr_subtype holds the Thumb mnemonic being relaxed; each case picks
     the immediate/branch range of the corresponding 16-bit encoding.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit immediate, scaled by 4 (word offsets).  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit immediate, scaled by 4.  */
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit immediate, scaled by 2 (halfword offsets).  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit immediate, byte offsets.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      /* Plain 8-bit immediate.  */
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      /* Unconditional branch: 11-bit signed offset.  */
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      /* Conditional branch: 8-bit signed offset.  */
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      /* ADD/SUB sp, sp, #imm: 7-bit immediate scaled by 4.  */
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
27286
27287 /* Round up a section size to the appropriate boundary. */
27288
27289 valueT
27290 md_section_align (segT segment ATTRIBUTE_UNUSED,
27291 valueT size)
27292 {
27293 return size;
27294 }
27295
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.  Padding in code sections is filled
   with NOP instructions appropriate to the current ARM/Thumb mode
   rather than zero bytes.  */

void
arm_handle_align (fragS * fragP)
{
  /* NOP encodings indexed by [architecture variant][endianness].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1: MOV r0, r0.  */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k: architected NOP.  */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1: MOV r8, r8.  */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2: architected NOP.  */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this alignment requires.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* Clamp to the frag's reserved space; relies on
     MAX_MEM_FOR_RS_ALIGN_CODE being of the form 2^n - 1.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  /* The low bit (after masking off MODE_RECORDED) says whether the
     frag was assembled in Thumb mode.  */
  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Fill any leading sub-instruction-size remainder with zero bytes,
     marked as data so disassemblers do not decode them.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      /* If an odd number of halfwords remains, emit one narrow NOP so
	 the rest can be covered by 4-byte wide NOPs.  */
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
27415
27416 /* Called from md_do_align. Used to create an alignment
27417 frag in a code section. */
27418
27419 void
27420 arm_frag_align_code (int n, int max)
27421 {
27422 char * p;
27423
27424 /* We assume that there will never be a requirement
27425 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
27426 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
27427 {
27428 char err_msg[128];
27429
27430 sprintf (err_msg,
27431 _("alignments greater than %d bytes not supported in .text sections."),
27432 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
27433 as_fatal ("%s", err_msg);
27434 }
27435
27436 p = frag_var (rs_align_code,
27437 MAX_MEM_FOR_RS_ALIGN_CODE,
27438 1,
27439 (relax_substateT) max,
27440 (symbolS *) NULL,
27441 (offsetT) n,
27442 (char *) NULL);
27443 *p = 0;
27444 }
27445
27446 /* Perform target specific initialisation of a frag.
27447 Note - despite the name this initialisation is not done when the frag
27448 is created, but only when its type is assigned. A frag can be created
27449 and used a long time before its type is set, so beware of assuming that
27450 this initialisation is performed first. */
27451
27452 #ifndef OBJ_ELF
/* Non-ELF variant: just capture the current instruction-set mode.
   MODE_RECORDED marks that the mode has been latched into the frag.  */
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
27459
27460 #else /* OBJ_ELF is defined. */
/* ELF variant: in addition to latching the ARM/Thumb mode, emit the
   appropriate mapping symbol ($a/$t/$d) for the frag so disassemblers
   can tell code from data.  */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  bool frag_thumb_mode;

  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* PR 21809: Do not set a mapping state for debug sections
     - it just confuses other tools.  */
  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
    return;

  /* XOR strips the MODE_RECORDED bit (known set here), leaving just
     the thumb flag.  */
  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      /* Alignment padding and fills are data.  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      /* Code alignment is padded with NOPs in the recorded mode.  */
      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
27494
27495 /* When we change sections we need to issue a new mapping symbol. */
27496
27497 void
27498 arm_elf_change_section (void)
27499 {
27500 /* Link an unlinked unwind index table section to the .text section. */
27501 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
27502 && elf_linked_to_section (now_seg) == NULL)
27503 elf_linked_to_section (now_seg) = text_section;
27504 }
27505
27506 int
27507 arm_elf_section_type (const char * str, size_t len)
27508 {
27509 if (len == 5 && startswith (str, "exidx"))
27510 return SHT_ARM_EXIDX;
27511
27512 return -1;
27513 }
27514 \f
27515 /* Code to deal with unwinding tables. */
27516
27517 static void add_unwind_adjustsp (offsetT);
27518
27519 /* Generate any deferred unwind frame offset. */
27520
27521 static void
27522 flush_pending_unwind (void)
27523 {
27524 offsetT offset;
27525
27526 offset = unwind.pending_offset;
27527 unwind.pending_offset = 0;
27528 if (offset != 0)
27529 add_unwind_adjustsp (offset);
27530 }
27531
27532 /* Add an opcode to this list for this function. Two-byte opcodes should
27533 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
27534 order. */
27535
27536 static void
27537 add_unwind_opcode (valueT op, int length)
27538 {
27539 /* Add any deferred stack adjustment. */
27540 if (unwind.pending_offset)
27541 flush_pending_unwind ();
27542
27543 unwind.sp_restored = 0;
27544
27545 if (unwind.opcode_count + length > unwind.opcode_alloc)
27546 {
27547 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
27548 if (unwind.opcodes)
27549 unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
27550 unwind.opcode_alloc);
27551 else
27552 unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
27553 }
27554 while (length > 0)
27555 {
27556 length--;
27557 unwind.opcodes[unwind.opcode_count] = op & 0xff;
27558 op >>= 8;
27559 unwind.opcode_count++;
27560 }
27561 }
27562
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes.
   Positive offsets increase sp (pops), negative ones decrease it.
   Remember the opcode list is built in reverse order, so the opcode
   byte is added *after* its operand bytes.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	/* A zero operand still needs one uleb128 byte.  */
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;	/* Continuation bit.  */
	  n++;
	}
      /* Add the insn.  Operand bytes go in first (most-significant
	 uleb byte first) because the list is reversed on output.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  0x3f pops the maximum for one short op;
	 the second op covers the remainder.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  Encodes (op << 2) + 4 bytes of pop.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      offset = -offset;
      /* Emit maximal 0x100-byte decrements until the rest fits one op.  */
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      /* 0x40..0x7f: sp decrement of (op & 0x3f) * 4 + 4.  */
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
27624
27625 /* Finish the list of unwind opcodes for this function. */
27626
27627 static void
27628 finish_unwind_opcodes (void)
27629 {
27630 valueT op;
27631
27632 if (unwind.fp_used)
27633 {
27634 /* Adjust sp as necessary. */
27635 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
27636 flush_pending_unwind ();
27637
27638 /* After restoring sp from the frame pointer. */
27639 op = 0x90 | unwind.fp_reg;
27640 add_unwind_opcode (op, 1);
27641 }
27642 else
27643 flush_pending_unwind ();
27644 }
27645
27646
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry, so switch to (or create) the matching .ARM.exidx* section;
   otherwise use the .ARM.extab* unwind-info section.  The section name
   is derived from TEXT_SEG's name, and COMDAT/linkonce grouping of the
   text section is mirrored onto the unwind section.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  struct elf_section_match match;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  if (idx)
    {
      /* Index table: .ARM.exidx prefix, SHT_ARM_EXIDX type.  */
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      /* Unwind data: .ARM.extab prefix, plain PROGBITS.  */
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  text_name = segment_name (text_seg);
  /* Plain ".text" contributes no suffix at all.  */
  if (streq (text_name, ".text"))
    text_name = "";

  if (startswith (text_name, ".gnu.linkonce.t."))
    {
      /* Use the linkonce prefix and keep only the function suffix.  */
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  memset (&match, 0, sizeof (match));

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      match.group_name = elf_group_name (text_seg);
      if (match.group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, &match,
			  linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
27713
27714
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  The opcode list accumulated by add_unwind_opcode is
   packed into 32-bit words, MSB first, reversing it back into forward
   order in the process.  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index -2 means ".cantunwind" was given.  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  /* Routine 0 only holds three opcode bytes inline; fall back
	     to routine 1 for longer sequences.  */
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* Round the byte count up to whole words; the field is 8 bits.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Table entries are word-aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  /* Current word is full: flush it and start a new one.  */
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
27883
27884
/* Initialize the DWARF-2 unwind information for this procedure:
   the CFA starts out as sp + 0.  */

void
tc_arm_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
27892 #endif /* OBJ_ELF */
27893
27894 /* Convert REGNAME to a DWARF-2 register number. */
27895
27896 int
27897 tc_arm_regname_to_dw2regnum (char *regname)
27898 {
27899 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
27900 if (reg != FAIL)
27901 return reg;
27902
27903 /* PR 16694: Allow VFP registers as well. */
27904 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
27905 if (reg != FAIL)
27906 return 64 + reg;
27907
27908 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
27909 if (reg != FAIL)
27910 return reg + 256;
27911
27912 return FAIL;
27913 }
27914
27915 #ifdef TE_PE
27916 void
27917 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
27918 {
27919 expressionS exp;
27920
27921 exp.X_op = O_secrel;
27922 exp.X_add_symbol = symbol;
27923 exp.X_add_number = 0;
27924 emit_expr (&exp, size);
27925 }
27926 #endif
27927
27928 /* MD interface: Symbol and relocation handling. */
27929
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* Address of the fixup itself.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      /* Thumb load/address: +4 pipeline offset, then word-align.  */
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* A Thumb BL to an ARM-state function in the same section will
	 be turned into BLX by the assembler (v5T+), so resolve it
	 locally using the real base even if a reloc was forced.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      /* ARM BL to a Thumb function may become BLX; same local
	 resolution as above.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
28063
28064 static bool flag_warn_syms = true;
28065
28066 bool
28067 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
28068 {
28069 /* PR 18347 - Warn if the user attempts to create a symbol with the same
28070 name as an ARM instruction. Whilst strictly speaking it is allowed, it
28071 does mean that the resulting code might be very confusing to the reader.
28072 Also this warning can be triggered if the user omits an operand before
28073 an immediate address, eg:
28074
28075 LDR =foo
28076
28077 GAS treats this as an assignment of the value of the symbol foo to a
28078 symbol LDR, and so (without this code) it will not issue any kind of
28079 warning or error message.
28080
28081 Note - ARM instructions are case-insensitive but the strings in the hash
28082 table are all stored in lower case, so we must first ensure that name is
28083 lower case too. */
28084 if (flag_warn_syms && arm_ops_hsh)
28085 {
28086 char * nbuf = strdup (name);
28087 char * p;
28088
28089 for (p = nbuf; *p; p++)
28090 *p = TOLOWER (*p);
28091 if (str_hash_find (arm_ops_hsh, nbuf) != NULL)
28092 {
28093 static htab_t already_warned = NULL;
28094
28095 if (already_warned == NULL)
28096 already_warned = str_htab_create ();
28097 /* Only warn about the symbol once. To keep the code
28098 simple we let str_hash_insert do the lookup for us. */
28099 if (str_hash_find (already_warned, nbuf) == NULL)
28100 {
28101 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
28102 str_hash_insert (already_warned, nbuf, NULL, 0);
28103 }
28104 }
28105 else
28106 free (nbuf);
28107 }
28108
28109 return false;
28110 }
28111
28112 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
28113 Otherwise we have no need to default values of symbols. */
28114
28115 symbolS *
28116 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
28117 {
28118 #ifdef OBJ_ELF
28119 if (name[0] == '_' && name[1] == 'G'
28120 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
28121 {
28122 if (!GOT_symbol)
28123 {
28124 if (symbol_find (name))
28125 as_bad (_("GOT already in the symbol table"));
28126
28127 GOT_symbol = symbol_new (name, undefined_section,
28128 &zero_address_frag, 0);
28129 }
28130
28131 return GOT_symbol;
28132 }
28133 #endif
28134
28135 return NULL;
28136 }
28137
/* Subroutine of md_apply_fix.  Check to see if an immediate can be
   computed as two separate immediate values, added together.  We
   already know that this value cannot be computed by just one ARM
   instruction.

   VAL is the 32-bit constant to split.  On success, the operand
   encoding (8-bit immediate plus rotation field) for the more
   significant part is stored through HIGHPART and the encoding for
   the less significant part is returned.  Returns FAIL when no
   two-part split exists.  */

static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* An ARM modified immediate is an 8-bit value rotated right by an
     even amount, so try every even rotation and take the first one
     that brings a non-zero byte of VAL into bits [7:0].  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	/* The remaining set bits must all fall inside one more
	   byte-aligned byte, which becomes the second immediate.
	   NOTE (review): the rotate field of the operand lives in bits
	   [11:8] and holds rotation/2, hence the "<< 7" below (equal
	   to (amount / 2) << 8).  */
	if (a & 0xff00)
	  {
	    if (a & ~ 0xffff)
	      continue;		/* Bits left over above the two bytes.  */
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    /* Only the top byte can be left at this point.  */
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	/* Low part: the low byte of the rotated value together with
	   the rotation that restores it to its original position.  */
	return (a & 0xff) | (i << 7);
      }

  return FAIL;
}
28176
28177 static int
28178 validate_offset_imm (unsigned int val, int hwse)
28179 {
28180 if ((hwse && val > 255) || val > 4095)
28181 return FAIL;
28182 return val;
28183 }
28184
28185 /* Subroutine of md_apply_fix. Do those data_ops which can take a
28186 negative immediate constant by altering the instruction. A bit of
28187 a hack really.
28188 MOV <-> MVN
28189 AND <-> BIC
28190 ADC <-> SBC
28191 by inverting the second operand, and
28192 ADD <-> SUB
28193 CMP <-> CMN
28194 by negating the second operand. */
28195
28196 static int
28197 negate_data_op (unsigned long * instruction,
28198 unsigned long value)
28199 {
28200 int op, new_inst;
28201 unsigned long negated, inverted;
28202
28203 negated = encode_arm_immediate (-value);
28204 inverted = encode_arm_immediate (~value);
28205
28206 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
28207 switch (op)
28208 {
28209 /* First negates. */
28210 case OPCODE_SUB: /* ADD <-> SUB */
28211 new_inst = OPCODE_ADD;
28212 value = negated;
28213 break;
28214
28215 case OPCODE_ADD:
28216 new_inst = OPCODE_SUB;
28217 value = negated;
28218 break;
28219
28220 case OPCODE_CMP: /* CMP <-> CMN */
28221 new_inst = OPCODE_CMN;
28222 value = negated;
28223 break;
28224
28225 case OPCODE_CMN:
28226 new_inst = OPCODE_CMP;
28227 value = negated;
28228 break;
28229
28230 /* Now Inverted ops. */
28231 case OPCODE_MOV: /* MOV <-> MVN */
28232 new_inst = OPCODE_MVN;
28233 value = inverted;
28234 break;
28235
28236 case OPCODE_MVN:
28237 new_inst = OPCODE_MOV;
28238 value = inverted;
28239 break;
28240
28241 case OPCODE_AND: /* AND <-> BIC */
28242 new_inst = OPCODE_BIC;
28243 value = inverted;
28244 break;
28245
28246 case OPCODE_BIC:
28247 new_inst = OPCODE_AND;
28248 value = inverted;
28249 break;
28250
28251 case OPCODE_ADC: /* ADC <-> SBC */
28252 new_inst = OPCODE_SBC;
28253 value = inverted;
28254 break;
28255
28256 case OPCODE_SBC:
28257 new_inst = OPCODE_ADC;
28258 value = inverted;
28259 break;
28260
28261 /* We cannot do anything. */
28262 default:
28263 return FAIL;
28264 }
28265
28266 if (value == (unsigned) FAIL)
28267 return FAIL;
28268
28269 *instruction &= OPCODE_MASK;
28270 *instruction |= new_inst << DATA_OP_SHIFT;
28271 return value;
28272 }
28273
28274 /* Like negate_data_op, but for Thumb-2. */
28275
28276 static unsigned int
28277 thumb32_negate_data_op (valueT *instruction, unsigned int value)
28278 {
28279 unsigned int op, new_inst;
28280 unsigned int rd;
28281 unsigned int negated, inverted;
28282
28283 negated = encode_thumb32_immediate (-value);
28284 inverted = encode_thumb32_immediate (~value);
28285
28286 rd = (*instruction >> 8) & 0xf;
28287 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
28288 switch (op)
28289 {
28290 /* ADD <-> SUB. Includes CMP <-> CMN. */
28291 case T2_OPCODE_SUB:
28292 new_inst = T2_OPCODE_ADD;
28293 value = negated;
28294 break;
28295
28296 case T2_OPCODE_ADD:
28297 new_inst = T2_OPCODE_SUB;
28298 value = negated;
28299 break;
28300
28301 /* ORR <-> ORN. Includes MOV <-> MVN. */
28302 case T2_OPCODE_ORR:
28303 new_inst = T2_OPCODE_ORN;
28304 value = inverted;
28305 break;
28306
28307 case T2_OPCODE_ORN:
28308 new_inst = T2_OPCODE_ORR;
28309 value = inverted;
28310 break;
28311
28312 /* AND <-> BIC. TST has no inverted equivalent. */
28313 case T2_OPCODE_AND:
28314 new_inst = T2_OPCODE_BIC;
28315 if (rd == 15)
28316 value = FAIL;
28317 else
28318 value = inverted;
28319 break;
28320
28321 case T2_OPCODE_BIC:
28322 new_inst = T2_OPCODE_AND;
28323 value = inverted;
28324 break;
28325
28326 /* ADC <-> SBC */
28327 case T2_OPCODE_ADC:
28328 new_inst = T2_OPCODE_SBC;
28329 value = inverted;
28330 break;
28331
28332 case T2_OPCODE_SBC:
28333 new_inst = T2_OPCODE_ADC;
28334 value = inverted;
28335 break;
28336
28337 /* We cannot do anything. */
28338 default:
28339 return FAIL;
28340 }
28341
28342 if (value == (unsigned int)FAIL)
28343 return FAIL;
28344
28345 *instruction &= T2_OPCODE_MASK;
28346 *instruction |= new_inst << T2_DATA_OP_SHIFT;
28347 return value;
28348 }
28349
28350 /* Read a 32-bit thumb instruction from buf. */
28351
28352 static unsigned long
28353 get_thumb32_insn (char * buf)
28354 {
28355 unsigned long insn;
28356 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
28357 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
28358
28359 return insn;
28360 }
28361
28362 /* We usually want to set the low bit on the address of thumb function
28363 symbols. In particular .word foo - . should have the low bit set.
28364 Generic code tries to fold the difference of two symbols to
28365 a constant. Prevent this and force a relocation when the first symbols
28366 is a thumb function. */
28367
28368 bool
28369 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
28370 {
28371 if (op == O_subtract
28372 && l->X_op == O_symbol
28373 && r->X_op == O_symbol
28374 && THUMB_IS_FUNC (l->X_add_symbol))
28375 {
28376 l->X_op = O_subtract;
28377 l->X_op_symbol = r->X_add_symbol;
28378 l->X_add_number -= r->X_add_number;
28379 return true;
28380 }
28381
28382 /* Process as normal. */
28383 return false;
28384 }
28385
/* Encode Thumb2 unconditional branches and calls.  The encoding
   for the 2 are identical for the immediate values.

   BUF points at the two halfwords of the instruction; VALUE is the
   signed byte offset to encode.  The T2 B/BL immediate is split into
   S (sign), I1/I2 (stored inverted and XORed with S as the J1/J2
   bits), a 10-bit high part and an 11-bit low part.  */

static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
  /* Positions of the J1 and J2 bits in the second halfword.  */
#define T2I1I2MASK ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Slice the offset into the encoding fields; bit 0 is dropped
     because Thumb branch targets are halfword aligned.  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  /* First halfword: sign bit and the high ten offset bits.  */
  newval |= (S << 10) | hi;
  /* Second halfword: J1 = ~(I1 ^ S), J2 = ~(I2 ^ S); the trailing
     XOR with the mask inverts both bits in one operation.  */
  newval2 &= ~T2I1I2MASK;
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
28410
28411 void
28412 md_apply_fix (fixS * fixP,
28413 valueT * valP,
28414 segT seg)
28415 {
28416 valueT value = * valP;
28417 valueT newval;
28418 unsigned int newimm;
28419 unsigned long temp;
28420 int sign;
28421 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
28422
28423 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
28424
28425 /* Note whether this will delete the relocation. */
28426
28427 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
28428 fixP->fx_done = 1;
28429
28430 /* On a 64-bit host, silently truncate 'value' to 32 bits for
28431 consistency with the behaviour on 32-bit hosts. Remember value
28432 for emit_reloc. */
28433 value &= 0xffffffff;
28434 value ^= 0x80000000;
28435 value -= 0x80000000;
28436
28437 *valP = value;
28438 fixP->fx_addnumber = value;
28439
28440 /* Same treatment for fixP->fx_offset. */
28441 fixP->fx_offset &= 0xffffffff;
28442 fixP->fx_offset ^= 0x80000000;
28443 fixP->fx_offset -= 0x80000000;
28444
28445 switch (fixP->fx_r_type)
28446 {
28447 case BFD_RELOC_NONE:
28448 /* This will need to go in the object file. */
28449 fixP->fx_done = 0;
28450 break;
28451
28452 case BFD_RELOC_ARM_IMMEDIATE:
28453 /* We claim that this fixup has been processed here,
28454 even if in fact we generate an error because we do
28455 not have a reloc for it, so tc_gen_reloc will reject it. */
28456 fixP->fx_done = 1;
28457
28458 if (fixP->fx_addsy)
28459 {
28460 const char *msg = 0;
28461
28462 if (! S_IS_DEFINED (fixP->fx_addsy))
28463 msg = _("undefined symbol %s used as an immediate value");
28464 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
28465 msg = _("symbol %s is in a different section");
28466 else if (S_IS_WEAK (fixP->fx_addsy))
28467 msg = _("symbol %s is weak and may be overridden later");
28468
28469 if (msg)
28470 {
28471 as_bad_where (fixP->fx_file, fixP->fx_line,
28472 msg, S_GET_NAME (fixP->fx_addsy));
28473 break;
28474 }
28475 }
28476
28477 temp = md_chars_to_number (buf, INSN_SIZE);
28478
28479 /* If the offset is negative, we should use encoding A2 for ADR. */
28480 if ((temp & 0xfff0000) == 0x28f0000 && (offsetT) value < 0)
28481 newimm = negate_data_op (&temp, value);
28482 else
28483 {
28484 newimm = encode_arm_immediate (value);
28485
28486 /* If the instruction will fail, see if we can fix things up by
28487 changing the opcode. */
28488 if (newimm == (unsigned int) FAIL)
28489 newimm = negate_data_op (&temp, value);
28490 /* MOV accepts both ARM modified immediate (A1 encoding) and
28491 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
28492 When disassembling, MOV is preferred when there is no encoding
28493 overlap. */
28494 if (newimm == (unsigned int) FAIL
28495 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
28496 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
28497 && !((temp >> SBIT_SHIFT) & 0x1)
28498 && value <= 0xffff)
28499 {
28500 /* Clear bits[23:20] to change encoding from A1 to A2. */
28501 temp &= 0xff0fffff;
28502 /* Encoding high 4bits imm. Code below will encode the remaining
28503 low 12bits. */
28504 temp |= (value & 0x0000f000) << 4;
28505 newimm = value & 0x00000fff;
28506 }
28507 }
28508
28509 if (newimm == (unsigned int) FAIL)
28510 {
28511 as_bad_where (fixP->fx_file, fixP->fx_line,
28512 _("invalid constant (%lx) after fixup"),
28513 (unsigned long) value);
28514 break;
28515 }
28516
28517 newimm |= (temp & 0xfffff000);
28518 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
28519 break;
28520
28521 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
28522 {
28523 unsigned int highpart = 0;
28524 unsigned int newinsn = 0xe1a00000; /* nop. */
28525
28526 if (fixP->fx_addsy)
28527 {
28528 const char *msg = 0;
28529
28530 if (! S_IS_DEFINED (fixP->fx_addsy))
28531 msg = _("undefined symbol %s used as an immediate value");
28532 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
28533 msg = _("symbol %s is in a different section");
28534 else if (S_IS_WEAK (fixP->fx_addsy))
28535 msg = _("symbol %s is weak and may be overridden later");
28536
28537 if (msg)
28538 {
28539 as_bad_where (fixP->fx_file, fixP->fx_line,
28540 msg, S_GET_NAME (fixP->fx_addsy));
28541 break;
28542 }
28543 }
28544
28545 newimm = encode_arm_immediate (value);
28546 temp = md_chars_to_number (buf, INSN_SIZE);
28547
28548 /* If the instruction will fail, see if we can fix things up by
28549 changing the opcode. */
28550 if (newimm == (unsigned int) FAIL
28551 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
28552 {
28553 /* No ? OK - try using two ADD instructions to generate
28554 the value. */
28555 newimm = validate_immediate_twopart (value, & highpart);
28556
28557 /* Yes - then make sure that the second instruction is
28558 also an add. */
28559 if (newimm != (unsigned int) FAIL)
28560 newinsn = temp;
28561 /* Still No ? Try using a negated value. */
28562 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
28563 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
28564 /* Otherwise - give up. */
28565 else
28566 {
28567 as_bad_where (fixP->fx_file, fixP->fx_line,
28568 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
28569 (long) value);
28570 break;
28571 }
28572
28573 /* Replace the first operand in the 2nd instruction (which
28574 is the PC) with the destination register. We have
28575 already added in the PC in the first instruction and we
28576 do not want to do it again. */
28577 newinsn &= ~ 0xf0000;
28578 newinsn |= ((newinsn & 0x0f000) << 4);
28579 }
28580
28581 newimm |= (temp & 0xfffff000);
28582 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
28583
28584 highpart |= (newinsn & 0xfffff000);
28585 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
28586 }
28587 break;
28588
28589 case BFD_RELOC_ARM_OFFSET_IMM:
28590 if (!fixP->fx_done && seg->use_rela_p)
28591 value = 0;
28592 /* Fall through. */
28593
28594 case BFD_RELOC_ARM_LITERAL:
28595 sign = (offsetT) value > 0;
28596
28597 if ((offsetT) value < 0)
28598 value = - value;
28599
28600 if (validate_offset_imm (value, 0) == FAIL)
28601 {
28602 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
28603 as_bad_where (fixP->fx_file, fixP->fx_line,
28604 _("invalid literal constant: pool needs to be closer"));
28605 else
28606 as_bad_where (fixP->fx_file, fixP->fx_line,
28607 _("bad immediate value for offset (%ld)"),
28608 (long) value);
28609 break;
28610 }
28611
28612 newval = md_chars_to_number (buf, INSN_SIZE);
28613 if (value == 0)
28614 newval &= 0xfffff000;
28615 else
28616 {
28617 newval &= 0xff7ff000;
28618 newval |= value | (sign ? INDEX_UP : 0);
28619 }
28620 md_number_to_chars (buf, newval, INSN_SIZE);
28621 break;
28622
28623 case BFD_RELOC_ARM_OFFSET_IMM8:
28624 case BFD_RELOC_ARM_HWLITERAL:
28625 sign = (offsetT) value > 0;
28626
28627 if ((offsetT) value < 0)
28628 value = - value;
28629
28630 if (validate_offset_imm (value, 1) == FAIL)
28631 {
28632 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
28633 as_bad_where (fixP->fx_file, fixP->fx_line,
28634 _("invalid literal constant: pool needs to be closer"));
28635 else
28636 as_bad_where (fixP->fx_file, fixP->fx_line,
28637 _("bad immediate value for 8-bit offset (%ld)"),
28638 (long) value);
28639 break;
28640 }
28641
28642 newval = md_chars_to_number (buf, INSN_SIZE);
28643 if (value == 0)
28644 newval &= 0xfffff0f0;
28645 else
28646 {
28647 newval &= 0xff7ff0f0;
28648 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
28649 }
28650 md_number_to_chars (buf, newval, INSN_SIZE);
28651 break;
28652
28653 case BFD_RELOC_ARM_T32_OFFSET_U8:
28654 if (value > 1020 || value % 4 != 0)
28655 as_bad_where (fixP->fx_file, fixP->fx_line,
28656 _("bad immediate value for offset (%ld)"), (long) value);
28657 value /= 4;
28658
28659 newval = md_chars_to_number (buf+2, THUMB_SIZE);
28660 newval |= value;
28661 md_number_to_chars (buf+2, newval, THUMB_SIZE);
28662 break;
28663
28664 case BFD_RELOC_ARM_T32_OFFSET_IMM:
28665 /* This is a complicated relocation used for all varieties of Thumb32
28666 load/store instruction with immediate offset:
28667
28668 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
28669 *4, optional writeback(W)
28670 (doubleword load/store)
28671
28672 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
28673 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
28674 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
28675 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
28676 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
28677
28678 Uppercase letters indicate bits that are already encoded at
28679 this point. Lowercase letters are our problem. For the
28680 second block of instructions, the secondary opcode nybble
28681 (bits 8..11) is present, and bit 23 is zero, even if this is
28682 a PC-relative operation. */
28683 newval = md_chars_to_number (buf, THUMB_SIZE);
28684 newval <<= 16;
28685 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
28686
28687 if ((newval & 0xf0000000) == 0xe0000000)
28688 {
28689 /* Doubleword load/store: 8-bit offset, scaled by 4. */
28690 if ((offsetT) value >= 0)
28691 newval |= (1 << 23);
28692 else
28693 value = -value;
28694 if (value % 4 != 0)
28695 {
28696 as_bad_where (fixP->fx_file, fixP->fx_line,
28697 _("offset not a multiple of 4"));
28698 break;
28699 }
28700 value /= 4;
28701 if (value > 0xff)
28702 {
28703 as_bad_where (fixP->fx_file, fixP->fx_line,
28704 _("offset out of range"));
28705 break;
28706 }
28707 newval &= ~0xff;
28708 }
28709 else if ((newval & 0x000f0000) == 0x000f0000)
28710 {
28711 /* PC-relative, 12-bit offset. */
28712 if ((offsetT) value >= 0)
28713 newval |= (1 << 23);
28714 else
28715 value = -value;
28716 if (value > 0xfff)
28717 {
28718 as_bad_where (fixP->fx_file, fixP->fx_line,
28719 _("offset out of range"));
28720 break;
28721 }
28722 newval &= ~0xfff;
28723 }
28724 else if ((newval & 0x00000100) == 0x00000100)
28725 {
28726 /* Writeback: 8-bit, +/- offset. */
28727 if ((offsetT) value >= 0)
28728 newval |= (1 << 9);
28729 else
28730 value = -value;
28731 if (value > 0xff)
28732 {
28733 as_bad_where (fixP->fx_file, fixP->fx_line,
28734 _("offset out of range"));
28735 break;
28736 }
28737 newval &= ~0xff;
28738 }
28739 else if ((newval & 0x00000f00) == 0x00000e00)
28740 {
28741 /* T-instruction: positive 8-bit offset. */
28742 if (value > 0xff)
28743 {
28744 as_bad_where (fixP->fx_file, fixP->fx_line,
28745 _("offset out of range"));
28746 break;
28747 }
28748 newval &= ~0xff;
28749 newval |= value;
28750 }
28751 else
28752 {
28753 /* Positive 12-bit or negative 8-bit offset. */
28754 unsigned int limit;
28755 if ((offsetT) value >= 0)
28756 {
28757 newval |= (1 << 23);
28758 limit = 0xfff;
28759 }
28760 else
28761 {
28762 value = -value;
28763 limit = 0xff;
28764 }
28765 if (value > limit)
28766 {
28767 as_bad_where (fixP->fx_file, fixP->fx_line,
28768 _("offset out of range"));
28769 break;
28770 }
28771 newval &= ~limit;
28772 }
28773
28774 newval |= value;
28775 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
28776 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
28777 break;
28778
28779 case BFD_RELOC_ARM_SHIFT_IMM:
28780 newval = md_chars_to_number (buf, INSN_SIZE);
28781 if (value > 32
28782 || (value == 32
28783 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
28784 {
28785 as_bad_where (fixP->fx_file, fixP->fx_line,
28786 _("shift expression is too large"));
28787 break;
28788 }
28789
28790 if (value == 0)
28791 /* Shifts of zero must be done as lsl. */
28792 newval &= ~0x60;
28793 else if (value == 32)
28794 value = 0;
28795 newval &= 0xfffff07f;
28796 newval |= (value & 0x1f) << 7;
28797 md_number_to_chars (buf, newval, INSN_SIZE);
28798 break;
28799
28800 case BFD_RELOC_ARM_T32_IMMEDIATE:
28801 case BFD_RELOC_ARM_T32_ADD_IMM:
28802 case BFD_RELOC_ARM_T32_IMM12:
28803 case BFD_RELOC_ARM_T32_ADD_PC12:
28804 /* We claim that this fixup has been processed here,
28805 even if in fact we generate an error because we do
28806 not have a reloc for it, so tc_gen_reloc will reject it. */
28807 fixP->fx_done = 1;
28808
28809 if (fixP->fx_addsy
28810 && ! S_IS_DEFINED (fixP->fx_addsy))
28811 {
28812 as_bad_where (fixP->fx_file, fixP->fx_line,
28813 _("undefined symbol %s used as an immediate value"),
28814 S_GET_NAME (fixP->fx_addsy));
28815 break;
28816 }
28817
28818 newval = md_chars_to_number (buf, THUMB_SIZE);
28819 newval <<= 16;
28820 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
28821
28822 newimm = FAIL;
28823 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
28824 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
28825 Thumb2 modified immediate encoding (T2). */
28826 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
28827 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
28828 {
28829 newimm = encode_thumb32_immediate (value);
28830 if (newimm == (unsigned int) FAIL)
28831 newimm = thumb32_negate_data_op (&newval, value);
28832 }
28833 if (newimm == (unsigned int) FAIL)
28834 {
28835 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
28836 {
28837 /* Turn add/sum into addw/subw. */
28838 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
28839 newval = (newval & 0xfeffffff) | 0x02000000;
28840 /* No flat 12-bit imm encoding for addsw/subsw. */
28841 if ((newval & 0x00100000) == 0)
28842 {
28843 /* 12 bit immediate for addw/subw. */
28844 if ((offsetT) value < 0)
28845 {
28846 value = -value;
28847 newval ^= 0x00a00000;
28848 }
28849 if (value > 0xfff)
28850 newimm = (unsigned int) FAIL;
28851 else
28852 newimm = value;
28853 }
28854 }
28855 else
28856 {
28857 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
28858 UINT16 (T3 encoding), MOVW only accepts UINT16. When
28859 disassembling, MOV is preferred when there is no encoding
28860 overlap. */
28861 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
28862 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
28863 but with the Rn field [19:16] set to 1111. */
28864 && (((newval >> 16) & 0xf) == 0xf)
28865 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
28866 && !((newval >> T2_SBIT_SHIFT) & 0x1)
28867 && value <= 0xffff)
28868 {
28869 /* Toggle bit[25] to change encoding from T2 to T3. */
28870 newval ^= 1 << 25;
28871 /* Clear bits[19:16]. */
28872 newval &= 0xfff0ffff;
28873 /* Encoding high 4bits imm. Code below will encode the
28874 remaining low 12bits. */
28875 newval |= (value & 0x0000f000) << 4;
28876 newimm = value & 0x00000fff;
28877 }
28878 }
28879 }
28880
28881 if (newimm == (unsigned int)FAIL)
28882 {
28883 as_bad_where (fixP->fx_file, fixP->fx_line,
28884 _("invalid constant (%lx) after fixup"),
28885 (unsigned long) value);
28886 break;
28887 }
28888
28889 newval |= (newimm & 0x800) << 15;
28890 newval |= (newimm & 0x700) << 4;
28891 newval |= (newimm & 0x0ff);
28892
28893 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
28894 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
28895 break;
28896
28897 case BFD_RELOC_ARM_SMC:
28898 if (value > 0xf)
28899 as_bad_where (fixP->fx_file, fixP->fx_line,
28900 _("invalid smc expression"));
28901
28902 newval = md_chars_to_number (buf, INSN_SIZE);
28903 newval |= (value & 0xf);
28904 md_number_to_chars (buf, newval, INSN_SIZE);
28905 break;
28906
28907 case BFD_RELOC_ARM_HVC:
28908 if (value > 0xffff)
28909 as_bad_where (fixP->fx_file, fixP->fx_line,
28910 _("invalid hvc expression"));
28911 newval = md_chars_to_number (buf, INSN_SIZE);
28912 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
28913 md_number_to_chars (buf, newval, INSN_SIZE);
28914 break;
28915
28916 case BFD_RELOC_ARM_SWI:
28917 if (fixP->tc_fix_data != 0)
28918 {
28919 if (value > 0xff)
28920 as_bad_where (fixP->fx_file, fixP->fx_line,
28921 _("invalid swi expression"));
28922 newval = md_chars_to_number (buf, THUMB_SIZE);
28923 newval |= value;
28924 md_number_to_chars (buf, newval, THUMB_SIZE);
28925 }
28926 else
28927 {
28928 if (value > 0x00ffffff)
28929 as_bad_where (fixP->fx_file, fixP->fx_line,
28930 _("invalid swi expression"));
28931 newval = md_chars_to_number (buf, INSN_SIZE);
28932 newval |= value;
28933 md_number_to_chars (buf, newval, INSN_SIZE);
28934 }
28935 break;
28936
28937 case BFD_RELOC_ARM_MULTI:
28938 if (value > 0xffff)
28939 as_bad_where (fixP->fx_file, fixP->fx_line,
28940 _("invalid expression in load/store multiple"));
28941 newval = value | md_chars_to_number (buf, INSN_SIZE);
28942 md_number_to_chars (buf, newval, INSN_SIZE);
28943 break;
28944
28945 #ifdef OBJ_ELF
28946 case BFD_RELOC_ARM_PCREL_CALL:
28947
28948 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28949 && fixP->fx_addsy
28950 && !S_FORCE_RELOC (fixP->fx_addsy, true)
28951 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28952 && THUMB_IS_FUNC (fixP->fx_addsy))
28953 /* Flip the bl to blx. This is a simple flip
28954 bit here because we generate PCREL_CALL for
28955 unconditional bls. */
28956 {
28957 newval = md_chars_to_number (buf, INSN_SIZE);
28958 newval = newval | 0x10000000;
28959 md_number_to_chars (buf, newval, INSN_SIZE);
28960 temp = 1;
28961 fixP->fx_done = 1;
28962 }
28963 else
28964 temp = 3;
28965 goto arm_branch_common;
28966
28967 case BFD_RELOC_ARM_PCREL_JUMP:
28968 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28969 && fixP->fx_addsy
28970 && !S_FORCE_RELOC (fixP->fx_addsy, true)
28971 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28972 && THUMB_IS_FUNC (fixP->fx_addsy))
28973 {
28974 /* This would map to a bl<cond>, b<cond>,
28975 b<always> to a Thumb function. We
28976 need to force a relocation for this particular
28977 case. */
28978 newval = md_chars_to_number (buf, INSN_SIZE);
28979 fixP->fx_done = 0;
28980 }
28981 /* Fall through. */
28982
28983 case BFD_RELOC_ARM_PLT32:
28984 #endif
28985 case BFD_RELOC_ARM_PCREL_BRANCH:
28986 temp = 3;
28987 goto arm_branch_common;
28988
28989 case BFD_RELOC_ARM_PCREL_BLX:
28990
28991 temp = 1;
28992 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28993 && fixP->fx_addsy
28994 && !S_FORCE_RELOC (fixP->fx_addsy, true)
28995 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28996 && ARM_IS_FUNC (fixP->fx_addsy))
28997 {
28998 /* Flip the blx to a bl and warn. */
28999 const char *name = S_GET_NAME (fixP->fx_addsy);
29000 newval = 0xeb000000;
29001 as_warn_where (fixP->fx_file, fixP->fx_line,
29002 _("blx to '%s' an ARM ISA state function changed to bl"),
29003 name);
29004 md_number_to_chars (buf, newval, INSN_SIZE);
29005 temp = 3;
29006 fixP->fx_done = 1;
29007 }
29008
29009 #ifdef OBJ_ELF
29010 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
29011 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
29012 #endif
29013
29014 arm_branch_common:
29015 /* We are going to store value (shifted right by two) in the
29016 instruction, in a 24 bit, signed field. Bits 26 through 32 either
29017 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
29018 also be clear. */
29019 if (value & temp)
29020 as_bad_where (fixP->fx_file, fixP->fx_line,
29021 _("misaligned branch destination"));
29022 if ((value & 0xfe000000) != 0
29023 && (value & 0xfe000000) != 0xfe000000)
29024 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
29025
29026 if (fixP->fx_done || !seg->use_rela_p)
29027 {
29028 newval = md_chars_to_number (buf, INSN_SIZE);
29029 newval |= (value >> 2) & 0x00ffffff;
29030 /* Set the H bit on BLX instructions. */
29031 if (temp == 1)
29032 {
29033 if (value & 2)
29034 newval |= 0x01000000;
29035 else
29036 newval &= ~0x01000000;
29037 }
29038 md_number_to_chars (buf, newval, INSN_SIZE);
29039 }
29040 break;
29041
29042 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
29043 /* CBZ can only branch forward. */
29044
29045 /* Attempts to use CBZ to branch to the next instruction
29046 (which, strictly speaking, are prohibited) will be turned into
29047 no-ops.
29048
29049 FIXME: It may be better to remove the instruction completely and
29050 perform relaxation. */
29051 if ((offsetT) value == -2)
29052 {
29053 newval = md_chars_to_number (buf, THUMB_SIZE);
29054 newval = 0xbf00; /* NOP encoding T1 */
29055 md_number_to_chars (buf, newval, THUMB_SIZE);
29056 }
29057 else
29058 {
29059 if (value & ~0x7e)
29060 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
29061
29062 if (fixP->fx_done || !seg->use_rela_p)
29063 {
29064 newval = md_chars_to_number (buf, THUMB_SIZE);
29065 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
29066 md_number_to_chars (buf, newval, THUMB_SIZE);
29067 }
29068 }
29069 break;
29070
29071 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
29072 if (out_of_range_p (value, 8))
29073 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
29074
29075 if (fixP->fx_done || !seg->use_rela_p)
29076 {
29077 newval = md_chars_to_number (buf, THUMB_SIZE);
29078 newval |= (value & 0x1ff) >> 1;
29079 md_number_to_chars (buf, newval, THUMB_SIZE);
29080 }
29081 break;
29082
29083 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
29084 if (out_of_range_p (value, 11))
29085 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
29086
29087 if (fixP->fx_done || !seg->use_rela_p)
29088 {
29089 newval = md_chars_to_number (buf, THUMB_SIZE);
29090 newval |= (value & 0xfff) >> 1;
29091 md_number_to_chars (buf, newval, THUMB_SIZE);
29092 }
29093 break;
29094
29095 /* This relocation is misnamed, it should be BRANCH21. */
29096 case BFD_RELOC_THUMB_PCREL_BRANCH20:
29097 if (fixP->fx_addsy
29098 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29099 && !S_FORCE_RELOC (fixP->fx_addsy, true)
29100 && ARM_IS_FUNC (fixP->fx_addsy)
29101 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
29102 {
29103 /* Force a relocation for a branch 20 bits wide. */
29104 fixP->fx_done = 0;
29105 }
29106 if (out_of_range_p (value, 20))
29107 as_bad_where (fixP->fx_file, fixP->fx_line,
29108 _("conditional branch out of range"));
29109
29110 if (fixP->fx_done || !seg->use_rela_p)
29111 {
29112 offsetT newval2;
29113 addressT S, J1, J2, lo, hi;
29114
29115 S = (value & 0x00100000) >> 20;
29116 J2 = (value & 0x00080000) >> 19;
29117 J1 = (value & 0x00040000) >> 18;
29118 hi = (value & 0x0003f000) >> 12;
29119 lo = (value & 0x00000ffe) >> 1;
29120
29121 newval = md_chars_to_number (buf, THUMB_SIZE);
29122 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29123 newval |= (S << 10) | hi;
29124 newval2 |= (J1 << 13) | (J2 << 11) | lo;
29125 md_number_to_chars (buf, newval, THUMB_SIZE);
29126 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29127 }
29128 break;
29129
29130 case BFD_RELOC_THUMB_PCREL_BLX:
29131 /* If there is a blx from a thumb state function to
29132 another thumb function flip this to a bl and warn
29133 about it. */
29134
29135 if (fixP->fx_addsy
29136 && !S_FORCE_RELOC (fixP->fx_addsy, true)
29137 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29138 && THUMB_IS_FUNC (fixP->fx_addsy))
29139 {
29140 const char *name = S_GET_NAME (fixP->fx_addsy);
29141 as_warn_where (fixP->fx_file, fixP->fx_line,
29142 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
29143 name);
29144 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29145 newval = newval | 0x1000;
29146 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
29147 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
29148 fixP->fx_done = 1;
29149 }
29150
29151
29152 goto thumb_bl_common;
29153
29154 case BFD_RELOC_THUMB_PCREL_BRANCH23:
29155 /* A bl from Thumb state ISA to an internal ARM state function
29156 is converted to a blx. */
29157 if (fixP->fx_addsy
29158 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29159 && !S_FORCE_RELOC (fixP->fx_addsy, true)
29160 && ARM_IS_FUNC (fixP->fx_addsy)
29161 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
29162 {
29163 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29164 newval = newval & ~0x1000;
29165 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
29166 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
29167 fixP->fx_done = 1;
29168 }
29169
29170 thumb_bl_common:
29171
29172 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
29173 /* For a BLX instruction, make sure that the relocation is rounded up
29174 to a word boundary. This follows the semantics of the instruction
29175 which specifies that bit 1 of the target address will come from bit
29176 1 of the base address. */
29177 value = (value + 3) & ~ 3;
29178
29179 #ifdef OBJ_ELF
29180 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
29181 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
29182 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
29183 #endif
29184
29185 if (out_of_range_p (value, 22))
29186 {
29187 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
29188 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
29189 else if (out_of_range_p (value, 24))
29190 as_bad_where (fixP->fx_file, fixP->fx_line,
29191 _("Thumb2 branch out of range"));
29192 }
29193
29194 if (fixP->fx_done || !seg->use_rela_p)
29195 encode_thumb2_b_bl_offset (buf, value);
29196
29197 break;
29198
29199 case BFD_RELOC_THUMB_PCREL_BRANCH25:
29200 if (out_of_range_p (value, 24))
29201 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
29202
29203 if (fixP->fx_done || !seg->use_rela_p)
29204 encode_thumb2_b_bl_offset (buf, value);
29205
29206 break;
29207
29208 case BFD_RELOC_8:
29209 if (fixP->fx_done || !seg->use_rela_p)
29210 *buf = value;
29211 break;
29212
29213 case BFD_RELOC_16:
29214 if (fixP->fx_done || !seg->use_rela_p)
29215 md_number_to_chars (buf, value, 2);
29216 break;
29217
29218 #ifdef OBJ_ELF
29219 case BFD_RELOC_ARM_TLS_CALL:
29220 case BFD_RELOC_ARM_THM_TLS_CALL:
29221 case BFD_RELOC_ARM_TLS_DESCSEQ:
29222 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
29223 case BFD_RELOC_ARM_TLS_GOTDESC:
29224 case BFD_RELOC_ARM_TLS_GD32:
29225 case BFD_RELOC_ARM_TLS_LE32:
29226 case BFD_RELOC_ARM_TLS_IE32:
29227 case BFD_RELOC_ARM_TLS_LDM32:
29228 case BFD_RELOC_ARM_TLS_LDO32:
29229 S_SET_THREAD_LOCAL (fixP->fx_addsy);
29230 break;
29231
29232 /* Same handling as above, but with the arm_fdpic guard. */
29233 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
29234 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
29235 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
29236 if (arm_fdpic)
29237 {
29238 S_SET_THREAD_LOCAL (fixP->fx_addsy);
29239 }
29240 else
29241 {
29242 as_bad_where (fixP->fx_file, fixP->fx_line,
29243 _("Relocation supported only in FDPIC mode"));
29244 }
29245 break;
29246
29247 case BFD_RELOC_ARM_GOT32:
29248 case BFD_RELOC_ARM_GOTOFF:
29249 break;
29250
29251 case BFD_RELOC_ARM_GOT_PREL:
29252 if (fixP->fx_done || !seg->use_rela_p)
29253 md_number_to_chars (buf, value, 4);
29254 break;
29255
29256 case BFD_RELOC_ARM_TARGET2:
29257 /* TARGET2 is not partial-inplace, so we need to write the
29258 addend here for REL targets, because it won't be written out
29259 during reloc processing later. */
29260 if (fixP->fx_done || !seg->use_rela_p)
29261 md_number_to_chars (buf, fixP->fx_offset, 4);
29262 break;
29263
29264 /* Relocations for FDPIC. */
29265 case BFD_RELOC_ARM_GOTFUNCDESC:
29266 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
29267 case BFD_RELOC_ARM_FUNCDESC:
29268 if (arm_fdpic)
29269 {
29270 if (fixP->fx_done || !seg->use_rela_p)
29271 md_number_to_chars (buf, 0, 4);
29272 }
29273 else
29274 {
29275 as_bad_where (fixP->fx_file, fixP->fx_line,
29276 _("Relocation supported only in FDPIC mode"));
29277 }
29278 break;
29279 #endif
29280
29281 case BFD_RELOC_RVA:
29282 case BFD_RELOC_32:
29283 case BFD_RELOC_ARM_TARGET1:
29284 case BFD_RELOC_ARM_ROSEGREL32:
29285 case BFD_RELOC_ARM_SBREL32:
29286 case BFD_RELOC_32_PCREL:
29287 #ifdef TE_PE
29288 case BFD_RELOC_32_SECREL:
29289 #endif
29290 if (fixP->fx_done || !seg->use_rela_p)
29291 #ifdef TE_WINCE
29292 /* For WinCE we only do this for pcrel fixups. */
29293 if (fixP->fx_done || fixP->fx_pcrel)
29294 #endif
29295 md_number_to_chars (buf, value, 4);
29296 break;
29297
29298 #ifdef OBJ_ELF
29299 case BFD_RELOC_ARM_PREL31:
29300 if (fixP->fx_done || !seg->use_rela_p)
29301 {
29302 newval = md_chars_to_number (buf, 4) & 0x80000000;
29303 if ((value ^ (value >> 1)) & 0x40000000)
29304 {
29305 as_bad_where (fixP->fx_file, fixP->fx_line,
29306 _("rel31 relocation overflow"));
29307 }
29308 newval |= value & 0x7fffffff;
29309 md_number_to_chars (buf, newval, 4);
29310 }
29311 break;
29312 #endif
29313
29314 case BFD_RELOC_ARM_CP_OFF_IMM:
29315 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
29316 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM:
29317 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
29318 newval = md_chars_to_number (buf, INSN_SIZE);
29319 else
29320 newval = get_thumb32_insn (buf);
29321 if ((newval & 0x0f200f00) == 0x0d000900)
29322 {
29323 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
29324 has permitted values that are multiples of 2, in the range -510
29325 to 510. */
29326 if (value + 510 > 510 + 510 || (value & 1))
29327 as_bad_where (fixP->fx_file, fixP->fx_line,
29328 _("co-processor offset out of range"));
29329 }
29330 else if ((newval & 0xfe001f80) == 0xec000f80)
29331 {
29332 if (value + 511 > 512 + 511 || (value & 3))
29333 as_bad_where (fixP->fx_file, fixP->fx_line,
29334 _("co-processor offset out of range"));
29335 }
29336 else if (value + 1023 > 1023 + 1023 || (value & 3))
29337 as_bad_where (fixP->fx_file, fixP->fx_line,
29338 _("co-processor offset out of range"));
29339 cp_off_common:
29340 sign = (offsetT) value > 0;
29341 if ((offsetT) value < 0)
29342 value = -value;
29343 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
29344 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
29345 newval = md_chars_to_number (buf, INSN_SIZE);
29346 else
29347 newval = get_thumb32_insn (buf);
29348 if (value == 0)
29349 {
29350 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
29351 newval &= 0xffffff80;
29352 else
29353 newval &= 0xffffff00;
29354 }
29355 else
29356 {
29357 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
29358 newval &= 0xff7fff80;
29359 else
29360 newval &= 0xff7fff00;
29361 if ((newval & 0x0f200f00) == 0x0d000900)
29362 {
29363 /* This is a fp16 vstr/vldr.
29364
29365 It requires the immediate offset in the instruction is shifted
29366 left by 1 to be a half-word offset.
29367
29368 Here, left shift by 1 first, and later right shift by 2
29369 should get the right offset. */
29370 value <<= 1;
29371 }
29372 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
29373 }
29374 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
29375 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
29376 md_number_to_chars (buf, newval, INSN_SIZE);
29377 else
29378 put_thumb32_insn (buf, newval);
29379 break;
29380
29381 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
29382 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
29383 if (value + 255 > 255 + 255)
29384 as_bad_where (fixP->fx_file, fixP->fx_line,
29385 _("co-processor offset out of range"));
29386 value *= 4;
29387 goto cp_off_common;
29388
29389 case BFD_RELOC_ARM_THUMB_OFFSET:
29390 newval = md_chars_to_number (buf, THUMB_SIZE);
29391 /* Exactly what ranges, and where the offset is inserted depends
29392 on the type of instruction, we can establish this from the
29393 top 4 bits. */
29394 switch (newval >> 12)
29395 {
29396 case 4: /* PC load. */
29397 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
29398 forced to zero for these loads; md_pcrel_from has already
29399 compensated for this. */
29400 if (value & 3)
29401 as_bad_where (fixP->fx_file, fixP->fx_line,
29402 _("invalid offset, target not word aligned (0x%08lX)"),
29403 (((unsigned long) fixP->fx_frag->fr_address
29404 + (unsigned long) fixP->fx_where) & ~3)
29405 + (unsigned long) value);
29406 else if (get_recorded_alignment (seg) < 2)
29407 as_warn_where (fixP->fx_file, fixP->fx_line,
29408 _("section does not have enough alignment to ensure safe PC-relative loads"));
29409
29410 if (value & ~0x3fc)
29411 as_bad_where (fixP->fx_file, fixP->fx_line,
29412 _("invalid offset, value too big (0x%08lX)"),
29413 (long) value);
29414
29415 newval |= value >> 2;
29416 break;
29417
29418 case 9: /* SP load/store. */
29419 if (value & ~0x3fc)
29420 as_bad_where (fixP->fx_file, fixP->fx_line,
29421 _("invalid offset, value too big (0x%08lX)"),
29422 (long) value);
29423 newval |= value >> 2;
29424 break;
29425
29426 case 6: /* Word load/store. */
29427 if (value & ~0x7c)
29428 as_bad_where (fixP->fx_file, fixP->fx_line,
29429 _("invalid offset, value too big (0x%08lX)"),
29430 (long) value);
29431 newval |= value << 4; /* 6 - 2. */
29432 break;
29433
29434 case 7: /* Byte load/store. */
29435 if (value & ~0x1f)
29436 as_bad_where (fixP->fx_file, fixP->fx_line,
29437 _("invalid offset, value too big (0x%08lX)"),
29438 (long) value);
29439 newval |= value << 6;
29440 break;
29441
29442 case 8: /* Halfword load/store. */
29443 if (value & ~0x3e)
29444 as_bad_where (fixP->fx_file, fixP->fx_line,
29445 _("invalid offset, value too big (0x%08lX)"),
29446 (long) value);
29447 newval |= value << 5; /* 6 - 1. */
29448 break;
29449
29450 default:
29451 as_bad_where (fixP->fx_file, fixP->fx_line,
29452 "Unable to process relocation for thumb opcode: %lx",
29453 (unsigned long) newval);
29454 break;
29455 }
29456 md_number_to_chars (buf, newval, THUMB_SIZE);
29457 break;
29458
29459 case BFD_RELOC_ARM_THUMB_ADD:
29460 /* This is a complicated relocation, since we use it for all of
29461 the following immediate relocations:
29462
29463 3bit ADD/SUB
29464 8bit ADD/SUB
29465 9bit ADD/SUB SP word-aligned
29466 10bit ADD PC/SP word-aligned
29467
29468 The type of instruction being processed is encoded in the
29469 instruction field:
29470
29471 0x8000 SUB
29472 0x00F0 Rd
29473 0x000F Rs
29474 */
29475 newval = md_chars_to_number (buf, THUMB_SIZE);
29476 {
29477 int rd = (newval >> 4) & 0xf;
29478 int rs = newval & 0xf;
29479 int subtract = !!(newval & 0x8000);
29480
29481 /* Check for HI regs, only very restricted cases allowed:
29482 Adjusting SP, and using PC or SP to get an address. */
29483 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
29484 || (rs > 7 && rs != REG_SP && rs != REG_PC))
29485 as_bad_where (fixP->fx_file, fixP->fx_line,
29486 _("invalid Hi register with immediate"));
29487
29488 /* If value is negative, choose the opposite instruction. */
29489 if ((offsetT) value < 0)
29490 {
29491 value = -value;
29492 subtract = !subtract;
29493 if ((offsetT) value < 0)
29494 as_bad_where (fixP->fx_file, fixP->fx_line,
29495 _("immediate value out of range"));
29496 }
29497
29498 if (rd == REG_SP)
29499 {
29500 if (value & ~0x1fc)
29501 as_bad_where (fixP->fx_file, fixP->fx_line,
29502 _("invalid immediate for stack address calculation"));
29503 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
29504 newval |= value >> 2;
29505 }
29506 else if (rs == REG_PC || rs == REG_SP)
29507 {
29508 /* PR gas/18541. If the addition is for a defined symbol
29509 within range of an ADR instruction then accept it. */
29510 if (subtract
29511 && value == 4
29512 && fixP->fx_addsy != NULL)
29513 {
29514 subtract = 0;
29515
29516 if (! S_IS_DEFINED (fixP->fx_addsy)
29517 || S_GET_SEGMENT (fixP->fx_addsy) != seg
29518 || S_IS_WEAK (fixP->fx_addsy))
29519 {
29520 as_bad_where (fixP->fx_file, fixP->fx_line,
29521 _("address calculation needs a strongly defined nearby symbol"));
29522 }
29523 else
29524 {
29525 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
29526
29527 /* Round up to the next 4-byte boundary. */
29528 if (v & 3)
29529 v = (v + 3) & ~ 3;
29530 else
29531 v += 4;
29532 v = S_GET_VALUE (fixP->fx_addsy) - v;
29533
29534 if (v & ~0x3fc)
29535 {
29536 as_bad_where (fixP->fx_file, fixP->fx_line,
29537 _("symbol too far away"));
29538 }
29539 else
29540 {
29541 fixP->fx_done = 1;
29542 value = v;
29543 }
29544 }
29545 }
29546
29547 if (subtract || value & ~0x3fc)
29548 as_bad_where (fixP->fx_file, fixP->fx_line,
29549 _("invalid immediate for address calculation (value = 0x%08lX)"),
29550 (unsigned long) (subtract ? - value : value));
29551 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
29552 newval |= rd << 8;
29553 newval |= value >> 2;
29554 }
29555 else if (rs == rd)
29556 {
29557 if (value & ~0xff)
29558 as_bad_where (fixP->fx_file, fixP->fx_line,
29559 _("immediate value out of range"));
29560 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
29561 newval |= (rd << 8) | value;
29562 }
29563 else
29564 {
29565 if (value & ~0x7)
29566 as_bad_where (fixP->fx_file, fixP->fx_line,
29567 _("immediate value out of range"));
29568 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
29569 newval |= rd | (rs << 3) | (value << 6);
29570 }
29571 }
29572 md_number_to_chars (buf, newval, THUMB_SIZE);
29573 break;
29574
29575 case BFD_RELOC_ARM_THUMB_IMM:
29576 newval = md_chars_to_number (buf, THUMB_SIZE);
29577 if (value > 255)
29578 as_bad_where (fixP->fx_file, fixP->fx_line,
29579 _("invalid immediate: %ld is out of range"),
29580 (long) value);
29581 newval |= value;
29582 md_number_to_chars (buf, newval, THUMB_SIZE);
29583 break;
29584
29585 case BFD_RELOC_ARM_THUMB_SHIFT:
29586 /* 5bit shift value (0..32). LSL cannot take 32. */
29587 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
29588 temp = newval & 0xf800;
29589 if (value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
29590 as_bad_where (fixP->fx_file, fixP->fx_line,
29591 _("invalid shift value: %ld"), (long) value);
29592 /* Shifts of zero must be encoded as LSL. */
29593 if (value == 0)
29594 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
29595 /* Shifts of 32 are encoded as zero. */
29596 else if (value == 32)
29597 value = 0;
29598 newval |= value << 6;
29599 md_number_to_chars (buf, newval, THUMB_SIZE);
29600 break;
29601
29602 case BFD_RELOC_VTABLE_INHERIT:
29603 case BFD_RELOC_VTABLE_ENTRY:
29604 fixP->fx_done = 0;
29605 return;
29606
29607 case BFD_RELOC_ARM_MOVW:
29608 case BFD_RELOC_ARM_MOVT:
29609 case BFD_RELOC_ARM_THUMB_MOVW:
29610 case BFD_RELOC_ARM_THUMB_MOVT:
29611 if (fixP->fx_done || !seg->use_rela_p)
29612 {
29613 /* REL format relocations are limited to a 16-bit addend. */
29614 if (!fixP->fx_done)
29615 {
29616 if (value + 0x8000 > 0x7fff + 0x8000)
29617 as_bad_where (fixP->fx_file, fixP->fx_line,
29618 _("offset out of range"));
29619 }
29620 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
29621 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
29622 {
29623 value >>= 16;
29624 }
29625
29626 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
29627 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
29628 {
29629 newval = get_thumb32_insn (buf);
29630 newval &= 0xfbf08f00;
29631 newval |= (value & 0xf000) << 4;
29632 newval |= (value & 0x0800) << 15;
29633 newval |= (value & 0x0700) << 4;
29634 newval |= (value & 0x00ff);
29635 put_thumb32_insn (buf, newval);
29636 }
29637 else
29638 {
29639 newval = md_chars_to_number (buf, 4);
29640 newval &= 0xfff0f000;
29641 newval |= value & 0x0fff;
29642 newval |= (value & 0xf000) << 4;
29643 md_number_to_chars (buf, newval, 4);
29644 }
29645 }
29646 return;
29647
29648 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
29649 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
29650 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
29651 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
29652 gas_assert (!fixP->fx_done);
29653 {
29654 bfd_vma insn;
29655 bool is_mov;
29656 bfd_vma encoded_addend = value;
29657
29658 /* Check that addend can be encoded in instruction. */
29659 if (!seg->use_rela_p && value > 255)
29660 as_bad_where (fixP->fx_file, fixP->fx_line,
29661 _("the offset 0x%08lX is not representable"),
29662 (unsigned long) encoded_addend);
29663
29664 /* Extract the instruction. */
29665 insn = md_chars_to_number (buf, THUMB_SIZE);
29666 is_mov = (insn & 0xf800) == 0x2000;
29667
29668 /* Encode insn. */
29669 if (is_mov)
29670 {
29671 if (!seg->use_rela_p)
29672 insn |= encoded_addend;
29673 }
29674 else
29675 {
29676 int rd, rs;
29677
29678 /* Extract the instruction. */
29679 /* Encoding is the following
29680 0x8000 SUB
29681 0x00F0 Rd
29682 0x000F Rs
29683 */
29684 /* The following conditions must be true :
29685 - ADD
29686 - Rd == Rs
29687 - Rd <= 7
29688 */
29689 rd = (insn >> 4) & 0xf;
29690 rs = insn & 0xf;
29691 if ((insn & 0x8000) || (rd != rs) || rd > 7)
29692 as_bad_where (fixP->fx_file, fixP->fx_line,
29693 _("Unable to process relocation for thumb opcode: %lx"),
29694 (unsigned long) insn);
29695
29696 /* Encode as ADD immediate8 thumb 1 code. */
29697 insn = 0x3000 | (rd << 8);
29698
29699 /* Place the encoded addend into the first 8 bits of the
29700 instruction. */
29701 if (!seg->use_rela_p)
29702 insn |= encoded_addend;
29703 }
29704
29705 /* Update the instruction. */
29706 md_number_to_chars (buf, insn, THUMB_SIZE);
29707 }
29708 break;
29709
29710 case BFD_RELOC_ARM_ALU_PC_G0_NC:
29711 case BFD_RELOC_ARM_ALU_PC_G0:
29712 case BFD_RELOC_ARM_ALU_PC_G1_NC:
29713 case BFD_RELOC_ARM_ALU_PC_G1:
29714 case BFD_RELOC_ARM_ALU_PC_G2:
29715 case BFD_RELOC_ARM_ALU_SB_G0_NC:
29716 case BFD_RELOC_ARM_ALU_SB_G0:
29717 case BFD_RELOC_ARM_ALU_SB_G1_NC:
29718 case BFD_RELOC_ARM_ALU_SB_G1:
29719 case BFD_RELOC_ARM_ALU_SB_G2:
29720 gas_assert (!fixP->fx_done);
29721 if (!seg->use_rela_p)
29722 {
29723 bfd_vma insn;
29724 bfd_vma encoded_addend;
29725 bfd_vma addend_abs = llabs ((offsetT) value);
29726
29727 /* Check that the absolute value of the addend can be
29728 expressed as an 8-bit constant plus a rotation. */
29729 encoded_addend = encode_arm_immediate (addend_abs);
29730 if (encoded_addend == (unsigned int) FAIL)
29731 as_bad_where (fixP->fx_file, fixP->fx_line,
29732 _("the offset 0x%08lX is not representable"),
29733 (unsigned long) addend_abs);
29734
29735 /* Extract the instruction. */
29736 insn = md_chars_to_number (buf, INSN_SIZE);
29737
29738 /* If the addend is positive, use an ADD instruction.
29739 Otherwise use a SUB. Take care not to destroy the S bit. */
29740 insn &= 0xff1fffff;
29741 if ((offsetT) value < 0)
29742 insn |= 1 << 22;
29743 else
29744 insn |= 1 << 23;
29745
29746 /* Place the encoded addend into the first 12 bits of the
29747 instruction. */
29748 insn &= 0xfffff000;
29749 insn |= encoded_addend;
29750
29751 /* Update the instruction. */
29752 md_number_to_chars (buf, insn, INSN_SIZE);
29753 }
29754 break;
29755
29756 case BFD_RELOC_ARM_LDR_PC_G0:
29757 case BFD_RELOC_ARM_LDR_PC_G1:
29758 case BFD_RELOC_ARM_LDR_PC_G2:
29759 case BFD_RELOC_ARM_LDR_SB_G0:
29760 case BFD_RELOC_ARM_LDR_SB_G1:
29761 case BFD_RELOC_ARM_LDR_SB_G2:
29762 gas_assert (!fixP->fx_done);
29763 if (!seg->use_rela_p)
29764 {
29765 bfd_vma insn;
29766 bfd_vma addend_abs = llabs ((offsetT) value);
29767
29768 /* Check that the absolute value of the addend can be
29769 encoded in 12 bits. */
29770 if (addend_abs >= 0x1000)
29771 as_bad_where (fixP->fx_file, fixP->fx_line,
29772 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
29773 (unsigned long) addend_abs);
29774
29775 /* Extract the instruction. */
29776 insn = md_chars_to_number (buf, INSN_SIZE);
29777
29778 /* If the addend is negative, clear bit 23 of the instruction.
29779 Otherwise set it. */
29780 if ((offsetT) value < 0)
29781 insn &= ~(1 << 23);
29782 else
29783 insn |= 1 << 23;
29784
29785 /* Place the absolute value of the addend into the first 12 bits
29786 of the instruction. */
29787 insn &= 0xfffff000;
29788 insn |= addend_abs;
29789
29790 /* Update the instruction. */
29791 md_number_to_chars (buf, insn, INSN_SIZE);
29792 }
29793 break;
29794
29795 case BFD_RELOC_ARM_LDRS_PC_G0:
29796 case BFD_RELOC_ARM_LDRS_PC_G1:
29797 case BFD_RELOC_ARM_LDRS_PC_G2:
29798 case BFD_RELOC_ARM_LDRS_SB_G0:
29799 case BFD_RELOC_ARM_LDRS_SB_G1:
29800 case BFD_RELOC_ARM_LDRS_SB_G2:
29801 gas_assert (!fixP->fx_done);
29802 if (!seg->use_rela_p)
29803 {
29804 bfd_vma insn;
29805 bfd_vma addend_abs = llabs ((offsetT) value);
29806
29807 /* Check that the absolute value of the addend can be
29808 encoded in 8 bits. */
29809 if (addend_abs >= 0x100)
29810 as_bad_where (fixP->fx_file, fixP->fx_line,
29811 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
29812 (unsigned long) addend_abs);
29813
29814 /* Extract the instruction. */
29815 insn = md_chars_to_number (buf, INSN_SIZE);
29816
29817 /* If the addend is negative, clear bit 23 of the instruction.
29818 Otherwise set it. */
29819 if ((offsetT) value < 0)
29820 insn &= ~(1 << 23);
29821 else
29822 insn |= 1 << 23;
29823
29824 /* Place the first four bits of the absolute value of the addend
29825 into the first 4 bits of the instruction, and the remaining
29826 four into bits 8 .. 11. */
29827 insn &= 0xfffff0f0;
29828 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
29829
29830 /* Update the instruction. */
29831 md_number_to_chars (buf, insn, INSN_SIZE);
29832 }
29833 break;
29834
29835 case BFD_RELOC_ARM_LDC_PC_G0:
29836 case BFD_RELOC_ARM_LDC_PC_G1:
29837 case BFD_RELOC_ARM_LDC_PC_G2:
29838 case BFD_RELOC_ARM_LDC_SB_G0:
29839 case BFD_RELOC_ARM_LDC_SB_G1:
29840 case BFD_RELOC_ARM_LDC_SB_G2:
29841 gas_assert (!fixP->fx_done);
29842 if (!seg->use_rela_p)
29843 {
29844 bfd_vma insn;
29845 bfd_vma addend_abs = llabs ((offsetT) value);
29846
29847 /* Check that the absolute value of the addend is a multiple of
29848 four and, when divided by four, fits in 8 bits. */
29849 if (addend_abs & 0x3)
29850 as_bad_where (fixP->fx_file, fixP->fx_line,
29851 _("bad offset 0x%08lX (must be word-aligned)"),
29852 (unsigned long) addend_abs);
29853
29854 if ((addend_abs >> 2) > 0xff)
29855 as_bad_where (fixP->fx_file, fixP->fx_line,
29856 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
29857 (unsigned long) addend_abs);
29858
29859 /* Extract the instruction. */
29860 insn = md_chars_to_number (buf, INSN_SIZE);
29861
29862 /* If the addend is negative, clear bit 23 of the instruction.
29863 Otherwise set it. */
29864 if ((offsetT) value < 0)
29865 insn &= ~(1 << 23);
29866 else
29867 insn |= 1 << 23;
29868
29869 /* Place the addend (divided by four) into the first eight
29870 bits of the instruction. */
29871 insn &= 0xfffffff0;
29872 insn |= addend_abs >> 2;
29873
29874 /* Update the instruction. */
29875 md_number_to_chars (buf, insn, INSN_SIZE);
29876 }
29877 break;
29878
29879 case BFD_RELOC_THUMB_PCREL_BRANCH5:
29880 if (fixP->fx_addsy
29881 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29882 && !S_FORCE_RELOC (fixP->fx_addsy, true)
29883 && ARM_IS_FUNC (fixP->fx_addsy)
29884 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29885 {
29886 /* Force a relocation for a branch 5 bits wide. */
29887 fixP->fx_done = 0;
29888 }
29889 if (v8_1_branch_value_check (value, 5, false) == FAIL)
29890 as_bad_where (fixP->fx_file, fixP->fx_line,
29891 BAD_BRANCH_OFF);
29892
29893 if (fixP->fx_done || !seg->use_rela_p)
29894 {
29895 addressT boff = value >> 1;
29896
29897 newval = md_chars_to_number (buf, THUMB_SIZE);
29898 newval |= (boff << 7);
29899 md_number_to_chars (buf, newval, THUMB_SIZE);
29900 }
29901 break;
29902
29903 case BFD_RELOC_THUMB_PCREL_BFCSEL:
29904 if (fixP->fx_addsy
29905 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29906 && !S_FORCE_RELOC (fixP->fx_addsy, true)
29907 && ARM_IS_FUNC (fixP->fx_addsy)
29908 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29909 {
29910 fixP->fx_done = 0;
29911 }
29912 if ((value & ~0x7f) && ((value & ~0x3f) != (valueT) ~0x3f))
29913 as_bad_where (fixP->fx_file, fixP->fx_line,
29914 _("branch out of range"));
29915
29916 if (fixP->fx_done || !seg->use_rela_p)
29917 {
29918 newval = md_chars_to_number (buf, THUMB_SIZE);
29919
29920 addressT boff = ((newval & 0x0780) >> 7) << 1;
29921 addressT diff = value - boff;
29922
29923 if (diff == 4)
29924 {
29925 newval |= 1 << 1; /* T bit. */
29926 }
29927 else if (diff != 2)
29928 {
29929 as_bad_where (fixP->fx_file, fixP->fx_line,
29930 _("out of range label-relative fixup value"));
29931 }
29932 md_number_to_chars (buf, newval, THUMB_SIZE);
29933 }
29934 break;
29935
29936 case BFD_RELOC_ARM_THUMB_BF17:
29937 if (fixP->fx_addsy
29938 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29939 && !S_FORCE_RELOC (fixP->fx_addsy, true)
29940 && ARM_IS_FUNC (fixP->fx_addsy)
29941 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29942 {
29943 /* Force a relocation for a branch 17 bits wide. */
29944 fixP->fx_done = 0;
29945 }
29946
29947 if (v8_1_branch_value_check (value, 17, true) == FAIL)
29948 as_bad_where (fixP->fx_file, fixP->fx_line,
29949 BAD_BRANCH_OFF);
29950
29951 if (fixP->fx_done || !seg->use_rela_p)
29952 {
29953 offsetT newval2;
29954 addressT immA, immB, immC;
29955
29956 immA = (value & 0x0001f000) >> 12;
29957 immB = (value & 0x00000ffc) >> 2;
29958 immC = (value & 0x00000002) >> 1;
29959
29960 newval = md_chars_to_number (buf, THUMB_SIZE);
29961 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29962 newval |= immA;
29963 newval2 |= (immC << 11) | (immB << 1);
29964 md_number_to_chars (buf, newval, THUMB_SIZE);
29965 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29966 }
29967 break;
29968
29969 case BFD_RELOC_ARM_THUMB_BF19:
29970 if (fixP->fx_addsy
29971 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29972 && !S_FORCE_RELOC (fixP->fx_addsy, true)
29973 && ARM_IS_FUNC (fixP->fx_addsy)
29974 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29975 {
29976 /* Force a relocation for a branch 19 bits wide. */
29977 fixP->fx_done = 0;
29978 }
29979
29980 if (v8_1_branch_value_check (value, 19, true) == FAIL)
29981 as_bad_where (fixP->fx_file, fixP->fx_line,
29982 BAD_BRANCH_OFF);
29983
29984 if (fixP->fx_done || !seg->use_rela_p)
29985 {
29986 offsetT newval2;
29987 addressT immA, immB, immC;
29988
29989 immA = (value & 0x0007f000) >> 12;
29990 immB = (value & 0x00000ffc) >> 2;
29991 immC = (value & 0x00000002) >> 1;
29992
29993 newval = md_chars_to_number (buf, THUMB_SIZE);
29994 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29995 newval |= immA;
29996 newval2 |= (immC << 11) | (immB << 1);
29997 md_number_to_chars (buf, newval, THUMB_SIZE);
29998 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29999 }
30000 break;
30001
30002 case BFD_RELOC_ARM_THUMB_BF13:
30003 if (fixP->fx_addsy
30004 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
30005 && !S_FORCE_RELOC (fixP->fx_addsy, true)
30006 && ARM_IS_FUNC (fixP->fx_addsy)
30007 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
30008 {
30009 /* Force a relocation for a branch 13 bits wide. */
30010 fixP->fx_done = 0;
30011 }
30012
30013 if (v8_1_branch_value_check (value, 13, true) == FAIL)
30014 as_bad_where (fixP->fx_file, fixP->fx_line,
30015 BAD_BRANCH_OFF);
30016
30017 if (fixP->fx_done || !seg->use_rela_p)
30018 {
30019 offsetT newval2;
30020 addressT immA, immB, immC;
30021
30022 immA = (value & 0x00001000) >> 12;
30023 immB = (value & 0x00000ffc) >> 2;
30024 immC = (value & 0x00000002) >> 1;
30025
30026 newval = md_chars_to_number (buf, THUMB_SIZE);
30027 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
30028 newval |= immA;
30029 newval2 |= (immC << 11) | (immB << 1);
30030 md_number_to_chars (buf, newval, THUMB_SIZE);
30031 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
30032 }
30033 break;
30034
30035 case BFD_RELOC_ARM_THUMB_LOOP12:
30036 if (fixP->fx_addsy
30037 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
30038 && !S_FORCE_RELOC (fixP->fx_addsy, true)
30039 && ARM_IS_FUNC (fixP->fx_addsy)
30040 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
30041 {
30042 /* Force a relocation for a branch 12 bits wide. */
30043 fixP->fx_done = 0;
30044 }
30045
30046 bfd_vma insn = get_thumb32_insn (buf);
30047 /* le lr, <label>, le <label> or letp lr, <label> */
30048 if (((insn & 0xffffffff) == 0xf00fc001)
30049 || ((insn & 0xffffffff) == 0xf02fc001)
30050 || ((insn & 0xffffffff) == 0xf01fc001))
30051 value = -value;
30052
30053 if (v8_1_branch_value_check (value, 12, false) == FAIL)
30054 as_bad_where (fixP->fx_file, fixP->fx_line,
30055 BAD_BRANCH_OFF);
30056 if (fixP->fx_done || !seg->use_rela_p)
30057 {
30058 addressT imml, immh;
30059
30060 immh = (value & 0x00000ffc) >> 2;
30061 imml = (value & 0x00000002) >> 1;
30062
30063 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
30064 newval |= (imml << 11) | (immh << 1);
30065 md_number_to_chars (buf + THUMB_SIZE, newval, THUMB_SIZE);
30066 }
30067 break;
30068
30069 case BFD_RELOC_ARM_V4BX:
30070 /* This will need to go in the object file. */
30071 fixP->fx_done = 0;
30072 break;
30073
30074 case BFD_RELOC_UNUSED:
30075 default:
30076 as_bad_where (fixP->fx_file, fixP->fx_line,
30077 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
30078 }
30079 }
30080
30081 /* Translate internal representation of relocation info to BFD target
30082 format. */
30083
30084 arelent *
30085 tc_gen_reloc (asection *section, fixS *fixp)
30086 {
30087 arelent * reloc;
30088 bfd_reloc_code_real_type code;
30089
30090 reloc = XNEW (arelent);
30091
30092 reloc->sym_ptr_ptr = XNEW (asymbol *);
30093 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
30094 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
30095
30096 if (fixp->fx_pcrel)
30097 {
30098 if (section->use_rela_p)
30099 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
30100 else
30101 fixp->fx_offset = reloc->address;
30102 }
30103 reloc->addend = fixp->fx_offset;
30104
30105 switch (fixp->fx_r_type)
30106 {
30107 case BFD_RELOC_8:
30108 if (fixp->fx_pcrel)
30109 {
30110 code = BFD_RELOC_8_PCREL;
30111 break;
30112 }
30113 /* Fall through. */
30114
30115 case BFD_RELOC_16:
30116 if (fixp->fx_pcrel)
30117 {
30118 code = BFD_RELOC_16_PCREL;
30119 break;
30120 }
30121 /* Fall through. */
30122
30123 case BFD_RELOC_32:
30124 if (fixp->fx_pcrel)
30125 {
30126 code = BFD_RELOC_32_PCREL;
30127 break;
30128 }
30129 /* Fall through. */
30130
30131 case BFD_RELOC_ARM_MOVW:
30132 if (fixp->fx_pcrel)
30133 {
30134 code = BFD_RELOC_ARM_MOVW_PCREL;
30135 break;
30136 }
30137 /* Fall through. */
30138
30139 case BFD_RELOC_ARM_MOVT:
30140 if (fixp->fx_pcrel)
30141 {
30142 code = BFD_RELOC_ARM_MOVT_PCREL;
30143 break;
30144 }
30145 /* Fall through. */
30146
30147 case BFD_RELOC_ARM_THUMB_MOVW:
30148 if (fixp->fx_pcrel)
30149 {
30150 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
30151 break;
30152 }
30153 /* Fall through. */
30154
30155 case BFD_RELOC_ARM_THUMB_MOVT:
30156 if (fixp->fx_pcrel)
30157 {
30158 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
30159 break;
30160 }
30161 /* Fall through. */
30162
30163 case BFD_RELOC_NONE:
30164 case BFD_RELOC_ARM_PCREL_BRANCH:
30165 case BFD_RELOC_ARM_PCREL_BLX:
30166 case BFD_RELOC_RVA:
30167 case BFD_RELOC_THUMB_PCREL_BRANCH7:
30168 case BFD_RELOC_THUMB_PCREL_BRANCH9:
30169 case BFD_RELOC_THUMB_PCREL_BRANCH12:
30170 case BFD_RELOC_THUMB_PCREL_BRANCH20:
30171 case BFD_RELOC_THUMB_PCREL_BRANCH23:
30172 case BFD_RELOC_THUMB_PCREL_BRANCH25:
30173 case BFD_RELOC_VTABLE_ENTRY:
30174 case BFD_RELOC_VTABLE_INHERIT:
30175 #ifdef TE_PE
30176 case BFD_RELOC_32_SECREL:
30177 #endif
30178 code = fixp->fx_r_type;
30179 break;
30180
30181 case BFD_RELOC_THUMB_PCREL_BLX:
30182 #ifdef OBJ_ELF
30183 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
30184 code = BFD_RELOC_THUMB_PCREL_BRANCH23;
30185 else
30186 #endif
30187 code = BFD_RELOC_THUMB_PCREL_BLX;
30188 break;
30189
30190 case BFD_RELOC_ARM_LITERAL:
30191 case BFD_RELOC_ARM_HWLITERAL:
30192 /* If this is called then the a literal has
30193 been referenced across a section boundary. */
30194 as_bad_where (fixp->fx_file, fixp->fx_line,
30195 _("literal referenced across section boundary"));
30196 return NULL;
30197
30198 #ifdef OBJ_ELF
30199 case BFD_RELOC_ARM_TLS_CALL:
30200 case BFD_RELOC_ARM_THM_TLS_CALL:
30201 case BFD_RELOC_ARM_TLS_DESCSEQ:
30202 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
30203 case BFD_RELOC_ARM_GOT32:
30204 case BFD_RELOC_ARM_GOTOFF:
30205 case BFD_RELOC_ARM_GOT_PREL:
30206 case BFD_RELOC_ARM_PLT32:
30207 case BFD_RELOC_ARM_TARGET1:
30208 case BFD_RELOC_ARM_ROSEGREL32:
30209 case BFD_RELOC_ARM_SBREL32:
30210 case BFD_RELOC_ARM_PREL31:
30211 case BFD_RELOC_ARM_TARGET2:
30212 case BFD_RELOC_ARM_TLS_LDO32:
30213 case BFD_RELOC_ARM_PCREL_CALL:
30214 case BFD_RELOC_ARM_PCREL_JUMP:
30215 case BFD_RELOC_ARM_ALU_PC_G0_NC:
30216 case BFD_RELOC_ARM_ALU_PC_G0:
30217 case BFD_RELOC_ARM_ALU_PC_G1_NC:
30218 case BFD_RELOC_ARM_ALU_PC_G1:
30219 case BFD_RELOC_ARM_ALU_PC_G2:
30220 case BFD_RELOC_ARM_LDR_PC_G0:
30221 case BFD_RELOC_ARM_LDR_PC_G1:
30222 case BFD_RELOC_ARM_LDR_PC_G2:
30223 case BFD_RELOC_ARM_LDRS_PC_G0:
30224 case BFD_RELOC_ARM_LDRS_PC_G1:
30225 case BFD_RELOC_ARM_LDRS_PC_G2:
30226 case BFD_RELOC_ARM_LDC_PC_G0:
30227 case BFD_RELOC_ARM_LDC_PC_G1:
30228 case BFD_RELOC_ARM_LDC_PC_G2:
30229 case BFD_RELOC_ARM_ALU_SB_G0_NC:
30230 case BFD_RELOC_ARM_ALU_SB_G0:
30231 case BFD_RELOC_ARM_ALU_SB_G1_NC:
30232 case BFD_RELOC_ARM_ALU_SB_G1:
30233 case BFD_RELOC_ARM_ALU_SB_G2:
30234 case BFD_RELOC_ARM_LDR_SB_G0:
30235 case BFD_RELOC_ARM_LDR_SB_G1:
30236 case BFD_RELOC_ARM_LDR_SB_G2:
30237 case BFD_RELOC_ARM_LDRS_SB_G0:
30238 case BFD_RELOC_ARM_LDRS_SB_G1:
30239 case BFD_RELOC_ARM_LDRS_SB_G2:
30240 case BFD_RELOC_ARM_LDC_SB_G0:
30241 case BFD_RELOC_ARM_LDC_SB_G1:
30242 case BFD_RELOC_ARM_LDC_SB_G2:
30243 case BFD_RELOC_ARM_V4BX:
30244 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
30245 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
30246 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
30247 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
30248 case BFD_RELOC_ARM_GOTFUNCDESC:
30249 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
30250 case BFD_RELOC_ARM_FUNCDESC:
30251 case BFD_RELOC_ARM_THUMB_BF17:
30252 case BFD_RELOC_ARM_THUMB_BF19:
30253 case BFD_RELOC_ARM_THUMB_BF13:
30254 code = fixp->fx_r_type;
30255 break;
30256
30257 case BFD_RELOC_ARM_TLS_GOTDESC:
30258 case BFD_RELOC_ARM_TLS_GD32:
30259 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
30260 case BFD_RELOC_ARM_TLS_LE32:
30261 case BFD_RELOC_ARM_TLS_IE32:
30262 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
30263 case BFD_RELOC_ARM_TLS_LDM32:
30264 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
30265 /* BFD will include the symbol's address in the addend.
30266 But we don't want that, so subtract it out again here. */
30267 if (!S_IS_COMMON (fixp->fx_addsy))
30268 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
30269 code = fixp->fx_r_type;
30270 break;
30271 #endif
30272
30273 case BFD_RELOC_ARM_IMMEDIATE:
30274 as_bad_where (fixp->fx_file, fixp->fx_line,
30275 _("internal relocation (type: IMMEDIATE) not fixed up"));
30276 return NULL;
30277
30278 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
30279 as_bad_where (fixp->fx_file, fixp->fx_line,
30280 _("ADRL used for a symbol not defined in the same file"));
30281 return NULL;
30282
30283 case BFD_RELOC_THUMB_PCREL_BRANCH5:
30284 case BFD_RELOC_THUMB_PCREL_BFCSEL:
30285 case BFD_RELOC_ARM_THUMB_LOOP12:
30286 as_bad_where (fixp->fx_file, fixp->fx_line,
30287 _("%s used for a symbol not defined in the same file"),
30288 bfd_get_reloc_code_name (fixp->fx_r_type));
30289 return NULL;
30290
30291 case BFD_RELOC_ARM_OFFSET_IMM:
30292 if (section->use_rela_p)
30293 {
30294 code = fixp->fx_r_type;
30295 break;
30296 }
30297
30298 if (fixp->fx_addsy != NULL
30299 && !S_IS_DEFINED (fixp->fx_addsy)
30300 && S_IS_LOCAL (fixp->fx_addsy))
30301 {
30302 as_bad_where (fixp->fx_file, fixp->fx_line,
30303 _("undefined local label `%s'"),
30304 S_GET_NAME (fixp->fx_addsy));
30305 return NULL;
30306 }
30307
30308 as_bad_where (fixp->fx_file, fixp->fx_line,
30309 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
30310 return NULL;
30311
30312 default:
30313 {
30314 const char * type;
30315
30316 switch (fixp->fx_r_type)
30317 {
30318 case BFD_RELOC_NONE: type = "NONE"; break;
30319 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
30320 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
30321 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
30322 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
30323 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
30324 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
30325 case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
30326 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
30327 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
30328 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
30329 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
30330 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
30331 default: type = _("<unknown>"); break;
30332 }
30333 as_bad_where (fixp->fx_file, fixp->fx_line,
30334 _("cannot represent %s relocation in this object file format"),
30335 type);
30336 return NULL;
30337 }
30338 }
30339
30340 #ifdef OBJ_ELF
30341 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
30342 && GOT_symbol
30343 && fixp->fx_addsy == GOT_symbol)
30344 {
30345 code = BFD_RELOC_ARM_GOTPC;
30346 reloc->addend = fixp->fx_offset = reloc->address;
30347 }
30348 #endif
30349
30350 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
30351
30352 if (reloc->howto == NULL)
30353 {
30354 as_bad_where (fixp->fx_file, fixp->fx_line,
30355 _("cannot represent %s relocation in this object file format"),
30356 bfd_get_reloc_code_name (code));
30357 return NULL;
30358 }
30359
30360 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
30361 vtable entry to be used in the relocation's section offset. */
30362 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
30363 reloc->address = fixp->fx_offset;
30364
30365 return reloc;
30366 }
30367
30368 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
30369
30370 void
30371 cons_fix_new_arm (fragS * frag,
30372 int where,
30373 int size,
30374 expressionS * exp,
30375 bfd_reloc_code_real_type reloc)
30376 {
30377 int pcrel = 0;
30378
30379 /* Pick a reloc.
30380 FIXME: @@ Should look at CPU word size. */
30381 switch (size)
30382 {
30383 case 1:
30384 reloc = BFD_RELOC_8;
30385 break;
30386 case 2:
30387 reloc = BFD_RELOC_16;
30388 break;
30389 case 4:
30390 default:
30391 reloc = BFD_RELOC_32;
30392 break;
30393 case 8:
30394 reloc = BFD_RELOC_64;
30395 break;
30396 }
30397
30398 #ifdef TE_PE
30399 if (exp->X_op == O_secrel)
30400 {
30401 exp->X_op = O_symbol;
30402 reloc = BFD_RELOC_32_SECREL;
30403 }
30404 #endif
30405
30406 fix_new_exp (frag, where, size, exp, pcrel, reloc);
30407 }
30408
#if defined (OBJ_COFF)
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (fixP->fx_r_type != BFD_RELOC_THUMB_PCREL_BRANCH23)
    return;

  if (fixP->fx_addsy == NULL
      || ! S_IS_DEFINED (fixP->fx_addsy)
      || THUMB_IS_FUNC (fixP->fx_addsy))
    return;

  fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
30426
30427
30428 int
30429 arm_force_relocation (struct fix * fixp)
30430 {
30431 #if defined (OBJ_COFF) && defined (TE_PE)
30432 if (fixp->fx_r_type == BFD_RELOC_RVA)
30433 return 1;
30434 #endif
30435
30436 /* In case we have a call or a branch to a function in ARM ISA mode from
30437 a thumb function or vice-versa force the relocation. These relocations
30438 are cleared off for some cores that might have blx and simple transformations
30439 are possible. */
30440
30441 #ifdef OBJ_ELF
30442 switch (fixp->fx_r_type)
30443 {
30444 case BFD_RELOC_ARM_PCREL_JUMP:
30445 case BFD_RELOC_ARM_PCREL_CALL:
30446 case BFD_RELOC_THUMB_PCREL_BLX:
30447 if (THUMB_IS_FUNC (fixp->fx_addsy))
30448 return 1;
30449 break;
30450
30451 case BFD_RELOC_ARM_PCREL_BLX:
30452 case BFD_RELOC_THUMB_PCREL_BRANCH25:
30453 case BFD_RELOC_THUMB_PCREL_BRANCH20:
30454 case BFD_RELOC_THUMB_PCREL_BRANCH23:
30455 if (ARM_IS_FUNC (fixp->fx_addsy))
30456 return 1;
30457 break;
30458
30459 default:
30460 break;
30461 }
30462 #endif
30463
30464 /* Resolve these relocations even if the symbol is extern or weak.
30465 Technically this is probably wrong due to symbol preemption.
30466 In practice these relocations do not have enough range to be useful
30467 at dynamic link time, and some code (e.g. in the Linux kernel)
30468 expects these references to be resolved. */
30469 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
30470 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
30471 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
30472 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
30473 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
30474 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
30475 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
30476 || fixp->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH12
30477 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
30478 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
30479 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
30480 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
30481 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
30482 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
30483 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
30484 return 0;
30485
30486 /* Always leave these relocations for the linker. */
30487 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
30488 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
30489 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
30490 return 1;
30491
30492 /* Always generate relocations against function symbols. */
30493 if (fixp->fx_r_type == BFD_RELOC_32
30494 && fixp->fx_addsy
30495 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
30496 return 1;
30497
30498 return generic_force_reloc (fixp);
30499 }
30500
30501 #if defined (OBJ_ELF) || defined (OBJ_COFF)
30502 /* Relocations against function names must be left unadjusted,
30503 so that the linker can use this information to generate interworking
30504 stubs. The MIPS version of this function
30505 also prevents relocations that are mips-16 specific, but I do not
30506 know why it does this.
30507
30508 FIXME:
30509 There is one other problem that ought to be addressed here, but
30510 which currently is not: Taking the address of a label (rather
30511 than a function) and then later jumping to that address. Such
30512 addresses also ought to have their bottom bit set (assuming that
30513 they reside in Thumb code), but at the moment they will not. */
30514
30515 bool
30516 arm_fix_adjustable (fixS * fixP)
30517 {
30518 if (fixP->fx_addsy == NULL)
30519 return 1;
30520
30521 /* Preserve relocations against symbols with function type. */
30522 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
30523 return false;
30524
30525 if (THUMB_IS_FUNC (fixP->fx_addsy)
30526 && fixP->fx_subsy == NULL)
30527 return false;
30528
30529 /* We need the symbol name for the VTABLE entries. */
30530 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
30531 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
30532 return false;
30533
30534 /* Don't allow symbols to be discarded on GOT related relocs. */
30535 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
30536 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
30537 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
30538 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
30539 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
30540 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
30541 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
30542 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
30543 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
30544 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
30545 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
30546 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
30547 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
30548 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
30549 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
30550 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
30551 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
30552 return false;
30553
30554 /* Similarly for group relocations. */
30555 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
30556 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
30557 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
30558 return false;
30559
30560 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
30561 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
30562 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
30563 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
30564 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
30565 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
30566 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
30567 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
30568 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
30569 return false;
30570
30571 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
30572 offsets, so keep these symbols. */
30573 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
30574 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
30575 return false;
30576
30577 return true;
30578 }
30579 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
30580
30581 #ifdef OBJ_ELF
30582 const char *
30583 elf32_arm_target_format (void)
30584 {
30585 #if defined (TE_VXWORKS)
30586 return (target_big_endian
30587 ? "elf32-bigarm-vxworks"
30588 : "elf32-littlearm-vxworks");
30589 #elif defined (TE_NACL)
30590 return (target_big_endian
30591 ? "elf32-bigarm-nacl"
30592 : "elf32-littlearm-nacl");
30593 #else
30594 if (arm_fdpic)
30595 {
30596 if (target_big_endian)
30597 return "elf32-bigarm-fdpic";
30598 else
30599 return "elf32-littlearm-fdpic";
30600 }
30601 else
30602 {
30603 if (target_big_endian)
30604 return "elf32-bigarm";
30605 else
30606 return "elf32-littlearm";
30607 }
30608 #endif
30609 }
30610
/* Hook called for each symbol at write-out time; simply defers to the
   generic ELF symbol frobbing (PUNTP is set non-zero to drop SYMP).  */

void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
30617 #endif
30618
30619 /* MD interface: Finalization. */
30620
30621 void
30622 arm_cleanup (void)
30623 {
30624 literal_pool * pool;
30625
30626 /* Ensure that all the predication blocks are properly closed. */
30627 check_pred_blocks_finished ();
30628
30629 for (pool = list_of_pools; pool; pool = pool->next)
30630 {
30631 /* Put it at the end of the relevant section. */
30632 subseg_set (pool->section, pool->sub_section);
30633 #ifdef OBJ_ELF
30634 arm_elf_change_section ();
30635 #endif
30636 s_ltorg (0);
30637 }
30638 }
30639
30640 #ifdef OBJ_ELF
30641 /* Remove any excess mapping symbols generated for alignment frags in
30642 SEC. We may have created a mapping symbol before a zero byte
30643 alignment; remove it if there's a mapping symbol after the
30644 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag in the section, looking at the last mapping symbol
     recorded for each.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the boundary with the next frag; decide
	 whether a following mapping symbol makes it redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
30705 #endif
30706
30707 /* Adjust the symbol table. This marks Thumb symbols as distinct from
30708 ARM ones. */
30709
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* COFF: rewrite the storage class of Thumb symbols so that tools can
     tell Thumb code/functions apart from ARM ones.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking-capable symbols get a distinctive flag byte.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  /* ELF: tag Thumb symbols via st_target_internal / st_info, except for
     mapping symbols ($a/$t/$d and friends), which stay untouched.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
30788
30789 /* MD interface: Initialization. */
30790
30791 static void
30792 set_constant_flonums (void)
30793 {
30794 int i;
30795
30796 for (i = 0; i < NUM_FLOAT_VALS; i++)
30797 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
30798 abort ();
30799 }
30800
30801 /* Auto-select Thumb mode if it's the only available instruction set for the
30802 given architecture. */
30803
static void
autoselect_thumb_from_cpu_variant (void)
{
  /* If the selected CPU lacks the ARM instruction set entirely
     (presumably a Thumb-only core — no arm_ext_v1), switch the
     assembler into Thumb (16-bit) mode up front.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
30810
/* MD interface hook: one-time assembler initialization.  Builds the
   lookup hash tables, resolves the CPU/FPU selection from command-line
   options, sets the object file's private flags, and records the BFD
   machine type.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the hash tables used during parsing.  */
  arm_ops_hsh = str_htab_create ();
  arm_cond_hsh = str_htab_create ();
  arm_vcond_hsh = str_htab_create ();
  arm_shift_hsh = str_htab_create ();
  arm_psr_hsh = str_htab_create ();
  arm_v7m_psr_hsh = str_htab_create ();
  arm_reg_hsh = str_htab_create ();
  arm_reloc_hsh = str_htab_create ();
  arm_barrier_opt_hsh = str_htab_create ();

  /* Populate them from the static tables.  For opcodes, only the first
     entry for a given template name is inserted; later duplicates are
     ignored.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    if (str_hash_find (arm_ops_hsh, insns[i].template_name) == NULL)
      str_hash_insert (arm_ops_hsh, insns[i].template_name, insns + i, 0);
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    str_hash_insert (arm_cond_hsh, conds[i].template_name, conds + i, 0);
  for (i = 0; i < sizeof (vconds) / sizeof (struct asm_cond); i++)
    str_hash_insert (arm_vcond_hsh, vconds[i].template_name, vconds + i, 0);
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    str_hash_insert (arm_shift_hsh, shift_names[i].name, shift_names + i, 0);
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    str_hash_insert (arm_psr_hsh, psrs[i].template_name, psrs + i, 0);
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    str_hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		     v7m_psrs + i, 0);
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    str_hash_insert (arm_reg_hsh, reg_names[i].name, reg_names + i, 0);
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    str_hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		     barrier_opt_names + i, 0);
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      str_hash_insert (arm_reloc_hsh, entry->name, entry, 0);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      selected_arch = *legacy_cpu;
    }
  else if (mcpu_cpu_opt)
    {
      selected_arch = *mcpu_cpu_opt;
      selected_ext = *mcpu_ext_opt;
    }
  else if (march_cpu_opt)
    {
      selected_arch = *march_cpu_opt;
      selected_ext = *march_ext_opt;
    }
  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      selected_fpu = *legacy_fpu;
    }
  else if (mfpu_opt)
    selected_fpu = *mfpu_opt;
  else
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	selected_fpu = *mcpu_fpu_opt;
      else if (march_fpu_opt)
	selected_fpu = *march_fpu_opt;
#else
      selected_fpu = fpu_default;
#endif
    }

  /* Still no FPU: fall back to the build default, or to FPA when no CPU
     at all was selected.  */
  if (ARM_FEATURE_ZERO (selected_fpu))
    {
      if (!no_cpu_selected ())
	selected_fpu = fpu_default;
      else
	selected_fpu = fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (ARM_FEATURE_ZERO (selected_arch))
    {
      selected_arch = cpu_default;
      selected_cpu = selected_arch;
    }
  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#else
  /* Autodetection of feature mode: allow all features in cpu_variant but leave
     selected_cpu unset.  It will be set in aeabi_set_public_attributes ()
     after all instructions have been processed and we can decide what CPU
     should be selected.  */
  if (ARM_FEATURE_ZERO (selected_arch))
    ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
  else
    ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#endif

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.  */
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags (sec, SEC_READONLY | SEC_DEBUGGING);
	    bfd_set_section_size (sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  The checks run from most to least
     specific feature set.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
31049
31050 /* Command line processing. */
31051
31052 /* md_parse_option
31053 Invocation line includes a switch not recognized by the base assembler.
31054 See if it's a processor-specific option.
31055
31056 This routine is somewhat complicated by the need for backwards
31057 compatibility (since older releases of gcc can't be changed).
31058 The new options try to make the interface as compatible as
31059 possible with GCC.
31060
31061 New options (supported) are:
31062
31063 -mcpu=<cpu name> Assemble for selected processor
31064 -march=<architecture name> Assemble for selected architecture
31065 -mfpu=<fpu architecture> Assemble for selected FPU.
31066 -EB/-mbig-endian Big-endian
31067 -EL/-mlittle-endian Little-endian
31068 -k Generate PIC code
31069 -mthumb Start in Thumb mode
31070 -mthumb-interwork Code supports ARM/Thumb interworking
31071
31072 -m[no-]warn-deprecated Warn about deprecated features
31073 -m[no-]warn-syms Warn when symbols match instructions
31074
31075 For now we will also provide support for:
31076
31077 -mapcs-32 32-bit Program counter
31078 -mapcs-26 26-bit Program counter
  -mapcs-float		  Floats passed in FP registers
31080 -mapcs-reentrant Reentrant code
31081 -matpcs
31082 (sometime these will probably be replaced with -mapcs=<list of options>
31083 and -matpcs=<list of options>)
31084
  The remaining options are only supported for backwards compatibility.
31086 Cpu variants, the arm part is optional:
31087 -m[arm]1 Currently not supported.
31088 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
31089 -m[arm]3 Arm 3 processor
31090 -m[arm]6[xx], Arm 6 processors
31091 -m[arm]7[xx][t][[d]m] Arm 7 processors
31092 -m[arm]8[10] Arm 8 processors
31093 -m[arm]9[20][tdmi] Arm 9 processors
31094 -mstrongarm[110[0]] StrongARM processors
31095 -mxscale XScale processors
31096 -m[arm]v[2345[t[e]]] Arm architectures
31097 -mall All (except the ARM1)
31098 FP variants:
31099 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
31100 -mfpe-old (No float load/store multiples)
31101 -mvfpxd VFP Single precision
31102 -mvfp All VFP
31103 -mno-fpu Disable all floating point instructions
31104
31105 The following CPU names are recognized:
31106 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
31107 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
31108 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
31109 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
31110 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
31111 arm10t arm10e, arm1020t, arm1020e, arm10200e,
31112 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
31113
31114 */
31115
/* Short options: -m<arg> (machine selection, argument required) and
   -k (generate PIC code).  */
const char * md_shortopts = "m:k";
31117
/* getopt values for the long options below.  Only define the
   endianness switch(es) the configured target can actually accept:
   both for bi-endian targets, otherwise just the one matching the
   target's byte order.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
#define OPTION_FDPIC (OPTION_MD_BASE + 3)
31130
/* Long option table; entries are conditional on the OPTION_EB/OPTION_EL
   macros defined above and on the object format.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
#ifdef OBJ_ELF
  {"fdpic", no_argument, NULL, OPTION_FDPIC},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
31147
/* Describes one simple boolean-style option: matching OPTION stores
   VALUE into *VAR (when VAR is non-NULL).  */
struct arm_option_table
{
  const char *  option;		/* Option name to match.  */
  const char *  help;		/* Help information.  */
  int *         var;		/* Variable to change.  */
  int	        value;		/* What to change it to.  */
  const char *  deprecated;	/* If non-null, print this message.  */
};
31156
31157 struct arm_option_table arm_opts[] =
31158 {
31159 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
31160 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
31161 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
31162 &support_interwork, 1, NULL},
31163 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
31164 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
31165 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
31166 1, NULL},
31167 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
31168 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
31169 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
31170 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
31171 NULL},
31172
31173 /* These are recognized by the assembler, but have no affect on code. */
31174 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
31175 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
31176
31177 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
31178 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
31179 &warn_on_deprecated, 0, NULL},
31180
31181 {"mwarn-restrict-it", N_("warn about performance deprecated IT instructions"
31182 " in ARMv8-A and ARMv8-R"), &warn_on_restrict_it, 1, NULL},
31183 {"mno-warn-restrict-it", NULL, &warn_on_restrict_it, 0, NULL},
31184
31185 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), true, NULL},
31186 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), false, NULL},
31187 {NULL, NULL, NULL, 0, NULL}
31188 };
31189
/* Describes one legacy CPU-selection option (e.g. -marm7): matching
   OPTION points *VAR at VALUE, an arm_feature_set; DEPRECATED, when
   non-null, is the replacement suggestion printed to the user.  */
struct arm_legacy_option_table
{
  const char *              option;		/* Option name to match.  */
  const arm_feature_set	**  var;		/* Variable to change.  */
  const arm_feature_set	    value;		/* What to change it to.  */
  const char *              deprecated;		/* If non-null, print this message.  */
};
31197
/* Table of legacy option spellings.  Each entry sets either legacy_cpu
   or legacy_fpu and, when DEPRECATED is non-NULL, emits a warning that
   suggests the modern -mcpu= / -march= / -mfpu= replacement.  Order is
   not significant; lookup is by exact name match.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.	 */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
31310
/* Describes one -mcpu= argument: the core feature set it selects, any
   architectural extensions it implies, and the FPU assumed when the
   user gives no explicit -mfpu=.  */
struct arm_cpu_option_table
{
  const char *	name;		/* -mcpu= argument to match.  */
  size_t	name_len;	/* strlen (name); precomputed by ARM_CPU_OPT.  */
  const arm_feature_set	value;	/* Base architecture feature set.  */
  const arm_feature_set	ext;	/* Extensions implied by this CPU.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *	canonical_name;
};
31324
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Each entry: -mcpu= name, canonical name (NULL => upper-cased NAME),
   base architecture, implied extensions, default FPU.  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }

static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all", NULL, ARM_ANY,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm1", NULL, ARM_ARCH_V1,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm2", NULL, ARM_ARCH_V2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm250", NULL, ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm3", NULL, ARM_ARCH_V2S,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm6", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm60", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm600", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm610", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm620", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7m", NULL, ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7d", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dm", NULL, ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7di", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7dmi", NULL, ARM_ARCH_V3M,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm70", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm700i", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm720t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm740t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm710c", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7100", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7500fe", NULL, ARM_ARCH_V3,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm7tdmi-s", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm8", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm810", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm110", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1100", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("strongarm1110", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm920t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm922t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm940t", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("arm9tdmi", NULL, ARM_ARCH_V4T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa526", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),
  ARM_CPU_OPT ("fa626", NULL, ARM_ARCH_V4,
	       ARM_ARCH_NONE,
	       FPU_ARCH_FPA),

  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0", NULL, ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm9e", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm926ej-s", NULL, ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-r0", NULL, ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm946e-s", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-r0", NULL, ARM_ARCH_V5TExP,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm966e-s", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm968e-s", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm10t", NULL, ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10tdmi", NULL, ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm10e", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1020t", NULL, ARM_ARCH_V5T,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V1),
  ARM_CPU_OPT ("arm1020e", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1022e", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1026ej-s", NULL, ARM_ARCH_V5TEJ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa606te", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa616te", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa626te", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fmp626", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("fa726te", NULL, ARM_ARCH_V5TE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136j-s", NULL, ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1136jf-s", NULL, ARM_ARCH_V6,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2-s", NULL, ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1156t2f-s", NULL, ARM_ARCH_V6T2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("arm1176jz-s", NULL, ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("arm1176jzf-s", NULL, ARM_ARCH_V6KZ,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
  ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_NEON_VFP_V4),
  ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a76ae", "Cortex-A76AE", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a77", "Cortex-A77", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-a78", "Cortex-A78", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST | ARM_EXT2_SB),
	       FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a78ae", "Cortex-A78AE", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST | ARM_EXT2_SB),
	       FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a78c", "Cortex-A78C", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST | ARM_EXT2_SB),
	       FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-a710", "Cortex-A710", ARM_ARCH_V9A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
				      | ARM_EXT2_BF16
				      | ARM_EXT2_I8MM),
	       FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-r52plus", "Cortex-R52+", ARM_ARCH_V8R,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("cortex-m35p", "Cortex-M35P", ARM_ARCH_V8M_MAIN,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM,
	       ARM_ARCH_NONE,
	       FPU_NONE),
  ARM_CPU_OPT ("cortex-x1", "Cortex-X1", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST | ARM_EXT2_SB),
	       FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
  ARM_CPU_OPT ("neoverse-n2", "Neoverse N2", ARM_ARCH_V8_5A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
				      | ARM_EXT2_BF16
				      | ARM_EXT2_I8MM),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4),
  ARM_CPU_OPT ("neoverse-v1", "Neoverse V1", ARM_ARCH_V8_4A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
				      | ARM_EXT2_BF16
				      | ARM_EXT2_I8MM),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4),
  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale", NULL, ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt", NULL, ARM_ARCH_IWMMXT,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("iwmmxt2", NULL, ARM_ARCH_IWMMXT2,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),
  ARM_CPU_OPT ("i80200", NULL, ARM_ARCH_XSCALE,
	       ARM_ARCH_NONE,
	       FPU_ARCH_VFP_V2),

  /* Maverick.	*/
  ARM_CPU_OPT ("ep9312", "ARM920T",
	       ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
	       ARM_ARCH_NONE, FPU_ARCH_MAVERICK),

  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4", NULL, ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_VFP_V3D16),
  ARM_CPU_OPT ("marvell-whitney", NULL, ARM_ARCH_V7A,
	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
	       FPU_ARCH_NEON_VFP_V4),

  /* APM X-Gene family.	 */
  ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A,
	       ARM_ARCH_NONE,
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
  ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A,
	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
31758
/* Describes one +EXT architecture-extension suffix: the feature bits a
   plain +NAME merges in, and the bits a +noNAME form clears.  A clear
   set of ARM_ARCH_NONE means the +no form is not supported (entries
   built with ARM_ADD below).  */
struct arm_ext_table
{
  const char *		  name;		/* Extension name, without the '+'.  */
  size_t		  name_len;	/* strlen (name); precomputed.	*/
  const arm_feature_set	  merge;	/* Bits enabled by +name.  */
  const arm_feature_set	  clear;	/* Bits disabled by +noname.  */
};
31766
/* Describes one -march= argument: the feature set it selects, the FPU
   assumed when the user gives no explicit -mfpu=, and the table of
   +EXT suffixes valid for that architecture (NULL if none).  */
struct arm_arch_option_table
{
  const char *			name;		/* -march= argument to match.  */
  size_t			name_len;	/* strlen (name); precomputed.	*/
  const arm_feature_set		value;		/* Architecture feature set.  */
  const arm_feature_set		default_fpu;	/* FPU assumed by default.  */
  const struct arm_ext_table *	ext_table;	/* Valid +EXT suffixes.	 */
};
31775
/* Helpers for building arm_ext_table entries.	E is the extension name
   (string literal), M the feature bits +E merges in, C the bits +noE
   clears.  */
/* Used to add support for +E and +noE extension.  */
#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
/* Used to add support for a +E extension.  */
#define ARM_ADD(E, M) { E, sizeof(E) - 1, M, ARM_ARCH_NONE }
/* Used to add support for a +noE extension.  */
#define ARM_REMOVE(E, C) { E, sizeof(E) -1, ARM_ARCH_NONE, C }

/* Every floating-point-related feature bit except pure-endianness;
   used as the clear set where +nofp must remove any FP support.  */
#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
			    ~0 & ~FPU_ENDIAN_PURE)
31785
/* Extensions valid for -march=armv5te (and similar v5TE variants).  */
static const struct arm_ext_table armv5te_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31791
/* Extensions valid for -march=armv7.  */
static const struct arm_ext_table armv7_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31797
/* Extensions valid for -march=armv7ve.	 */
static const struct arm_ext_table armv7ve_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16), /* Alias for +fp.	*/
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.	 */
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31820
/* Extensions valid for -march=armv7-a.	 */
static const struct arm_ext_table armv7a_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.	*/
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.	 */
  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),

  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
  ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31845
/* Extensions valid for -march=armv7-r.	 */
static const struct arm_ext_table armv7r_ext_table[] =
{
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD),
  ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD), /* Alias for +fp.sp.	*/
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.	*/
  ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31858
/* Extensions valid for -march=armv7e-m.  */
static const struct arm_ext_table armv7em_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16, ALL_FP),
  /* Alias for +fp, used to be known as fpv4-sp-d16.  */
  ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16),
  ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31869
/* Extensions valid for -march=armv8-a.	 */
static const struct arm_ext_table armv8a_ext_table[] =
{
  ARM_ADD ("crc", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC)),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31884
31885
/* Extensions valid for -march=armv8.1-a.  */
static const struct arm_ext_table armv81a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31899
/* Extensions valid for -march=armv8.2-a (and armv8.3-a).  */
static const struct arm_ext_table armv82a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
  ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31918
/* Extensions valid for -march=armv8.4-a.  */
static const struct arm_ext_table armv84a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31935
/* Extensions valid for -march=armv8.5-a.  SB and PREDRES are mandatory
   from v8.5 on, so no explicit +sb/+predres entries here.  */
static const struct arm_ext_table armv85a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31950
/* Extensions valid for -march=armv8.6-a.  */
static const struct arm_ext_table armv86a_ext_table[] =
{
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};

/* armv8.7-a and armv8.8-a add no new assembler-visible extensions over
   armv8.6-a, so they share its table.	*/
#define armv87a_ext_table armv86a_ext_table
#define armv88a_ext_table armv87a_ext_table
31959
/* Extensions valid for -march=armv9-a.	 */
static const struct arm_ext_table armv9a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv9-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.	 */
  ARM_REMOVE ("fp", ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};

/* armv9.1-a through armv9.3-a add no new assembler-visible extensions,
   so they share the armv8.6-a table.  */
#define armv91a_ext_table armv86a_ext_table
#define armv92a_ext_table armv91a_ext_table
#define armv93a_ext_table armv92a_ext_table
31978
/* The eight Custom Datapath Extension coprocessor options (+cdecpN):
   each enables the generic CDE feature bit plus its own coprocessor
   bit.	 Spliced into the M-profile extension tables below.  */
#define CDE_EXTENSIONS \
  ARM_ADD ("cdecp0", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE0)), \
  ARM_ADD ("cdecp1", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE1)), \
  ARM_ADD ("cdecp2", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE2)), \
  ARM_ADD ("cdecp3", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE3)), \
  ARM_ADD ("cdecp4", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE4)), \
  ARM_ADD ("cdecp5", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE5)), \
  ARM_ADD ("cdecp6", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE6)), \
  ARM_ADD ("cdecp7", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE7))
31988
/* Extensions selectable on top of Armv8-M Mainline: DSP, single- and
   double-precision FP, and the CDE coprocessor options.  */
static const struct arm_ext_table armv8m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP),
		  ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP)),
  /* +fp gives single precision only; +fp.dp upgrades to double.  */
  ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  CDE_EXTENSIONS,
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31998
31999
/* Extensions selectable on top of Armv8.1-M Mainline, including the MVE
   (Helium) vector extension, CDE and PACBTI.  */
static const struct arm_ext_table armv8_1m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP),
		  ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP)),
  /* On v8.1-M, +fp implies FP16 scalar support as well.  */
  ARM_EXT ("fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA),
	   ALL_FP),
  ARM_ADD ("fp.dp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  /* +nomve clears both the integer and FP MVE bits.  */
  ARM_EXT ("mve", ARM_FEATURE (ARM_AEXT_V8M_MAIN_DSP, ARM_EXT2_MVE, 0),
	   ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE | ARM_EXT2_MVE_FP)),
  ARM_ADD ("mve.fp",
	   ARM_FEATURE (ARM_AEXT_V8M_MAIN_DSP,
			ARM_EXT2_FP16_INST | ARM_EXT2_MVE | ARM_EXT2_MVE_FP,
			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  CDE_EXTENSIONS,
  ARM_ADD ("pacbti", ARM_FEATURE_CORE_HIGH_HIGH (ARM_AEXT3_V8_1M_MAIN_PACBTI)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};

#undef CDE_EXTENSIONS
32023
/* Extensions selectable on top of Armv8-R.  Like the v8-A profiles, FP
   without SIMD is not a valid combination, hence the +fp removal entry
   paired with the +fp.sp single-precision-only addition.  */
static const struct arm_ext_table armv8r_ext_table[] =
{
  ARM_ADD ("crc", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC)),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
32034
32035 /* This list should, at a minimum, contain all the architecture names
32036 recognized by GCC. */
32037 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
32038 #define ARM_ARCH_OPT2(N, V, DF, ext) \
32039 { N, sizeof (N) - 1, V, DF, ext##_ext_table }
32040
32041 static const struct arm_arch_option_table arm_archs[] =
32042 {
32043 ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
32044 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
32045 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
32046 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
32047 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
32048 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
32049 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
32050 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
32051 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
32052 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
32053 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
32054 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
32055 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
32056 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
32057 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP, armv5te),
32058 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP, armv5te),
32059 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP, armv5te),
32060 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
32061 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
32062 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP, armv5te),
32063 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP, armv5te),
32064 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
32065 kept to preserve existing behaviour. */
32066 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
32067 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
32068 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP, armv5te),
32069 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP, armv5te),
32070 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP, armv5te),
32071 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
32072 kept to preserve existing behaviour. */
32073 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
32074 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
32075 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
32076 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
32077 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP, armv7),
32078 /* The official spelling of the ARMv7 profile variants is the dashed form.
32079 Accept the non-dashed form for compatibility with old toolchains. */
32080 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
32081 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP, armv7ve),
32082 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
32083 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
32084 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
32085 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
32086 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
32087 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP, armv7em),
32088 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
32089 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP,
32090 armv8m_main),
32091 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN, FPU_ARCH_VFP,
32092 armv8_1m_main),
32093 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP, armv8a),
32094 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP, armv81a),
32095 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP, armv82a),
32096 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP, armv82a),
32097 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP, armv8r),
32098 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP, armv84a),
32099 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A, FPU_ARCH_VFP, armv85a),
32100 ARM_ARCH_OPT2 ("armv8.6-a", ARM_ARCH_V8_6A, FPU_ARCH_VFP, armv86a),
32101 ARM_ARCH_OPT2 ("armv8.7-a", ARM_ARCH_V8_7A, FPU_ARCH_VFP, armv87a),
32102 ARM_ARCH_OPT2 ("armv8.8-a", ARM_ARCH_V8_8A, FPU_ARCH_VFP, armv88a),
32103 ARM_ARCH_OPT2 ("armv9-a", ARM_ARCH_V9A, FPU_ARCH_VFP, armv9a),
32104 ARM_ARCH_OPT2 ("armv9.1-a", ARM_ARCH_V9_1A, FPU_ARCH_VFP, armv91a),
32105 ARM_ARCH_OPT2 ("armv9.2-a", ARM_ARCH_V9_2A, FPU_ARCH_VFP, armv92a),
32106 ARM_ARCH_OPT2 ("armv9.3-a", ARM_ARCH_V9_2A, FPU_ARCH_VFP, armv93a),
32107 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
32108 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
32109 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2, FPU_ARCH_VFP),
32110 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
32111 };
32112 #undef ARM_ARCH_OPT
32113
/* ISA extensions in the co-processor and main instruction set space.  */

struct arm_option_extension_value_table
{
  const char * name;		/* Extension name, e.g. "crypto".  */
  size_t name_len;		/* strlen (name), precomputed.  */
  const arm_feature_set merge_value;	/* Bits set by "+<name>".  */
  const arm_feature_set clear_value;	/* Bits cleared by "+no<name>".  */
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set allowed_archs[2];
};
32127
/* The following table must be in alphabetical order with a NULL last entry.
   arm_parse_extension relies on this ordering to diagnose out-of-order
   extension lists on the command line.  */

#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }

/* DEPRECATED: Refrain from using this table to add any new extensions, instead
   use the context sensitive approach using arm_ext_table's.  */
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc",	 ARM_FEATURE_CORE_HIGH(ARM_EXT2_CRC),
			 ARM_FEATURE_CORE_HIGH(ARM_EXT2_CRC),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
			  ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
			  ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("dsp",	ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp",	 FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("fp16fml",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_ARCH_V8_2A),
  ARM_EXT_OPT2 ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
     Thumb divide instruction.  Due to this having the same name as the
     previous entry, this will be ignored when doing command-line parsing and
     only considered by build attribute selection code.  */
  ARM_EXT_OPT ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			ARM_ARCH_V8A),
  ARM_EXT_OPT ("ras",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8_1,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("sb",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_ARCH_V8A),
  ARM_EXT_OPT2 ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd",  FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
#undef ARM_EXT_OPT
32214
/* ISA floating-point and Advanced SIMD extensions.  */
struct arm_option_fpu_value_table
{
  const char * name;		/* -mfpu= argument, e.g. "vfpv3-d16".  */
  const arm_feature_set value;	/* Corresponding FPU feature bits.  */
};
32221
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Matched by exact string comparison in
   arm_parse_fpu; several entries are undocumented legacy aliases.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}
};
32272
/* Generic name -> integer value mapping used for the float-ABI and EABI
   command-line option tables below.  */
struct arm_option_value_table
{
  const char *name;	/* Option argument string.  */
  long value;		/* Value stored when NAME matches.  */
};
32278
/* Valid arguments to -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
32286
#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
32297
/* Entry in the table of multi-character ("long") target options; each
   option carries its own sub-option parser (see arm_long_opts below).  */
struct arm_long_option_table
{
  const char *option;			/* Substring to match.  */
  const char *help;			/* Help information.  */
  bool (*func) (const char *subopt);	/* Function to decode sub-option.  */
  const char *deprecated;		/* If non-null, print this message.  */
};
32305
/* Parse a "+ext1+noext2..." architectural-extension suffix STR against the
   base architecture feature set *OPT_SET, updating *EXT_SET with the bits
   each extension adds or removes.  If EXT_TABLE is non-NULL it is the
   architecture's context-sensitive extension table and is consulted first;
   otherwise (or on a miss) the legacy global ARM_EXTENSIONS table is used.
   Returns true on success, false (after reporting via as_bad) on error.  */

static bool
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
		     arm_feature_set *ext_set,
		     const struct arm_ext_table *ext_table)
{
  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      /* Each extension must be introduced by '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return false;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN is the length of the current extension name; EXT points at the
	 next "+..." component, or NULL if this is the last one.  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      if (len >= 2 && startswith (str, "no"))
	{
	  /* "no<ext>" switches (permanently) into removal mode.  */
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      /* An addition after a removal violates the required order.  */
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return false;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return false;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Prefer the architecture's own (context-sensitive) extension table
	 when one was supplied.  */
      if (ext_table != NULL)
	{
	  const struct arm_ext_table * ext_opt = ext_table;
	  bool found = false;
	  for (; ext_opt->name != NULL; ext_opt++)
	    if (ext_opt->name_len == len
		&& strncmp (ext_opt->name, str, len) == 0)
	      {
		if (adding_value)
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->merge))
		      /* TODO: Option not supported.  When we remove the
			 legacy table this case should error out.  */
		      continue;

		    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, ext_opt->merge);
		  }
		else
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->clear))
		      /* TODO: Option not supported.  When we remove the
			 legacy table this case should error out.  */
		      continue;
		    ARM_CLEAR_FEATURE (*ext_set, *ext_set, ext_opt->clear);
		  }
		found = true;
		break;
	      }
	  if (found)
	    {
	      str = ext;
	      continue;
	    }
	}

      /* Scan over the options table trying to find an exact match.  */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return false;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    /* Allowing Thumb division instructions for ARMv7 in autodetection
	       rely on this break so that duplicate extensions (extensions
	       with the same name as a previous extension in the list) are not
	       considered for command-line parsing.  */
	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return false;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return true;
}
32473
32474 static bool
32475 arm_parse_fp16_opt (const char *str)
32476 {
32477 if (strcasecmp (str, "ieee") == 0)
32478 fp16_format = ARM_FP16_FORMAT_IEEE;
32479 else if (strcasecmp (str, "alternative") == 0)
32480 fp16_format = ARM_FP16_FORMAT_ALTERNATIVE;
32481 else
32482 {
32483 as_bad (_("unrecognised float16 format \"%s\""), str);
32484 return false;
32485 }
32486
32487 return true;
32488 }
32489
/* Handle the -mcpu= option.  STR is "<cpu-name>[+ext...]".  Look the name
   up in ARM_CPUS, record the selected CPU/FPU/extension feature sets in the
   mcpu_*_opt globals and fill in SELECTED_CPU_NAME (canonical spelling if
   available, otherwise the upper-cased, possibly truncated user string).
   Any "+ext" suffix is handed to arm_parse_extension.  Returns true on
   success, false (after reporting via as_bad) otherwise.  */

static bool
arm_parse_cpu (const char *str)
{
  const struct arm_cpu_option_table *opt;
  const char *ext = strchr (str, '+');
  size_t len;

  /* Only the part before the first '+' is the CPU name.  */
  if (ext != NULL)
    len = ext - str;
  else
    len = strlen (str);

  if (len == 0)
    {
      as_bad (_("missing cpu name `%s'"), str);
      return false;
    }

  for (opt = arm_cpus; opt->name != NULL; opt++)
    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	/* Lazily allocate the extension set the first time a CPU is
	   selected; it is overwritten on subsequent -mcpu= options.  */
	if (mcpu_ext_opt == NULL)
	  mcpu_ext_opt = XNEW (arm_feature_set);
	*mcpu_ext_opt = opt->ext;
	mcpu_fpu_opt = &opt->default_fpu;
	if (opt->canonical_name)
	  {
	    gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
	    strcpy (selected_cpu_name, opt->canonical_name);
	  }
	else
	  {
	    size_t i;

	    /* No canonical spelling: upper-case the user's string,
	       truncating to fit the fixed-size buffer.  */
	    if (len >= sizeof selected_cpu_name)
	      len = (sizeof selected_cpu_name) - 1;

	    for (i = 0; i < len; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }

	if (ext != NULL)
	  return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt, NULL);

	return true;
      }

  as_bad (_("unknown cpu `%s'"), str);
  return false;
}
32542
/* Handle the -march= option.  STR is "<arch-name>[+ext...]".  Look the name
   up in ARM_ARCHS, record the architecture/FPU feature sets in the
   march_*_opt globals, select the architecture's context-sensitive
   extension table, and pass any "+ext" suffix to arm_parse_extension.
   Returns true on success, false (after reporting via as_bad) otherwise.  */

static bool
arm_parse_arch (const char *str)
{
  const struct arm_arch_option_table *opt;
  const char *ext = strchr (str, '+');
  size_t len;

  /* Only the part before the first '+' is the architecture name.  */
  if (ext != NULL)
    len = ext - str;
  else
    len = strlen (str);

  if (len == 0)
    {
      as_bad (_("missing architecture name `%s'"), str);
      return false;
    }

  for (opt = arm_archs; opt->name != NULL; opt++)
    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
      {
	march_cpu_opt = &opt->value;
	/* Lazily allocate the extension set; start from no extensions.  */
	if (march_ext_opt == NULL)
	  march_ext_opt = XNEW (arm_feature_set);
	*march_ext_opt = arm_arch_none;
	march_fpu_opt = &opt->default_fpu;
	selected_ctx_ext_table = opt->ext_table;
	strcpy (selected_cpu_name, opt->name);

	if (ext != NULL)
	  return arm_parse_extension (ext, march_cpu_opt, march_ext_opt,
				      opt->ext_table);

	return true;
      }

  as_bad (_("unknown architecture `%s'\n"), str);
  return false;
}
32582
32583 static bool
32584 arm_parse_fpu (const char * str)
32585 {
32586 const struct arm_option_fpu_value_table * opt;
32587
32588 for (opt = arm_fpus; opt->name != NULL; opt++)
32589 if (streq (opt->name, str))
32590 {
32591 mfpu_opt = &opt->value;
32592 return true;
32593 }
32594
32595 as_bad (_("unknown floating point format `%s'\n"), str);
32596 return false;
32597 }
32598
32599 static bool
32600 arm_parse_float_abi (const char * str)
32601 {
32602 const struct arm_option_value_table * opt;
32603
32604 for (opt = arm_float_abis; opt->name != NULL; opt++)
32605 if (streq (opt->name, str))
32606 {
32607 mfloat_abi_opt = opt->value;
32608 return true;
32609 }
32610
32611 as_bad (_("unknown floating point abi `%s'\n"), str);
32612 return false;
32613 }
32614
#ifdef OBJ_ELF
/* Handle the -meabi= option.  STR must exactly match one of the names in
   ARM_EABIS; on success MEABI_FLAGS records the selected EABI version.
   Returns true on success, false (after issuing a diagnostic) otherwise.  */

static bool
arm_parse_eabi (const char * str)
{
  const struct arm_option_value_table *entry;

  for (entry = arm_eabis; entry->name != NULL; entry++)
    {
      if (streq (entry->name, str))
	{
	  meabi_flags = entry->value;
	  return true;
	}
    }

  as_bad (_("unknown EABI `%s'\n"), str);
  return false;
}
#endif
32631
32632 static bool
32633 arm_parse_it_mode (const char * str)
32634 {
32635 bool ret = true;
32636
32637 if (streq ("arm", str))
32638 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
32639 else if (streq ("thumb", str))
32640 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
32641 else if (streq ("always", str))
32642 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
32643 else if (streq ("never", str))
32644 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
32645 else
32646 {
32647 as_bad (_("unknown implicit IT mode `%s', should be "\
32648 "arm, thumb, always, or never."), str);
32649 ret = false;
32650 }
32651
32652 return ret;
32653 }
32654
32655 static bool
32656 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
32657 {
32658 codecomposer_syntax = true;
32659 arm_comment_chars[0] = ';';
32660 arm_line_separator_chars[0] = 0;
32661 return true;
32662 }
32663
/* Table of multi-character target options, matched by prefix in
   md_parse_option; the remainder of the argument is handed to the
   per-option FUNC.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {"mfp16-format=",
   N_("[ieee|alternative]\n\
                          set the encoding for half precision floating point "
			  "numbers to IEEE\n\
                          or Arm alternative format."),
   arm_parse_fp16_opt, NULL },
  {NULL, NULL, 0, NULL}
};
32690
/* GAS hook: process target-specific command-line option C with argument
   ARG.  Tries, in order: the hard-wired cases below, the single-character
   ARM_OPTS table, the legacy ARM_LEGACY_OPTS table, and finally the
   prefix-matched ARM_LONG_OPTS table.  Returns 1 if the option was
   consumed, 0 otherwise.  */

int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = true;
      break;

#ifdef OBJ_ELF
    case OPTION_FDPIC:
      arm_fdpic = true;
      break;
#endif /* OBJ_ELF */

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Single-character options with an optional inline argument,
	 e.g. "-k" or "-mthumb" style entries in arm_opts.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options store a pointer to their value rather than the
	 value itself.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
32787
/* GAS hook: print the ARM-specific command-line option summary to FP,
   driven by the ARM_OPTS and ARM_LONG_OPTS tables plus the hard-wired
   endianness and fix-up options.  */

void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));

#ifdef OBJ_ELF
  fprintf (fp, _("\
  --fdpic                 generate an FDPIC object file\n"));
#endif /* OBJ_ELF */
}
32822
32823 #ifdef OBJ_ELF
32824
/* One row of the feature-set -> EABI Tag_CPU_arch mapping table below.  */
typedef struct
{
  int val;			/* TAG_CPU_ARCH_* value, -1 terminates.  */
  arm_feature_set flags;	/* Architecture feature set it matches.  */
} cpu_arch_ver_table;
32830
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   chronologically for architectures, with an exception for ARMv6-M and
   ARMv6S-M due to legacy reasons.  No new architecture should have a
   special case.  This allows for build attribute selection results to be
   stable when new architectures are added.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {TAG_CPU_ARCH_PRE_V4,   ARM_ARCH_V1},
    {TAG_CPU_ARCH_PRE_V4,   ARM_ARCH_V2},
    {TAG_CPU_ARCH_PRE_V4,   ARM_ARCH_V2S},
    {TAG_CPU_ARCH_PRE_V4,   ARM_ARCH_V3},
    {TAG_CPU_ARCH_PRE_V4,   ARM_ARCH_V3M},
    {TAG_CPU_ARCH_V4,	    ARM_ARCH_V4xM},
    {TAG_CPU_ARCH_V4,	    ARM_ARCH_V4},
    {TAG_CPU_ARCH_V4T,	    ARM_ARCH_V4TxM},
    {TAG_CPU_ARCH_V4T,	    ARM_ARCH_V4T},
    {TAG_CPU_ARCH_V5T,	    ARM_ARCH_V5xM},
    {TAG_CPU_ARCH_V5T,	    ARM_ARCH_V5},
    {TAG_CPU_ARCH_V5T,	    ARM_ARCH_V5TxM},
    {TAG_CPU_ARCH_V5T,	    ARM_ARCH_V5T},
    {TAG_CPU_ARCH_V5TE,	    ARM_ARCH_V5TExP},
    {TAG_CPU_ARCH_V5TE,	    ARM_ARCH_V5TE},
    {TAG_CPU_ARCH_V5TEJ,    ARM_ARCH_V5TEJ},
    {TAG_CPU_ARCH_V6,	    ARM_ARCH_V6},
    {TAG_CPU_ARCH_V6KZ,	    ARM_ARCH_V6Z},
    {TAG_CPU_ARCH_V6KZ,	    ARM_ARCH_V6KZ},
    {TAG_CPU_ARCH_V6K,	    ARM_ARCH_V6K},
    {TAG_CPU_ARCH_V6T2,	    ARM_ARCH_V6T2},
    {TAG_CPU_ARCH_V6T2,	    ARM_ARCH_V6KT2},
    {TAG_CPU_ARCH_V6T2,	    ARM_ARCH_V6ZT2},
    {TAG_CPU_ARCH_V6T2,	    ARM_ARCH_V6KZT2},

    /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
       always selected build attributes to match those of ARMv6-M
       (resp. ARMv6S-M).  However, due to these architectures being a strict
       subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
       would be selected when fully respecting chronology of architectures.
       It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
       move them before ARMv7 architectures.  */
    {TAG_CPU_ARCH_V6_M,	    ARM_ARCH_V6M},
    {TAG_CPU_ARCH_V6S_M,    ARM_ARCH_V6SM},

    {TAG_CPU_ARCH_V7,	    ARM_ARCH_V7},
    {TAG_CPU_ARCH_V7,	    ARM_ARCH_V7A},
    {TAG_CPU_ARCH_V7,	    ARM_ARCH_V7R},
    {TAG_CPU_ARCH_V7,	    ARM_ARCH_V7M},
    {TAG_CPU_ARCH_V7,	    ARM_ARCH_V7VE},
    {TAG_CPU_ARCH_V7E_M,    ARM_ARCH_V7EM},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8A},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_1A},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_2A},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_3A},
    {TAG_CPU_ARCH_V8M_BASE, ARM_ARCH_V8M_BASE},
    {TAG_CPU_ARCH_V8M_MAIN, ARM_ARCH_V8M_MAIN},
    {TAG_CPU_ARCH_V8R,	    ARM_ARCH_V8R},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_4A},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_5A},
    {TAG_CPU_ARCH_V8_1M_MAIN, ARM_ARCH_V8_1M_MAIN},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_6A},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_7A},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_8A},
    {TAG_CPU_ARCH_V9,	    ARM_ARCH_V9A},
    {TAG_CPU_ARCH_V9,	    ARM_ARCH_V9_1A},
    {TAG_CPU_ARCH_V9,	    ARM_ARCH_V9_2A},
    {TAG_CPU_ARCH_V9,	    ARM_ARCH_V9_3A},
    {-1,		    ARM_ARCH_NONE}
};
32898
32899 /* Set an attribute if it has not already been set by the user. */
32900
32901 static void
32902 aeabi_set_attribute_int (int tag, int value)
32903 {
32904 if (tag < 1
32905 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
32906 || !attributes_set_explicitly[tag])
32907 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
32908 }
32909
32910 static void
32911 aeabi_set_attribute_string (int tag, const char *value)
32912 {
32913 if (tag < 1
32914 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
32915 || !attributes_set_explicitly[tag])
32916 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
32917 }
32918
/* Return whether features in the *NEEDED feature set are available via
   extensions for the architecture whose feature set is *ARCH_FSET.  */

static bool
have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
			    const arm_feature_set *needed)
{
  int i, nb_allowed_archs;
  arm_feature_set ext_fset;
  const struct arm_option_extension_value_table *opt;

  /* Accumulate in EXT_FSET the union of the features contributed by
     every extension that (a) provides at least one feature in *NEEDED
     and (b) is allowed on the architecture *ARCH_FSET.  */
  ext_fset = arm_arch_none;
  for (opt = arm_extensions; opt->name != NULL; opt++)
    {
      /* Extension does not provide any feature we need.  */
      if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
	continue;

      nb_allowed_archs =
	sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
      for (i = 0; i < nb_allowed_archs; i++)
	{
	  /* Empty entry: arm_arch_any marks the end of the valid
	     allowed_archs entries for this extension.  */
	  if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
	    break;

	  /* Extension is available, add it.  */
	  if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
	    ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
	}
    }

  /* Can we enable all features in *needed?  */
  return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
}
32954
/* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
   a given architecture feature set *ARCH_EXT_FSET including extension feature
   set *EXT_FSET.  Selection logic used depend on EXACT_MATCH:
   - if true, check for an exact match of the architecture modulo extensions;
   - otherwise, select build attribute value of the first superset
     architecture released so that results remains stable when new
     architectures are added.
   For -march/-mcpu=all the build attribute value of the most featureful
   architecture is returned.  Tag_CPU_arch_profile result is returned in
   PROFILE.  Returns -1 when no architecture in cpu_arch_ver matches.  */

static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V9);
      *profile = 'A';
      return TAG_CPU_ARCH_V9;
    }

  /* ARCH_FSET is the architecture feature set with extensions removed.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      /* Compare against the table entry with FPU bits stripped out.  */
      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

 found:
  /* Tag_CPU_arch_profile.  */
  if (!ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8r)
      && (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
	  || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
	  || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
	      && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only))))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r)
	   || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
33068
/* Set the public EABI object attributes (Tag_CPU_arch, Tag_VFP_arch, ...)
   on the output BFD.  Called both at the end of assembly and after
   relaxation; attributes explicitly set by the user are left alone by
   the aeabi_set_attribute_* helpers.  */

static void
aeabi_set_public_attributes (void)
{
  char profile = '\0';
  int arch = -1;
  int virt_sec = 0;
  int fp16_optional = 0;
  int skip_exact_match = 0;
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based on the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = arm_arch_none;
      ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
      selected_ext = flags_ext;
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    {
      ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
      ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
      flags_ext = selected_ext;
      flags = selected_cpu;
    }
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);

  /* Allow the user to override the reported architecture.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch))
    {
      ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* "armvN" names are reported upper-cased with the prefix dropped,
	 eg. "armv5te" -> "5TE".  */
      if (startswith (q, "armv"))
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      /* 3: M-profile-only (no ARMv8-A); 2: Thumb-2; 1: Thumb-1.  */
      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  Branches are ordered from newest to oldest FPU
     feature; d32 distinguishes the 32-D-register variants.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  Set only for single-precision-only VFP.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_MVE_arch: 2 when MVE includes FP, 1 for integer-only MVE.  */
  if (ARM_CPU_HAS_FEATURE (flags, mve_fp_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, mve_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 1);

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V9);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  Bit 0: TrustZone; bit 1: virtualization
     extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);

  /* Tag_ABI_FP_16bit_format, only when a format was chosen explicitly.  */
  if (fp16_format != ARM_FP16_FORMAT_DEFAULT)
    aeabi_set_attribute_int (Tag_ABI_FP_16bit_format, fp16_format);
}
33282
33283 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
33284 finished and free extension feature bits which will not be used anymore. */
33285
33286 void
33287 arm_md_post_relax (void)
33288 {
33289 aeabi_set_public_attributes ();
33290 XDELETE (mcpu_ext_opt);
33291 mcpu_ext_opt = NULL;
33292 XDELETE (march_ext_opt);
33293 march_ext_opt = NULL;
33294 }
33295
33296 /* Add the default contents for the .ARM.attributes section. */
33297
33298 void
33299 arm_md_end (void)
33300 {
33301 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
33302 return;
33303
33304 aeabi_set_public_attributes ();
33305 }
33306 #endif /* OBJ_ELF */
33307
33308 /* Parse a .cpu directive. */
33309
33310 static void
33311 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
33312 {
33313 const struct arm_cpu_option_table *opt;
33314 char *name;
33315 char saved_char;
33316
33317 name = input_line_pointer;
33318 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33319 input_line_pointer++;
33320 saved_char = *input_line_pointer;
33321 *input_line_pointer = 0;
33322
33323 /* Skip the first "all" entry. */
33324 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
33325 if (streq (opt->name, name))
33326 {
33327 selected_arch = opt->value;
33328 selected_ext = opt->ext;
33329 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
33330 if (opt->canonical_name)
33331 strcpy (selected_cpu_name, opt->canonical_name);
33332 else
33333 {
33334 int i;
33335 for (i = 0; opt->name[i]; i++)
33336 selected_cpu_name[i] = TOUPPER (opt->name[i]);
33337
33338 selected_cpu_name[i] = 0;
33339 }
33340 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33341
33342 *input_line_pointer = saved_char;
33343 demand_empty_rest_of_line ();
33344 return;
33345 }
33346 as_bad (_("unknown cpu `%s'"), name);
33347 *input_line_pointer = saved_char;
33348 ignore_rest_of_line ();
33349 }
33350
33351 /* Parse a .arch directive. */
33352
33353 static void
33354 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
33355 {
33356 const struct arm_arch_option_table *opt;
33357 char saved_char;
33358 char *name;
33359
33360 name = input_line_pointer;
33361 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33362 input_line_pointer++;
33363 saved_char = *input_line_pointer;
33364 *input_line_pointer = 0;
33365
33366 /* Skip the first "all" entry. */
33367 for (opt = arm_archs + 1; opt->name != NULL; opt++)
33368 if (streq (opt->name, name))
33369 {
33370 selected_arch = opt->value;
33371 selected_ctx_ext_table = opt->ext_table;
33372 selected_ext = arm_arch_none;
33373 selected_cpu = selected_arch;
33374 strcpy (selected_cpu_name, opt->name);
33375 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33376 *input_line_pointer = saved_char;
33377 demand_empty_rest_of_line ();
33378 return;
33379 }
33380
33381 as_bad (_("unknown architecture `%s'\n"), name);
33382 *input_line_pointer = saved_char;
33383 ignore_rest_of_line ();
33384 }
33385
33386 /* Parse a .object_arch directive. */
33387
33388 static void
33389 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
33390 {
33391 const struct arm_arch_option_table *opt;
33392 char saved_char;
33393 char *name;
33394
33395 name = input_line_pointer;
33396 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33397 input_line_pointer++;
33398 saved_char = *input_line_pointer;
33399 *input_line_pointer = 0;
33400
33401 /* Skip the first "all" entry. */
33402 for (opt = arm_archs + 1; opt->name != NULL; opt++)
33403 if (streq (opt->name, name))
33404 {
33405 selected_object_arch = opt->value;
33406 *input_line_pointer = saved_char;
33407 demand_empty_rest_of_line ();
33408 return;
33409 }
33410
33411 as_bad (_("unknown architecture `%s'\n"), name);
33412 *input_line_pointer = saved_char;
33413 ignore_rest_of_line ();
33414 }
33415
33416 /* Parse a .arch_extension directive. */
33417
33418 static void
33419 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
33420 {
33421 const struct arm_option_extension_value_table *opt;
33422 char saved_char;
33423 char *name;
33424 int adding_value = 1;
33425
33426 name = input_line_pointer;
33427 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33428 input_line_pointer++;
33429 saved_char = *input_line_pointer;
33430 *input_line_pointer = 0;
33431
33432 if (strlen (name) >= 2
33433 && startswith (name, "no"))
33434 {
33435 adding_value = 0;
33436 name += 2;
33437 }
33438
33439 /* Check the context specific extension table */
33440 if (selected_ctx_ext_table)
33441 {
33442 const struct arm_ext_table * ext_opt;
33443 for (ext_opt = selected_ctx_ext_table; ext_opt->name != NULL; ext_opt++)
33444 {
33445 if (streq (ext_opt->name, name))
33446 {
33447 if (adding_value)
33448 {
33449 if (ARM_FEATURE_ZERO (ext_opt->merge))
33450 /* TODO: Option not supported. When we remove the
33451 legacy table this case should error out. */
33452 continue;
33453 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
33454 ext_opt->merge);
33455 }
33456 else
33457 ARM_CLEAR_FEATURE (selected_ext, selected_ext, ext_opt->clear);
33458
33459 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
33460 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33461 *input_line_pointer = saved_char;
33462 demand_empty_rest_of_line ();
33463 return;
33464 }
33465 }
33466 }
33467
33468 for (opt = arm_extensions; opt->name != NULL; opt++)
33469 if (streq (opt->name, name))
33470 {
33471 int i, nb_allowed_archs =
33472 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
33473 for (i = 0; i < nb_allowed_archs; i++)
33474 {
33475 /* Empty entry. */
33476 if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
33477 continue;
33478 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
33479 break;
33480 }
33481
33482 if (i == nb_allowed_archs)
33483 {
33484 as_bad (_("architectural extension `%s' is not allowed for the "
33485 "current base architecture"), name);
33486 break;
33487 }
33488
33489 if (adding_value)
33490 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
33491 opt->merge_value);
33492 else
33493 ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
33494
33495 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
33496 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33497 *input_line_pointer = saved_char;
33498 demand_empty_rest_of_line ();
33499 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
33500 on this return so that duplicate extensions (extensions with the
33501 same name as a previous extension in the list) are not considered
33502 for command-line parsing. */
33503 return;
33504 }
33505
33506 if (opt->name == NULL)
33507 as_bad (_("unknown architecture extension `%s'\n"), name);
33508
33509 *input_line_pointer = saved_char;
33510 ignore_rest_of_line ();
33511 }
33512
33513 /* Parse a .fpu directive. */
33514
33515 static void
33516 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
33517 {
33518 const struct arm_option_fpu_value_table *opt;
33519 char saved_char;
33520 char *name;
33521
33522 name = input_line_pointer;
33523 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33524 input_line_pointer++;
33525 saved_char = *input_line_pointer;
33526 *input_line_pointer = 0;
33527
33528 for (opt = arm_fpus; opt->name != NULL; opt++)
33529 if (streq (opt->name, name))
33530 {
33531 selected_fpu = opt->value;
33532 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, fpu_any);
33533 #ifndef CPU_DEFAULT
33534 if (no_cpu_selected ())
33535 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
33536 else
33537 #endif
33538 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33539 *input_line_pointer = saved_char;
33540 demand_empty_rest_of_line ();
33541 return;
33542 }
33543
33544 as_bad (_("unknown floating point format `%s'\n"), name);
33545 *input_line_pointer = saved_char;
33546 ignore_rest_of_line ();
33547 }
33548
/* Copy symbol information.  Propagates the ARM-specific symbol flag
   (Thumb/ARM function marking) from SRC to DEST; used when one symbol
   is defined in terms of another.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
33556
33557 #ifdef OBJ_ELF
33558 /* Given a symbolic attribute NAME, return the proper integer value.
33559 Returns -1 if the attribute is not known. */
33560
33561 int
33562 arm_convert_symbolic_attribute (const char *name)
33563 {
33564 static const struct
33565 {
33566 const char * name;
33567 const int tag;
33568 }
33569 attribute_table[] =
33570 {
33571 /* When you modify this table you should
33572 also modify the list in doc/c-arm.texi. */
33573 #define T(tag) {#tag, tag}
33574 T (Tag_CPU_raw_name),
33575 T (Tag_CPU_name),
33576 T (Tag_CPU_arch),
33577 T (Tag_CPU_arch_profile),
33578 T (Tag_ARM_ISA_use),
33579 T (Tag_THUMB_ISA_use),
33580 T (Tag_FP_arch),
33581 T (Tag_VFP_arch),
33582 T (Tag_WMMX_arch),
33583 T (Tag_Advanced_SIMD_arch),
33584 T (Tag_PCS_config),
33585 T (Tag_ABI_PCS_R9_use),
33586 T (Tag_ABI_PCS_RW_data),
33587 T (Tag_ABI_PCS_RO_data),
33588 T (Tag_ABI_PCS_GOT_use),
33589 T (Tag_ABI_PCS_wchar_t),
33590 T (Tag_ABI_FP_rounding),
33591 T (Tag_ABI_FP_denormal),
33592 T (Tag_ABI_FP_exceptions),
33593 T (Tag_ABI_FP_user_exceptions),
33594 T (Tag_ABI_FP_number_model),
33595 T (Tag_ABI_align_needed),
33596 T (Tag_ABI_align8_needed),
33597 T (Tag_ABI_align_preserved),
33598 T (Tag_ABI_align8_preserved),
33599 T (Tag_ABI_enum_size),
33600 T (Tag_ABI_HardFP_use),
33601 T (Tag_ABI_VFP_args),
33602 T (Tag_ABI_WMMX_args),
33603 T (Tag_ABI_optimization_goals),
33604 T (Tag_ABI_FP_optimization_goals),
33605 T (Tag_compatibility),
33606 T (Tag_CPU_unaligned_access),
33607 T (Tag_FP_HP_extension),
33608 T (Tag_VFP_HP_extension),
33609 T (Tag_ABI_FP_16bit_format),
33610 T (Tag_MPextension_use),
33611 T (Tag_DIV_use),
33612 T (Tag_nodefaults),
33613 T (Tag_also_compatible_with),
33614 T (Tag_conformance),
33615 T (Tag_T2EE_use),
33616 T (Tag_Virtualization_use),
33617 T (Tag_DSP_extension),
33618 T (Tag_MVE_arch),
33619 T (Tag_PAC_extension),
33620 T (Tag_BTI_extension),
33621 T (Tag_BTI_use),
33622 T (Tag_PACRET_use),
33623 /* We deliberately do not include Tag_MPextension_use_legacy. */
33624 #undef T
33625 };
33626 unsigned int i;
33627
33628 if (name == NULL)
33629 return -1;
33630
33631 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
33632 if (streq (name, attribute_table[i].name))
33633 return attribute_table[i].tag;
33634
33635 return -1;
33636 }
33637
33638 /* Apply sym value for relocations only in the case that they are for
33639 local symbols in the same segment as the fixup and you have the
33640 respective architectural feature for blx and simple switches. */
33641
33642 int
33643 arm_apply_sym_value (struct fix * fixP, segT this_seg)
33644 {
33645 if (fixP->fx_addsy
33646 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
33647 /* PR 17444: If the local symbol is in a different section then a reloc
33648 will always be generated for it, so applying the symbol value now
33649 will result in a double offset being stored in the relocation. */
33650 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
33651 && !S_FORCE_RELOC (fixP->fx_addsy, true))
33652 {
33653 switch (fixP->fx_r_type)
33654 {
33655 case BFD_RELOC_ARM_PCREL_BLX:
33656 case BFD_RELOC_THUMB_PCREL_BRANCH23:
33657 if (ARM_IS_FUNC (fixP->fx_addsy))
33658 return 1;
33659 break;
33660
33661 case BFD_RELOC_ARM_PCREL_CALL:
33662 case BFD_RELOC_THUMB_PCREL_BLX:
33663 if (THUMB_IS_FUNC (fixP->fx_addsy))
33664 return 1;
33665 break;
33666
33667 default:
33668 break;
33669 }
33670
33671 }
33672 return 0;
33673 }
33674 #endif /* OBJ_ELF */